Columns:
  python_code: string, lengths 0 to 1.8M
  repo_name: string, 7 classes
  file_path: string, lengths 5 to 99
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/irq.c * * Copyright (C) 1992 Linus Torvalds * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. * * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. * Dynamic Tick Timer written by Tony Lindgren <[email protected]> and * Tuukka Tikkanen <[email protected]>. * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * * IRQ's are in fact implemented a bit like signal handlers for the kernel. * Naturally it's not a 1:1 relation, but there are similarities. */ #include <linux/signal.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/kallsyms.h> #include <linux/proc_fs.h> #include <linux/export.h> #include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-uniphier.h> #include <asm/outercache.h> #include <asm/softirq_stack.h> #include <asm/exception.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include "reboot.h" unsigned long irq_err_count; #ifdef CONFIG_IRQSTACKS asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr); static void __init init_irq_stacks(void) { u8 *stack; int cpu; for_each_possible_cpu(cpu) { if (!IS_ENABLED(CONFIG_VMAP_STACK)) stack = (u8 *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); else stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP, NUMA_NO_NODE, __builtin_return_address(0)); if (WARN_ON(!stack)) break; per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE]; } } #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK static void ____do_softirq(void *arg) { __do_softirq(); } void do_softirq_own_stack(void) { call_with_stack(____do_softirq, NULL, __this_cpu_read(irq_stack_ptr)); } #endif #endif int arch_show_interrupts(struct seq_file *p, int prec) { #ifdef CONFIG_FIQ show_fiq_list(p, prec); #endif #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); return 0; } /* * handle_IRQ handles all hardware IRQ's. Decoded IRQs should * not come via this function. Instead, they should provide their * own 'handler'. Used by platform code implementing C-based 1st * level decoding. */ void handle_IRQ(unsigned int irq, struct pt_regs *regs) { struct irq_desc *desc; /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. 
*/ if (unlikely(!irq || irq >= nr_irqs)) desc = NULL; else desc = irq_to_desc(irq); if (likely(desc)) handle_irq_desc(desc); else ack_bad_irq(irq); } void __init init_IRQ(void) { int ret; #ifdef CONFIG_IRQSTACKS init_irq_stacks(); #endif if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq) irqchip_init(); else machine_desc->init_irq(); if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) && (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) { if (!outer_cache.write_sec) outer_cache.write_sec = machine_desc->l2c_write_sec; ret = l2x0_of_init(machine_desc->l2c_aux_val, machine_desc->l2c_aux_mask); if (ret && ret != -ENODEV) pr_err("L2C: failed to init: %d\n", ret); } uniphier_cache_init(); } #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS; return nr_irqs; } #endif
linux-master
arch/arm/kernel/irq.c
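handle_IRQ() in arch/arm/kernel/irq.c above is meant to be called from platform code that does C-based first-level decoding. As a rough sketch only, a hypothetical first-level handler might look like the following; intc_base, INTC_PENDING and my_domain are invented for illustration and would come from the interrupt controller's probe code in practice.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>
#include <asm/irq.h>

#define INTC_PENDING            0x10            /* assumed register offset */

static void __iomem *intc_base;                 /* assumed: ioremap()ed at probe */
static struct irq_domain *my_domain;            /* assumed: created at probe */

/* First-level decoder: translate each pending cause bit into a Linux IRQ
 * number and hand it to handle_IRQ(), which looks up and runs the irq_desc. */
static void __exception_irq_entry my_soc_handle_irq(struct pt_regs *regs)
{
        u32 pending = readl_relaxed(intc_base + INTC_PENDING);

        while (pending) {
                unsigned int hwirq = __ffs(pending);

                handle_IRQ(irq_find_mapping(my_domain, hwirq), regs);
                pending &= pending - 1;         /* clear the bit just serviced */
        }
}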
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(__io_lock);

/*
 * Generic atomic MMIO modify.
 *
 * Allows thread-safe access to registers shared by unrelated subsystems.
 * The access is protected by a single MMIO-wide lock.
 */
void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
{
        unsigned long flags;
        u32 value;

        raw_spin_lock_irqsave(&__io_lock, flags);
        value = readl_relaxed(reg) & ~mask;
        value |= (set & mask);
        writel_relaxed(value, reg);
        raw_spin_unlock_irqrestore(&__io_lock, flags);
}
EXPORT_SYMBOL(atomic_io_modify_relaxed);

void atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
{
        unsigned long flags;
        u32 value;

        raw_spin_lock_irqsave(&__io_lock, flags);
        value = readl_relaxed(reg) & ~mask;
        value |= (set & mask);
        writel(value, reg);
        raw_spin_unlock_irqrestore(&__io_lock, flags);
}
EXPORT_SYMBOL(atomic_io_modify);

/*
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
        unsigned char *t = to;

        while (count) {
                count--;
                *t = readb(from);
                t++;
                from++;
        }
}
EXPORT_SYMBOL(_memcpy_fromio);

/*
 * Copy data from "real" memory space to IO memory space.
 * This needs to be optimized.
 */
void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
        const unsigned char *f = from;

        while (count) {
                count--;
                writeb(*f, to);
                f++;
                to++;
        }
}
EXPORT_SYMBOL(_memcpy_toio);

/*
 * "memset" on IO memory space.
 * This needs to be optimized.
 */
void _memset_io(volatile void __iomem *dst, int c, size_t count)
{
        while (count) {
                count--;
                writeb(c, dst);
                dst++;
        }
}
EXPORT_SYMBOL(_memset_io);
linux-master
arch/arm/kernel/io.c
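The atomic_io_modify() / atomic_io_modify_relaxed() helpers in arch/arm/kernel/io.c above give unrelated drivers a safe read-modify-write on a register they share. A minimal usage sketch, with an assumed register layout and bit names:

#include <linux/bits.h>
#include <linux/io.h>

/* Hypothetical control register shared between two otherwise unrelated
 * drivers: one owns bits 0-15, the other bits 16-31.  Names and layout
 * are assumptions for illustration only. */
#define SHARED_CTRL_CLKEN       BIT(3)
#define SHARED_CTRL_RESET       BIT(17)

static void enable_block_clock(void __iomem *shared_ctrl)
{
        /* set only our bit, under the single global __io_lock */
        atomic_io_modify(shared_ctrl, SHARED_CTRL_CLKEN, SHARED_CTRL_CLKEN);
}

static void release_block_reset(void __iomem *shared_ctrl)
{
        /* clear the reset bit: the mask selects it, the set value leaves it 0 */
        atomic_io_modify_relaxed(shared_ctrl, SHARED_CTRL_RESET, 0);
}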
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/opcodes.h>

static unsigned long __arm_gen_branch_thumb2(unsigned long pc,
                                             unsigned long addr,
                                             bool link, bool warn)
{
        unsigned long s, j1, j2, i1, i2, imm10, imm11;
        unsigned long first, second;
        long offset;

        offset = (long)addr - (long)(pc + 4);
        if (offset < -16777216 || offset > 16777214) {
                WARN_ON_ONCE(warn);
                return 0;
        }

        s = (offset >> 24) & 0x1;
        i1 = (offset >> 23) & 0x1;
        i2 = (offset >> 22) & 0x1;
        imm10 = (offset >> 12) & 0x3ff;
        imm11 = (offset >> 1) & 0x7ff;

        j1 = (!i1) ^ s;
        j2 = (!i2) ^ s;

        first = 0xf000 | (s << 10) | imm10;
        second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;

        if (link)
                second |= 1 << 14;

        return __opcode_thumb32_compose(first, second);
}

static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr,
                                          bool link, bool warn)
{
        unsigned long opcode = 0xea000000;
        long offset;

        if (link)
                opcode |= 1 << 24;

        offset = (long)addr - (long)(pc + 8);
        if (unlikely(offset < -33554432 || offset > 33554428)) {
                WARN_ON_ONCE(warn);
                return 0;
        }

        offset = (offset >> 2) & 0x00ffffff;

        return opcode | offset;
}

unsigned long
__arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn)
{
        if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
                return __arm_gen_branch_thumb2(pc, addr, link, warn);
        else
                return __arm_gen_branch_arm(pc, addr, link, warn);
}
linux-master
arch/arm/kernel/insn.c
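To make the offset arithmetic in __arm_gen_branch_arm() above easier to follow, here is a self-contained user-space re-derivation of the A32 B/BL encoding (an illustrative sketch, not kernel code): the branch target is taken relative to PC+8, shifted right by two, and packed into the low 24 bits of the opcode.

#include <stdio.h>
#include <stdint.h>

static uint32_t arm_gen_branch(uint32_t pc, uint32_t addr, int link)
{
        int32_t offset = (int32_t)(addr - (pc + 8));
        uint32_t opcode = link ? 0xeb000000u : 0xea000000u;     /* BL : B */

        if (offset < -33554432 || offset > 33554428)
                return 0;       /* outside +/-32 MiB, as in the kernel helper */

        return opcode | (((uint32_t)offset >> 2) & 0x00ffffff);
}

int main(void)
{
        /* BL from 0x8000 to 0x10000: offset = 0x10000 - 0x8008 = 0x7ff8,
         * 0x7ff8 >> 2 = 0x1ffe, so this prints 0xeb001ffe. */
        printf("0x%08x\n", arm_gen_branch(0x8000, 0x10000, 1));
        return 0;
}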
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/ptrace.c * * By Ross Biro 1/23/92 * edited by Linus Torvalds * ARM modifications Copyright (C) 2000 Russell King */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/elf.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/regset.h> #include <linux/audit.h> #include <linux/unistd.h> #include <asm/syscall.h> #include <asm/traps.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> #define REG_PC 15 #define REG_PSR 16 /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ #if 0 /* * Breakpoint SWI instruction: SWI &9F0001 */ #define BREAKINST_ARM 0xef9f0001 #define BREAKINST_THUMB 0xdf00 /* fill this in later */ #else /* * New breakpoints - use an undefined instruction. The ARM architecture * reference manual guarantees that the following instruction space * will produce an undefined instruction exception on all CPUs: * * ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx * Thumb: 1101 1110 xxxx xxxx */ #define BREAKINST_ARM 0xe7f001f0 #define BREAKINST_THUMB 0xde01 #endif struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) \ {.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(r0), REG_OFFSET_NAME(r1), REG_OFFSET_NAME(r2), REG_OFFSET_NAME(r3), REG_OFFSET_NAME(r4), REG_OFFSET_NAME(r5), REG_OFFSET_NAME(r6), REG_OFFSET_NAME(r7), REG_OFFSET_NAME(r8), REG_OFFSET_NAME(r9), REG_OFFSET_NAME(r10), REG_OFFSET_NAME(fp), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(lr), REG_OFFSET_NAME(pc), REG_OFFSET_NAME(cpsr), REG_OFFSET_NAME(ORIG_r0), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } /** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false. 
*/ bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return ((addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); } /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. */ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) return *addr; else return 0; } /* * this routine will get a word off of the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline long get_user_reg(struct task_struct *task, int offset) { return task_pt_regs(task)->uregs[offset]; } /* * this routine will put a word on the processes privileged stack. * the offset is how far from the base addr as stored in the THREAD. * this routine assumes that all the privileged stacks are in our * data space. */ static inline int put_user_reg(struct task_struct *task, int offset, long data) { struct pt_regs newregs, *regs = task_pt_regs(task); int ret = -EINVAL; newregs = *regs; newregs.uregs[offset] = data; if (valid_user_regs(&newregs)) { regs->uregs[offset] = data; ret = 0; } return ret; } /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { /* Nothing to do. */ } /* * Handle hitting a breakpoint. */ void ptrace_break(struct pt_regs *regs) { force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)instruction_pointer(regs)); } static int break_trap(struct pt_regs *regs, unsigned int instr) { ptrace_break(regs); return 0; } static struct undef_hook arm_break_hook = { .instr_mask = 0x0fffffff, .instr_val = 0x07f001f0, .cpsr_mask = PSR_T_BIT, .cpsr_val = 0, .fn = break_trap, }; static struct undef_hook thumb_break_hook = { .instr_mask = 0xffffffff, .instr_val = 0x0000de01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, }; static struct undef_hook thumb2_break_hook = { .instr_mask = 0xffffffff, .instr_val = 0xf7f0a000, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, }; static int __init ptrace_break_init(void) { register_undef_hook(&arm_break_hook); register_undef_hook(&thumb_break_hook); register_undef_hook(&thumb2_break_hook); return 0; } core_initcall(ptrace_break_init); /* * Read the word at offset "off" into the "struct user". We * actually access the pt_regs stored on the kernel stack. */ static int ptrace_read_user(struct task_struct *tsk, unsigned long off, unsigned long __user *ret) { unsigned long tmp; if (off & 3) return -EIO; tmp = 0; if (off == PT_TEXT_ADDR) tmp = tsk->mm->start_code; else if (off == PT_DATA_ADDR) tmp = tsk->mm->start_data; else if (off == PT_TEXT_END_ADDR) tmp = tsk->mm->end_code; else if (off < sizeof(struct pt_regs)) tmp = get_user_reg(tsk, off >> 2); else if (off >= sizeof(struct user)) return -EIO; return put_user(tmp, ret); } /* * Write the word at offset "off" into "struct user". We * actually access the pt_regs stored on the kernel stack. 
*/ static int ptrace_write_user(struct task_struct *tsk, unsigned long off, unsigned long val) { if (off & 3 || off >= sizeof(struct user)) return -EIO; if (off >= sizeof(struct pt_regs)) return 0; return put_user_reg(tsk, off >> 2, val); } #ifdef CONFIG_IWMMXT /* * Get the child iWMMXt state. */ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) return -ENODATA; iwmmxt_task_disable(thread); /* force it to ram */ return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE) ? -EFAULT : 0; } /* * Set the child iWMMXt state. */ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp) { struct thread_info *thread = task_thread_info(tsk); if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) return -EACCES; iwmmxt_task_release(thread); /* force a reload */ return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE) ? -EFAULT : 0; } #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT /* * Convert a virtual register number into an index for a thread_info * breakpoint array. Breakpoints are identified using positive numbers * whilst watchpoints are negative. The registers are laid out as pairs * of (address, control), each pair mapping to a unique hw_breakpoint struct. * Register 0 is reserved for describing resource information. */ static int ptrace_hbp_num_to_idx(long num) { if (num < 0) num = (ARM_MAX_BRP << 1) - num; return (num - 1) >> 1; } /* * Returns the virtual register number for the address of the * breakpoint at index idx. */ static long ptrace_hbp_idx_to_num(int idx) { long mid = ARM_MAX_BRP << 1; long num = (idx << 1) + 1; return num > mid ? mid - num : num; } /* * Handle hitting a HW-breakpoint. */ static void ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); long num; int i; for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i) if (current->thread.debug.hbp[i] == bp) break; num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i); force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger)); } /* * Set ptrace breakpoint pointers to zero for this task. * This is required in order to prevent child processes from unregistering * breakpoints held by their parent. */ void clear_ptrace_hw_breakpoint(struct task_struct *tsk) { memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); } /* * Unregister breakpoints from this task and reset the pointers in * the thread_struct. */ void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { int i; struct thread_struct *t = &tsk->thread; for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) { if (t->debug.hbp[i]) { unregister_hw_breakpoint(t->debug.hbp[i]); t->debug.hbp[i] = NULL; } } } static u32 ptrace_get_hbp_resource_info(void) { u8 num_brps, num_wrps, debug_arch, wp_len; u32 reg = 0; num_brps = hw_breakpoint_slots(TYPE_INST); num_wrps = hw_breakpoint_slots(TYPE_DATA); debug_arch = arch_get_debug_arch(); wp_len = arch_get_max_wp_len(); reg |= debug_arch; reg <<= 8; reg |= wp_len; reg <<= 8; reg |= num_wrps; reg <<= 8; reg |= num_brps; return reg; } static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) { struct perf_event_attr attr; ptrace_breakpoint_init(&attr); /* Initialise fields to sane defaults. 
*/ attr.bp_addr = 0; attr.bp_len = HW_BREAKPOINT_LEN_4; attr.bp_type = type; attr.disabled = 1; return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); } static int ptrace_gethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { u32 reg; int idx, ret = 0; struct perf_event *bp; struct arch_hw_breakpoint_ctrl arch_ctrl; if (num == 0) { reg = ptrace_get_hbp_resource_info(); } else { idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { reg = 0; goto put; } arch_ctrl = counter_arch_bp(bp)->ctrl; /* * Fix up the len because we may have adjusted it * to compensate for an unaligned address. */ while (!(arch_ctrl.len & 0x1)) arch_ctrl.len >>= 1; if (num & 0x1) reg = bp->attr.bp_addr; else reg = encode_ctrl_reg(arch_ctrl); } put: if (put_user(reg, data)) ret = -EFAULT; out: return ret; } static int ptrace_sethbpregs(struct task_struct *tsk, long num, unsigned long __user *data) { int idx, gen_len, gen_type, implied_type, ret = 0; u32 user_val; struct perf_event *bp; struct arch_hw_breakpoint_ctrl ctrl; struct perf_event_attr attr; if (num == 0) goto out; else if (num < 0) implied_type = HW_BREAKPOINT_RW; else implied_type = HW_BREAKPOINT_X; idx = ptrace_hbp_num_to_idx(num); if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) { ret = -EINVAL; goto out; } if (get_user(user_val, data)) { ret = -EFAULT; goto out; } bp = tsk->thread.debug.hbp[idx]; if (!bp) { bp = ptrace_hbp_create(tsk, implied_type); if (IS_ERR(bp)) { ret = PTR_ERR(bp); goto out; } tsk->thread.debug.hbp[idx] = bp; } attr = bp->attr; if (num & 0x1) { /* Address */ attr.bp_addr = user_val; } else { /* Control */ decode_ctrl_reg(user_val, &ctrl); ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type); if (ret) goto out; if ((gen_type & implied_type) != gen_type) { ret = -EINVAL; goto out; } attr.bp_len = gen_len; attr.bp_type = gen_type; attr.disabled = !ctrl.enabled; } ret = modify_user_hw_breakpoint(bp, &attr); out: return ret; } #endif /* regset get/set implementations */ static int gpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { return membuf_write(&to, task_pt_regs(target), sizeof(struct pt_regs)); } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct pt_regs newregs = *task_pt_regs(target); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, sizeof(newregs)); if (ret) return ret; if (!valid_user_regs(&newregs)) return -EINVAL; *task_pt_regs(target) = newregs; return 0; } static int fpa_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { return membuf_write(&to, &task_thread_info(target)->fpstate, sizeof(struct user_fp)); } static int fpa_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct thread_info *thread = task_thread_info(target); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &thread->fpstate, 0, sizeof(struct user_fp)); } #ifdef CONFIG_VFP /* * VFP register get/set implementations. * * With respect to the kernel, struct user_fp is divided into three chunks: * 16 or 32 real VFP registers (d0-d15 or d0-31) * These are transferred to/from the real registers in the task's * vfp_hard_struct. The number of registers depends on the kernel * configuration. 
* * 16 or 0 fake VFP registers (d16-d31 or empty) * i.e., the user_vfp structure has space for 32 registers even if * the kernel doesn't have them all. * * vfp_get() reads this chunk as zero where applicable * vfp_set() ignores this chunk * * 1 word for the FPSCR */ static int vfp_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); membuf_write(&to, vfp->fpregs, sizeof(vfp->fpregs)); membuf_zero(&to, user_fpscr_offset - sizeof(vfp->fpregs)); return membuf_store(&to, vfp->fpscr); } /* * For vfp_set() a read-modify-write is done on the VFP registers, * in order to avoid writing back a half-modified set of registers on * failure. */ static int vfp_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct thread_info *thread = task_thread_info(target); struct vfp_hard_struct new_vfp; const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs); const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr); vfp_sync_hwstate(thread); new_vfp = thread->vfpstate.hard; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpregs, user_fpregs_offset, user_fpregs_offset + sizeof(new_vfp.fpregs)); if (ret) return ret; user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, user_fpregs_offset + sizeof(new_vfp.fpregs), user_fpscr_offset); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &new_vfp.fpscr, user_fpscr_offset, user_fpscr_offset + sizeof(new_vfp.fpscr)); if (ret) return ret; thread->vfpstate.hard = new_vfp; vfp_flush_hwstate(thread); return 0; } #endif /* CONFIG_VFP */ enum arm_regset { REGSET_GPR, REGSET_FPR, #ifdef CONFIG_VFP REGSET_VFP, #endif }; static const struct user_regset arm_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(u32), .align = sizeof(u32), .regset_get = gpr_get, .set = gpr_set }, [REGSET_FPR] = { /* * For the FPA regs in fpstate, the real fields are a mixture * of sizes, so pretend that the registers are word-sized: */ .core_note_type = NT_PRFPREG, .n = sizeof(struct user_fp) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = fpa_get, .set = fpa_set }, #ifdef CONFIG_VFP [REGSET_VFP] = { /* * Pretend that the VFP regs are word-sized, since the FPSCR is * a single word dangling at the end of struct user_vfp: */ .core_note_type = NT_ARM_VFP, .n = ARM_VFPREGS_SIZE / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = vfp_get, .set = vfp_set }, #endif /* CONFIG_VFP */ }; static const struct user_regset_view user_arm_view = { .name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI, .regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_arm_view; } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *) data; switch (request) { case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, datap); break; case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; case PTRACE_GETREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_SETREGS: ret = 
copy_regset_from_user(child, &user_arm_view, REGSET_GPR, 0, sizeof(struct pt_regs), datap); break; case PTRACE_GETFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; case PTRACE_SETFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_FPR, 0, sizeof(union fp_state), datap); break; #ifdef CONFIG_IWMMXT case PTRACE_GETWMMXREGS: ret = ptrace_getwmmxregs(child, datap); break; case PTRACE_SETWMMXREGS: ret = ptrace_setwmmxregs(child, datap); break; #endif case PTRACE_GET_THREAD_AREA: ret = put_user(task_thread_info(child)->tp_value[0], datap); break; case PTRACE_SET_SYSCALL: if (data != -1) data &= __NR_SYSCALL_MASK; task_thread_info(child)->abi_syscall = data; ret = 0; break; #ifdef CONFIG_VFP case PTRACE_GETVFPREGS: ret = copy_regset_to_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; case PTRACE_SETVFPREGS: ret = copy_regset_from_user(child, &user_arm_view, REGSET_VFP, 0, ARM_VFPREGS_SIZE, datap); break; #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: ret = ptrace_gethbpregs(child, addr, (unsigned long __user *)data); break; case PTRACE_SETHBPREGS: ret = ptrace_sethbpregs(child, addr, (unsigned long __user *)data); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } enum ptrace_syscall_dir { PTRACE_SYSCALL_ENTER = 0, PTRACE_SYSCALL_EXIT, }; static void report_syscall(struct pt_regs *regs, enum ptrace_syscall_dir dir) { unsigned long ip; /* * IP is used to denote syscall entry/exit: * IP = 0 -> entry, =1 -> exit */ ip = regs->ARM_ip; regs->ARM_ip = dir; if (dir == PTRACE_SYSCALL_EXIT) ptrace_report_syscall_exit(regs, 0); else if (ptrace_report_syscall_entry(regs)) current_thread_info()->abi_syscall = -1; regs->ARM_ip = ip; } asmlinkage int syscall_trace_enter(struct pt_regs *regs) { int scno; if (test_thread_flag(TIF_SYSCALL_TRACE)) report_syscall(regs, PTRACE_SYSCALL_ENTER); /* Do seccomp after ptrace; syscall may have changed. */ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER if (secure_computing() == -1) return -1; #else /* XXX: remove this once OABI gets fixed */ secure_computing_strict(syscall_get_nr(current, regs)); #endif /* Tracer or seccomp may have changed syscall. */ scno = syscall_get_nr(current, regs); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, scno); audit_syscall_entry(scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); return scno; } asmlinkage void syscall_trace_exit(struct pt_regs *regs) { /* * Audit the syscall before anything else, as a debugger may * come in and change the current registers. */ audit_syscall_exit(regs); /* * Note that we haven't updated the ->syscall field for the * current thread. This isn't a problem because it will have * been set on syscall entry and there hasn't been an opportunity * for a PTRACE_SET_SYSCALL since then. */ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) report_syscall(regs, PTRACE_SYSCALL_EXIT); }
linux-master
arch/arm/kernel/ptrace.c
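arch_ptrace() above services PTRACE_GETREGS through the GPR regset, so a user-space tracer sees the same uregs[] layout (uregs[15] = pc, uregs[16] = cpsr) that ptrace.c uses. A minimal tracer sketch for ARM/glibc, with error handling mostly omitted for brevity:

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);                 /* stop so the parent can inspect us */
                _exit(0);
        }

        waitpid(child, NULL, 0);                /* wait for the SIGSTOP */

        struct user_regs regs;                  /* uregs[15] = pc, uregs[16] = cpsr */
        if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == 0)
                printf("child pc=%08lx cpsr=%08lx\n",
                       regs.uregs[15], regs.uregs[16]);

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
}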
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/traps.c * * Copyright (C) 1995-2009 Russell King * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds * * 'traps.c' handles hardware exceptions after we have saved some state in * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably * kill the offending process. */ #include <linux/signal.h> #include <linux/personality.h> #include <linux/kallsyms.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include <linux/kdebug.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/kexec.h> #include <linux/bug.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/irq.h> #include <linux/atomic.h> #include <asm/cacheflush.h> #include <asm/exception.h> #include <asm/spectre.h> #include <asm/unistd.h> #include <asm/traps.h> #include <asm/ptrace.h> #include <asm/unwind.h> #include <asm/tls.h> #include <asm/stacktrace.h> #include <asm/system_misc.h> #include <asm/opcodes.h> static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt", "undefined instruction", }; void *vectors_page; #ifdef CONFIG_DEBUG_USER unsigned int user_debug; static int __init user_debug_setup(char *str) { get_option(&str, &user_debug); return 1; } __setup("user_debug=", user_debug_setup); #endif void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame, const char *loglvl) { unsigned long end = frame + 4 + sizeof(struct pt_regs); if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) && IS_ENABLED(CONFIG_CC_IS_GCC) && end > ALIGN(frame, THREAD_SIZE)) { /* * If we are walking past the end of the stack, it may be due * to the fact that we are on an IRQ or overflow stack. In this * case, we can load the address of the other stack from the * frame record. */ frame = ((unsigned long *)frame)[-2] - 4; end = frame + 4 + sizeof(struct pt_regs); } #ifndef CONFIG_KALLSYMS printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n", loglvl, where, from); #elif defined CONFIG_BACKTRACE_VERBOSE printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", loglvl, where, (void *)where, from, (void *)from); #else printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from); #endif if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) dump_mem(loglvl, "Exception stack", frame + 4, end); } void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl) { char str[80], *p; unsigned int x; int reg; for (reg = 10, x = 0, p = str; reg >= 0; reg--) { if (instruction & BIT(reg)) { p += sprintf(p, " r%d:%08x", reg, *stack--); if (++x == 6) { x = 0; p = str; printk("%s%s\n", loglvl, str); } } } if (p != str) printk("%s%s\n", loglvl, str); } #ifndef CONFIG_ARM_UNWIND /* * Stack pointers should always be within the kernels view of * physical memory. If it is not there, then we can't dump * out any information relating to the stack. */ static int verify_stack(unsigned long sp) { if (sp < PAGE_OFFSET || (!IS_ENABLED(CONFIG_VMAP_STACK) && sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; } #endif /* * Dump out the contents of some memory nicely... 
*/ void dump_mem(const char *lvl, const char *str, unsigned long bottom, unsigned long top) { unsigned long first; int i; printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top); for (first = bottom & ~31; first < top; first += 32) { unsigned long p; char str[sizeof(" 12345678") * 8 + 1]; memset(str, ' ', sizeof(str)); str[sizeof(str) - 1] = '\0'; for (p = first, i = 0; i < 8 && p < top; i++, p += 4) { if (p >= bottom && p < top) { unsigned long val; if (!get_kernel_nofault(val, (unsigned long *)p)) sprintf(str + i * 9, " %08lx", val); else sprintf(str + i * 9, " ????????"); } } printk("%s%04lx:%s\n", lvl, first & 0xffff, str); } } static void dump_instr(const char *lvl, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); const int thumb = thumb_mode(regs); const int width = thumb ? 4 : 8; char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; int i; /* * Note that we now dump the code first, just in case the backtrace * kills us. */ for (i = -4; i < 1 + !!thumb; i++) { unsigned int val, bad; if (thumb) { u16 tmp; if (user_mode(regs)) bad = get_user(tmp, &((u16 __user *)addr)[i]); else bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]); val = __mem_to_opcode_thumb16(tmp); } else { if (user_mode(regs)) bad = get_user(val, &((u32 __user *)addr)[i]); else bad = get_kernel_nofault(val, &((u32 *)addr)[i]); val = __mem_to_opcode_arm(val); } if (!bad) p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ", width, val); else { p += sprintf(p, "bad PC value"); break; } } printk("%sCode: %s\n", lvl, str); } #ifdef CONFIG_ARM_UNWIND void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl) { unwind_backtrace(regs, tsk, loglvl); } #else void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl) { unsigned int fp, mode; int ok = 1; printk("%sBacktrace: ", loglvl); if (!tsk) tsk = current; if (regs) { fp = frame_pointer(regs); mode = processor_mode(regs); } else if (tsk != current) { fp = thread_saved_fp(tsk); mode = 0x10; } else { asm("mov %0, fp" : "=r" (fp) : : "cc"); mode = 0x10; } if (!fp) { pr_cont("no frame pointer"); ok = 0; } else if (verify_stack(fp)) { pr_cont("invalid frame pointer 0x%08x", fp); ok = 0; } else if (fp < (unsigned long)end_of_stack(tsk)) pr_cont("frame pointer underflow"); pr_cont("\n"); if (ok) c_backtrace(fp, mode, loglvl); } #endif void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) { dump_backtrace(NULL, tsk, loglvl); barrier(); } #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" #elif defined(CONFIG_PREEMPT_RT) #define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif #ifdef CONFIG_SMP #define S_SMP " SMP" #else #define S_SMP "" #endif #ifdef CONFIG_THUMB2_KERNEL #define S_ISA " THUMB2" #else #define S_ISA " ARM" #endif static int __die(const char *str, int err, struct pt_regs *regs) { struct task_struct *tsk = current; static int die_counter; int ret; pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n", str, err, ++die_counter); /* trap and error numbers are mostly meaningless on ARM */ ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); if (ret == NOTIFY_STOP) return 1; print_modules(); __show_regs(regs); __show_regs_alloc_free(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); if (!user_mode(regs) || in_interrupt()) { dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN) + THREAD_SIZE); dump_backtrace(regs, tsk, 
KERN_EMERG); dump_instr(KERN_EMERG, regs); } return 0; } static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; static unsigned long oops_begin(void) { int cpu; unsigned long flags; oops_enter(); /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; console_verbose(); bust_spinlocks(1); return flags; } static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { if (regs && kexec_should_crash(current)) crash_kexec(regs); bust_spinlocks(0); die_owner = -1; add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. */ arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); if (signr) make_task_dead(signr); } /* * This function is protected against re-entrancy. */ void die(const char *str, struct pt_regs *regs, int err) { enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; unsigned long flags = oops_begin(); int sig = SIGSEGV; if (!user_mode(regs)) bug_type = report_bug(regs->ARM_pc, regs); if (bug_type != BUG_TRAP_TYPE_NONE) str = "Oops - BUG"; if (__die(str, err, regs)) sig = 0; oops_end(flags, regs, sig); } void arm_notify_die(const char *str, struct pt_regs *regs, int signo, int si_code, void __user *addr, unsigned long err, unsigned long trap) { if (user_mode(regs)) { current->thread.error_code = err; current->thread.trap_no = trap; force_sig_fault(signo, si_code, addr); } else { die(str, regs, err); } } #ifdef CONFIG_GENERIC_BUG int is_valid_bugaddr(unsigned long pc) { #ifdef CONFIG_THUMB2_KERNEL u16 bkpt; u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE); #else u32 bkpt; u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE); #endif if (get_kernel_nofault(bkpt, (void *)pc)) return 0; return bkpt == insn; } #endif static LIST_HEAD(undef_hook); static DEFINE_RAW_SPINLOCK(undef_lock); void register_undef_hook(struct undef_hook *hook) { unsigned long flags; raw_spin_lock_irqsave(&undef_lock, flags); list_add(&hook->node, &undef_hook); raw_spin_unlock_irqrestore(&undef_lock, flags); } void unregister_undef_hook(struct undef_hook *hook) { unsigned long flags; raw_spin_lock_irqsave(&undef_lock, flags); list_del(&hook->node); raw_spin_unlock_irqrestore(&undef_lock, flags); } static nokprobe_inline int call_undef_hook(struct pt_regs *regs, unsigned int instr) { struct undef_hook *hook; unsigned long flags; int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL; raw_spin_lock_irqsave(&undef_lock, flags); list_for_each_entry(hook, &undef_hook, node) if ((instr & hook->instr_mask) == hook->instr_val && (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) fn = hook->fn; raw_spin_unlock_irqrestore(&undef_lock, flags); return fn ? 
fn(regs, instr) : 1; } asmlinkage void do_undefinstr(struct pt_regs *regs) { unsigned int instr; void __user *pc; pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { #ifdef CONFIG_THUMB2_KERNEL if (thumb_mode(regs)) { instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]); if (is_wide_instruction(instr)) { u16 inst2; inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]); instr = __opcode_thumb32_compose(instr, inst2); } } else #endif instr = __mem_to_opcode_arm(*(u32 *) pc); } else if (thumb_mode(regs)) { if (get_user(instr, (u16 __user *)pc)) goto die_sig; instr = __mem_to_opcode_thumb16(instr); if (is_wide_instruction(instr)) { unsigned int instr2; if (get_user(instr2, (u16 __user *)pc+1)) goto die_sig; instr2 = __mem_to_opcode_thumb16(instr2); instr = __opcode_thumb32_compose(instr, instr2); } } else { if (get_user(instr, (u32 __user *)pc)) goto die_sig; instr = __mem_to_opcode_arm(instr); } if (call_undef_hook(regs, instr) == 0) return; die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { pr_info("%s (%d): undefined instruction: pc=%px\n", current->comm, task_pid_nr(current), pc); __show_regs(regs); dump_instr(KERN_INFO, regs); } #endif arm_notify_die("Oops - undefined instruction", regs, SIGILL, ILL_ILLOPC, pc, 0, 6); } NOKPROBE_SYMBOL(do_undefinstr) /* * Handle FIQ similarly to NMI on x86 systems. * * The runtime environment for NMIs is extremely restrictive * (NMIs can pre-empt critical sections meaning almost all locking is * forbidden) meaning this default FIQ handling must only be used in * circumstances where non-maskability improves robustness, such as * watchdog or debug logic. * * This handler is not appropriate for general purpose use in drivers * platform code and can be overrideen using set_fiq_handler. */ asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); nmi_enter(); /* nop. FIQ handlers for special arch/arm features can be added here. */ nmi_exit(); set_irq_regs(old_regs); } /* * bad_mode handles the impossible case in the vectors. If you see one of * these, then it's extremely serious, and could mean you have buggy hardware. * It never returns, and never tries to sync. We hope that we can at least * dump out some state information... */ asmlinkage void bad_mode(struct pt_regs *regs, int reason) { console_verbose(); pr_crit("Bad mode in %s handler detected\n", handler[reason]); die("Oops - bad mode", regs, 0); local_irq_disable(); panic("bad mode"); } static int bad_syscall(int n, struct pt_regs *regs) { if ((current->personality & PER_MASK) != PER_LINUX) { send_sig(SIGSEGV, current, 1); return regs->ARM_r0; } #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_SYSCALL) { pr_err("[%d] %s: obsolete system call %08x.\n", task_pid_nr(current), current->comm, n); dump_instr(KERN_ERR, regs); } #endif arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP, (void __user *)instruction_pointer(regs) - (thumb_mode(regs) ? 
2 : 4), n, 0); return regs->ARM_r0; } static inline int __do_cache_op(unsigned long start, unsigned long end) { int ret; do { unsigned long chunk = min(PAGE_SIZE, end - start); if (fatal_signal_pending(current)) return 0; ret = flush_icache_user_range(start, start + chunk); if (ret) return ret; cond_resched(); start += chunk; } while (start < end); return 0; } static inline int do_cache_op(unsigned long start, unsigned long end, int flags) { if (end < start || flags) return -EINVAL; if (!access_ok((void __user *)start, end - start)) return -EFAULT; return __do_cache_op(start, end); } /* * Handle all unrecognised system calls. * 0x9f0000 - 0x9fffff are some more esoteric system calls */ #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE) asmlinkage int arm_syscall(int no, struct pt_regs *regs) { if ((no >> 16) != (__ARM_NR_BASE>> 16)) return bad_syscall(no, regs); switch (no & 0xffff) { case 0: /* branch through 0 */ arm_notify_die("branch through zero", regs, SIGSEGV, SEGV_MAPERR, NULL, 0, 0); return 0; case NR(breakpoint): /* SWI BREAK_POINT */ regs->ARM_pc -= thumb_mode(regs) ? 2 : 4; ptrace_break(regs); return regs->ARM_r0; /* * Flush a region from virtual address 'r0' to virtual address 'r1' * _exclusive_. There is no alignment requirement on either address; * user space does not need to know the hardware cache layout. * * r2 contains flags. It should ALWAYS be passed as ZERO until it * is defined to be something else. For now we ignore it, but may * the fires of hell burn in your belly if you break this rule. ;) * * (at a later date, we may want to allow this call to not flush * various aspects of the cache. Passing '0' will guarantee that * everything necessary gets flushed to maintain consistency in * the specified region). */ case NR(cacheflush): return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2); case NR(usr26): if (!(elf_hwcap & HWCAP_26BIT)) break; regs->ARM_cpsr &= ~MODE32_BIT; return regs->ARM_r0; case NR(usr32): if (!(elf_hwcap & HWCAP_26BIT)) break; regs->ARM_cpsr |= MODE32_BIT; return regs->ARM_r0; case NR(set_tls): set_tls(regs->ARM_r0); return 0; case NR(get_tls): return current_thread_info()->tp_value[0]; default: /* Calls 9f00xx..9f07ff are defined to return -ENOSYS if not implemented, rather than raising SIGILL. This way the calling program can gracefully determine whether a feature is supported. */ if ((no & 0xffff) <= 0x7ff) return -ENOSYS; break; } #ifdef CONFIG_DEBUG_USER /* * experience shows that these seem to indicate that * something catastrophic has happened */ if (user_debug & UDBG_SYSCALL) { pr_err("[%d] %s: arm syscall %d\n", task_pid_nr(current), current->comm, no); dump_instr(KERN_ERR, regs); if (user_mode(regs)) { __show_regs(regs); c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR); } } #endif arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP, (void __user *)instruction_pointer(regs) - (thumb_mode(regs) ? 2 : 4), no, 0); return 0; } #ifdef CONFIG_TLS_REG_EMUL /* * We might be running on an ARMv6+ processor which should have the TLS * register but for some reason we can't use it, or maybe an SMP system * using a pre-ARMv6 processor (there are apparently a few prototypes like * that in existence) and therefore access to that register must be * emulated. 
*/ static int get_tp_trap(struct pt_regs *regs, unsigned int instr) { int reg = (instr >> 12) & 15; if (reg == 15) return 1; regs->uregs[reg] = current_thread_info()->tp_value[0]; regs->ARM_pc += 4; return 0; } static struct undef_hook arm_mrc_hook = { .instr_mask = 0x0fff0fff, .instr_val = 0x0e1d0f70, .cpsr_mask = PSR_T_BIT, .cpsr_val = 0, .fn = get_tp_trap, }; static int __init arm_mrc_hook_init(void) { register_undef_hook(&arm_mrc_hook); return 0; } late_initcall(arm_mrc_hook_init); #endif /* * A data abort trap was taken, but we did not handle the instruction. * Try to abort the user program, or panic if it was the kernel. */ asmlinkage void baddataabort(int code, unsigned long instr, struct pt_regs *regs) { unsigned long addr = instruction_pointer(regs); #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_BADABORT) { pr_err("8<--- cut here ---\n"); pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n", task_pid_nr(current), current->comm, code, instr); dump_instr(KERN_ERR, regs); show_pte(KERN_ERR, current->mm, addr); } #endif arm_notify_die("unknown data abort code", regs, SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0); } void __readwrite_bug(const char *fn) { pr_err("%s called, but not implemented\n", fn); BUG(); } EXPORT_SYMBOL(__readwrite_bug); #ifdef CONFIG_MMU void __pte_error(const char *file, int line, pte_t pte) { pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte)); } void __pmd_error(const char *file, int line, pmd_t pmd) { pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd)); } void __pgd_error(const char *file, int line, pgd_t pgd) { pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd)); } #endif asmlinkage void __div0(void) { pr_err("Division by zero in kernel.\n"); dump_stack(); } EXPORT_SYMBOL(__div0); void abort(void) { BUG(); /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } #ifdef CONFIG_KUSER_HELPERS static void __init kuser_init(void *vectors) { extern char __kuser_helper_start[], __kuser_helper_end[]; int kuser_sz = __kuser_helper_end - __kuser_helper_start; memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); /* * vectors + 0xfe0 = __kuser_get_tls * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8 */ if (tls_emu || has_tls_reg) memcpy(vectors + 0xfe0, vectors + 0xfe8, 4); } #else static inline void __init kuser_init(void *vectors) { } #endif #ifndef CONFIG_CPU_V7M static void copy_from_lma(void *vma, void *lma_start, void *lma_end) { memcpy(vma, lma_start, lma_end - lma_start); } static void flush_vectors(void *vma, size_t offset, size_t size) { unsigned long start = (unsigned long)vma + offset; unsigned long end = start + size; flush_icache_range(start, end); } #ifdef CONFIG_HARDEN_BRANCH_HISTORY int spectre_bhb_update_vectors(unsigned int method) { extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; void *vec_start, *vec_end; if (system_state >= SYSTEM_FREEING_INITMEM) { pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", smp_processor_id()); return SPECTRE_VULNERABLE; } switch (method) { case SPECTRE_V2_METHOD_LOOP8: vec_start = __vectors_bhb_loop8_start; vec_end = __vectors_bhb_loop8_end; break; case SPECTRE_V2_METHOD_BPIALL: vec_start = __vectors_bhb_bpiall_start; vec_end = __vectors_bhb_bpiall_end; break; default: pr_err("CPU%u: unknown Spectre BHB state %d\n", smp_processor_id(), method); return SPECTRE_VULNERABLE; } copy_from_lma(vectors_page, 
vec_start, vec_end); flush_vectors(vectors_page, 0, vec_end - vec_start); return SPECTRE_MITIGATED; } #endif void __init early_trap_init(void *vectors_base) { extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; unsigned i; vectors_page = vectors_base; /* * Poison the vectors page with an undefined instruction. This * instruction is chosen to be undefined for both ARM and Thumb * ISAs. The Thumb version is an undefined instruction with a * branch back to the undefined instruction. */ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) ((u32 *)vectors_base)[i] = 0xe7fddef1; /* * Copy the vectors, stubs and kuser helpers (in entry-armv.S) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. */ copy_from_lma(vectors_base, __vectors_start, __vectors_end); copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); kuser_init(vectors_base); flush_vectors(vectors_base, 0, PAGE_SIZE * 2); } #else /* ifndef CONFIG_CPU_V7M */ void __init early_trap_init(void *vectors_base) { /* * on V7-M there is no need to copy the vector table to a dedicated * memory area. The address is configurable and so a table in the kernel * image can be used. */ } #endif #ifdef CONFIG_VMAP_STACK DECLARE_PER_CPU(u8 *, irq_stack_ptr); asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr); static int __init allocate_overflow_stacks(void) { u8 *stack; int cpu; for_each_possible_cpu(cpu) { stack = (u8 *)__get_free_page(GFP_KERNEL); if (WARN_ON(!stack)) return -ENOMEM; per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE]; } return 0; } early_initcall(allocate_overflow_stacks); asmlinkage void handle_bad_stack(struct pt_regs *regs) { unsigned long tsk_stk = (unsigned long)current->stack; #ifdef CONFIG_IRQSTACKS unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr); #endif unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr); console_verbose(); pr_emerg("Insufficient stack space to handle exception!"); pr_emerg("Task stack: [0x%08lx..0x%08lx]\n", tsk_stk, tsk_stk + THREAD_SIZE); #ifdef CONFIG_IRQSTACKS pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n", irq_stk - THREAD_SIZE, irq_stk); #endif pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n", ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk); die("kernel stack overflow", regs, 0); } #ifndef CONFIG_ARM_LPAE /* * Normally, we rely on the logic in do_translation_fault() to update stale PMD * entries covering the vmalloc space in a task's page tables when it first * accesses the region in question. Unfortunately, this is not sufficient when * the task stack resides in the vmalloc region, as do_translation_fault() is a * C function that needs a stack to run. * * So we need to ensure that these PMD entries are up to date *before* the MM * switch. As we already have some logic in the MM switch path that takes care * of this, let's trigger it by bumping the counter every time the core vmalloc * code modifies a PMD entry in the vmalloc region. Use release semantics on * the store so that other CPUs observing the counter's new value are * guaranteed to see the updated page table entries as well. */ void arch_sync_kernel_mappings(unsigned long start, unsigned long end) { if (start < VMALLOC_END && end > VMALLOC_START) atomic_inc_return_release(&init_mm.context.vmalloc_seq); } #endif #endif
linux-master
arch/arm/kernel/traps.c
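register_undef_hook() above is the mechanism arm_mrc_hook and the ptrace breakpoint hooks use to claim undefined-instruction encodings. A hypothetical additional hook might look like this; the trapped encoding 0xe7f5def0 is an arbitrary value chosen from the architecturally undefined space described in ptrace.c, not an encoding the kernel actually uses.

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/* Log the faulting encoding and skip over it; returning 0 tells
 * do_undefinstr() the hook consumed the instruction. */
static int demo_undef_trap(struct pt_regs *regs, unsigned int instr)
{
        pr_info("demo undef instr %08x at pc %08lx\n",
                instr, instruction_pointer(regs));
        regs->ARM_pc += 4;              /* step over the 4-byte ARM instruction */
        return 0;
}

static struct undef_hook demo_undef_hook = {
        .instr_mask     = 0xffffffff,
        .instr_val      = 0xe7f5def0,   /* assumed value in the undefined space */
        .cpsr_mask      = PSR_T_BIT,    /* ARM state only */
        .cpsr_val       = 0,
        .fn             = demo_undef_trap,
};

static int __init demo_undef_init(void)
{
        register_undef_hook(&demo_undef_hook);
        return 0;
}
late_initcall(demo_undef_init);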
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1995-2003 Russell King * 2001-2002 Keith Owens * * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed to extract * and format the required data. */ #include <linux/compiler.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include <asm/cacheflush.h> #include <asm/kexec-internal.h> #include <asm/glue-df.h> #include <asm/glue-pf.h> #include <asm/mach/arch.h> #include <asm/thread_info.h> #include <asm/page.h> #include <asm/mpu.h> #include <asm/procinfo.h> #include <asm/suspend.h> #include <asm/vdso_datapage.h> #include <asm/hardware/cache-l2x0.h> #include <linux/kbuild.h> #include <linux/arm-smccc.h> #include "signal.h" /* * Make sure that the compiler and target are compatible. */ #if defined(__APCS_26__) #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32 #endif int main(void) { DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); #ifdef CONFIG_STACKPROTECTOR DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); #endif BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain)); DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); DEFINE(TI_ABI_SYSCALL, offsetof(struct thread_info, abi_syscall)); DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); #ifdef CONFIG_VFP DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); #ifdef CONFIG_SMP DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); #endif #endif DEFINE(SOFTIRQ_DISABLE_OFFSET,SOFTIRQ_DISABLE_OFFSET); #ifdef CONFIG_ARM_THUMBEE DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); #endif #ifdef CONFIG_IWMMXT DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt)); #endif BLANK(); DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0)); DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1)); DEFINE(S_R2, offsetof(struct pt_regs, ARM_r2)); DEFINE(S_R3, offsetof(struct pt_regs, ARM_r3)); DEFINE(S_R4, offsetof(struct pt_regs, ARM_r4)); DEFINE(S_R5, offsetof(struct pt_regs, ARM_r5)); DEFINE(S_R6, offsetof(struct pt_regs, ARM_r6)); DEFINE(S_R7, offsetof(struct pt_regs, ARM_r7)); DEFINE(S_R8, offsetof(struct pt_regs, ARM_r8)); DEFINE(S_R9, offsetof(struct pt_regs, ARM_r9)); DEFINE(S_R10, offsetof(struct pt_regs, ARM_r10)); DEFINE(S_FP, offsetof(struct pt_regs, ARM_fp)); DEFINE(S_IP, offsetof(struct pt_regs, ARM_ip)); DEFINE(S_SP, offsetof(struct pt_regs, ARM_sp)); DEFINE(S_LR, offsetof(struct pt_regs, ARM_lr)); DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc)); DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr)); DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0)); DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr)); DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); BLANK(); DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3])); DEFINE(RT_SIGFRAME_RC3_OFFSET, offsetof(struct rt_sigframe, sig.retcode[3])); BLANK(); #ifdef CONFIG_CACHE_L2X0 DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency)); DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, 
data_latency)); DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start)); DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end)); DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl)); DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl)); BLANK(); #endif #ifdef CONFIG_CPU_HAS_ASID DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); BLANK(); #endif DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags)); BLANK(); DEFINE(VM_EXEC, VM_EXEC); BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); DEFINE(SYS_ERROR0, 0x9f0000); BLANK(); DEFINE(SIZEOF_MACHINE_DESC, sizeof(struct machine_desc)); DEFINE(MACHINFO_TYPE, offsetof(struct machine_desc, nr)); DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); BLANK(); DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags)); DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags)); BLANK(); #ifdef MULTI_DABORT DEFINE(PROCESSOR_DABT_FUNC, offsetof(struct processor, _data_abort)); #endif #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif #ifdef MULTI_CPU DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size)); DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend)); DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume)); #endif #ifdef MULTI_CACHE DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all)); #endif #ifdef CONFIG_ARM_CPU_SUSPEND DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp)); DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys)); DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash)); #endif DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id)); DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state)); BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER); DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); BLANK(); #ifdef CONFIG_VDSO DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); #endif BLANK(); #ifdef CONFIG_ARM_MPU DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns)); DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used)); DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn)); DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar)); DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar)); #endif DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address)); DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page)); DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type)); DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2)); return 0; }
linux-master
arch/arm/kernel/asm-offsets.c
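The table above works because offsetof() and sizeof() are evaluated by the target compiler and funnelled through DEFINE() into output that a build script turns into assembler-visible constants. A rough standalone sketch of that idea (not the kernel's kbuild post-processing; the struct, field names, and the printf-based DEFINE() here are invented for illustration):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct thread_info, for illustration only. */
struct demo_thread_info {
        unsigned long flags;
        int           preempt_count;
        unsigned int  cpu;
};

/* Emit one symbolic constant per line; the real asm-offsets machinery
 * emits special asm output that is post-processed into a header instead
 * of printing directly. */
#define DEFINE(sym, val) printf("#define %-16s %zu\n", #sym, (size_t)(val))
#define BLANK()          printf("\n")

int main(void)
{
        DEFINE(TI_FLAGS,   offsetof(struct demo_thread_info, flags));
        DEFINE(TI_PREEMPT, offsetof(struct demo_thread_info, preempt_count));
        DEFINE(TI_CPU,     offsetof(struct demo_thread_info, cpu));
        BLANK();
        DEFINE(TI_SIZE,    sizeof(struct demo_thread_info));
        return 0;
}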
// SPDX-License-Identifier: GPL-2.0 #include <linux/ftrace.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/mm_types.h> #include <linux/pgtable.h> #include <asm/bugs.h> #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/page.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/tlbflush.h> extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid); extern void cpu_resume_mmu(void); #ifdef CONFIG_MMU int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { struct mm_struct *mm = current->active_mm; u32 __mpidr = cpu_logical_map(smp_processor_id()); int ret; if (!idmap_pgd) return -EINVAL; /* * Function graph tracer state gets incosistent when the kernel * calls functions that never return (aka suspend finishers) hence * disable graph tracing during their execution. */ pause_graph_tracing(); /* * Provide a temporary page table with an identity mapping for * the MMU-enable code, required for resuming. On successful * resume (indicated by a zero return code), we need to switch * back to the correct page tables. */ ret = __cpu_suspend(arg, fn, __mpidr); unpause_graph_tracing(); if (ret == 0) { cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); local_flush_tlb_all(); check_other_bugs(); } return ret; } #else int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) { u32 __mpidr = cpu_logical_map(smp_processor_id()); int ret; pause_graph_tracing(); ret = __cpu_suspend(arg, fn, __mpidr); unpause_graph_tracing(); return ret; } #define idmap_pgd NULL #endif /* * This is called by __cpu_suspend() to save the state, and do whatever * flushing is required to ensure that when the CPU goes to sleep we have * the necessary data available when the caches are not searched. */ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr) { u32 *ctx = ptr; *save_ptr = virt_to_phys(ptr); /* This must correspond to the LDM in cpu_resume() assembly */ *ptr++ = virt_to_phys(idmap_pgd); *ptr++ = sp; *ptr++ = virt_to_phys(cpu_do_resume); cpu_do_suspend(ptr); flush_cache_louis(); /* * flush_cache_louis does not guarantee that * save_ptr and ptr are cleaned to main memory, * just up to the Level of Unification Inner Shareable. * Since the context pointer and context itself * are to be retrieved with the MMU off that * data must be cleaned from all cache levels * to main memory using "area" cache primitives. */ __cpuc_flush_dcache_area(ctx, ptrsz); __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr)); outer_clean_range(*save_ptr, *save_ptr + ptrsz); outer_clean_range(virt_to_phys(save_ptr), virt_to_phys(save_ptr) + sizeof(*save_ptr)); } extern struct sleep_save_sp sleep_save_sp; static int cpu_suspend_alloc_sp(void) { void *ctx_ptr; /* ctx_ptr is an array of physical addresses */ ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL); if (WARN_ON(!ctx_ptr)) return -ENOMEM; sleep_save_sp.save_ptr_stash = ctx_ptr; sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); sync_cache_w(&sleep_save_sp); return 0; } early_initcall(cpu_suspend_alloc_sp);
linux-master
arch/arm/kernel/suspend.c
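For context, a minimal sketch of how platform code might use cpu_suspend() above, assuming a hypothetical my_soc_enter_lowpower() call into firmware or a power controller. The contract illustrated: the finisher must not return on a successful power-down, and a zero return from cpu_suspend() means the core resumed through cpu_resume with the saved context.

#include <linux/errno.h>
#include <asm/suspend.h>

/* Hypothetical firmware/power-controller call; not a real kernel API. */
extern void my_soc_enter_lowpower(unsigned long arg);

static int my_soc_finisher(unsigned long arg)
{
        /* On success this does not return: the core loses context and
         * later restarts in cpu_resume with the MMU off. */
        my_soc_enter_lowpower(arg);

        /* Reaching this point means the power-down was aborted. */
        return -EBUSY;
}

static int my_soc_suspend_enter(void)
{
        /* cpu_suspend() saves context via __cpu_suspend_save() and runs
         * the finisher; 0 means we came back through the resume path. */
        return cpu_suspend(0, my_soc_finisher);
}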
/* * arch/arm/kernel/topology.c * * Copyright (C) 2011 Linaro Limited. * Written by: Vincent Guittot * * based on arch/sh/kernel/topology.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/arch_topology.h> #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/init.h> #include <linux/percpu.h> #include <linux/node.h> #include <linux/nodemask.h> #include <linux/of.h> #include <linux/sched.h> #include <linux/sched/topology.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/topology.h> /* * cpu capacity scale management */ /* * cpu capacity table * This per cpu data structure describes the relative capacity of each core. * On a heteregenous system, cores don't have the same computation capacity * and we reflect that difference in the cpu_capacity field so the scheduler * can take this difference into account during load balance. A per cpu * structure is preferred because each CPU updates its own cpu_capacity field * during the load balance except for idle cores. One idle core is selected * to run the rebalance_domains for all idle cores and the cpu_capacity can be * updated during this sequence. */ #ifdef CONFIG_OF struct cpu_efficiency { const char *compatible; unsigned long efficiency; }; /* * Table of relative efficiency of each processors * The efficiency value must fit in 20bit and the final * cpu_scale value must be in the range * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 * in order to return at most 1 when DIV_ROUND_CLOSEST * is used to compute the capacity of a CPU. * Processors that are not defined in the table, * use the default SCHED_CAPACITY_SCALE value for cpu_scale. */ static const struct cpu_efficiency table_efficiency[] = { {"arm,cortex-a15", 3891}, {"arm,cortex-a7", 2048}, {NULL, }, }; static unsigned long *__cpu_capacity; #define cpu_capacity(cpu) __cpu_capacity[cpu] static unsigned long middle_capacity = 1; static bool cap_from_dt = true; /* * Iterate all CPUs' descriptor in DT and compute the efficiency * (as per table_efficiency). Also calculate a middle efficiency * as close as possible to (max{eff_i} - min{eff_i}) / 2 * This is later used to scale the cpu_capacity field such that an * 'average' CPU is of middle capacity. Also see the comments near * table_efficiency[] and update_cpu_capacity(). 
*/ static void __init parse_dt_topology(void) { const struct cpu_efficiency *cpu_eff; struct device_node *cn = NULL; unsigned long min_capacity = ULONG_MAX; unsigned long max_capacity = 0; unsigned long capacity = 0; int cpu = 0; __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), GFP_NOWAIT); for_each_possible_cpu(cpu) { const __be32 *rate; int len; /* too early to use cpu->of_node */ cn = of_get_cpu_node(cpu, NULL); if (!cn) { pr_err("missing device node for CPU %d\n", cpu); continue; } if (topology_parse_cpu_capacity(cn, cpu)) { of_node_put(cn); continue; } cap_from_dt = false; for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) if (of_device_is_compatible(cn, cpu_eff->compatible)) break; if (cpu_eff->compatible == NULL) continue; rate = of_get_property(cn, "clock-frequency", &len); if (!rate || len != 4) { pr_err("%pOF missing clock-frequency property\n", cn); continue; } capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; /* Save min capacity of the system */ if (capacity < min_capacity) min_capacity = capacity; /* Save max capacity of the system */ if (capacity > max_capacity) max_capacity = capacity; cpu_capacity(cpu) = capacity; } /* If min and max capacities are equals, we bypass the update of the * cpu_scale because all CPUs have the same capacity. Otherwise, we * compute a middle_capacity factor that will ensure that the capacity * of an 'average' CPU of the system will be as close as possible to * SCHED_CAPACITY_SCALE, which is the default value, but with the * constraint explained near table_efficiency[]. */ if (4*max_capacity < (3*(max_capacity + min_capacity))) middle_capacity = (min_capacity + max_capacity) >> (SCHED_CAPACITY_SHIFT+1); else middle_capacity = ((max_capacity / 3) >> (SCHED_CAPACITY_SHIFT-1)) + 1; if (cap_from_dt) topology_normalize_cpu_scale(); } /* * Look for a customed capacity of a CPU in the cpu_capacity table during the * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the * function returns directly for SMP system. 
*/ static void update_cpu_capacity(unsigned int cpu) { if (!cpu_capacity(cpu) || cap_from_dt) return; topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity); pr_info("CPU%u: update cpu_capacity %lu\n", cpu, topology_get_cpu_scale(cpu)); } #else static inline void parse_dt_topology(void) {} static inline void update_cpu_capacity(unsigned int cpuid) {} #endif /* * store_cpu_topology is called at boot when only one cpu is running * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, * which prevents simultaneous write access to cpu_topology array */ void store_cpu_topology(unsigned int cpuid) { struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; unsigned int mpidr; if (cpuid_topo->package_id != -1) goto topology_populated; mpidr = read_cpuid_mpidr(); /* create cpu topology mapping */ if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) { /* * This is a multiprocessor system * multiprocessor format & multiprocessor mode field are set */ if (mpidr & MPIDR_MT_BITMASK) { /* core performance interdependency */ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); } else { /* largely independent cores */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); } } else { /* * This is an uniprocessor system * we are in multiprocessor format but uniprocessor system * or in the old uniprocessor format */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = 0; cpuid_topo->package_id = -1; } update_cpu_capacity(cpuid); pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, cpu_topology[cpuid].core_id, cpu_topology[cpuid].package_id, mpidr); topology_populated: update_siblings_masks(cpuid); } /* * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array */ void __init init_cpu_topology(void) { reset_cpu_topology(); smp_wmb(); parse_dt_topology(); }
linux-master
arch/arm/kernel/topology.c
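To make the capacity arithmetic above concrete, here is a standalone, user-space illustration (not kernel code) using the table_efficiency[] values and made-up clock-frequency numbers: capacity is (clock-frequency >> 20) * efficiency, and middle_capacity is chosen so an "average" CPU scales to roughly SCHED_CAPACITY_SCALE (1024).

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
        /* efficiency values from table_efficiency[] */
        unsigned long eff_a15 = 3891, eff_a7 = 2048;
        /* hypothetical clock-frequency properties, in Hz */
        unsigned long hz_a15 = 2000000000UL, hz_a7 = 1000000000UL;

        unsigned long cap_a15 = (hz_a15 >> 20) * eff_a15;
        unsigned long cap_a7  = (hz_a7  >> 20) * eff_a7;
        unsigned long min = cap_a7, max = cap_a15;
        unsigned long middle;

        /* same branch structure as parse_dt_topology() */
        if (4 * max < 3 * (max + min))
                middle = (min + max) >> (SCHED_CAPACITY_SHIFT + 1);
        else
                middle = ((max / 3) >> (SCHED_CAPACITY_SHIFT - 1)) + 1;

        printf("A15 scale = %lu, A7 scale = %lu (SCHED_CAPACITY_SCALE = %lu)\n",
               cap_a15 / middle, cap_a7 / middle, SCHED_CAPACITY_SCALE);
        return 0;
}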
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/module.c * * Copyright (C) 2002 Russell King. * Modified for nommu by Hyok S. Choi * * Module allocation method suggested by Andi Kleen. */ #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/gfp.h> #include <asm/sections.h> #include <asm/smp_plat.h> #include <asm/unwind.h> #include <asm/opcodes.h> #ifdef CONFIG_XIP_KERNEL /* * The XIP kernel text is mapped in the module area for modules and * some other stuff to work without any indirect relocations. * MODULES_VADDR is redefined here and not in asm/memory.h to avoid * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off. */ #undef MODULES_VADDR #define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK) #endif #ifdef CONFIG_MMU void *module_alloc(unsigned long size) { gfp_t gfp_mask = GFP_KERNEL; void *p; /* Silence the initial allocation */ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) gfp_mask |= __GFP_NOWARN; p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p) return p; return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif bool module_init_section(const char *name) { return strstarts(name, ".init") || strstarts(name, ".ARM.extab.init") || strstarts(name, ".ARM.exidx.init"); } bool module_exit_section(const char *name) { return strstarts(name, ".exit") || strstarts(name, ".ARM.extab.exit") || strstarts(name, ".ARM.exidx.exit"); } #ifdef CONFIG_ARM_HAS_GROUP_RELOCS /* * This implements the partitioning algorithm for group relocations as * documented in the ARM AArch32 ELF psABI (IHI 0044). * * A single PC-relative symbol reference is divided in up to 3 add or subtract * operations, where the final one could be incorporated into a load/store * instruction with immediate offset. E.g., * * ADD Rd, PC, #... or ADD Rd, PC, #... * ADD Rd, Rd, #... ADD Rd, Rd, #... * LDR Rd, [Rd, #...] ADD Rd, Rd, #... * * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for * any in-kernel symbol reference (unless module PLTs are being used). * * The main advantage of this approach over the typical pattern using a literal * load is that literal loads may miss in the D-cache, and generally lead to * lower cache efficiency for variables that are referenced often from many * different places in the code. */ static u32 get_group_rem(u32 group, u32 *offset) { u32 val = *offset; u32 shift; do { shift = val ? 
(31 - __fls(val)) & ~1 : 32; *offset = val; if (!val) break; val &= 0xffffff >> shift; } while (group--); return shift; } #endif int apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relindex, struct module *module) { Elf32_Shdr *symsec = sechdrs + symindex; Elf32_Shdr *relsec = sechdrs + relindex; Elf32_Shdr *dstsec = sechdrs + relsec->sh_info; Elf32_Rel *rel = (void *)relsec->sh_addr; unsigned int i; for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { unsigned long loc; Elf32_Sym *sym; const char *symname; #ifdef CONFIG_ARM_HAS_GROUP_RELOCS u32 shift, group = 1; #endif s32 offset; u32 tmp; #ifdef CONFIG_THUMB2_KERNEL u32 upper, lower, sign, j1, j2; #endif offset = ELF32_R_SYM(rel->r_info); if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { pr_err("%s: section %u reloc %u: bad relocation sym offset\n", module->name, relindex, i); return -ENOEXEC; } sym = ((Elf32_Sym *)symsec->sh_addr) + offset; symname = strtab + sym->st_name; if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) { pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n", module->name, relindex, i, symname, rel->r_offset, dstsec->sh_size); return -ENOEXEC; } loc = dstsec->sh_addr + rel->r_offset; switch (ELF32_R_TYPE(rel->r_info)) { case R_ARM_NONE: /* ignore */ break; case R_ARM_ABS32: case R_ARM_TARGET1: *(u32 *)loc += sym->st_value; break; case R_ARM_PC24: case R_ARM_CALL: case R_ARM_JUMP24: if (sym->st_value & 3) { pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n", module->name, relindex, i, symname); return -ENOEXEC; } offset = __mem_to_opcode_arm(*(u32 *)loc); offset = (offset & 0x00ffffff) << 2; offset = sign_extend32(offset, 25); offset += sym->st_value - loc; /* * Route through a PLT entry if 'offset' exceeds the * supported range. Note that 'offset + loc + 8' * contains the absolute jump target, i.e., * @sym + addend, corrected for the +8 PC bias. */ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000)) offset = get_module_plt(module, loc, offset + loc + 8) - loc - 8; if (offset <= (s32)0xfe000000 || offset >= (s32)0x02000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", module->name, relindex, i, symname, ELF32_R_TYPE(rel->r_info), loc, sym->st_value); return -ENOEXEC; } offset >>= 2; offset &= 0x00ffffff; *(u32 *)loc &= __opcode_to_mem_arm(0xff000000); *(u32 *)loc |= __opcode_to_mem_arm(offset); break; case R_ARM_V4BX: /* Preserve Rm and the condition code. Alter * other bits to re-code instruction as * MOV PC,Rm. 
*/ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f); *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000); break; case R_ARM_PREL31: offset = (*(s32 *)loc << 1) >> 1; /* sign extend */ offset += sym->st_value - loc; if (offset >= 0x40000000 || offset < -0x40000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", module->name, relindex, i, symname, ELF32_R_TYPE(rel->r_info), loc, sym->st_value); return -ENOEXEC; } *(u32 *)loc &= 0x80000000; *(u32 *)loc |= offset & 0x7fffffff; break; case R_ARM_REL32: *(u32 *)loc += sym->st_value - loc; break; case R_ARM_MOVW_ABS_NC: case R_ARM_MOVT_ABS: case R_ARM_MOVW_PREL_NC: case R_ARM_MOVT_PREL: offset = tmp = __mem_to_opcode_arm(*(u32 *)loc); offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff); offset = sign_extend32(offset, 15); offset += sym->st_value; if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL || ELF32_R_TYPE(rel->r_info) == R_ARM_MOVW_PREL_NC) offset -= loc; if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS || ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL) offset >>= 16; tmp &= 0xfff0f000; tmp |= ((offset & 0xf000) << 4) | (offset & 0x0fff); *(u32 *)loc = __opcode_to_mem_arm(tmp); break; #ifdef CONFIG_ARM_HAS_GROUP_RELOCS case R_ARM_ALU_PC_G0_NC: group = 0; fallthrough; case R_ARM_ALU_PC_G1_NC: tmp = __mem_to_opcode_arm(*(u32 *)loc); offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7); if (tmp & BIT(22)) offset = -offset; offset += sym->st_value - loc; if (offset < 0) { offset = -offset; tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode } else { tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode } shift = get_group_rem(group, &offset); if (shift < 24) { offset >>= 24 - shift; offset |= (shift + 8) << 7; } *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset); break; case R_ARM_LDR_PC_G2: tmp = __mem_to_opcode_arm(*(u32 *)loc); offset = tmp & 0xfff; if (~tmp & BIT(23)) // U bit cleared? offset = -offset; offset += sym->st_value - loc; if (offset < 0) { offset = -offset; tmp &= ~BIT(23); // clear U bit } else { tmp |= BIT(23); // set U bit } get_group_rem(2, &offset); if (offset > 0xfff) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", module->name, relindex, i, symname, ELF32_R_TYPE(rel->r_info), loc, sym->st_value); return -ENOEXEC; } *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset); break; #endif #ifdef CONFIG_THUMB2_KERNEL case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: /* * For function symbols, only Thumb addresses are * allowed (no interworking). * * For non-function symbols, the destination * has no specific ARM/Thumb disposition, so * the branch is resolved under the assumption * that interworking is not required. 
*/ if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC && !(sym->st_value & 1)) { pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n", module->name, relindex, i, symname); return -ENOEXEC; } upper = __mem_to_opcode_thumb16(*(u16 *)loc); lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); /* * 25 bit signed address range (Thumb-2 BL and B.W * instructions): * S:I1:I2:imm10:imm11:0 * where: * S = upper[10] = offset[24] * I1 = ~(J1 ^ S) = offset[23] * I2 = ~(J2 ^ S) = offset[22] * imm10 = upper[9:0] = offset[21:12] * imm11 = lower[10:0] = offset[11:1] * J1 = lower[13] * J2 = lower[11] */ sign = (upper >> 10) & 1; j1 = (lower >> 13) & 1; j2 = (lower >> 11) & 1; offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) | ((~(j2 ^ sign) & 1) << 22) | ((upper & 0x03ff) << 12) | ((lower & 0x07ff) << 1); offset = sign_extend32(offset, 24); offset += sym->st_value - loc; /* * Route through a PLT entry if 'offset' exceeds the * supported range. */ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && (offset <= (s32)0xff000000 || offset >= (s32)0x01000000)) offset = get_module_plt(module, loc, offset + loc + 4) - loc - 4; if (offset <= (s32)0xff000000 || offset >= (s32)0x01000000) { pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", module->name, relindex, i, symname, ELF32_R_TYPE(rel->r_info), loc, sym->st_value); return -ENOEXEC; } sign = (offset >> 24) & 1; j1 = sign ^ (~(offset >> 23) & 1); j2 = sign ^ (~(offset >> 22) & 1); upper = (u16)((upper & 0xf800) | (sign << 10) | ((offset >> 12) & 0x03ff)); lower = (u16)((lower & 0xd000) | (j1 << 13) | (j2 << 11) | ((offset >> 1) & 0x07ff)); *(u16 *)loc = __opcode_to_mem_thumb16(upper); *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower); break; case R_ARM_THM_MOVW_ABS_NC: case R_ARM_THM_MOVT_ABS: case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: upper = __mem_to_opcode_thumb16(*(u16 *)loc); lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2)); /* * MOVT/MOVW instructions encoding in Thumb-2: * * i = upper[10] * imm4 = upper[3:0] * imm3 = lower[14:12] * imm8 = lower[7:0] * * imm16 = imm4:i:imm3:imm8 */ offset = ((upper & 0x000f) << 12) | ((upper & 0x0400) << 1) | ((lower & 0x7000) >> 4) | (lower & 0x00ff); offset = sign_extend32(offset, 15); offset += sym->st_value; if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVW_PREL_NC) offset -= loc; if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL) offset >>= 16; upper = (u16)((upper & 0xfbf0) | ((offset & 0xf000) >> 12) | ((offset & 0x0800) >> 1)); lower = (u16)((lower & 0x8f00) | ((offset & 0x0700) << 4) | (offset & 0x00ff)); *(u16 *)loc = __opcode_to_mem_thumb16(upper); *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower); break; #endif default: pr_err("%s: unknown relocation: %u\n", module->name, ELF32_R_TYPE(rel->r_info)); return -ENOEXEC; } } return 0; } struct mod_unwind_map { const Elf_Shdr *unw_sec; const Elf_Shdr *txt_sec; }; static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) { const Elf_Shdr *s, *se; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) if (strcmp(name, secstrs + s->sh_name) == 0) return s; return NULL; } extern void fixup_pv_table(const void *, unsigned long); extern void fixup_smp(const void *, unsigned long); int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { const 
Elf_Shdr *s = NULL; #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; struct list_head *unwind_list = &mod->arch.unwind_list; INIT_LIST_HEAD(unwind_list); mod->arch.init_table = NULL; for (s = sechdrs; s < sechdrs_end; s++) { const char *secname = secstrs + s->sh_name; const char *txtname; const Elf_Shdr *txt_sec; if (!(s->sh_flags & SHF_ALLOC) || s->sh_type != ELF_SECTION_UNWIND) continue; if (!strcmp(".ARM.exidx", secname)) txtname = ".text"; else txtname = secname + strlen(".ARM.exidx"); txt_sec = find_mod_section(hdr, sechdrs, txtname); if (txt_sec) { struct unwind_table *table = unwind_table_add(s->sh_addr, s->sh_size, txt_sec->sh_addr, txt_sec->sh_size); list_add(&table->mod_list, unwind_list); /* save init table for module_arch_freeing_init */ if (strcmp(".ARM.exidx.init.text", secname) == 0) mod->arch.init_table = table; } } #endif #ifdef CONFIG_ARM_PATCH_PHYS_VIRT s = find_mod_section(hdr, sechdrs, ".pv_table"); if (s) fixup_pv_table((void *)s->sh_addr, s->sh_size); #endif s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); if (s && !is_smp()) #ifdef CONFIG_SMP_ON_UP fixup_smp((void *)s->sh_addr, s->sh_size); #else return -EINVAL; #endif return 0; } void module_arch_cleanup(struct module *mod) { #ifdef CONFIG_ARM_UNWIND struct unwind_table *tmp; struct unwind_table *n; list_for_each_entry_safe(tmp, n, &mod->arch.unwind_list, mod_list) { list_del(&tmp->mod_list); unwind_table_del(tmp); } mod->arch.init_table = NULL; #endif } void __weak module_arch_freeing_init(struct module *mod) { #ifdef CONFIG_ARM_UNWIND struct unwind_table *init = mod->arch.init_table; if (init) { mod->arch.init_table = NULL; list_del(&init->mod_list); unwind_table_del(init); } #endif }
linux-master
arch/arm/kernel/module.c
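As a worked example of the R_ARM_CALL/R_ARM_JUMP24 handling in apply_relocate() above, this standalone user-space sketch (the addresses and input instruction are made up) extracts the signed 24-bit immediate, adds the distance to the symbol, range-checks against the +/-32 MiB branch reach, and re-encodes the instruction word:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
        int shift = 31 - index;

        return (int32_t)(value << shift) >> shift;
}

/* Returns the patched instruction, or 0 if the branch would be out of
 * range (where the real loader would go through a PLT entry instead). */
static uint32_t apply_arm_call_reloc(uint32_t insn, uint32_t loc, uint32_t sym)
{
        int32_t offset = sign_extend32((insn & 0x00ffffff) << 2, 25);

        offset += (int32_t)(sym - loc);         /* new PC-relative distance */

        if (offset <= (int32_t)0xfe000000 || offset >= (int32_t)0x02000000)
                return 0;

        return (insn & 0xff000000) | (((uint32_t)offset >> 2) & 0x00ffffff);
}

int main(void)
{
        /* a "bl" with the usual -8 addend, located at 0x8000, targeting 0x9000 */
        uint32_t patched = apply_arm_call_reloc(0xebfffffe, 0x8000, 0x9000);

        printf("patched BL: 0x%08x\n", (unsigned)patched);  /* 0xeb0003fe */
        return 0;
}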
// SPDX-License-Identifier: GPL-2.0-only /* * Adapted from arm64 version. * * Copyright (C) 2012 ARM Limited * Copyright (C) 2015 Mentor Graphics Corporation. */ #include <linux/cache.h> #include <linux/elf.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/printk.h> #include <linux/slab.h> #include <linux/timekeeper_internal.h> #include <linux/vmalloc.h> #include <asm/arch_timer.h> #include <asm/barrier.h> #include <asm/cacheflush.h> #include <asm/page.h> #include <asm/vdso.h> #include <asm/vdso_datapage.h> #include <clocksource/arm_arch_timer.h> #include <vdso/helpers.h> #include <vdso/vsyscall.h> #define MAX_SYMNAME 64 static struct page **vdso_text_pagelist; extern char vdso_start[], vdso_end[]; /* Total number of pages needed for the data and text portions of the VDSO. */ unsigned int vdso_total_pages __ro_after_init; /* * The VDSO data page. */ static union vdso_data_store vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = vdso_data_store.data; static struct page *vdso_data_page __ro_after_init; static const struct vm_special_mapping vdso_data_mapping = { .name = "[vvar]", .pages = &vdso_data_page, }; static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { current->mm->context.vdso = new_vma->vm_start; return 0; } static struct vm_special_mapping vdso_text_mapping __ro_after_init = { .name = "[vdso]", .mremap = vdso_mremap, }; struct elfinfo { Elf32_Ehdr *hdr; /* ptr to ELF */ Elf32_Sym *dynsym; /* ptr to .dynsym section */ unsigned long dynsymsize; /* size of .dynsym section */ char *dynstr; /* ptr to .dynstr section */ }; /* Cached result of boot-time check for whether the arch timer exists, * and if so, whether the virtual counter is useable. */ bool cntvct_ok __ro_after_init; static bool __init cntvct_functional(void) { struct device_node *np; bool ret = false; if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) goto out; /* The arm_arch_timer core should export * arch_timer_use_virtual or similar so we don't have to do * this. 
*/ np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); if (!np) np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer"); if (!np) goto out_put; if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) goto out_put; ret = true; out_put: of_node_put(np); out: return ret; } static void * __init find_section(Elf32_Ehdr *ehdr, const char *name, unsigned long *size) { Elf32_Shdr *sechdrs; unsigned int i; char *secnames; /* Grab section headers and strings so we can tell who is who */ sechdrs = (void *)ehdr + ehdr->e_shoff; secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset; /* Find the section they want */ for (i = 1; i < ehdr->e_shnum; i++) { if (strcmp(secnames + sechdrs[i].sh_name, name) == 0) { if (size) *size = sechdrs[i].sh_size; return (void *)ehdr + sechdrs[i].sh_offset; } } if (size) *size = 0; return NULL; } static Elf32_Sym * __init find_symbol(struct elfinfo *lib, const char *symname) { unsigned int i; for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) { char name[MAX_SYMNAME], *c; if (lib->dynsym[i].st_name == 0) continue; strscpy(name, lib->dynstr + lib->dynsym[i].st_name, MAX_SYMNAME); c = strchr(name, '@'); if (c) *c = 0; if (strcmp(symname, name) == 0) return &lib->dynsym[i]; } return NULL; } static void __init vdso_nullpatch_one(struct elfinfo *lib, const char *symname) { Elf32_Sym *sym; sym = find_symbol(lib, symname); if (!sym) return; sym->st_name = 0; } static void __init patch_vdso(void *ehdr) { struct elfinfo einfo; einfo = (struct elfinfo) { .hdr = ehdr, }; einfo.dynsym = find_section(einfo.hdr, ".dynsym", &einfo.dynsymsize); einfo.dynstr = find_section(einfo.hdr, ".dynstr", NULL); /* If the virtual counter is absent or non-functional we don't * want programs to incur the slight additional overhead of * dispatching through the VDSO only to fall back to syscalls. */ if (!cntvct_ok) { vdso_nullpatch_one(&einfo, "__vdso_gettimeofday"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime"); vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64"); } } static int __init vdso_init(void) { unsigned int text_pages; int i; if (memcmp(vdso_start, "\177ELF", 4)) { pr_err("VDSO is not a valid ELF object!\n"); return -ENOEXEC; } text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; /* Allocate the VDSO text pagelist */ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *), GFP_KERNEL); if (vdso_text_pagelist == NULL) return -ENOMEM; /* Grab the VDSO data page. */ vdso_data_page = virt_to_page(vdso_data); /* Grab the VDSO text pages. */ for (i = 0; i < text_pages; i++) { struct page *page; page = virt_to_page(vdso_start + i * PAGE_SIZE); vdso_text_pagelist[i] = page; } vdso_text_mapping.pages = vdso_text_pagelist; vdso_total_pages = 1; /* for the data/vvar page */ vdso_total_pages += text_pages; cntvct_ok = cntvct_functional(); patch_vdso(vdso_start); return 0; } arch_initcall(vdso_init); static int install_vvar(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; vma = _install_special_mapping(mm, addr, PAGE_SIZE, VM_READ | VM_MAYREAD, &vdso_data_mapping); return PTR_ERR_OR_ZERO(vma); } /* assumes mmap_lock is write-locked */ void arm_install_vdso(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long len; mm->context.vdso = 0; if (vdso_text_pagelist == NULL) return; if (install_vvar(mm, addr)) return; /* Account for vvar page. 
*/ addr += PAGE_SIZE; len = (vdso_total_pages - 1) << PAGE_SHIFT; vma = _install_special_mapping(mm, addr, len, VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, &vdso_text_mapping); if (!IS_ERR(vma)) mm->context.vdso = addr; }
linux-master
arch/arm/kernel/vdso.c
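The "nullpatch" step in patch_vdso() above simply makes a symbol unresolvable by clearing its st_name in .dynsym. A standalone sketch of that mechanism (the in-memory two-entry symbol table below is fabricated for illustration; the real code locates .dynsym/.dynstr in the vdso image via find_section()):

#include <elf.h>
#include <stdio.h>
#include <string.h>

static Elf32_Sym *find_symbol(Elf32_Sym *dynsym, size_t nsyms,
                              const char *dynstr, const char *name)
{
        for (size_t i = 0; i < nsyms; i++) {
                if (!dynsym[i].st_name)
                        continue;
                if (!strcmp(dynstr + dynsym[i].st_name, name))
                        return &dynsym[i];
        }
        return NULL;
}

int main(void)
{
        /* string table: index 1 = clock_gettime, index 22 = gettimeofday */
        static const char dynstr[] =
                "\0__vdso_clock_gettime\0__vdso_gettimeofday";
        Elf32_Sym dynsym[2] = {
                { .st_name = 1 },
                { .st_name = 22 },
        };
        Elf32_Sym *sym = find_symbol(dynsym, 2, dynstr, "__vdso_gettimeofday");

        if (sym)
                sym->st_name = 0;       /* symbol is now effectively gone */

        printf("gettimeofday still visible: %s\n",
               find_symbol(dynsym, 2, dynstr, "__vdso_gettimeofday") ?
               "yes" : "no");
        return 0;
}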
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2009 ST-Ericsson AB * TCM memory handling for ARM systems * * Author: Linus Walleij <[email protected]> * Author: Rickard Andersson <[email protected]> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/genalloc.h> #include <linux/string.h> /* memcpy */ #include <asm/cputype.h> #include <asm/mach/map.h> #include <asm/page.h> #include <asm/system_info.h> #include <asm/traps.h> #include <asm/tcm.h> #define TCMTR_FORMAT_MASK 0xe0000000U static struct gen_pool *tcm_pool; static bool dtcm_present; static bool itcm_present; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; extern char __dtcm_start, __sdtcm_data, __edtcm_data; /* These will be increased as we run */ static u32 dtcm_end = DTCM_OFFSET; static u32 itcm_end = ITCM_OFFSET; /* * TCM memory resources */ static struct resource dtcm_res = { .name = "DTCM RAM", .start = DTCM_OFFSET, .end = DTCM_OFFSET, .flags = IORESOURCE_MEM }; static struct resource itcm_res = { .name = "ITCM RAM", .start = ITCM_OFFSET, .end = ITCM_OFFSET, .flags = IORESOURCE_MEM }; static struct map_desc dtcm_iomap[] __initdata = { { .virtual = DTCM_OFFSET, .pfn = __phys_to_pfn(DTCM_OFFSET), .length = 0, .type = MT_MEMORY_RW_DTCM } }; static struct map_desc itcm_iomap[] __initdata = { { .virtual = ITCM_OFFSET, .pfn = __phys_to_pfn(ITCM_OFFSET), .length = 0, .type = MT_MEMORY_RWX_ITCM, } }; /* * Allocate a chunk of TCM memory */ void *tcm_alloc(size_t len) { unsigned long vaddr; if (!tcm_pool) return NULL; vaddr = gen_pool_alloc(tcm_pool, len); if (!vaddr) return NULL; return (void *) vaddr; } EXPORT_SYMBOL(tcm_alloc); /* * Free a chunk of TCM memory */ void tcm_free(void *addr, size_t len) { gen_pool_free(tcm_pool, (unsigned long) addr, len); } EXPORT_SYMBOL(tcm_free); bool tcm_dtcm_present(void) { return dtcm_present; } EXPORT_SYMBOL(tcm_dtcm_present); bool tcm_itcm_present(void) { return itcm_present; } EXPORT_SYMBOL(tcm_itcm_present); static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, u32 *offset) { const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, -1, -1, -1, -1 }; u32 tcm_region; int tcm_size; /* * If there are more than one TCM bank of this type, * select the TCM bank to operate on in the TCM selection * register. */ if (banks > 1) asm("mcr p15, 0, %0, c9, c2, 0" : /* No output operands */ : "r" (bank)); /* Read the special TCM region register c9, 0 */ if (!type) asm("mrc p15, 0, %0, c9, c1, 0" : "=r" (tcm_region)); else asm("mrc p15, 0, %0, c9, c1, 1" : "=r" (tcm_region)); tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; if (tcm_size < 0) { pr_err("CPU: %sTCM%d of unknown size\n", type ? "I" : "D", bank); return -EINVAL; } else if (tcm_size > 32) { pr_err("CPU: %sTCM%d larger than 32k found\n", type ? "I" : "D", bank); return -EINVAL; } else { pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", type ? "I" : "D", bank, tcm_size, (tcm_region & 0xfffff000U), (tcm_region & 1) ? 
"" : "not "); } /* Not much fun you can do with a size 0 bank */ if (tcm_size == 0) return 0; /* Force move the TCM bank to where we want it, enable */ tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; if (!type) asm("mcr p15, 0, %0, c9, c1, 0" : /* No output operands */ : "r" (tcm_region)); else asm("mcr p15, 0, %0, c9, c1, 1" : /* No output operands */ : "r" (tcm_region)); /* Increase offset */ *offset += (tcm_size << 10); pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", type ? "I" : "D", bank, tcm_size, (tcm_region & 0xfffff000U)); return 0; } /* * When we are running in the non-secure world and the secure world * has not explicitly given us access to the TCM we will get an * undefined error when reading the TCM region register in the * setup_tcm_bank function (above). * * There are two variants of this register read that we need to trap, * the read for the data TCM and the read for the instruction TCM: * c0370628: ee196f11 mrc 15, 0, r6, cr9, cr1, {0} * c0370674: ee196f31 mrc 15, 0, r6, cr9, cr1, {1} * * Our undef hook mask explicitly matches all fields of the encoded * instruction other than the destination register. The mask also * only allows operand 2 to have the values 0 or 1. * * The undefined hook is defined as __init and __initdata, and therefore * must be removed before tcm_init returns. * * In this particular case (MRC with ARM condition code ALways) the * Thumb-2 and ARM instruction encoding are identical, so this hook * will work on a Thumb-2 kernel. * * See A8.8.107, DDI0406C_C ARM Architecture Reference Manual, Encoding * T1/A1 for the bit-by-bit details. * * mrc p15, 0, XX, c9, c1, 0 * mrc p15, 0, XX, c9, c1, 1 * | | | | | | | +---- opc2 0|1 = 000|001 * | | | | | | +------- CRm 0 = 0001 * | | | | | +----------- CRn 0 = 1001 * | | | | +--------------- Rt ? = ???? * | | | +------------------- opc1 0 = 000 * | | +----------------------- coproc 15 = 1111 * | +-------------------------- condition ALways = 1110 * +----------------------------- instruction MRC = 1110 * * Encoding this as per A8.8.107 of DDI0406C, Encoding T1/A1, yields: * 1111 1111 1111 1111 0000 1111 1101 1111 Required Mask * 1110 1110 0001 1001 ???? 1111 0001 0001 mrc p15, 0, XX, c9, c1, 0 * 1110 1110 0001 1001 ???? 1111 0011 0001 mrc p15, 0, XX, c9, c1, 1 * [ ] [ ] [ ]| [ ] [ ] [ ] [ ]| +--- CRm * | | | | | | | | +----- SBO * | | | | | | | +------- opc2 * | | | | | | +----------- coproc * | | | | | +---------------- Rt * | | | | +--------------------- CRn * | | | +------------------------- SBO * | | +--------------------------- opc1 * | +------------------------------- instruction * +------------------------------------ condition */ #define TCM_REGION_READ_MASK 0xffff0fdf #define TCM_REGION_READ_INSTR 0xee190f11 #define DEST_REG_SHIFT 12 #define DEST_REG_MASK 0xf static int __init tcm_handler(struct pt_regs *regs, unsigned int instr) { regs->uregs[(instr >> DEST_REG_SHIFT) & DEST_REG_MASK] = 0; regs->ARM_pc += 4; return 0; } static struct undef_hook tcm_hook __initdata = { .instr_mask = TCM_REGION_READ_MASK, .instr_val = TCM_REGION_READ_INSTR, .cpsr_mask = MODE_MASK, .cpsr_val = SVC_MODE, .fn = tcm_handler }; /* * This initializes the TCM memory */ void __init tcm_init(void) { u32 tcm_status; u8 dtcm_banks; u8 itcm_banks; size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; char *start; char *end; char *ram; int ret; int i; /* * Prior to ARMv5 there is no TCM, and trying to read the status * register will hang the processor. 
*/ if (cpu_architecture() < CPU_ARCH_ARMv5) { if (dtcm_code_sz || itcm_code_sz) pr_info("CPU TCM: %u bytes of DTCM and %u bytes of " "ITCM code compiled in, but no TCM present " "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz); return; } tcm_status = read_cpuid_tcmstatus(); /* * This code only supports v6-compatible TCMTR implementations. */ if (tcm_status & TCMTR_FORMAT_MASK) return; dtcm_banks = (tcm_status >> 16) & 0x03; itcm_banks = (tcm_status & 0x03); register_undef_hook(&tcm_hook); /* Values greater than 2 for D/ITCM banks are "reserved" */ if (dtcm_banks > 2) dtcm_banks = 0; if (itcm_banks > 2) itcm_banks = 0; /* Setup DTCM if present */ if (dtcm_banks > 0) { for (i = 0; i < dtcm_banks; i++) { ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end); if (ret) goto unregister; } /* This means you compiled more code than fits into DTCM */ if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { pr_info("CPU DTCM: %u bytes of code compiled to " "DTCM but only %lu bytes of DTCM present\n", dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); goto no_dtcm; } /* * This means that the DTCM sizes were 0 or the DTCM banks * were inaccessible due to TrustZone configuration. */ if (!(dtcm_end - DTCM_OFFSET)) goto no_dtcm; dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; iotable_init(dtcm_iomap, 1); /* Copy data from RAM to DTCM */ start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; memcpy(start, ram, dtcm_code_sz); pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); dtcm_present = true; } else if (dtcm_code_sz) { pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " "DTCM banks present in CPU\n", dtcm_code_sz); } no_dtcm: /* Setup ITCM if present */ if (itcm_banks > 0) { for (i = 0; i < itcm_banks; i++) { ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end); if (ret) goto unregister; } /* This means you compiled more code than fits into ITCM */ if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { pr_info("CPU ITCM: %u bytes of code compiled to " "ITCM but only %lu bytes of ITCM present\n", itcm_code_sz, (itcm_end - ITCM_OFFSET)); goto unregister; } /* * This means that the ITCM sizes were 0 or the ITCM banks * were inaccessible due to TrustZone configuration. */ if (!(itcm_end - ITCM_OFFSET)) goto unregister; itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); itcm_iomap[0].length = itcm_end - ITCM_OFFSET; iotable_init(itcm_iomap, 1); /* Copy code from RAM to ITCM */ start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; memcpy(start, ram, itcm_code_sz); pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); itcm_present = true; } else if (itcm_code_sz) { pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " "ITCM banks present in CPU\n", itcm_code_sz); } unregister: unregister_undef_hook(&tcm_hook); } /* * This creates the TCM memory pool and has to be done later, * during the core_initicalls, since the allocator is not yet * up and running when the first initialization runs. */ static int __init setup_tcm_pool(void) { u32 dtcm_pool_start = (u32) &__edtcm_data; u32 itcm_pool_start = (u32) &__eitcm_text; int ret; /* * Set up malloc pool, 2^2 = 4 bytes granularity since * the TCM is sometimes just 4 KiB. NB: pages and cache * line alignments does not matter in TCM! 
*/ tcm_pool = gen_pool_create(2, -1); pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ if (dtcm_present) { if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, dtcm_end - dtcm_pool_start, -1); if (ret) { pr_err("CPU DTCM: could not add DTCM " \ "remainder to pool!\n"); return ret; } pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", dtcm_end - dtcm_pool_start, dtcm_pool_start); } } /* Add the rest of ITCM to the TCM pool */ if (itcm_present) { if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, itcm_end - itcm_pool_start, -1); if (ret) { pr_err("CPU ITCM: could not add ITCM " \ "remainder to pool!\n"); return ret; } pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", itcm_end - itcm_pool_start, itcm_pool_start); } } return 0; } core_initcall(setup_tcm_pool);
linux-master
arch/arm/kernel/tcm.c
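The undef-hook mask/value pair documented in the long comment above can be sanity-checked mechanically: it should match "mrc p15, 0, Rt, c9, c1, {0|1}" for every destination register Rt and nothing else in those fields. A standalone check of that claim (the encoding is built by hand from the A1 pattern quoted in the comment):

#include <stdint.h>
#include <stdio.h>

#define TCM_REGION_READ_MASK  0xffff0fdfu
#define TCM_REGION_READ_INSTR 0xee190f11u

/* Build the A1 encoding of "mrc p15, 0, r<rt>, c9, c1, <opc2>":
 * Rt lives in bits 15:12 and opc2 in bits 7:5. */
static uint32_t mrc_p15_c9_c1(unsigned int rt, unsigned int opc2)
{
        return 0xee190f11u | (rt << 12) | (opc2 << 5);
}

int main(void)
{
        for (unsigned int rt = 0; rt < 16; rt++) {
                for (unsigned int opc2 = 0; opc2 < 2; opc2++) {
                        uint32_t insn = mrc_p15_c9_c1(rt, opc2);

                        if ((insn & TCM_REGION_READ_MASK) !=
                            TCM_REGION_READ_INSTR) {
                                printf("miss: %08x\n", (unsigned)insn);
                                return 1;
                        }
                }
        }
        printf("mask matches every DTCM/ITCM region-register read\n");
        return 0;
}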
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/smp_tlb.c * * Copyright (C) 2002 ARM Limited, All Rights Reserved. */ #include <linux/preempt.h> #include <linux/smp.h> #include <linux/uaccess.h> #include <asm/smp_plat.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> /**********************************************************************/ /* * TLB operations */ struct tlb_args { struct vm_area_struct *ta_vma; unsigned long ta_start; unsigned long ta_end; }; static inline void ipi_flush_tlb_all(void *ignored) { local_flush_tlb_all(); } static inline void ipi_flush_tlb_mm(void *arg) { struct mm_struct *mm = (struct mm_struct *)arg; local_flush_tlb_mm(mm); } static inline void ipi_flush_tlb_page(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; unsigned int __ua_flags = uaccess_save_and_enable(); local_flush_tlb_page(ta->ta_vma, ta->ta_start); uaccess_restore(__ua_flags); } static inline void ipi_flush_tlb_kernel_page(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; local_flush_tlb_kernel_page(ta->ta_start); } static inline void ipi_flush_tlb_range(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; unsigned int __ua_flags = uaccess_save_and_enable(); local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end); uaccess_restore(__ua_flags); } static inline void ipi_flush_tlb_kernel_range(void *arg) { struct tlb_args *ta = (struct tlb_args *)arg; local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end); } static inline void ipi_flush_bp_all(void *ignored) { local_flush_bp_all(); } #ifdef CONFIG_ARM_ERRATA_798181 bool (*erratum_a15_798181_handler)(void); static bool erratum_a15_798181_partial(void) { asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); dsb(ish); return false; } static bool erratum_a15_798181_broadcast(void) { asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); dsb(ish); return true; } void erratum_a15_798181_init(void) { unsigned int midr = read_cpuid_id(); unsigned int revidr = read_cpuid(CPUID_REVIDR); /* Brahma-B15 r0p0..r0p2 affected * Cortex-A15 r0p0..r3p3 w/o ECO fix affected * Fixes applied to A15 with respect to the revision and revidr are: * * r0p0-r2p1: No fixes applied * r2p2,r2p3: * REVIDR[4]: 798181 Moving a virtual page that is being accessed * by an active process can lead to unexpected behavior * REVIDR[9]: Not defined * r2p4,r3p0,r3p1,r3p2: * REVIDR[4]: 798181 Moving a virtual page that is being accessed * by an active process can lead to unexpected behavior * REVIDR[9]: 798181 Moving a virtual page that is being accessed * by an active process can lead to unexpected behavior * - This is an update to a previously released ECO. * r3p3: * REVIDR[4]: Reserved * REVIDR[9]: 798181 Moving a virtual page that is being accessed * by an active process can lead to unexpected behavior * - This is an update to a previously released ECO. 
* * Handling: * REVIDR[9] set -> No WA * REVIDR[4] set, REVIDR[9] cleared -> Partial WA * Both cleared -> Full WA */ if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) { erratum_a15_798181_handler = erratum_a15_798181_broadcast; } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) { erratum_a15_798181_handler = erratum_a15_798181_broadcast; } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) { if (revidr & 0x10) erratum_a15_798181_handler = erratum_a15_798181_partial; else erratum_a15_798181_handler = erratum_a15_798181_broadcast; } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) { if ((revidr & 0x210) == 0) erratum_a15_798181_handler = erratum_a15_798181_broadcast; else if (revidr & 0x10) erratum_a15_798181_handler = erratum_a15_798181_partial; } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) { if ((revidr & 0x200) == 0) erratum_a15_798181_handler = erratum_a15_798181_partial; } } #endif static void ipi_flush_tlb_a15_erratum(void *arg) { dmb(); } static void broadcast_tlb_a15_erratum(void) { if (!erratum_a15_798181()) return; smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1); } static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) { int this_cpu; cpumask_t mask = { CPU_BITS_NONE }; if (!erratum_a15_798181()) return; this_cpu = get_cpu(); a15_erratum_get_cpumask(this_cpu, mm, &mask); smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); put_cpu(); } void flush_tlb_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_tlb_all, NULL, 1); else __flush_tlb_all(); broadcast_tlb_a15_erratum(); } void flush_tlb_mm(struct mm_struct *mm) { if (tlb_ops_need_broadcast()) on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); else __flush_tlb_mm(mm); broadcast_tlb_mm_a15_erratum(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) { if (tlb_ops_need_broadcast()) { struct tlb_args ta; ta.ta_vma = vma; ta.ta_start = uaddr; on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); } else __flush_tlb_page(vma, uaddr); broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_page(unsigned long kaddr) { if (tlb_ops_need_broadcast()) { struct tlb_args ta; ta.ta_start = kaddr; on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); } else __flush_tlb_kernel_page(kaddr); broadcast_tlb_a15_erratum(); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (tlb_ops_need_broadcast()) { struct tlb_args ta; ta.ta_vma = vma; ta.ta_start = start; ta.ta_end = end; on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); } else local_flush_tlb_range(vma, start, end); broadcast_tlb_mm_a15_erratum(vma->vm_mm); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { if (tlb_ops_need_broadcast()) { struct tlb_args ta; ta.ta_start = start; ta.ta_end = end; on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); } else local_flush_tlb_kernel_range(start, end); broadcast_tlb_a15_erratum(); } void flush_bp_all(void) { if (tlb_ops_need_broadcast()) on_each_cpu(ipi_flush_bp_all, NULL, 1); else __flush_bp_all(); }
linux-master
arch/arm/kernel/smp_tlb.c
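The erratum 798181 classification above is driven purely by MIDR revision windows and REVIDR bits 4 and 9. A standalone restatement of just the Cortex-A15 branch (the Brahma-B15 case is omitted; the MIDR/REVIDR values in main() are fabricated examples, not read from hardware):

#include <stdint.h>
#include <stdio.h>

enum wa { WA_NONE, WA_PARTIAL, WA_BROADCAST };

static enum wa classify_a15_798181(uint32_t midr, uint32_t revidr)
{
        if ((midr & 0xff0ffff0) != 0x410fc0f0)
                return WA_NONE;                 /* not a Cortex-A15 */
        if (midr < 0x412fc0f2)
                return WA_BROADCAST;            /* r0p0..r2p1: no fixes */
        if (midr < 0x412fc0f4)                  /* r2p2, r2p3 */
                return (revidr & 0x10) ? WA_PARTIAL : WA_BROADCAST;
        if (midr < 0x413fc0f3) {                /* r2p4, r3p0..r3p2 */
                if ((revidr & 0x210) == 0)
                        return WA_BROADCAST;
                if (revidr & 0x10)
                        return WA_PARTIAL;
                return WA_NONE;                 /* REVIDR[9] set */
        }
        if (midr < 0x414fc0f0)                  /* r3p3 */
                return (revidr & 0x200) ? WA_NONE : WA_PARTIAL;
        return WA_NONE;
}

int main(void)
{
        static const char *names[] = { "none", "partial", "broadcast" };
        /* example: an r3p2 part with REVIDR[4] set and REVIDR[9] clear */
        enum wa wa = classify_a15_798181(0x413fc0f2, 0x010);

        printf("798181 workaround: %s\n", names[wa]);
        return 0;
}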
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/crash_dump.c * * Copyright (C) 2010 Nokia Corporation. * Author: Mika Westerberg * * This code is taken from arch/x86/kernel/crash_dump_64.c * Created by: Hariprasad Nellitheertha ([email protected]) * Copyright (C) IBM Corporation, 2004. All rights reserved */ #include <linux/errno.h> #include <linux/crash_dump.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/uio.h> ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, unsigned long offset) { void *vaddr; if (!csize) return 0; vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); if (!vaddr) return -ENOMEM; csize = copy_to_iter(vaddr + offset, csize, iter); iounmap(vaddr); return csize; }
linux-master
arch/arm/kernel/crash_dump.c
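A minimal sketch of a possible in-kernel caller for copy_oldmem_page() above, assuming the current iov_iter API (kvec-backed iterator with the ITER_DEST direction); the helper name read_oldmem_pfn() is hypothetical:

#include <linux/crash_dump.h>
#include <linux/uio.h>

/* Copy 'len' bytes from the start of old-memory page 'pfn' into 'buf'. */
static ssize_t read_oldmem_pfn(unsigned long pfn, void *buf, size_t len)
{
        struct kvec kvec = { .iov_base = buf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, len);

        /* offset 0 within the page; csize = len */
        return copy_oldmem_page(&iter, pfn, len, 0);
}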
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/arm/kernel/bios32.c * * PCI bios-type initialisation for PCI machines * * Bits taken from various places. */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/io.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <asm/mach/pci.h> static int debug_pci; /* * We can't use pci_get_device() here since we are * called from interrupt context. */ static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { u16 status; /* * ignore host bridge - we handle * that separately */ if (dev->bus->number == 0 && dev->devfn == 0) continue; pci_read_config_word(dev, PCI_STATUS, &status); if (status == 0xffff) continue; if ((status & status_mask) == 0) continue; /* clear the status errors */ pci_write_config_word(dev, PCI_STATUS, status & status_mask); if (warn) printk("(%s: %04X) ", pci_name(dev), status); } list_for_each_entry(dev, &bus->devices, bus_list) if (dev->subordinate) pcibios_bus_report_status(dev->subordinate, status_mask, warn); } void pcibios_report_status(u_int status_mask, int warn) { struct pci_bus *bus; list_for_each_entry(bus, &pci_root_buses, node) pcibios_bus_report_status(bus, status_mask, warn); } /* * We don't use this to fix the device, but initialisation of it. * It's not the correct use for this, but it works. * Note that the arbiter/ISA bridge appears to be buggy, specifically in * the following area: * 1. park on CPU * 2. ISA bridge ping-pong * 3. ISA bridge master handling of target RETRY * * Bug 3 is responsible for the sound DMA grinding to a halt. We now * live with bug 2. */ static void pci_fixup_83c553(struct pci_dev *dev) { /* * Set memory region to start at address 0, and enable IO */ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY); pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO); dev->resource[0].end -= dev->resource[0].start; dev->resource[0].start = 0; /* * All memory requests from ISA to be channelled to PCI */ pci_write_config_byte(dev, 0x48, 0xff); /* * Enable ping-pong on bus master to ISA bridge transactions. * This improves the sound DMA substantially. The fixed * priority arbiter also helps (see below). */ pci_write_config_byte(dev, 0x42, 0x01); /* * Enable PCI retry */ pci_write_config_byte(dev, 0x40, 0x22); /* * We used to set the arbiter to "park on last master" (bit * 1 set), but unfortunately the CyberPro does not park the * bus. We must therefore park on CPU. Unfortunately, this * may trigger yet another bug in the 553. */ pci_write_config_byte(dev, 0x83, 0x02); /* * Make the ISA DMA request lowest priority, and disable * rotating priorities completely. */ pci_write_config_byte(dev, 0x80, 0x11); pci_write_config_byte(dev, 0x81, 0x00); /* * Route INTA input to IRQ 11, and set IRQ11 to be level * sensitive. */ pci_write_config_word(dev, 0x44, 0xb000); outb(0x08, 0x4d1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553); static void pci_fixup_unassign(struct pci_dev *dev) { dev->resource[0].end -= dev->resource[0].start; dev->resource[0].start = 0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign); /* * Prevent the PCI layer from seeing the resources allocated to this device * if it is the host bridge by marking it as such. 
These resources are of * no consequence to the PCI layer (they are handled elsewhere). */ static void pci_fixup_dec21285(struct pci_dev *dev) { if (dev->devfn == 0) { struct resource *r; dev->class &= 0xff; dev->class |= PCI_CLASS_BRIDGE_HOST << 8; pci_dev_for_each_resource(dev, r) { r->start = 0; r->end = 0; r->flags = 0; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285); /* * PCI IDE controllers use non-standard I/O port decoding, respect it. */ static void pci_fixup_ide_bases(struct pci_dev *dev) { struct resource *r; if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) return; pci_dev_for_each_resource(dev, r) { if ((r->start & ~0x80) == 0x374) { r->start |= 2; r->end = r->start; } } } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); /* * Put the DEC21142 to sleep */ static void pci_fixup_dec21142(struct pci_dev *dev) { pci_write_config_dword(dev, 0x40, 0x80000000); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142); /* * The CY82C693 needs some rather major fixups to ensure that it does * the right thing. Idea from the Alpha people, with a few additions. * * We ensure that the IDE base registers are set to 1f0/3f4 for the * primary bus, and 170/374 for the secondary bus. Also, hide them * from the PCI subsystem view as well so we won't try to perform * our own auto-configuration on them. * * In addition, we ensure that the PCI IDE interrupts are routed to * IRQ 14 and IRQ 15 respectively. * * The above gets us to a point where the IDE on this device is * functional. However, The CY82C693U _does not work_ in bus * master mode without locking the PCI bus solid. */ static void pci_fixup_cy82c693(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) { u32 base0, base1; if (dev->class & 0x80) { /* primary */ base0 = 0x1f0; base1 = 0x3f4; } else { /* secondary */ base0 = 0x170; base1 = 0x374; } pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, base0 | PCI_BASE_ADDRESS_SPACE_IO); pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, base1 | PCI_BASE_ADDRESS_SPACE_IO); dev->resource[0].start = 0; dev->resource[0].end = 0; dev->resource[0].flags = 0; dev->resource[1].start = 0; dev->resource[1].end = 0; dev->resource[1].flags = 0; } else if (PCI_FUNC(dev->devfn) == 0) { /* * Setup IDE IRQ routing. */ pci_write_config_byte(dev, 0x4b, 14); pci_write_config_byte(dev, 0x4c, 15); /* * Disable FREQACK handshake, enable USB. */ pci_write_config_byte(dev, 0x4d, 0x41); /* * Enable PCI retry, and PCI post-write buffer. */ pci_write_config_byte(dev, 0x44, 0x17); /* * Enable ISA master and DMA post write buffering. */ pci_write_config_byte(dev, 0x45, 0x03); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693); /* * If the bus contains any of these devices, then we must not turn on * parity checking of any kind. Currently this is CyberPro 20x0 only. */ static inline int pdev_bad_for_parity(struct pci_dev *dev) { return ((dev->vendor == PCI_VENDOR_ID_INTERG && (dev->device == PCI_DEVICE_ID_INTERG_2000 || dev->device == PCI_DEVICE_ID_INTERG_2010)) || (dev->vendor == PCI_VENDOR_ID_ITE && dev->device == PCI_DEVICE_ID_ITE_8152)); } /* * pcibios_fixup_bus - Called after each bus is probed, * but before its children are examined. */ void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK; /* * Walk the devices on this bus, working out what we can * and can't support. 
*/ list_for_each_entry(dev, &bus->devices, bus_list) { u16 status; pci_read_config_word(dev, PCI_STATUS, &status); /* * If any device on this bus does not support fast back * to back transfers, then the bus as a whole is not able * to support them. Having fast back to back transfers * on saves us one PCI cycle per transaction. */ if (!(status & PCI_STATUS_FAST_BACK)) features &= ~PCI_COMMAND_FAST_BACK; if (pdev_bad_for_parity(dev)) features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_PCI: pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status); status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT; status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status); break; case PCI_CLASS_BRIDGE_CARDBUS: pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status); status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT; pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status); break; } } /* * Now walk the devices again, this time setting them up. */ list_for_each_entry(dev, &bus->devices, bus_list) { u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); cmd |= features; pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2); } /* * Propagate the flags to the PCI bridge. */ if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) { if (features & PCI_COMMAND_FAST_BACK) bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK; if (features & PCI_COMMAND_PARITY) bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY; } /* * Report what we did for this bus */ pr_info("PCI: bus%d: Fast back to back transfers %sabled\n", bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis"); } EXPORT_SYMBOL(pcibios_fixup_bus); /* * Swizzle the device pin each time we cross a bridge. If a platform does * not provide a swizzle function, we perform the standard PCI swizzling. * * The default swizzling walks up the bus tree one level at a time, applying * the standard swizzle function at each step, stopping when it finds the PCI * root bus. This will return the slot number of the bridge device on the * root bus and the interrupt pin on that device which should correspond * with the downstream device interrupt. * * Platforms may override this, in which case the slot and pin returned * depend entirely on the platform code. However, please note that the * PCI standard swizzle is implemented on plug-in cards and Cardbus based * PCI extenders, so it can not be ignored. */ static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin) { struct pci_sys_data *sys = dev->sysdata; int slot, oldpin = *pin; if (sys->swizzle) slot = sys->swizzle(dev, pin); else slot = pci_common_swizzle(dev, pin); if (debug_pci) printk("PCI: %s swizzling pin %d => pin %d slot %d\n", pci_name(dev), oldpin, *pin, slot); return slot; } /* * Map a slot/pin to an IRQ. 
*/ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_sys_data *sys = dev->sysdata; int irq = -1; if (sys->map_irq) irq = sys->map_irq(dev, slot, pin); if (debug_pci) printk("PCI: %s mapping slot %d pin %d => irq %d\n", pci_name(dev), slot, pin, irq); return irq; } static int pcibios_init_resource(int busnr, struct pci_sys_data *sys) { int ret; struct resource_entry *window; if (list_empty(&sys->resources)) { pci_add_resource_offset(&sys->resources, &iomem_resource, sys->mem_offset); } resource_list_for_each_entry(window, &sys->resources) if (resource_type(window->res) == IORESOURCE_IO) return 0; sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; sys->io_res.end = (busnr + 1) * SZ_64K - 1; sys->io_res.flags = IORESOURCE_IO; sys->io_res.name = sys->io_res_name; sprintf(sys->io_res_name, "PCI%d I/O", busnr); ret = request_resource(&ioport_resource, &sys->io_res); if (ret) { pr_err("PCI: unable to allocate I/O port region (%d)\n", ret); return ret; } pci_add_resource_offset(&sys->resources, &sys->io_res, sys->io_offset); return 0; } static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, struct list_head *head) { struct pci_sys_data *sys = NULL; int ret; int nr, busnr; for (nr = busnr = 0; nr < hw->nr_controllers; nr++) { struct pci_host_bridge *bridge; bridge = pci_alloc_host_bridge(sizeof(struct pci_sys_data)); if (WARN(!bridge, "PCI: unable to allocate bridge!")) break; sys = pci_host_bridge_priv(bridge); sys->busnr = busnr; sys->swizzle = hw->swizzle; sys->map_irq = hw->map_irq; INIT_LIST_HEAD(&sys->resources); if (hw->private_data) sys->private_data = hw->private_data[nr]; ret = hw->setup(nr, sys); if (ret > 0) { ret = pcibios_init_resource(nr, sys); if (ret) { pci_free_host_bridge(bridge); break; } bridge->map_irq = pcibios_map_irq; bridge->swizzle_irq = pcibios_swizzle; if (hw->scan) ret = hw->scan(nr, bridge); else { list_splice_init(&sys->resources, &bridge->windows); bridge->dev.parent = parent; bridge->sysdata = sys; bridge->busnr = sys->busnr; bridge->ops = hw->ops; ret = pci_scan_root_bus_bridge(bridge); } if (WARN(ret < 0, "PCI: unable to scan bus!")) { pci_free_host_bridge(bridge); break; } sys->bus = bridge->bus; busnr = sys->bus->busn_res.end + 1; list_add(&sys->node, head); } else { pci_free_host_bridge(bridge); if (ret < 0) break; } } } void pci_common_init_dev(struct device *parent, struct hw_pci *hw) { struct pci_sys_data *sys; LIST_HEAD(head); pci_add_flags(PCI_REASSIGN_ALL_BUS); if (hw->preinit) hw->preinit(); pcibios_init_hw(parent, hw, &head); if (hw->postinit) hw->postinit(); list_for_each_entry(sys, &head, node) { struct pci_bus *bus = sys->bus; /* * We insert PCI resources into the iomem_resource and * ioport_resource trees in either pci_bus_claim_resources() * or pci_bus_assign_resources(). 
*/ if (pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_claim_resources(bus); } else { struct pci_bus *child; pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); } } #ifndef CONFIG_PCI_HOST_ITE8152 void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } #endif char * __init pcibios_setup(char *str) { if (!strcmp(str, "debug")) { debug_pci = 1; return NULL; } return str; } /* * From arch/i386/kernel/pci-i386.c: * * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might be mirrored at 0x0100-0x03ff.. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; struct pci_host_bridge *host_bridge; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; start = (start + align - 1) & ~(align - 1); host_bridge = pci_find_host_bridge(dev->bus); if (host_bridge->align_resource) return host_bridge->align_resource(dev, res, start, size, align); return start; } void __init pci_map_io_early(unsigned long pfn) { struct map_desc pci_io_desc = { .virtual = PCI_IO_VIRT_BASE, .type = MT_DEVICE, .length = SZ_64K, }; pci_io_desc.pfn = pfn; iotable_init(&pci_io_desc, 1); }
linux-master
arch/arm/kernel/bios32.c
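The swizzle comment above leans on the standard rotation without spelling it out. As an editorial illustration (not code from bios32.c), the hypothetical helper below shows the conventional INTA-INTD rotation by slot number that the default pci_common_swizzle() path applies at each bridge on the way up to the root bus.

/* Illustration only -- hypothetical helper, not part of bios32.c */
static u8 example_standard_swizzle(u8 pin, u8 slot)
{
	/* INTA..INTD are encoded as 1..4; rotate the pin by the slot number */
	return (((pin - 1) + slot) % 4) + 1;
}

Applying this rotation once per bridge is what produces the slot/pin pair that pcibios_map_irq() finally hands to the platform's map_irq callback.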
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Linaro Ltd. */ #include <linux/cpuidle.h> #include <linux/of.h> #include <asm/cpuidle.h> extern struct of_cpuidle_method __cpuidle_method_of_table[]; static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel __used __section("__cpuidle_method_of_table_end"); static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init; /** * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle() * @dev: not used * @drv: not used * @index: not used * * A trivial wrapper to allow the cpu_do_idle function to be assigned as a * cpuidle callback by matching the function signature. * * Returns the index passed as parameter */ __cpuidle int arm_cpuidle_simple_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { cpu_do_idle(); return index; } /** * arm_cpuidle_suspend() - function to enter low power idle states * @index: an integer used as an identifier for the low level PM callbacks * * This function calls the underlying arch specific low level PM code as * registered at the init time. * * Returns the result of the suspend callback. */ int arm_cpuidle_suspend(int index) { int cpu = smp_processor_id(); return cpuidle_ops[cpu].suspend(index); } /** * arm_cpuidle_get_ops() - find a registered cpuidle_ops by name * @method: the method name * * Search in the __cpuidle_method_of_table array the cpuidle ops matching the * method name. * * Returns a struct cpuidle_ops pointer, NULL if not found. */ static const struct cpuidle_ops *__init arm_cpuidle_get_ops(const char *method) { struct of_cpuidle_method *m = __cpuidle_method_of_table; for (; m->method; m++) if (!strcmp(m->method, method)) return m->ops; return NULL; } /** * arm_cpuidle_read_ops() - Initialize the cpuidle ops with the device tree * @dn: a pointer to a struct device node corresponding to a cpu node * @cpu: the cpu identifier * * Get the method name defined in the 'enable-method' property, retrieve the * associated cpuidle_ops and do a struct copy. This copy is needed because all * cpuidle_ops are tagged __initconst and will be unloaded after the init * process. * * Return 0 on sucess, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if * no cpuidle_ops is registered for the 'enable-method', or if either init or * suspend callback isn't defined. */ static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu) { const char *enable_method; const struct cpuidle_ops *ops; enable_method = of_get_property(dn, "enable-method", NULL); if (!enable_method) return -ENOENT; ops = arm_cpuidle_get_ops(enable_method); if (!ops) { pr_warn("%pOF: unsupported enable-method property: %s\n", dn, enable_method); return -EOPNOTSUPP; } if (!ops->init || !ops->suspend) { pr_warn("cpuidle_ops '%s': no init or suspend callback\n", enable_method); return -EOPNOTSUPP; } cpuidle_ops[cpu] = *ops; /* structure copy */ pr_notice("cpuidle: enable-method property '%s'" " found operations\n", enable_method); return 0; } /** * arm_cpuidle_init() - Initialize cpuidle_ops for a specific cpu * @cpu: the cpu to be initialized * * Initialize the cpuidle ops with the device for the cpu and then call * the cpu's idle initialization callback. This may fail if the underlying HW * is not operational. 
 *
 * Returns:
 *  0 on success,
 *  -ENODEV if it fails to find the cpu node in the device tree,
 *  -EOPNOTSUPP if it does not find a registered and valid cpuidle_ops for
 *  this cpu,
 *  -ENOENT if it fails to find an 'enable-method' property,
 *  -ENXIO if the HW reports a failure or a misconfiguration,
 *  -ENOMEM if the HW reports a memory allocation failure
 */
int __init arm_cpuidle_init(int cpu)
{
	struct device_node *cpu_node = of_cpu_device_node_get(cpu);
	int ret;

	if (!cpu_node)
		return -ENODEV;

	ret = arm_cpuidle_read_ops(cpu_node, cpu);
	if (!ret)
		ret = cpuidle_ops[cpu].init(cpu_node, cpu);

	of_node_put(cpu_node);

	return ret;
}
linux-master
arch/arm/kernel/cpuidle.c
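The table search in arm_cpuidle_get_ops() above implies a registration side that is not shown here. The sketch below is a hedged illustration of that side: CPUIDLE_METHOD_OF_DECLARE() is the registration macro for this table, while the "my-soc,idle" string, the my_soc_* names, and the exact callback signatures are assumptions chosen to match how cpuidle.c invokes .init(cpu_node, cpu) and .suspend(index).

/* Hypothetical platform code -- signatures assumed to match the callers above */
static int my_soc_cpuidle_init(struct device_node *cpu_node, int cpu)
{
	/* probe the SoC power controller for this cpu; 0 on success */
	return 0;
}

static int my_soc_cpuidle_suspend(unsigned long index)
{
	/* program and enter the low-power state selected by 'index' */
	return 0;
}

static const struct cpuidle_ops my_soc_cpuidle_ops = {
	.init		= my_soc_cpuidle_init,
	.suspend	= my_soc_cpuidle_suspend,
};

CPUIDLE_METHOD_OF_DECLARE(my_soc_idle, "my-soc,idle", &my_soc_cpuidle_ops);

With a matching enable-method property in the cpu node, arm_cpuidle_read_ops() copies these ops, and arm_cpuidle_init()/arm_cpuidle_suspend() dispatch to them.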
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1996-2000 Russell King - Converted to ARM. * Original Copyright (C) 1995 Linus Torvalds */ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/reboot.h> #include <asm/cacheflush.h> #include <asm/idmap.h> #include <asm/virt.h> #include <asm/system_misc.h> #include "reboot.h" typedef void (*phys_reset_t)(unsigned long, bool); /* * Function pointers to optional machine specific functions */ void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); /* * A temporary stack to use for CPU reset. This is static so that we * don't clobber it with the identity mapping. When running with this * stack, any references to the current task *will not work* so you * should really do as little as possible before jumping to your reset * code. */ static u64 soft_restart_stack[16]; static void __soft_restart(void *addr) { phys_reset_t phys_reset; /* Take out a flat memory mapping. */ setup_mm_for_reboot(); /* Clean and invalidate caches */ flush_cache_all(); /* Turn off caching */ cpu_proc_fin(); /* Push out any further dirty data, and ensure cache is empty */ flush_cache_all(); /* Switch to the identity mapping. */ phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset); /* original stub should be restored by kvm */ phys_reset((unsigned long)addr, is_hyp_mode_available()); /* Should never get here. */ BUG(); } void _soft_restart(unsigned long addr, bool disable_l2) { u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); /* Disable interrupts first */ raw_local_irq_disable(); local_fiq_disable(); /* Disable the L2 if we're the last man standing. */ if (disable_l2) outer_disable(); /* Change to the new stack and continue with the reset. */ call_with_stack(__soft_restart, (void *)addr, (void *)stack); /* Should never get here. */ BUG(); } void soft_restart(unsigned long addr) { _soft_restart(addr, num_online_cpus() == 1); } /* * Called by kexec, immediately prior to machine_kexec(). * * This must completely disable all secondary CPUs; simply causing those CPUs * to execute e.g. a RAM-based pin loop is not sufficient. This allows the * kexec'd kernel to use any and all RAM as it sees fit, without having to * avoid any code or data used by any SW CPU pin loop. The CPU hotplug * functionality embodied in smp_shutdown_nonboot_cpus() to achieve this. */ void machine_shutdown(void) { smp_shutdown_nonboot_cpus(reboot_cpu); } /* * Halting simply requires that the secondary CPUs stop performing any * activity (executing tasks, handling interrupts). smp_send_stop() * achieves this. */ void machine_halt(void) { local_irq_disable(); smp_send_stop(); while (1); } /* * Power-off simply requires that the secondary CPUs stop performing any * activity (executing tasks, handling interrupts). smp_send_stop() * achieves this. When the system power is turned off, it will take all CPUs * with it. */ void machine_power_off(void) { local_irq_disable(); smp_send_stop(); do_kernel_power_off(); } /* * Restart requires that the secondary CPUs stop performing any activity * while the primary CPU resets the system. Systems with a single CPU can * use soft_restart() as their machine descriptor's .restart hook, since that * will cause the only available CPU to reset. Systems with multiple CPUs must * provide a HW restart implementation, to ensure that all CPUs reset at once. 
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	local_irq_disable();
	smp_send_stop();

	do_kernel_restart(cmd);

	/* Give a grace period for failure to restart of 1s */
	mdelay(1000);

	/* Whoops - the platform was unable to reboot. Tell the user! */
	printk("Reboot failed -- System halted\n");
	while (1);
}
linux-master
arch/arm/kernel/reboot.c
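machine_restart() above hands control to do_kernel_restart(), which runs the restart notifier chain. The fragment below is a hypothetical board-side handler for that chain (the my_board_* names are invented for illustration); setup.c later in this document registers its own handler through the same register_restart_handler() interface.

/* Hypothetical board code -- a restart handler invoked via do_kernel_restart() */
static int my_board_restart(struct notifier_block *nb, unsigned long action,
			    void *cmd)
{
	/* kick the board's reset controller here */
	return NOTIFY_DONE;
}

static struct notifier_block my_board_restart_nb = {
	.notifier_call	= my_board_restart,
	.priority	= 128,	/* same priority setup.c uses for its handler */
};

static int __init my_board_restart_init(void)
{
	return register_restart_handler(&my_board_restart_nb);
}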
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Linaro Ltd <[email protected]> */ #include <linux/efi.h> #include <linux/memblock.h> #include <linux/screen_info.h> #include <asm/efi.h> #include <asm/mach/map.h> #include <asm/mmu_context.h> static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { efi_memory_desc_t *md = data; pte_t pte = *ptep; if (md->attribute & EFI_MEMORY_RO) pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY)); if (md->attribute & EFI_MEMORY_XP) pte = set_pte_bit(pte, __pgprot(L_PTE_XN)); set_pte_ext(ptep, pte, PTE_EXT_NG); return 0; } int __init efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool ignored) { unsigned long base, size; base = md->virt_addr; size = md->num_pages << EFI_PAGE_SHIFT; /* * We can only use apply_to_page_range() if we can guarantee that the * entire region was mapped using pages. This should be the case if the * region does not cover any naturally aligned SECTION_SIZE sized * blocks. */ if (round_down(base + size, SECTION_SIZE) < round_up(base, SECTION_SIZE) + SECTION_SIZE) return apply_to_page_range(mm, base, size, set_permissions, md); return 0; } int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) { struct map_desc desc = { .virtual = md->virt_addr, .pfn = __phys_to_pfn(md->phys_addr), .length = md->num_pages * EFI_PAGE_SIZE, }; /* * Order is important here: memory regions may have all of the * bits below set (and usually do), so we check them in order of * preference. */ if (md->attribute & EFI_MEMORY_WB) desc.type = MT_MEMORY_RWX; else if (md->attribute & EFI_MEMORY_WT) desc.type = MT_MEMORY_RWX_NONCACHED; else if (md->attribute & EFI_MEMORY_WC) desc.type = MT_DEVICE_WC; else desc.type = MT_DEVICE; create_mapping_late(mm, &desc, true); /* * If stricter permissions were specified, apply them now. 
*/ if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP)) return efi_set_mapping_permissions(mm, md, false); return 0; } static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR; const efi_config_table_type_t efi_arch_tables[] __initconst = { {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table}, {} }; static void __init load_cpu_state_table(void) { if (cpu_state_table != EFI_INVALID_TABLE_ADDR) { struct efi_arm_entry_state *state; bool dump_state = true; state = early_memremap_ro(cpu_state_table, sizeof(struct efi_arm_entry_state)); if (state == NULL) { pr_warn("Unable to map CPU entry state table.\n"); return; } if ((state->sctlr_before_ebs & 1) == 0) pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n"); else if ((state->sctlr_after_ebs & 1) == 0) pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n"); else dump_state = false; if (dump_state || efi_enabled(EFI_DBG)) { pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs); pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs); pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs); pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs); } early_memunmap(state, sizeof(struct efi_arm_entry_state)); } } void __init arm_efi_init(void) { efi_init(); if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { /* dummycon on ARM needs non-zero values for columns/lines */ screen_info.orig_video_cols = 80; screen_info.orig_video_lines = 25; } /* ARM does not permit early mappings to persist across paging_init() */ efi_memmap_unmap(); load_cpu_state_table(); }
linux-master
arch/arm/kernel/efi.c
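The guard in efi_set_mapping_permissions() above is easy to misread, so the hypothetical helper below restates it: apply_to_page_range() is only used when the region covers no naturally aligned SECTION_SIZE block, which is the case where it can be assumed to have been mapped with individual pages rather than sections.

/* Illustration only: true when [base, base + size) covers no full SECTION_SIZE block */
static bool example_spans_no_full_section(unsigned long base, unsigned long size,
					  unsigned long section_size)
{
	unsigned long first_full_start = round_up(base, section_size);
	unsigned long last_full_end    = round_down(base + size, section_size);

	/* same condition as the apply_to_page_range() guard above */
	return last_full_end < first_full_start + section_size;
}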
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/unwind.c * * Copyright (C) 2008 ARM Limited * * Stack unwinding support for ARM * * An ARM EABI version of gcc is required to generate the unwind * tables. For information about the structure of the unwind tables, * see "Exception Handling ABI for the ARM Architecture" at: * * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html */ #ifndef __CHECKER__ #if !defined (__ARM_EABI__) #warning Your compiler does not have EABI support. #warning ARM unwind is known to compile only with EABI compilers. #warning Change compiler or disable ARM_UNWIND option. #endif #endif /* __CHECKER__ */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/module.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include <asm/unwind.h> #include "reboot.h" /* Dummy functions to avoid linker complaints */ void __aeabi_unwind_cpp_pr0(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0); void __aeabi_unwind_cpp_pr1(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1); void __aeabi_unwind_cpp_pr2(void) { }; EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2); struct unwind_ctrl_block { unsigned long vrs[16]; /* virtual register set */ const unsigned long *insn; /* pointer to the current instructions word */ unsigned long sp_high; /* highest value of sp allowed */ unsigned long *lr_addr; /* address of LR value on the stack */ /* * 1 : check for stack overflow for each register pop. * 0 : save overhead if there is plenty of stack remaining. */ int check_each_pop; int entries; /* number of entries left to interpret */ int byte; /* current byte number in the instructions word */ }; enum regs { #ifdef CONFIG_THUMB2_KERNEL FP = 7, #else FP = 11, #endif SP = 13, LR = 14, PC = 15 }; extern const struct unwind_idx __start_unwind_idx[]; static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; static DEFINE_RAW_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); /* Convert a prel31 symbol to an absolute address */ #define prel31_to_addr(ptr) \ ({ \ /* sign-extend to 32 bits */ \ long offset = (((long)*(ptr)) << 1) >> 1; \ (unsigned long)(ptr) + offset; \ }) /* * Binary search in the unwind index. The entries are * guaranteed to be sorted in ascending order by the linker. * * start = first entry * origin = first entry with positive offset (or stop if there is no such entry) * stop - 1 = last entry */ static const struct unwind_idx *search_index(unsigned long addr, const struct unwind_idx *start, const struct unwind_idx *origin, const struct unwind_idx *stop) { unsigned long addr_prel31; pr_debug("%s(%08lx, %p, %p, %p)\n", __func__, addr, start, origin, stop); /* * only search in the section with the matching sign. This way the * prel31 numbers can be compared as unsigned longs. */ if (addr < (unsigned long)start) /* negative offsets: [start; origin) */ stop = origin; else /* positive offsets: [origin; stop) */ start = origin; /* prel31 for address relavive to start */ addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff; while (start < stop - 1) { const struct unwind_idx *mid = start + ((stop - start) >> 1); /* * As addr_prel31 is relative to start an offset is needed to * make it relative to mid. 
*/ if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) < mid->addr_offset) stop = mid; else { /* keep addr_prel31 relative to start */ addr_prel31 -= ((unsigned long)mid - (unsigned long)start); start = mid; } } if (likely(start->addr_offset <= addr_prel31)) return start; else { pr_warn("unwind: Unknown symbol address %08lx\n", addr); return NULL; } } static const struct unwind_idx *unwind_find_origin( const struct unwind_idx *start, const struct unwind_idx *stop) { pr_debug("%s(%p, %p)\n", __func__, start, stop); while (start < stop) { const struct unwind_idx *mid = start + ((stop - start) >> 1); if (mid->addr_offset >= 0x40000000) /* negative offset */ start = mid + 1; else /* positive offset */ stop = mid; } pr_debug("%s -> %p\n", __func__, stop); return stop; } static const struct unwind_idx *unwind_find_idx(unsigned long addr) { const struct unwind_idx *idx = NULL; unsigned long flags; pr_debug("%s(%08lx)\n", __func__, addr); if (core_kernel_text(addr)) { if (unlikely(!__origin_unwind_idx)) __origin_unwind_idx = unwind_find_origin(__start_unwind_idx, __stop_unwind_idx); /* main unwind table */ idx = search_index(addr, __start_unwind_idx, __origin_unwind_idx, __stop_unwind_idx); } else { /* module unwind tables */ struct unwind_table *table; raw_spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->begin_addr && addr < table->end_addr) { idx = search_index(addr, table->start, table->origin, table->stop); /* Move-to-front to exploit common traces */ list_move(&table->list, &unwind_tables); break; } } raw_spin_unlock_irqrestore(&unwind_lock, flags); } pr_debug("%s: idx = %p\n", __func__, idx); return idx; } static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl) { unsigned long ret; if (ctrl->entries <= 0) { pr_warn("unwind: Corrupt unwind table\n"); return 0; } ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff; if (ctrl->byte == 0) { ctrl->insn++; ctrl->entries--; ctrl->byte = 3; } else ctrl->byte--; return ret; } /* Before poping a register check whether it is feasible or not */ static int unwind_pop_register(struct unwind_ctrl_block *ctrl, unsigned long **vsp, unsigned int reg) { if (unlikely(ctrl->check_each_pop)) if (*vsp >= (unsigned long *)ctrl->sp_high) return -URC_FAILURE; /* Use READ_ONCE_NOCHECK here to avoid this memory access * from being tracked by KASAN. 
*/ ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp)); if (reg == 14) ctrl->lr_addr = *vsp; (*vsp)++; return URC_OK; } /* Helper functions to execute the instructions */ static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl, unsigned long mask) { unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int load_sp, reg = 4; load_sp = mask & (1 << (13 - 4)); while (mask) { if (mask & 1) if (unwind_pop_register(ctrl, &vsp, reg)) return -URC_FAILURE; mask >>= 1; reg++; } if (!load_sp) { ctrl->vrs[SP] = (unsigned long)vsp; } return URC_OK; } static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl, unsigned long insn) { unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int reg; /* pop R4-R[4+bbb] */ for (reg = 4; reg <= 4 + (insn & 7); reg++) if (unwind_pop_register(ctrl, &vsp, reg)) return -URC_FAILURE; if (insn & 0x8) if (unwind_pop_register(ctrl, &vsp, 14)) return -URC_FAILURE; ctrl->vrs[SP] = (unsigned long)vsp; return URC_OK; } static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl, unsigned long mask) { unsigned long *vsp = (unsigned long *)ctrl->vrs[SP]; int reg = 0; /* pop R0-R3 according to mask */ while (mask) { if (mask & 1) if (unwind_pop_register(ctrl, &vsp, reg)) return -URC_FAILURE; mask >>= 1; reg++; } ctrl->vrs[SP] = (unsigned long)vsp; return URC_OK; } static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl) { unsigned long bytes = 0; unsigned long insn; unsigned long result = 0; /* * unwind_get_byte() will advance `ctrl` one instruction at a time, so * loop until we get an instruction byte where bit 7 is not set. * * Note: This decodes a maximum of 4 bytes to output 28 bits data where * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence * it is sufficient for unwinding the stack. */ do { insn = unwind_get_byte(ctrl); result |= (insn & 0x7f) << (bytes * 7); bytes++; } while (!!(insn & 0x80) && (bytes != sizeof(result))); return result; } /* * Execute the current unwind instruction. 
*/ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl) { unsigned long insn = unwind_get_byte(ctrl); int ret = URC_OK; pr_debug("%s: insn = %08lx\n", __func__, insn); if ((insn & 0xc0) == 0x00) ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4; else if ((insn & 0xc0) == 0x40) { ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4; } else if ((insn & 0xf0) == 0x80) { unsigned long mask; insn = (insn << 8) | unwind_get_byte(ctrl); mask = insn & 0x0fff; if (mask == 0) { pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n", insn); return -URC_FAILURE; } ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask); if (ret) goto error; } else if ((insn & 0xf0) == 0x90 && (insn & 0x0d) != 0x0d) { ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f]; } else if ((insn & 0xf0) == 0xa0) { ret = unwind_exec_pop_r4_to_rN(ctrl, insn); if (ret) goto error; } else if (insn == 0xb0) { if (ctrl->vrs[PC] == 0) ctrl->vrs[PC] = ctrl->vrs[LR]; /* no further processing */ ctrl->entries = 0; } else if (insn == 0xb1) { unsigned long mask = unwind_get_byte(ctrl); if (mask == 0 || mask & 0xf0) { pr_warn("unwind: Spare encoding %04lx\n", (insn << 8) | mask); return -URC_FAILURE; } ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask); if (ret) goto error; } else if (insn == 0xb2) { unsigned long uleb128 = unwind_decode_uleb128(ctrl); ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { pr_warn("unwind: Unhandled instruction %02lx\n", insn); return -URC_FAILURE; } pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__, ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]); error: return ret; } /* * Unwind a single frame starting with *sp for the symbol at *pc. It * updates the *pc and *sp with the new values. */ int unwind_frame(struct stackframe *frame) { const struct unwind_idx *idx; struct unwind_ctrl_block ctrl; unsigned long sp_low; /* store the highest address on the stack to avoid crossing it*/ sp_low = frame->sp; ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN) + THREAD_SIZE; pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, frame->pc, frame->lr, frame->sp); idx = unwind_find_idx(frame->pc); if (!idx) { if (frame->pc && kernel_text_address(frame->pc)) { if (in_module_plt(frame->pc) && frame->pc != frame->lr) { /* * Quoting Ard: Veneers only set PC using a * PC+immediate LDR, and so they don't affect * the state of the stack or the register file */ frame->pc = frame->lr; return URC_OK; } pr_warn("unwind: Index not found %08lx\n", frame->pc); } return -URC_FAILURE; } ctrl.vrs[FP] = frame->fp; ctrl.vrs[SP] = frame->sp; ctrl.vrs[LR] = frame->lr; ctrl.vrs[PC] = 0; if (idx->insn == 1) /* can't unwind */ return -URC_FAILURE; else if (frame->pc == prel31_to_addr(&idx->addr_offset)) { /* * Unwinding is tricky when we're halfway through the prologue, * since the stack frame that the unwinder expects may not be * fully set up yet. However, one thing we do know for sure is * that if we are unwinding from the very first instruction of * a function, we are still effectively in the stack frame of * the caller, and the unwind info has no relevance yet. 
*/ if (frame->pc == frame->lr) return -URC_FAILURE; frame->pc = frame->lr; return URC_OK; } else if ((idx->insn & 0x80000000) == 0) /* prel31 to the unwind table */ ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn); else if ((idx->insn & 0xff000000) == 0x80000000) /* only personality routine 0 supported in the index */ ctrl.insn = &idx->insn; else { pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n", idx->insn, idx); return -URC_FAILURE; } /* check the personality routine */ if ((*ctrl.insn & 0xff000000) == 0x80000000) { ctrl.byte = 2; ctrl.entries = 1; } else if ((*ctrl.insn & 0xff000000) == 0x81000000) { ctrl.byte = 1; ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16); } else { pr_warn("unwind: Unsupported personality routine %08lx at %p\n", *ctrl.insn, ctrl.insn); return -URC_FAILURE; } ctrl.check_each_pop = 0; if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) { /* * call_with_stack() is the only place where we permit SP to * jump from one stack to another, and since we know it is * guaranteed to happen, set up the SP bounds accordingly. */ sp_low = frame->fp; ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE); } while (ctrl.entries > 0) { int urc; if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs)) ctrl.check_each_pop = 1; urc = unwind_exec_insn(&ctrl); if (urc < 0) return urc; if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high) return -URC_FAILURE; } if (ctrl.vrs[PC] == 0) ctrl.vrs[PC] = ctrl.vrs[LR]; /* check for infinite loop */ if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP]) return -URC_FAILURE; frame->fp = ctrl.vrs[FP]; frame->sp = ctrl.vrs[SP]; frame->lr = ctrl.vrs[LR]; frame->pc = ctrl.vrs[PC]; frame->lr_addr = ctrl.lr_addr; return URC_OK; } void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk, const char *loglvl) { struct stackframe frame; pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); if (!tsk) tsk = current; if (regs) { arm_get_current_stackframe(regs, &frame); /* PC might be corrupted, use LR in that case. */ if (!kernel_text_address(regs->ARM_pc)) frame.pc = regs->ARM_lr; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); /* We are saving the stack and execution state at this * point, so we should ensure that frame.pc is within * this block of code. */ here: frame.pc = (unsigned long)&&here; } else { /* task blocked in __switch_to */ frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); /* * The function calling __switch_to cannot be a leaf function * so LR is recovered from the stack. 
*/ frame.lr = 0; frame.pc = thread_saved_pc(tsk); } while (1) { int urc; unsigned long where = frame.pc; urc = unwind_frame(&frame); if (urc < 0) break; dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl); } } struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, unsigned long text_addr, unsigned long text_size) { unsigned long flags; struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL); pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size, text_addr, text_size); if (!tab) return tab; tab->start = (const struct unwind_idx *)start; tab->stop = (const struct unwind_idx *)(start + size); tab->origin = unwind_find_origin(tab->start, tab->stop); tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; raw_spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); raw_spin_unlock_irqrestore(&unwind_lock, flags); return tab; } void unwind_table_del(struct unwind_table *tab) { unsigned long flags; if (!tab) return; raw_spin_lock_irqsave(&unwind_lock, flags); list_del(&tab->list); raw_spin_unlock_irqrestore(&unwind_lock, flags); kfree(tab); }
linux-master
arch/arm/kernel/unwind.c
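As an editorial aid, the hypothetical helper below restates the prel31_to_addr() macro from unwind.c as a function and walks one concrete value through it; it adds nothing beyond the macro's own arithmetic.

/* Illustration only -- same arithmetic as the prel31_to_addr() macro above */
static unsigned long example_prel31_to_addr(const unsigned long *ptr)
{
	/* shift out bit 31, then shift back to sign-extend bit 30 */
	long offset = (((long)*ptr) << 1) >> 1;

	/*
	 * e.g. a word at 0xc0100000 holding 0x7ffffff0 sign-extends to
	 * offset -16 and therefore resolves to 0xc00ffff0.
	 */
	return (unsigned long)ptr + offset;
}

The same style of worked example applies to the 0xb2 opcode: operand bytes 0x81 0x01 decode to the ULEB128 value 129, so vsp advances by 0x204 + (129 << 2) = 0x408 bytes.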
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/setup.c * * Copyright (C) 1995-2001 Russell King */ #include <linux/efi.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/utsname.h> #include <linux/initrd.h> #include <linux/console.h> #include <linux/seq_file.h> #include <linux/screen_info.h> #include <linux/of_platform.h> #include <linux/init.h> #include <linux/kexec.h> #include <linux/libfdt.h> #include <linux/of_fdt.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/proc_fs.h> #include <linux/memblock.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/sort.h> #include <linux/psci.h> #include <asm/unified.h> #include <asm/cp15.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/efi.h> #include <asm/elf.h> #include <asm/early_ioremap.h> #include <asm/fixmap.h> #include <asm/procinfo.h> #include <asm/psci.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp_plat.h> #include <asm/mach-types.h> #include <asm/cacheflush.h> #include <asm/cachetype.h> #include <asm/tlbflush.h> #include <asm/xen/hypervisor.h> #include <asm/prom.h> #include <asm/mach/arch.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> #include <asm/system_info.h> #include <asm/system_misc.h> #include <asm/traps.h> #include <asm/unwind.h> #include <asm/memblock.h> #include <asm/virt.h> #include <asm/kasan.h> #include "atags.h" #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE) char fpe_type[8]; static int __init fpe_setup(char *line) { memcpy(fpe_type, line, 8); return 1; } __setup("fpe=", fpe_setup); #endif unsigned int processor_id; EXPORT_SYMBOL(processor_id); unsigned int __machine_arch_type __read_mostly; EXPORT_SYMBOL(__machine_arch_type); unsigned int cacheid __read_mostly; EXPORT_SYMBOL(cacheid); unsigned int __atags_pointer __initdata; unsigned int system_rev; EXPORT_SYMBOL(system_rev); const char *system_serial; EXPORT_SYMBOL(system_serial); unsigned int system_serial_low; EXPORT_SYMBOL(system_serial_low); unsigned int system_serial_high; EXPORT_SYMBOL(system_serial_high); unsigned int elf_hwcap __read_mostly; EXPORT_SYMBOL(elf_hwcap); unsigned int elf_hwcap2 __read_mostly; EXPORT_SYMBOL(elf_hwcap2); #ifdef MULTI_CPU struct processor processor __ro_after_init; #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) struct processor *cpu_vtable[NR_CPUS] = { [0] = &processor, }; #endif #endif #ifdef MULTI_TLB struct cpu_tlb_fns cpu_tlb __ro_after_init; #endif #ifdef MULTI_USER struct cpu_user_fns cpu_user __ro_after_init; #endif #ifdef MULTI_CACHE struct cpu_cache_fns cpu_cache __ro_after_init; #endif #ifdef CONFIG_OUTER_CACHE struct outer_cache_fns outer_cache __ro_after_init; EXPORT_SYMBOL(outer_cache); #endif /* * Cached cpu_architecture() result for use by assembler code. * C code should use the cpu_architecture() function instead of accessing this * variable directly. 
*/ int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN; struct stack { u32 irq[4]; u32 abt[4]; u32 und[4]; u32 fiq[4]; } ____cacheline_aligned; #ifndef CONFIG_CPU_V7M static struct stack stacks[NR_CPUS]; #endif char elf_platform[ELF_PLATFORM_SIZE]; EXPORT_SYMBOL(elf_platform); static const char *cpu_name; static const char *machine_name; static char __initdata cmd_line[COMMAND_LINE_SIZE]; const struct machine_desc *machine_desc __initdata; static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; #define ENDIANNESS ((char)endian_test.l) DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data); /* * Standard memory resources */ static struct resource mem_res[] = { { .name = "Video RAM", .start = 0, .end = 0, .flags = IORESOURCE_MEM }, { .name = "Kernel code", .start = 0, .end = 0, .flags = IORESOURCE_SYSTEM_RAM }, { .name = "Kernel data", .start = 0, .end = 0, .flags = IORESOURCE_SYSTEM_RAM } }; #define video_ram mem_res[0] #define kernel_code mem_res[1] #define kernel_data mem_res[2] static struct resource io_res[] = { { .name = "reserved", .start = 0x3bc, .end = 0x3be, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "reserved", .start = 0x378, .end = 0x37f, .flags = IORESOURCE_IO | IORESOURCE_BUSY }, { .name = "reserved", .start = 0x278, .end = 0x27f, .flags = IORESOURCE_IO | IORESOURCE_BUSY } }; #define lp0 io_res[0] #define lp1 io_res[1] #define lp2 io_res[2] static const char *proc_arch[] = { "undefined/unknown", "3", "4", "4T", "5", "5T", "5TE", "5TEJ", "6TEJ", "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)", }; #ifdef CONFIG_CPU_V7M static int __get_cpu_architecture(void) { return CPU_ARCH_ARMv7M; } #else static int __get_cpu_architecture(void) { int cpu_arch; if ((read_cpuid_id() & 0x0008f000) == 0) { cpu_arch = CPU_ARCH_UNKNOWN; } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3; } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) { cpu_arch = (read_cpuid_id() >> 16) & 7; if (cpu_arch) cpu_arch += CPU_ARCH_ARMv3; } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { /* Revised CPUID format. Read the Memory Model Feature * Register 0 and check for VMSAv7 or PMSAv7 */ unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0); if ((mmfr0 & 0x0000000f) >= 0x00000003 || (mmfr0 & 0x000000f0) >= 0x00000030) cpu_arch = CPU_ARCH_ARMv7; else if ((mmfr0 & 0x0000000f) == 0x00000002 || (mmfr0 & 0x000000f0) == 0x00000020) cpu_arch = CPU_ARCH_ARMv6; else cpu_arch = CPU_ARCH_UNKNOWN; } else cpu_arch = CPU_ARCH_UNKNOWN; return cpu_arch; } #endif int __pure cpu_architecture(void) { BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN); return __cpu_architecture; } static int cpu_has_aliasing_icache(unsigned int arch) { int aliasing_icache; unsigned int id_reg, num_sets, line_size; /* PIPT caches never alias. 
*/ if (icache_is_pipt()) return 0; /* arch specifies the register format */ switch (arch) { case CPU_ARCH_ARMv7: set_csselr(CSSELR_ICACHE | CSSELR_L1); isb(); id_reg = read_ccsidr(); line_size = 4 << ((id_reg & 0x7) + 2); num_sets = ((id_reg >> 13) & 0x7fff) + 1; aliasing_icache = (line_size * num_sets) > PAGE_SIZE; break; case CPU_ARCH_ARMv6: aliasing_icache = read_cpuid_cachetype() & (1 << 11); break; default: /* I-cache aliases will be handled by D-cache aliasing code */ aliasing_icache = 0; } return aliasing_icache; } static void __init cacheid_init(void) { unsigned int arch = cpu_architecture(); if (arch >= CPU_ARCH_ARMv6) { unsigned int cachetype = read_cpuid_cachetype(); if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) { cacheid = 0; } else if ((cachetype & (7 << 29)) == 4 << 29) { /* ARMv7 register format */ arch = CPU_ARCH_ARMv7; cacheid = CACHEID_VIPT_NONALIASING; switch (cachetype & (3 << 14)) { case (1 << 14): cacheid |= CACHEID_ASID_TAGGED; break; case (3 << 14): cacheid |= CACHEID_PIPT; break; } } else { arch = CPU_ARCH_ARMv6; if (cachetype & (1 << 23)) cacheid = CACHEID_VIPT_ALIASING; else cacheid = CACHEID_VIPT_NONALIASING; } if (cpu_has_aliasing_icache(arch)) cacheid |= CACHEID_VIPT_I_ALIASING; } else { cacheid = CACHEID_VIVT; } pr_info("CPU: %s data cache, %s instruction cache\n", cache_is_vivt() ? "VIVT" : cache_is_vipt_aliasing() ? "VIPT aliasing" : cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown", cache_is_vivt() ? "VIVT" : icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" : icache_is_vipt_aliasing() ? "VIPT aliasing" : icache_is_pipt() ? "PIPT" : cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown"); } /* * These functions re-use the assembly code in head.S, which * already provide the required functionality. */ extern struct proc_info_list *lookup_processor_type(unsigned int); void __init early_print(const char *str, ...) { extern void printascii(const char *); char buf[256]; va_list ap; va_start(ap, str); vsnprintf(buf, sizeof(buf), str, ap); va_end(ap); #ifdef CONFIG_DEBUG_LL printascii(buf); #endif printk("%s", buf); } #ifdef CONFIG_ARM_PATCH_IDIV static inline u32 __attribute_const__ sdiv_instruction(void) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { /* "sdiv r0, r0, r1" */ u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1); return __opcode_to_mem_thumb32(insn); } /* "sdiv r0, r0, r1" */ return __opcode_to_mem_arm(0xe710f110); } static inline u32 __attribute_const__ udiv_instruction(void) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { /* "udiv r0, r0, r1" */ u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1); return __opcode_to_mem_thumb32(insn); } /* "udiv r0, r0, r1" */ return __opcode_to_mem_arm(0xe730f110); } static inline u32 __attribute_const__ bx_lr_instruction(void) { if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { /* "bx lr; nop" */ u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0); return __opcode_to_mem_thumb32(insn); } /* "bx lr" */ return __opcode_to_mem_arm(0xe12fff1e); } static void __init patch_aeabi_idiv(void) { extern void __aeabi_uidiv(void); extern void __aeabi_idiv(void); uintptr_t fn_addr; unsigned int mask; mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? 
HWCAP_IDIVT : HWCAP_IDIVA; if (!(elf_hwcap & mask)) return; pr_info("CPU: div instructions available: patching division code\n"); fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1; asm ("" : "+g" (fn_addr)); ((u32 *)fn_addr)[0] = udiv_instruction(); ((u32 *)fn_addr)[1] = bx_lr_instruction(); flush_icache_range(fn_addr, fn_addr + 8); fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1; asm ("" : "+g" (fn_addr)); ((u32 *)fn_addr)[0] = sdiv_instruction(); ((u32 *)fn_addr)[1] = bx_lr_instruction(); flush_icache_range(fn_addr, fn_addr + 8); } #else static inline void patch_aeabi_idiv(void) { } #endif static void __init cpuid_init_hwcaps(void) { int block; u32 isar5; u32 isar6; u32 pfr2; if (cpu_architecture() < CPU_ARCH_ARMv7) return; block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24); if (block >= 2) elf_hwcap |= HWCAP_IDIVA; if (block >= 1) elf_hwcap |= HWCAP_IDIVT; /* LPAE implies atomic ldrd/strd instructions */ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0); if (block >= 5) elf_hwcap |= HWCAP_LPAE; /* check for supported v8 Crypto instructions */ isar5 = read_cpuid_ext(CPUID_EXT_ISAR5); block = cpuid_feature_extract_field(isar5, 4); if (block >= 2) elf_hwcap2 |= HWCAP2_PMULL; if (block >= 1) elf_hwcap2 |= HWCAP2_AES; block = cpuid_feature_extract_field(isar5, 8); if (block >= 1) elf_hwcap2 |= HWCAP2_SHA1; block = cpuid_feature_extract_field(isar5, 12); if (block >= 1) elf_hwcap2 |= HWCAP2_SHA2; block = cpuid_feature_extract_field(isar5, 16); if (block >= 1) elf_hwcap2 |= HWCAP2_CRC32; /* Check for Speculation barrier instruction */ isar6 = read_cpuid_ext(CPUID_EXT_ISAR6); block = cpuid_feature_extract_field(isar6, 12); if (block >= 1) elf_hwcap2 |= HWCAP2_SB; /* Check for Speculative Store Bypassing control */ pfr2 = read_cpuid_ext(CPUID_EXT_PFR2); block = cpuid_feature_extract_field(pfr2, 4); if (block >= 1) elf_hwcap2 |= HWCAP2_SSBS; } static void __init elf_hwcap_fixup(void) { unsigned id = read_cpuid_id(); /* * HWCAP_TLS is available only on 1136 r1p0 and later, * see also kuser_get_tls_init. */ if (read_cpuid_part() == ARM_CPU_PART_ARM1136 && ((id >> 20) & 3) == 0) { elf_hwcap &= ~HWCAP_TLS; return; } /* Verify if CPUID scheme is implemented */ if ((id & 0x000f0000) != 0x000f0000) return; /* * If the CPU supports LDREX/STREX and LDREXB/STREXB, * avoid advertising SWP; it may not be atomic with * multiprocessing cores. */ if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 || (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 && cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3)) elf_hwcap &= ~HWCAP_SWP; } /* * cpu_init - initialise one CPU. * * cpu_init sets up the per-CPU stacks. */ void notrace cpu_init(void) { #ifndef CONFIG_CPU_V7M unsigned int cpu = smp_processor_id(); struct stack *stk = &stacks[cpu]; if (cpu >= NR_CPUS) { pr_crit("CPU%u: bad primary CPU number\n", cpu); BUG(); } /* * This only works on resume and secondary cores. For booting on the * boot cpu, smp_prepare_boot_cpu is called after percpu area setup. */ set_my_cpu_offset(per_cpu_offset(cpu)); cpu_proc_init(); /* * Define the placement constraint for the inline asm directive below. * In Thumb-2, msr with an immediate value is not allowed. 
*/ #ifdef CONFIG_THUMB2_KERNEL #define PLC_l "l" #define PLC_r "r" #else #define PLC_l "I" #define PLC_r "I" #endif /* * setup stacks for re-entrant exception handlers */ __asm__ ( "msr cpsr_c, %1\n\t" "add r14, %0, %2\n\t" "mov sp, r14\n\t" "msr cpsr_c, %3\n\t" "add r14, %0, %4\n\t" "mov sp, r14\n\t" "msr cpsr_c, %5\n\t" "add r14, %0, %6\n\t" "mov sp, r14\n\t" "msr cpsr_c, %7\n\t" "add r14, %0, %8\n\t" "mov sp, r14\n\t" "msr cpsr_c, %9" : : "r" (stk), PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE), "I" (offsetof(struct stack, irq[0])), PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE), "I" (offsetof(struct stack, abt[0])), PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE), "I" (offsetof(struct stack, und[0])), PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE), "I" (offsetof(struct stack, fiq[0])), PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE) : "r14"); #endif } u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; void __init smp_setup_processor_id(void) { int i; u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0; u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpu_logical_map(0) = cpu; for (i = 1; i < nr_cpu_ids; ++i) cpu_logical_map(i) = i == cpu ? 0 : i; /* * clear __my_cpu_offset on boot CPU to avoid hang caused by * using percpu variable early, for example, lockdep will * access percpu variable inside lock_release */ set_my_cpu_offset(0); pr_info("Booting Linux on physical CPU 0x%x\n", mpidr); } struct mpidr_hash mpidr_hash; #ifdef CONFIG_SMP /** * smp_build_mpidr_hash - Pre-compute shifts required at each affinity * level in order to build a linear index from an * MPIDR value. Resulting algorithm is a collision * free hash carried out through shifting and ORing */ static void __init smp_build_mpidr_hash(void) { u32 i, affinity; u32 fs[3], bits[3], ls, mask = 0; /* * Pre-scan the list of MPIDRS and filter out bits that do * not contribute to affinity levels, ie they never toggle. */ for_each_possible_cpu(i) mask |= (cpu_logical_map(i) ^ cpu_logical_map(0)); pr_debug("mask of set bits 0x%x\n", mask); /* * Find and stash the last and first bit set at all affinity levels to * check how many bits are required to represent them. */ for (i = 0; i < 3; i++) { affinity = MPIDR_AFFINITY_LEVEL(mask, i); /* * Find the MSB bit and LSB bits position * to determine how many bits are required * to express the affinity level. */ ls = fls(affinity); fs[i] = affinity ? ffs(affinity) - 1 : 0; bits[i] = ls - fs[i]; } /* * An index can be created from the MPIDR by isolating the * significant bits at each affinity level and by shifting * them in order to compress the 24 bits values space to a * compressed set of values. This is equivalent to hashing * the MPIDR through shifting and ORing. It is a collision free * hash though not minimal since some levels might contain a number * of CPUs that is not an exact power of 2 and their bit * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}. */ mpidr_hash.shift_aff[0] = fs[0]; mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0]; mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] - (bits[1] + bits[0]); mpidr_hash.mask = mask; mpidr_hash.bits = bits[2] + bits[1] + bits[0]; pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n", mpidr_hash.shift_aff[0], mpidr_hash.shift_aff[1], mpidr_hash.shift_aff[2], mpidr_hash.mask, mpidr_hash.bits); /* * 4x is an arbitrary value used to warn on a hash table much bigger * than expected on most systems. 
*/ if (mpidr_hash_size() > 4 * num_possible_cpus()) pr_warn("Large number of MPIDR hash buckets detected\n"); sync_cache_w(&mpidr_hash); } #endif /* * locate processor in the list of supported processor types. The linker * builds this table for us from the entries in arch/arm/mm/proc-*.S */ struct proc_info_list *lookup_processor(u32 midr) { struct proc_info_list *list = lookup_processor_type(midr); if (!list) { pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", smp_processor_id(), midr); while (1) /* can't use cpu_relax() here as it may require MMU setup */; } return list; } static void __init setup_processor(void) { unsigned int midr = read_cpuid_id(); struct proc_info_list *list = lookup_processor(midr); cpu_name = list->cpu_name; __cpu_architecture = __get_cpu_architecture(); init_proc_vtable(list->proc); #ifdef MULTI_TLB cpu_tlb = *list->tlb; #endif #ifdef MULTI_USER cpu_user = *list->user; #endif #ifdef MULTI_CACHE cpu_cache = *list->cache; #endif pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", list->cpu_name, midr, midr & 15, proc_arch[cpu_architecture()], get_cr()); snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", list->arch_name, ENDIANNESS); snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", list->elf_name, ENDIANNESS); elf_hwcap = list->elf_hwcap; cpuid_init_hwcaps(); patch_aeabi_idiv(); #ifndef CONFIG_ARM_THUMB elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); #endif #ifdef CONFIG_MMU init_default_cache_policy(list->__cpu_mm_mmu_flags); #endif erratum_a15_798181_init(); elf_hwcap_fixup(); cacheid_init(); cpu_init(); } void __init dump_machine_table(void) { const struct machine_desc *p; early_print("Available machine support:\n\nID (hex)\tNAME\n"); for_each_machine_desc(p) early_print("%08x\t%s\n", p->nr, p->name); early_print("\nPlease check your kernel config and/or bootloader.\n"); while (true) /* can't use cpu_relax() here as it may require MMU setup */; } int __init arm_add_memory(u64 start, u64 size) { u64 aligned_start; /* * Ensure that start/size are aligned to a page boundary. * Size is rounded down, start is rounded up. */ aligned_start = PAGE_ALIGN(start); if (aligned_start > start + size) size = 0; else size -= aligned_start - start; #ifndef CONFIG_PHYS_ADDR_T_64BIT if (aligned_start > ULONG_MAX) { pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n", start); return -EINVAL; } if (aligned_start + size > ULONG_MAX) { pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n", (long long)start); /* * To ensure bank->start + bank->size is representable in * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB. * This means we lose a page after masking. */ size = ULONG_MAX - aligned_start; } #endif if (aligned_start < PHYS_OFFSET) { if (aligned_start + size <= PHYS_OFFSET) { pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n", aligned_start, aligned_start + size); return -EINVAL; } pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n", aligned_start, (u64)PHYS_OFFSET); size -= PHYS_OFFSET - aligned_start; aligned_start = PHYS_OFFSET; } start = aligned_start; size = size & ~(phys_addr_t)(PAGE_SIZE - 1); /* * Check whether this memory region has non-zero size or * invalid node number. */ if (size == 0) return -EINVAL; memblock_add(start, size); return 0; } /* * Pick out the memory size. 
We look for mem=size@start, * where start and size are "size[KkMm]" */ static int __init early_mem(char *p) { static int usermem __initdata = 0; u64 size; u64 start; char *endp; /* * If the user specifies memory size, we * blow away any automatically generated * size. */ if (usermem == 0) { usermem = 1; memblock_remove(memblock_start_of_DRAM(), memblock_end_of_DRAM() - memblock_start_of_DRAM()); } start = PHYS_OFFSET; size = memparse(p, &endp); if (*endp == '@') start = memparse(endp + 1, NULL); arm_add_memory(start, size); return 0; } early_param("mem", early_mem); static void __init request_standard_resources(const struct machine_desc *mdesc) { phys_addr_t start, end, res_end; struct resource *res; u64 i; kernel_code.start = virt_to_phys(_text); kernel_code.end = virt_to_phys(__init_begin - 1); kernel_data.start = virt_to_phys(_sdata); kernel_data.end = virt_to_phys(_end - 1); for_each_mem_range(i, &start, &end) { unsigned long boot_alias_start; /* * In memblock, end points to the first byte after the * range while in resourses, end points to the last byte in * the range. */ res_end = end - 1; /* * Some systems have a special memory alias which is only * used for booting. We need to advertise this region to * kexec-tools so they know where bootable RAM is located. */ boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); if (!res) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*res)); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; res->end = phys_to_idmap(res_end); res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); } res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); if (!res) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*res)); res->name = "System RAM"; res->start = start; res->end = res_end; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); if (kernel_code.start >= res->start && kernel_code.end <= res->end) request_resource(res, &kernel_code); if (kernel_data.start >= res->start && kernel_data.end <= res->end) request_resource(res, &kernel_data); } if (mdesc->video_start) { video_ram.start = mdesc->video_start; video_ram.end = mdesc->video_end; request_resource(&iomem_resource, &video_ram); } /* * Some machines don't have the possibility of ever * possessing lp0, lp1 or lp2 */ if (mdesc->reserve_lp0) request_resource(&ioport_resource, &lp0); if (mdesc->reserve_lp1) request_resource(&ioport_resource, &lp1); if (mdesc->reserve_lp2) request_resource(&ioport_resource, &lp2); } #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \ defined(CONFIG_EFI) struct screen_info screen_info = { .orig_video_lines = 30, .orig_video_cols = 80, .orig_video_mode = 0, .orig_video_ega_bx = 0, .orig_video_isVGA = 1, .orig_video_points = 8 }; #endif static int __init customize_machine(void) { /* * customizes platform devices, or adds new ones * On DT based machines, we fall back to populating the * machine from the device tree, if no callback is provided, * otherwise we would always need an init_machine callback. 
*/ if (machine_desc->init_machine) machine_desc->init_machine(); return 0; } arch_initcall(customize_machine); static int __init init_machine_late(void) { struct device_node *root; int ret; if (machine_desc->init_late) machine_desc->init_late(); root = of_find_node_by_path("/"); if (root) { ret = of_property_read_string(root, "serial-number", &system_serial); if (ret) system_serial = NULL; } if (!system_serial) system_serial = kasprintf(GFP_KERNEL, "%08x%08x", system_serial_high, system_serial_low); return 0; } late_initcall(init_machine_late); #ifdef CONFIG_KEXEC /* * The crash region must be aligned to 128MB to avoid * zImage relocating below the reserved region. */ #define CRASH_ALIGN (128 << 20) static inline unsigned long long get_total_mem(void) { unsigned long total; total = max_low_pfn - min_low_pfn; return total << PAGE_SHIFT; } /** * reserve_crashkernel() - reserves memory are for crash kernel * * This function reserves memory area given in "crashkernel=" kernel command * line parameter. The memory reserved is used by a dump capture kernel when * primary kernel is crashing. */ static void __init reserve_crashkernel(void) { unsigned long long crash_size, crash_base; unsigned long long total_mem; int ret; total_mem = get_total_mem(); ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); /* invalid value specified or crashkernel=0 */ if (ret || !crash_size) return; if (crash_base <= 0) { unsigned long long crash_max = idmap_to_phys((u32)~0); unsigned long long lowmem_max = __pa(high_memory - 1) + 1; if (crash_max > lowmem_max) crash_max = lowmem_max; crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, crash_max); if (!crash_base) { pr_err("crashkernel reservation failed - No suitable area found.\n"); return; } } else { unsigned long long crash_max = crash_base + crash_size; unsigned long long start; start = memblock_phys_alloc_range(crash_size, SECTION_SIZE, crash_base, crash_max); if (!start) { pr_err("crashkernel reservation failed - memory is in use.\n"); return; } } pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", (unsigned long)(crash_size >> 20), (unsigned long)(crash_base >> 20), (unsigned long)(total_mem >> 20)); /* The crashk resource must always be located in normal mem */ crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; insert_resource(&iomem_resource, &crashk_res); if (arm_has_idmap_alias()) { /* * If we have a special RAM alias for use at boot, we * need to advertise to kexec tools where the alias is. 
*/ static struct resource crashk_boot_res = { .name = "Crash kernel (boot alias)", .flags = IORESOURCE_BUSY | IORESOURCE_MEM, }; crashk_boot_res.start = phys_to_idmap(crash_base); crashk_boot_res.end = crashk_boot_res.start + crash_size - 1; insert_resource(&iomem_resource, &crashk_boot_res); } } #else static inline void reserve_crashkernel(void) {} #endif /* CONFIG_KEXEC */ void __init hyp_mode_check(void) { #ifdef CONFIG_ARM_VIRT_EXT sync_boot_mode(); if (is_hyp_mode_available()) { pr_info("CPU: All CPU(s) started in HYP mode.\n"); pr_info("CPU: Virtualization extensions available.\n"); } else if (is_hyp_mode_mismatched()) { pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n", __boot_cpu_mode & MODE_MASK); pr_warn("CPU: This may indicate a broken bootloader or firmware.\n"); } else pr_info("CPU: All CPU(s) started in SVC mode.\n"); #endif } static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); static int arm_restart(struct notifier_block *nb, unsigned long action, void *data) { __arm_pm_restart(action, data); return NOTIFY_DONE; } static struct notifier_block arm_restart_nb = { .notifier_call = arm_restart, .priority = 128, }; void __init setup_arch(char **cmdline_p) { const struct machine_desc *mdesc = NULL; void *atags_vaddr = NULL; if (__atags_pointer) atags_vaddr = FDT_VIRT_BASE(__atags_pointer); setup_processor(); if (atags_vaddr) { mdesc = setup_machine_fdt(atags_vaddr); if (mdesc) memblock_reserve(__atags_pointer, fdt_totalsize(atags_vaddr)); } if (!mdesc) mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type); if (!mdesc) { early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n"); early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type, __atags_pointer); if (__atags_pointer) early_print(" r2[]=%*ph\n", 16, atags_vaddr); dump_machine_table(); } machine_desc = mdesc; machine_name = mdesc->name; dump_stack_set_arch_desc("%s", mdesc->name); if (mdesc->reboot_mode != REBOOT_HARD) reboot_mode = mdesc->reboot_mode; setup_initial_init_mm(_text, _etext, _edata, _end); /* populate cmd_line too for later use, preserving boot_command_line */ strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); *cmdline_p = cmd_line; early_fixmap_init(); early_ioremap_init(); parse_early_param(); #ifdef CONFIG_MMU early_mm_init(mdesc); #endif setup_dma_zone(mdesc); xen_early_init(); arm_efi_init(); /* * Make sure the calculation for lowmem/highmem is set appropriately * before reserving/allocating any memory */ adjust_lowmem_bounds(); arm_memblock_init(mdesc); /* Memory may have been removed so recalculate the bounds. 
*/ adjust_lowmem_bounds(); early_ioremap_reset(); paging_init(mdesc); kasan_init(); request_standard_resources(mdesc); if (mdesc->restart) { __arm_pm_restart = mdesc->restart; register_restart_handler(&arm_restart_nb); } unflatten_device_tree(); arm_dt_init_cpu_maps(); psci_dt_init(); #ifdef CONFIG_SMP if (is_smp()) { if (!mdesc->smp_init || !mdesc->smp_init()) { if (psci_smp_available()) smp_set_ops(&psci_smp_ops); else if (mdesc->smp) smp_set_ops(mdesc->smp); } smp_init_cpus(); smp_build_mpidr_hash(); } #endif if (!is_smp()) hyp_mode_check(); reserve_crashkernel(); #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; #endif #endif if (mdesc->init_early) mdesc->init_early(); } static int __init topology_init(void) { int cpu; for_each_possible_cpu(cpu) { struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu); register_cpu(&cpuinfo->cpu, cpu); } return 0; } subsys_initcall(topology_init); #ifdef CONFIG_HAVE_PROC_CPU static int __init proc_cpu_init(void) { struct proc_dir_entry *res; res = proc_mkdir("cpu", NULL); if (!res) return -ENOMEM; return 0; } fs_initcall(proc_cpu_init); #endif static const char *hwcap_str[] = { "swp", "half", "thumb", "26bit", "fastmult", "fpa", "vfp", "edsp", "java", "iwmmxt", "crunch", "thumbee", "neon", "vfpv3", "vfpv3d16", "tls", "vfpv4", "idiva", "idivt", "vfpd32", "lpae", "evtstrm", "fphp", "asimdhp", "asimddp", "asimdfhm", "asimdbf16", "i8mm", NULL }; static const char *hwcap2_str[] = { "aes", "pmull", "sha1", "sha2", "crc32", "sb", "ssbs", NULL }; static int c_show(struct seq_file *m, void *v) { int i, j; u32 cpuid; for_each_online_cpu(i) { /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id(); seq_printf(m, "model name\t: %s rev %d (%s)\n", cpu_name, cpuid & 15, elf_platform); #if defined(CONFIG_SMP) seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); #else seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000/HZ), (loops_per_jiffy / (5000/HZ)) % 100); #endif /* dump out the processor features */ seq_puts(m, "Features\t: "); for (j = 0; hwcap_str[j]; j++) if (elf_hwcap & (1 << j)) seq_printf(m, "%s ", hwcap_str[j]); for (j = 0; hwcap2_str[j]; j++) if (elf_hwcap2 & (1 << j)) seq_printf(m, "%s ", hwcap2_str[j]); seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24); seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); if ((cpuid & 0x0008f000) == 0x00000000) { /* pre-ARM7 */ seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4); } else { if ((cpuid & 0x0008f000) == 0x00007000) { /* ARM7 */ seq_printf(m, "CPU variant\t: 0x%02x\n", (cpuid >> 16) & 127); } else { /* post-ARM7 */ seq_printf(m, "CPU variant\t: 0x%x\n", (cpuid >> 20) & 15); } seq_printf(m, "CPU part\t: 0x%03x\n", (cpuid >> 4) & 0xfff); } seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15); } seq_printf(m, "Hardware\t: %s\n", machine_name); seq_printf(m, "Revision\t: %04x\n", system_rev); seq_printf(m, "Serial\t\t: %s\n", system_serial); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < 1 ? 
(void *)1 : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return NULL; } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show };
linux-master
arch/arm/kernel/setup.c
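The c_show() routine in setup.c derives the "CPU implementer/variant/part/revision" lines of /proc/cpuinfo purely from bit fields of the main ID register. A minimal user-space sketch of that same decoding, fed a hypothetical MIDR value (0x410fc075) rather than anything read from hardware:

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>
#include <stdint.h>

static void decode_midr(uint32_t cpuid)
{
	printf("CPU implementer\t: 0x%02x\n", (unsigned)(cpuid >> 24));

	if ((cpuid & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 layout: one wide part field */
		printf("CPU part\t: %07x\n", (unsigned)(cpuid >> 4));
	} else if ((cpuid & 0x0008f000) == 0x00007000) {
		/* ARM7 layout */
		printf("CPU variant\t: 0x%02x\n", (unsigned)((cpuid >> 16) & 127));
		printf("CPU part\t: 0x%03x\n", (unsigned)((cpuid >> 4) & 0xfff));
	} else {
		/* post-ARM7 layout */
		printf("CPU variant\t: 0x%x\n", (unsigned)((cpuid >> 20) & 15));
		printf("CPU part\t: 0x%03x\n", (unsigned)((cpuid >> 4) & 0xfff));
	}
	printf("CPU revision\t: %d\n", (int)(cpuid & 15));
}

int main(void)
{
	decode_midr(0x410fc075);	/* hypothetical sample MIDR */
	return 0;
}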
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>

#include <asm/delay.h>

#include <asm/arch_timer.h>
#include <clocksource/arm_arch_timer.h>

static unsigned long arch_timer_read_counter_long(void)
{
	return arch_timer_read_counter();
}

static struct delay_timer arch_delay_timer;

static void __init arch_timer_delay_timer_register(void)
{
	/* Use the architected timer for the delay loop. */
	arch_delay_timer.read_current_timer = arch_timer_read_counter_long;
	arch_delay_timer.freq = arch_timer_get_rate();
	register_current_timer_delay(&arch_delay_timer);
}

int __init arch_timer_arch_init(void)
{
	u32 arch_timer_rate = arch_timer_get_rate();

	if (arch_timer_rate == 0)
		return -ENXIO;

	arch_timer_delay_timer_register();

	return 0;
}
linux-master
arch/arm/kernel/arch_timer.c
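register_current_timer_delay() lets the delay loop busy-wait on the architected counter instead of a calibrated software loop; the delay core then only has to translate a requested delay into counter ticks. A rough user-space illustration of that tick arithmetic, assuming a hypothetical 24 MHz counter (the real rate comes from arch_timer_get_rate()):

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>
#include <stdint.h>

/* Number of counter ticks that must elapse for a given delay. */
static uint64_t usecs_to_ticks(uint64_t usecs, uint64_t counter_hz)
{
	return (usecs * counter_hz) / 1000000ULL;
}

int main(void)
{
	uint64_t hz = 24000000ULL;	/* assumed rate; real code queries the timer */

	printf("10 us -> %llu ticks\n", (unsigned long long)usecs_to_ticks(10, hz));
	printf("1 ms  -> %llu ticks\n", (unsigned long long)usecs_to_ticks(1000, hz));
	return 0;
}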
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/armksyms.c * * Copyright (C) 2000 Russell King */ #include <linux/export.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/in6.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/arm-smccc.h> #include <asm/checksum.h> #include <asm/ftrace.h> /* * libgcc functions - functions that are used internally by the * compiler... (prototypes are not correct though, but that * doesn't really matter since they're not versioned). */ extern void __ashldi3(void); extern void __ashrdi3(void); extern void __divsi3(void); extern void __lshrdi3(void); extern void __modsi3(void); extern void __muldi3(void); extern void __ucmpdi2(void); extern void __udivsi3(void); extern void __umodsi3(void); extern void __do_div64(void); extern void __bswapsi2(void); extern void __bswapdi2(void); extern void __aeabi_idiv(void); extern void __aeabi_idivmod(void); extern void __aeabi_lasr(void); extern void __aeabi_llsl(void); extern void __aeabi_llsr(void); extern void __aeabi_lmul(void); extern void __aeabi_uidiv(void); extern void __aeabi_uidivmod(void); extern void __aeabi_ulcmp(void); extern void fpundefinstr(void); void mmioset(void *, unsigned int, size_t); void mmiocpy(void *, const void *, size_t); /* platform dependent support */ EXPORT_SYMBOL(arm_delay_ops); /* networking */ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_from_user); EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_ipv6_magic); /* io */ #ifndef __raw_readsb EXPORT_SYMBOL(__raw_readsb); #endif #ifndef __raw_readsw EXPORT_SYMBOL(__raw_readsw); #endif #ifndef __raw_readsl EXPORT_SYMBOL(__raw_readsl); #endif #ifndef __raw_writesb EXPORT_SYMBOL(__raw_writesb); #endif #ifndef __raw_writesw EXPORT_SYMBOL(__raw_writesw); #endif #ifndef __raw_writesl EXPORT_SYMBOL(__raw_writesl); #endif /* string / mem functions */ EXPORT_SYMBOL(strchr); EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(__memset32); EXPORT_SYMBOL(__memset64); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(memchr); EXPORT_SYMBOL(mmioset); EXPORT_SYMBOL(mmiocpy); #ifdef CONFIG_MMU EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(arm_copy_from_user); EXPORT_SYMBOL(arm_copy_to_user); EXPORT_SYMBOL(arm_clear_user); EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); EXPORT_SYMBOL(__get_user_8); #ifdef __ARMEB__ EXPORT_SYMBOL(__get_user_64t_1); EXPORT_SYMBOL(__get_user_64t_2); EXPORT_SYMBOL(__get_user_64t_4); EXPORT_SYMBOL(__get_user_32t_8); #endif EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); #endif /* gcc lib functions */ EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__ucmpdi2); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__do_div64); EXPORT_SYMBOL(__bswapsi2); EXPORT_SYMBOL(__bswapdi2); #ifdef CONFIG_AEABI EXPORT_SYMBOL(__aeabi_idiv); EXPORT_SYMBOL(__aeabi_idivmod); EXPORT_SYMBOL(__aeabi_lasr); EXPORT_SYMBOL(__aeabi_llsl); EXPORT_SYMBOL(__aeabi_llsr); EXPORT_SYMBOL(__aeabi_lmul); EXPORT_SYMBOL(__aeabi_uidiv); EXPORT_SYMBOL(__aeabi_uidivmod); EXPORT_SYMBOL(__aeabi_ulcmp); #endif /* bitops */ EXPORT_SYMBOL(_set_bit); EXPORT_SYMBOL(_test_and_set_bit); EXPORT_SYMBOL(_clear_bit); EXPORT_SYMBOL(_test_and_clear_bit); EXPORT_SYMBOL(_change_bit); 
EXPORT_SYMBOL(_test_and_change_bit); EXPORT_SYMBOL(_find_first_zero_bit_le); EXPORT_SYMBOL(_find_next_zero_bit_le); EXPORT_SYMBOL(_find_first_bit_le); EXPORT_SYMBOL(_find_next_bit_le); #ifdef __ARMEB__ EXPORT_SYMBOL(_find_first_zero_bit_be); EXPORT_SYMBOL(_find_next_zero_bit_be); EXPORT_SYMBOL(_find_first_bit_be); EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(__gnu_mcount_nc); #endif #ifdef CONFIG_ARM_PATCH_PHYS_VIRT EXPORT_SYMBOL(__pv_phys_pfn_offset); EXPORT_SYMBOL(__pv_offset); #endif #ifdef CONFIG_HAVE_ARM_SMCCC EXPORT_SYMBOL(__arm_smccc_smc); EXPORT_SYMBOL(__arm_smccc_hvc); #endif
linux-master
arch/arm/kernel/armksyms.c
// SPDX-License-Identifier: GPL-2.0-only /* * Hibernation support specific for ARM * * Derived from work on ARM hibernation support by: * * Ubuntu project, hibernation support for mach-dove * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) * https://lkml.org/lkml/2010/6/18/4 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html * https://patchwork.kernel.org/patch/96442/ * * Copyright (C) 2006 Rafael J. Wysocki <[email protected]> */ #include <linux/mm.h> #include <linux/suspend.h> #include <asm/system_misc.h> #include <asm/idmap.h> #include <asm/suspend.h> #include <asm/page.h> #include <asm/sections.h> #include "reboot.h" int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin); unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1); return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn); } void notrace save_processor_state(void) { WARN_ON(num_online_cpus() != 1); local_fiq_disable(); } void notrace restore_processor_state(void) { local_fiq_enable(); } /* * Snapshot kernel memory and reset the system. * * swsusp_save() is executed in the suspend finisher so that the CPU * context pointer and memory are part of the saved image, which is * required by the resume kernel image to restart execution from * swsusp_arch_suspend(). * * soft_restart is not technically needed, but is used to get success * returned from cpu_suspend. * * When soft reboot completes, the hibernation snapshot is written out. */ static int notrace arch_save_image(unsigned long unused) { int ret; ret = swsusp_save(); if (ret == 0) _soft_restart(virt_to_idmap(cpu_resume), false); return ret; } /* * Save the current CPU state before suspend / poweroff. */ int notrace swsusp_arch_suspend(void) { return cpu_suspend(0, arch_save_image); } /* * Restore page contents for physical pages that were in use during loading * hibernation image. Switch to idmap_pgd so the physical page tables * are overwritten with the same contents. */ static void notrace arch_restore_image(void *unused) { struct pbe *pbe; cpu_switch_mm(idmap_pgd, &init_mm); for (pbe = restore_pblist; pbe; pbe = pbe->next) copy_page(pbe->orig_address, pbe->address); _soft_restart(virt_to_idmap(cpu_resume), false); } static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata; /* * Resume from the hibernation image. * Due to the kernel heap / data restore, stack contents change underneath * and that would make function calls impossible; switch to a temporary * stack within the nosave region to avoid that problem. */ int swsusp_arch_resume(void) { call_with_stack(arch_restore_image, 0, resume_stack + ARRAY_SIZE(resume_stack)); return 0; }
linux-master
arch/arm/kernel/hibernate.c
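pfn_is_nosave() in hibernate.c reduces to an inclusive page-frame range test over the __nosave_begin/__nosave_end section bounds. The same check in stand-alone form, with hypothetical addresses standing in for the linker symbols and an assumed 4 KiB page size:

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12	/* assumed 4 KiB pages */

static int pfn_in_nosave(unsigned long pfn,
			 unsigned long nosave_begin, unsigned long nosave_end)
{
	unsigned long begin_pfn = nosave_begin >> EXAMPLE_PAGE_SHIFT;
	unsigned long end_pfn = (nosave_end - 1) >> EXAMPLE_PAGE_SHIFT;

	return pfn >= begin_pfn && pfn <= end_pfn;
}

int main(void)
{
	unsigned long begin = 0x80200000UL;	/* hypothetical __nosave_begin */
	unsigned long end   = 0x80203000UL;	/* hypothetical __nosave_end */

	printf("pfn 0x80201 in nosave: %d\n", pfn_in_nosave(0x80201UL, begin, end));
	printf("pfn 0x80204 in nosave: %d\n", pfn_in_nosave(0x80204UL, begin, end));
	return 0;
}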
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/dma.c * * Copyright (C) 1995-2000 Russell King * * Front-end to the DMA handling. This handles the allocation/freeing * of DMA channels, and provides a unified interface to the machines * DMA facilities. */ #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <asm/dma.h> #include <asm/mach/dma.h> DEFINE_RAW_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; static inline dma_t *dma_channel(unsigned int chan) { if (chan >= MAX_DMA_CHANNELS) return NULL; return dma_chan[chan]; } int __init isa_dma_add(unsigned int chan, dma_t *dma) { if (!dma->d_ops) return -EINVAL; sg_init_table(&dma->buf, 1); if (dma_chan[chan]) return -EBUSY; dma_chan[chan] = dma; return 0; } /* * Request DMA channel * * On certain platforms, we have to allocate an interrupt as well... */ int request_dma(unsigned int chan, const char *device_id) { dma_t *dma = dma_channel(chan); int ret; if (!dma) goto bad_dma; if (xchg(&dma->lock, 1) != 0) goto busy; dma->device_id = device_id; dma->active = 0; dma->invalid = 1; ret = 0; if (dma->d_ops->request) ret = dma->d_ops->request(chan, dma); if (ret) xchg(&dma->lock, 0); return ret; bad_dma: pr_err("dma: trying to allocate DMA%d\n", chan); return -EINVAL; busy: return -EBUSY; } EXPORT_SYMBOL(request_dma); /* * Free DMA channel * * On certain platforms, we have to free interrupt as well... */ void free_dma(unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma) goto bad_dma; if (dma->active) { pr_err("dma%d: freeing active DMA\n", chan); dma->d_ops->disable(chan, dma); dma->active = 0; } if (xchg(&dma->lock, 0) != 0) { if (dma->d_ops->free) dma->d_ops->free(chan, dma); return; } pr_err("dma%d: trying to free free DMA\n", chan); return; bad_dma: pr_err("dma: trying to free DMA%d\n", chan); } EXPORT_SYMBOL(free_dma); /* Set DMA Scatter-Gather list */ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) { dma_t *dma = dma_channel(chan); if (dma->active) pr_err("dma%d: altering DMA SG while DMA active\n", chan); dma->sg = sg; dma->sgcount = nr_sg; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_sg); /* Set DMA address * * Copy address to the structure, and set the invalid bit */ void __set_dma_addr (unsigned int chan, void *addr) { dma_t *dma = dma_channel(chan); if (dma->active) pr_err("dma%d: altering DMA address while DMA active\n", chan); dma->sg = NULL; dma->addr = addr; dma->invalid = 1; } EXPORT_SYMBOL(__set_dma_addr); /* Set DMA byte count * * Copy address to the structure, and set the invalid bit */ void set_dma_count (unsigned int chan, unsigned long count) { dma_t *dma = dma_channel(chan); if (dma->active) pr_err("dma%d: altering DMA count while DMA active\n", chan); dma->sg = NULL; dma->count = count; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_count); /* Set DMA direction mode */ void set_dma_mode (unsigned int chan, unsigned int mode) { dma_t *dma = dma_channel(chan); if (dma->active) pr_err("dma%d: altering DMA mode while DMA active\n", chan); dma->dma_mode = mode; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_mode); /* Enable DMA channel */ void enable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 0) { dma->active = 1; dma->d_ops->enable(chan, dma); } return; free_dma: pr_err("dma%d: trying to enable free DMA\n", chan); BUG(); } 
EXPORT_SYMBOL(enable_dma); /* Disable DMA channel */ void disable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 1) { dma->active = 0; dma->d_ops->disable(chan, dma); } return; free_dma: pr_err("dma%d: trying to disable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(disable_dma); /* * Is the specified DMA channel active? */ int dma_channel_active(unsigned int chan) { dma_t *dma = dma_channel(chan); return dma->active; } EXPORT_SYMBOL(dma_channel_active); void set_dma_page(unsigned int chan, char pagenr) { pr_err("dma%d: trying to set_dma_page\n", chan); } EXPORT_SYMBOL(set_dma_page); void set_dma_speed(unsigned int chan, int cycle_ns) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->setspeed) ret = dma->d_ops->setspeed(chan, dma, cycle_ns); dma->speed = ret; } EXPORT_SYMBOL(set_dma_speed); int get_dma_residue(unsigned int chan) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->residue) ret = dma->d_ops->residue(chan, dma); return ret; } EXPORT_SYMBOL(get_dma_residue); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { dma_t *dma = dma_channel(i); if (dma && dma->lock) seq_printf(m, "%2d: %s\n", i, dma->device_id); } return 0; } static int __init proc_dma_init(void) { proc_create_single("dma", 0, NULL, proc_dma_show); return 0; } __initcall(proc_dma_init); #endif
linux-master
arch/arm/kernel/dma.c
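The comments in dma.c describe the intended call sequence for this legacy ISA-style DMA API: claim the channel, program it while its state is marked invalid, then enable it, and tear down in the reverse order. A minimal sketch of how a platform driver might drive it, using only the functions exported above; DMA_MODE_READ is assumed to come from <asm/dma.h>, and this only builds inside the kernel tree:

/* Illustrative sketch only; not part of the kernel source above. */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/dma.h>

static int example_start_transfer(unsigned int chan, void *buf, size_t len)
{
	int ret;

	/* Claim the channel; returns -EBUSY if someone else owns it. */
	ret = request_dma(chan, "example-device");
	if (ret)
		return ret;

	/* Program the (still invalid) channel state, then kick it off. */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory; assumed constant */
	__set_dma_addr(chan, buf);
	set_dma_count(chan, len);
	enable_dma(chan);

	return 0;
}

static void example_stop_transfer(unsigned int chan)
{
	disable_dma(chan);
	free_dma(chan);
}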
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Uwe Kleine-Koenig for Pengutronix
 */
#include <linux/io.h>
#include <linux/reboot.h>

#include <asm/barrier.h>
#include <asm/v7m.h>

void armv7m_restart(enum reboot_mode mode, const char *cmd)
{
	dsb();

	__raw_writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
		     BASEADDR_V7M_SCB + V7M_SCB_AIRCR);

	dsb();
}
linux-master
arch/arm/kernel/v7m.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014-2017 Linaro Ltd. <[email protected]> */ #include <linux/elf.h> #include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sort.h> #include <linux/moduleloader.h> #include <asm/cache.h> #include <asm/opcodes.h> #ifdef CONFIG_THUMB2_KERNEL #define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \ (PLT_ENT_STRIDE - 4)) #else #define PLT_ENT_LDR __opcode_to_mem_arm(0xe59ff000 | \ (PLT_ENT_STRIDE - 8)) #endif static const u32 fixed_plts[] = { #ifdef CONFIG_DYNAMIC_FTRACE FTRACE_ADDR, MCOUNT_ADDR, #endif }; static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt) { int i; if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count) return; pltsec->plt_count = ARRAY_SIZE(fixed_plts); for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i) plt->ldr[i] = PLT_ENT_LDR; BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit)); memcpy(plt->lit, fixed_plts, sizeof(fixed_plts)); } u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) { struct mod_plt_sec *pltsec = !within_module_init(loc, mod) ? &mod->arch.core : &mod->arch.init; struct plt_entries *plt; int idx; /* cache the address, ELF header is available only during module load */ if (!pltsec->plt_ent) pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr; plt = pltsec->plt_ent; prealloc_fixed(pltsec, plt); for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx) if (plt->lit[idx] == val) return (u32)&plt->ldr[idx]; idx = 0; /* * Look for an existing entry pointing to 'val'. Given that the * relocations are sorted, this will be the last entry we allocated. * (if one exists). */ if (pltsec->plt_count > 0) { plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT; idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT; if (plt->lit[idx] == val) return (u32)&plt->ldr[idx]; idx = (idx + 1) % PLT_ENT_COUNT; if (!idx) plt++; } pltsec->plt_count++; BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size); if (!idx) /* Populate a new set of entries */ *plt = (struct plt_entries){ { [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, }, { val, } }; else plt->lit[idx] = val; return (u32)&plt->ldr[idx]; } #define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b)) static int cmp_rel(const void *a, const void *b) { const Elf32_Rel *x = a, *y = b; int i; /* sort by type and symbol index */ i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info)); if (i == 0) i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info)); return i; } static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel) { u32 *tval = (u32 *)(base + rel->r_offset); /* * Do a bitwise compare on the raw addend rather than fully decoding * the offset and doing an arithmetic comparison. * Note that a zero-addend jump/call relocation is encoded taking the * PC bias into account, i.e., -8 for ARM and -4 for Thumb2. */ switch (ELF32_R_TYPE(rel->r_info)) { u16 upper, lower; case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]); lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]); return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe; case R_ARM_CALL: case R_ARM_PC24: case R_ARM_JUMP24: return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe; } BUG(); } static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num) { const Elf32_Rel *prev; /* * Entries are sorted by type and symbol index. That means that, * if a duplicate entry exists, it must be in the preceding * slot. 
*/ if (!num) return false; prev = rel + num - 1; return cmp_rel(rel + num, prev) == 0 && is_zero_addend_relocation(base, prev); } /* Count how many PLT entries we may need */ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, const Elf32_Rel *rel, int num, Elf32_Word dstidx) { unsigned int ret = 0; const Elf32_Sym *s; int i; for (i = 0; i < num; i++) { switch (ELF32_R_TYPE(rel[i].r_info)) { case R_ARM_CALL: case R_ARM_PC24: case R_ARM_JUMP24: case R_ARM_THM_CALL: case R_ARM_THM_JUMP24: /* * We only have to consider branch targets that resolve * to symbols that are defined in a different section. * This is not simply a heuristic, it is a fundamental * limitation, since there is no guaranteed way to emit * PLT entries sufficiently close to the branch if the * section size exceeds the range of a branch * instruction. So ignore relocations against defined * symbols if they live in the same section as the * relocation target. */ s = syms + ELF32_R_SYM(rel[i].r_info); if (s->st_shndx == dstidx) break; /* * Jump relocations with non-zero addends against * undefined symbols are supported by the ELF spec, but * do not occur in practice (e.g., 'jump n bytes past * the entry point of undefined function symbol f'). * So we need to support them, but there is no need to * take them into consideration when trying to optimize * this code. So let's only check for duplicates when * the addend is zero. (Note that calls into the core * module via init PLT entries could involve section * relative symbol references with non-zero addends, for * which we may end up emitting duplicates, but the init * PLT is released along with the rest of the .init * region as soon as module loading completes.) */ if (!is_zero_addend_relocation(base, rel + i) || !duplicate_rel(base, rel, i)) ret++; } } return ret; } int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { unsigned long core_plts = ARRAY_SIZE(fixed_plts); unsigned long init_plts = ARRAY_SIZE(fixed_plts); Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; Elf32_Sym *syms = NULL; /* * To store the PLTs, we expand the .text section for core module code * and for initialization code. 
*/ for (s = sechdrs; s < sechdrs_end; ++s) { if (strcmp(".plt", secstrings + s->sh_name) == 0) mod->arch.core.plt = s; else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) mod->arch.init.plt = s; else if (s->sh_type == SHT_SYMTAB) syms = (Elf32_Sym *)s->sh_addr; } if (!mod->arch.core.plt || !mod->arch.init.plt) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; } if (!syms) { pr_err("%s: module symtab section missing\n", mod->name); return -ENOEXEC; } for (s = sechdrs + 1; s < sechdrs_end; ++s) { Elf32_Rel *rels = (void *)ehdr + s->sh_offset; int numrels = s->sh_size / sizeof(Elf32_Rel); Elf32_Shdr *dstsec = sechdrs + s->sh_info; if (s->sh_type != SHT_REL) continue; /* ignore relocations that operate on non-exec sections */ if (!(dstsec->sh_flags & SHF_EXECINSTR)) continue; /* sort by type and symbol index */ sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL); if (!module_init_layout_section(secstrings + dstsec->sh_name)) core_plts += count_plts(syms, dstsec->sh_addr, rels, numrels, s->sh_info); else init_plts += count_plts(syms, dstsec->sh_addr, rels, numrels, s->sh_info); } mod->arch.core.plt->sh_type = SHT_NOBITS; mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.core.plt_count = 0; mod->arch.core.plt_ent = NULL; mod->arch.init.plt->sh_type = SHT_NOBITS; mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES; mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, sizeof(struct plt_entries)); mod->arch.init.plt_count = 0; mod->arch.init.plt_ent = NULL; pr_debug("%s: plt=%x, init.plt=%x\n", __func__, mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size); return 0; } bool in_module_plt(unsigned long loc) { struct module *mod; bool ret; preempt_disable(); mod = __module_text_address(loc); ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE || loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE); preempt_enable(); return ret; }
linux-master
arch/arm/kernel/module-plts.c
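Each PLT veneer emitted by module-plts.c is a single PC-relative load, ldr pc, [pc, #offset], whose offset lands on the literal slot exactly one stride past the load itself (the PC reads as the instruction address plus 8 in ARM state, hence the "- 8" in PLT_ENT_LDR). A small user-space check of that encoding, assuming a hypothetical PLT_ENT_STRIDE of 32 bytes; the real value is defined elsewhere, in asm/module.h:

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PLT_ENT_STRIDE	32u	/* assumed stride for illustration */

int main(void)
{
	/* ARM-mode veneer: ldr pc, [pc, #(stride - 8)] */
	uint32_t insn = 0xe59ff000u | (EXAMPLE_PLT_ENT_STRIDE - 8);
	uint32_t imm = insn & 0xfff;

	printf("veneer opcode : 0x%08x\n", (unsigned)insn);
	/* Target = instruction address + 8 (PC bias) + immediate,
	 * i.e. exactly one stride past the ldr itself. */
	printf("load target   : +%u bytes from the veneer\n", (unsigned)(imm + 8));
	return 0;
}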
// SPDX-License-Identifier: GPL-2.0 /* * ARMv5 [xscale] Performance counter handling code. * * Copyright (C) 2010, ARM Ltd., Will Deacon <[email protected]> * * Based on the previous xscale OProfile code. * * There are two variants of the xscale PMU that we support: * - xscale1pmu: 2 event counters and a cycle counter * - xscale2pmu: 4 event counters and a cycle counter * The two variants share event definitions, but have different * PMU structures. */ #ifdef CONFIG_CPU_XSCALE #include <asm/cputype.h> #include <asm/irq_regs.h> #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> enum xscale_perf_types { XSCALE_PERFCTR_ICACHE_MISS = 0x00, XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01, XSCALE_PERFCTR_DATA_STALL = 0x02, XSCALE_PERFCTR_ITLB_MISS = 0x03, XSCALE_PERFCTR_DTLB_MISS = 0x04, XSCALE_PERFCTR_BRANCH = 0x05, XSCALE_PERFCTR_BRANCH_MISS = 0x06, XSCALE_PERFCTR_INSTRUCTION = 0x07, XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08, XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09, XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A, XSCALE_PERFCTR_DCACHE_MISS = 0x0B, XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C, XSCALE_PERFCTR_PC_CHANGED = 0x0D, XSCALE_PERFCTR_BCU_REQUEST = 0x10, XSCALE_PERFCTR_BCU_FULL = 0x11, XSCALE_PERFCTR_BCU_DRAIN = 0x12, XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14, XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15, XSCALE_PERFCTR_RMW = 0x16, /* XSCALE_PERFCTR_CCNT is not hardware defined */ XSCALE_PERFCTR_CCNT = 0xFE, XSCALE_PERFCTR_UNUSED = 0xFF, }; enum xscale_counters { XSCALE_CYCLE_COUNTER = 0, XSCALE_COUNTER0, XSCALE_COUNTER1, XSCALE_COUNTER2, XSCALE_COUNTER3, }; static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER, }; static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, }; #define XSCALE_PMU_ENABLE 0x001 #define XSCALE_PMN_RESET 0x002 #define XSCALE_CCNT_RESET 0x004 #define XSCALE_PMU_RESET (CCNT_RESET | PMN_RESET) #define XSCALE_PMU_CNT64 0x008 #define XSCALE1_OVERFLOWED_MASK 0x700 #define XSCALE1_CCOUNT_OVERFLOW 0x400 #define XSCALE1_COUNT0_OVERFLOW 0x100 #define XSCALE1_COUNT1_OVERFLOW 0x200 #define XSCALE1_CCOUNT_INT_EN 0x040 #define XSCALE1_COUNT0_INT_EN 0x010 #define XSCALE1_COUNT1_INT_EN 0x020 #define XSCALE1_COUNT0_EVT_SHFT 12 #define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT) #define XSCALE1_COUNT1_EVT_SHFT 20 #define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT) static inline u32 xscale1pmu_read_pmnc(void) { u32 val; asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val)); return val; } static inline void 
xscale1pmu_write_pmnc(u32 val) { /* upper 4bits and 7, 11 are write-as-0 */ val &= 0xffff77f; asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); } static inline int xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, enum xscale_counters counter) { int ret = 0; switch (counter) { case XSCALE_CYCLE_COUNTER: ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; break; case XSCALE_COUNTER0: ret = pmnc & XSCALE1_COUNT0_OVERFLOW; break; case XSCALE_COUNTER1: ret = pmnc & XSCALE1_COUNT1_OVERFLOW; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", counter); } return ret; } static irqreturn_t xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmnc; struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; /* * NOTE: there's an A stepping erratum that states if an overflow * bit already exists and another occurs, the previous * Overflow bit gets cleared. There's no workaround. * Fixed in B stepping or later. */ pmnc = xscale1pmu_read_pmnc(); /* * Write the value back to clear the overflow flags. Overflow * flags remain in pmnc for use below. We also disable the PMU * while we process the interrupt. */ xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) return IRQ_NONE; regs = get_irq_regs(); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; if (!event) continue; if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) continue; hwc = &event->hw; armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } irq_work_run(); /* * Re-enable the PMU. 
*/ pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; xscale1pmu_write_pmnc(pmnc); return IRQ_HANDLED; } static void xscale1pmu_enable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; switch (idx) { case XSCALE_CYCLE_COUNTER: mask = 0; evt = XSCALE1_CCOUNT_INT_EN; break; case XSCALE_COUNTER0: mask = XSCALE1_COUNT0_EVT_MASK; evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | XSCALE1_COUNT0_INT_EN; break; case XSCALE_COUNTER1: mask = XSCALE1_COUNT1_EVT_MASK; evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | XSCALE1_COUNT1_INT_EN; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~mask; val |= evt; xscale1pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale1pmu_disable_event(struct perf_event *event) { unsigned long val, mask, evt, flags; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; switch (idx) { case XSCALE_CYCLE_COUNTER: mask = XSCALE1_CCOUNT_INT_EN; evt = 0; break; case XSCALE_COUNTER0: mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; break; case XSCALE_COUNTER1: mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~mask; val |= evt; xscale1pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; if (XSCALE_PERFCTR_CCNT == hwc->config_base) { if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; return XSCALE_CYCLE_COUNTER; } else { if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) return XSCALE_COUNTER1; if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) return XSCALE_COUNTER0; return -EAGAIN; } } static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { clear_bit(event->hw.idx, cpuc->used_mask); } static void xscale1pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val |= XSCALE_PMU_ENABLE; xscale1pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale1pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale1pmu_read_pmnc(); val &= ~XSCALE_PMU_ENABLE; xscale1pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static inline u64 xscale1pmu_read_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; u32 val = 0; switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); break; case XSCALE_COUNTER0: asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); break; case 
XSCALE_COUNTER1: asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); break; } return val; } static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); break; case XSCALE_COUNTER0: asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); break; case XSCALE_COUNTER1: asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); break; } } static int xscale_map_event(struct perf_event *event) { return armpmu_map_event(event, &xscale_perf_map, &xscale_perf_cache_map, 0xFF); } static int xscale1pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "armv5_xscale1"; cpu_pmu->handle_irq = xscale1pmu_handle_irq; cpu_pmu->enable = xscale1pmu_enable_event; cpu_pmu->disable = xscale1pmu_disable_event; cpu_pmu->read_counter = xscale1pmu_read_counter; cpu_pmu->write_counter = xscale1pmu_write_counter; cpu_pmu->get_event_idx = xscale1pmu_get_event_idx; cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; cpu_pmu->start = xscale1pmu_start; cpu_pmu->stop = xscale1pmu_stop; cpu_pmu->map_event = xscale_map_event; cpu_pmu->num_events = 3; return 0; } #define XSCALE2_OVERFLOWED_MASK 0x01f #define XSCALE2_CCOUNT_OVERFLOW 0x001 #define XSCALE2_COUNT0_OVERFLOW 0x002 #define XSCALE2_COUNT1_OVERFLOW 0x004 #define XSCALE2_COUNT2_OVERFLOW 0x008 #define XSCALE2_COUNT3_OVERFLOW 0x010 #define XSCALE2_CCOUNT_INT_EN 0x001 #define XSCALE2_COUNT0_INT_EN 0x002 #define XSCALE2_COUNT1_INT_EN 0x004 #define XSCALE2_COUNT2_INT_EN 0x008 #define XSCALE2_COUNT3_INT_EN 0x010 #define XSCALE2_COUNT0_EVT_SHFT 0 #define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) #define XSCALE2_COUNT1_EVT_SHFT 8 #define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) #define XSCALE2_COUNT2_EVT_SHFT 16 #define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) #define XSCALE2_COUNT3_EVT_SHFT 24 #define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) static inline u32 xscale2pmu_read_pmnc(void) { u32 val; asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); /* bits 1-2 and 4-23 are read-unpredictable */ return val & 0xff000009; } static inline void xscale2pmu_write_pmnc(u32 val) { /* bits 4-23 are write-as-0, 24-31 are write ignored */ val &= 0xf; asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); } static inline u32 xscale2pmu_read_overflow_flags(void) { u32 val; asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); return val; } static inline void xscale2pmu_write_overflow_flags(u32 val) { asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); } static inline u32 xscale2pmu_read_event_select(void) { u32 val; asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); return val; } static inline void xscale2pmu_write_event_select(u32 val) { asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); } static inline u32 xscale2pmu_read_int_enable(void) { u32 val; asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); return val; } static void xscale2pmu_write_int_enable(u32 val) { asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); } static inline int xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, enum xscale_counters counter) { int ret = 0; switch (counter) { case XSCALE_CYCLE_COUNTER: ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; break; case XSCALE_COUNTER0: ret = of_flags & XSCALE2_COUNT0_OVERFLOW; break; case XSCALE_COUNTER1: ret = of_flags & XSCALE2_COUNT1_OVERFLOW; break; case XSCALE_COUNTER2: ret = 
of_flags & XSCALE2_COUNT2_OVERFLOW; break; case XSCALE_COUNTER3: ret = of_flags & XSCALE2_COUNT3_OVERFLOW; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", counter); } return ret; } static irqreturn_t xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu) { unsigned long pmnc, of_flags; struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; /* Disable the PMU. */ pmnc = xscale2pmu_read_pmnc(); xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); /* Check the overflow flag register. */ of_flags = xscale2pmu_read_overflow_flags(); if (!(of_flags & XSCALE2_OVERFLOWED_MASK)) return IRQ_NONE; /* Clear the overflow bits. */ xscale2pmu_write_overflow_flags(of_flags); regs = get_irq_regs(); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; if (!event) continue; if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) continue; hwc = &event->hw; armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } irq_work_run(); /* * Re-enable the PMU. */ pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE; xscale2pmu_write_pmnc(pmnc); return IRQ_HANDLED; } static void xscale2pmu_enable_event(struct perf_event *event) { unsigned long flags, ien, evtsel; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); evtsel = xscale2pmu_read_event_select(); switch (idx) { case XSCALE_CYCLE_COUNTER: ien |= XSCALE2_CCOUNT_INT_EN; break; case XSCALE_COUNTER0: ien |= XSCALE2_COUNT0_INT_EN; evtsel &= ~XSCALE2_COUNT0_EVT_MASK; evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT; break; case XSCALE_COUNTER1: ien |= XSCALE2_COUNT1_INT_EN; evtsel &= ~XSCALE2_COUNT1_EVT_MASK; evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT; break; case XSCALE_COUNTER2: ien |= XSCALE2_COUNT2_INT_EN; evtsel &= ~XSCALE2_COUNT2_EVT_MASK; evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT; break; case XSCALE_COUNTER3: ien |= XSCALE2_COUNT3_INT_EN; evtsel &= ~XSCALE2_COUNT3_EVT_MASK; evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_write_event_select(evtsel); xscale2pmu_write_int_enable(ien); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale2pmu_disable_event(struct perf_event *event) { unsigned long flags, ien, evtsel, of_flags; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; ien = xscale2pmu_read_int_enable(); evtsel = xscale2pmu_read_event_select(); switch (idx) { case XSCALE_CYCLE_COUNTER: ien &= ~XSCALE2_CCOUNT_INT_EN; of_flags = XSCALE2_CCOUNT_OVERFLOW; break; case XSCALE_COUNTER0: ien &= ~XSCALE2_COUNT0_INT_EN; evtsel &= ~XSCALE2_COUNT0_EVT_MASK; evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; of_flags = XSCALE2_COUNT0_OVERFLOW; break; case XSCALE_COUNTER1: ien &= ~XSCALE2_COUNT1_INT_EN; evtsel &= ~XSCALE2_COUNT1_EVT_MASK; evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; of_flags = XSCALE2_COUNT1_OVERFLOW; break; case XSCALE_COUNTER2: ien &= ~XSCALE2_COUNT2_INT_EN; evtsel &= 
~XSCALE2_COUNT2_EVT_MASK; evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; of_flags = XSCALE2_COUNT2_OVERFLOW; break; case XSCALE_COUNTER3: ien &= ~XSCALE2_COUNT3_INT_EN; evtsel &= ~XSCALE2_COUNT3_EVT_MASK; evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; of_flags = XSCALE2_COUNT3_OVERFLOW; break; default: WARN_ONCE(1, "invalid counter number (%d)\n", idx); return; } raw_spin_lock_irqsave(&events->pmu_lock, flags); xscale2pmu_write_event_select(evtsel); xscale2pmu_write_int_enable(ien); xscale2pmu_write_overflow_flags(of_flags); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int idx = xscale1pmu_get_event_idx(cpuc, event); if (idx >= 0) goto out; if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) idx = XSCALE_COUNTER3; else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) idx = XSCALE_COUNTER2; out: return idx; } static void xscale2pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; val |= XSCALE_PMU_ENABLE; xscale2pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void xscale2pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags, val; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); val = xscale2pmu_read_pmnc(); val &= ~XSCALE_PMU_ENABLE; xscale2pmu_write_pmnc(val); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static inline u64 xscale2pmu_read_counter(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; u32 val = 0; switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); break; case XSCALE_COUNTER0: asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); break; case XSCALE_COUNTER1: asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); break; case XSCALE_COUNTER2: asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); break; case XSCALE_COUNTER3: asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); break; } return val; } static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val) { struct hw_perf_event *hwc = &event->hw; int counter = hwc->idx; switch (counter) { case XSCALE_CYCLE_COUNTER: asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); break; case XSCALE_COUNTER0: asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); break; case XSCALE_COUNTER1: asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); break; case XSCALE_COUNTER2: asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); break; case XSCALE_COUNTER3: asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); break; } } static int xscale2pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->name = "armv5_xscale2"; cpu_pmu->handle_irq = xscale2pmu_handle_irq; cpu_pmu->enable = xscale2pmu_enable_event; cpu_pmu->disable = xscale2pmu_disable_event; cpu_pmu->read_counter = xscale2pmu_read_counter; cpu_pmu->write_counter = xscale2pmu_write_counter; cpu_pmu->get_event_idx = xscale2pmu_get_event_idx; cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx; cpu_pmu->start = xscale2pmu_start; cpu_pmu->stop = xscale2pmu_stop; cpu_pmu->map_event = xscale_map_event; cpu_pmu->num_events = 5; return 0; } static const struct pmu_probe_info xscale_pmu_probe_table[] = { XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, 
xscale1pmu_init), XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init), { /* sentinel value */ } }; static int xscale_pmu_device_probe(struct platform_device *pdev) { return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table); } static struct platform_driver xscale_pmu_driver = { .driver = { .name = "xscale-pmu", }, .probe = xscale_pmu_device_probe, }; builtin_platform_driver(xscale_pmu_driver); #endif /* CONFIG_CPU_XSCALE */
linux-master
arch/arm/kernel/perf_event_xscale.c
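xscale1pmu_handle_irq() works out which counters fired purely from the overflow bits latched into PMNC before the register is written back. A user-space sketch of that decoding, reusing the same bit masks on a hypothetical PMNC value:

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200

int main(void)
{
	unsigned long pmnc = 0x500;	/* hypothetical: cycle counter + counter 0 overflowed */

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) {
		printf("no counter overflowed\n");
		return 0;
	}
	if (pmnc & XSCALE1_CCOUNT_OVERFLOW)
		printf("cycle counter overflowed\n");
	if (pmnc & XSCALE1_COUNT0_OVERFLOW)
		printf("event counter 0 overflowed\n");
	if (pmnc & XSCALE1_COUNT1_OVERFLOW)
		printf("event counter 1 overflowed\n");
	return 0;
}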
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/swp_emulate.c * * Copyright (C) 2009 ARM Limited * __user_* functions adapted from include/asm/uaccess.h * * Implements emulation of the SWP/SWPB instructions using load-exclusive and * store-exclusive for processors that have them disabled (or future ones that * might not implement them). * * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] * Where: Rt = destination * Rt2 = source * Rn = address */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/syscalls.h> #include <linux/perf_event.h> #include <asm/opcodes.h> #include <asm/system_info.h> #include <asm/traps.h> #include <linux/uaccess.h> /* * Error-checking SWP macros implemented using ldrex{b}/strex{b} */ #define __user_swpX_asm(data, addr, res, temp, B) \ __asm__ __volatile__( \ ".arch armv7-a\n" \ "0: ldrex"B" %2, [%3]\n" \ "1: strex"B" %0, %1, [%3]\n" \ " cmp %0, #0\n" \ " moveq %1, %2\n" \ " movne %0, %4\n" \ "2:\n" \ " .section .text.fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %5\n" \ " b 2b\n" \ " .previous\n" \ " .section __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 0b, 3b\n" \ " .long 1b, 3b\n" \ " .previous" \ : "=&r" (res), "+r" (data), "=&r" (temp) \ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ : "cc", "memory") #define __user_swp_asm(data, addr, res, temp) \ __user_swpX_asm(data, addr, res, temp, "") #define __user_swpb_asm(data, addr, res, temp) \ __user_swpX_asm(data, addr, res, temp, "b") /* * Macros/defines for extracting register numbers from instruction. */ #define EXTRACT_REG_NUM(instruction, offset) \ (((instruction) & (0xf << (offset))) >> (offset)) #define RN_OFFSET 16 #define RT_OFFSET 12 #define RT2_OFFSET 0 /* * Bit 22 of the instruction encoding distinguishes between * the SWP and SWPB variants (bit set means SWPB). */ #define TYPE_SWPB (1 << 22) static unsigned long swpcounter; static unsigned long swpbcounter; static unsigned long abtcounter; static pid_t previous_pid; #ifdef CONFIG_PROC_FS static int proc_status_show(struct seq_file *m, void *v) { seq_printf(m, "Emulated SWP:\t\t%lu\n", swpcounter); seq_printf(m, "Emulated SWPB:\t\t%lu\n", swpbcounter); seq_printf(m, "Aborted SWP{B}:\t\t%lu\n", abtcounter); if (previous_pid != 0) seq_printf(m, "Last process:\t\t%d\n", previous_pid); return 0; } #endif /* * Set up process info to signal segmentation fault - called on access error. 
*/ static void set_segfault(struct pt_regs *regs, unsigned long addr) { int si_code; mmap_read_lock(current->mm); if (find_vma(current->mm, addr) == NULL) si_code = SEGV_MAPERR; else si_code = SEGV_ACCERR; mmap_read_unlock(current->mm); pr_debug("SWP{B} emulation: access caused memory abort!\n"); arm_notify_die("Illegal memory access", regs, SIGSEGV, si_code, (void __user *)instruction_pointer(regs), 0, 0); abtcounter++; } static int emulate_swpX(unsigned int address, unsigned int *data, unsigned int type) { unsigned int res = 0; if ((type != TYPE_SWPB) && (address & 0x3)) { /* SWP to unaligned address not permitted */ pr_debug("SWP instruction on unaligned pointer!\n"); return -EFAULT; } while (1) { unsigned long temp; unsigned int __ua_flags; __ua_flags = uaccess_save_and_enable(); if (type == TYPE_SWPB) __user_swpb_asm(*data, address, res, temp); else __user_swp_asm(*data, address, res, temp); uaccess_restore(__ua_flags); if (likely(res != -EAGAIN) || signal_pending(current)) break; cond_resched(); } if (res == 0) { if (type == TYPE_SWPB) swpbcounter++; else swpcounter++; } return res; } /* * swp_handler logs the id of calling process, dissects the instruction, sanity * checks the memory location, calls emulate_swpX for the actual operation and * deals with fixup/error handling before returning */ static int swp_handler(struct pt_regs *regs, unsigned int instr) { unsigned int address, destreg, data, type; unsigned int res = 0; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc); res = arm_check_condition(instr, regs->ARM_cpsr); switch (res) { case ARM_OPCODE_CONDTEST_PASS: break; case ARM_OPCODE_CONDTEST_FAIL: /* Condition failed - return to next instruction */ regs->ARM_pc += 4; return 0; case ARM_OPCODE_CONDTEST_UNCOND: /* If unconditional encoding - not a SWP, undef */ return -EFAULT; default: return -EINVAL; } if (current->pid != previous_pid) { pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", current->comm, (unsigned long)current->pid); previous_pid = current->pid; } address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)]; data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)]; destreg = EXTRACT_REG_NUM(instr, RT_OFFSET); type = instr & TYPE_SWPB; pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n", EXTRACT_REG_NUM(instr, RN_OFFSET), address, destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ if (!access_ok((void __user *)(address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p not allowed!\n", (void *)address); res = -EFAULT; } else { res = emulate_swpX(address, &data, type); } if (res == 0) { /* * On successful emulation, revert the adjustment to the PC * made in kernel/traps.c in order to resume execution at the * instruction following the SWP{B}. */ regs->ARM_pc += 4; regs->uregs[destreg] = data; } else if (res == -EFAULT) { /* * Memory errors do not mean emulation failed. * Set up signal info to return SEGV, then return OK */ set_segfault(regs, address); } return 0; } /* * Only emulate SWP/SWPB executed in ARM state/User mode. * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE. */ static struct undef_hook swp_hook = { .instr_mask = 0x0fb00ff0, .instr_val = 0x01000090, .cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT, .cpsr_val = USR_MODE, .fn = swp_handler }; /* * Register handler and create status file in /proc/cpu * Invoked as late_initcall, since not needed before init spawned. 
*/ static int __init swp_emulation_init(void) { if (cpu_architecture() < CPU_ARCH_ARMv7) return 0; #ifdef CONFIG_PROC_FS if (!proc_create_single("cpu/swp_emulation", S_IRUGO, NULL, proc_status_show)) return -ENOMEM; #endif /* CONFIG_PROC_FS */ pr_notice("Registering SWP/SWPB emulation handler\n"); register_undef_hook(&swp_hook); return 0; } late_initcall(swp_emulation_init);
linux-master
arch/arm/kernel/swp_emulate.c
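swp_handler() pulls the register numbers straight out of the trapped instruction word with EXTRACT_REG_NUM and distinguishes SWP from SWPB via bit 22. The same bit slicing as a stand-alone program, decoding the hypothetical encoding 0xe1020091 (SWP r0, r1, [r2]):

/* Illustrative sketch only; not part of the kernel source above. */
#include <stdio.h>
#include <stdint.h>

#define EXTRACT_REG_NUM(instruction, offset) \
	(((instruction) & (0xf << (offset))) >> (offset))

#define RN_OFFSET	16
#define RT_OFFSET	12
#define RT2_OFFSET	0
#define TYPE_SWPB	(1 << 22)

int main(void)
{
	uint32_t instr = 0xe1020091u;	/* hypothetical: SWP r0, r1, [r2] */

	printf("%s r%u, r%u, [r%u]\n",
	       (instr & TYPE_SWPB) ? "SWPB" : "SWP",
	       (unsigned)EXTRACT_REG_NUM(instr, RT_OFFSET),
	       (unsigned)EXTRACT_REG_NUM(instr, RT2_OFFSET),
	       (unsigned)EXTRACT_REG_NUM(instr, RN_OFFSET));
	return 0;
}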
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/time.c * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Modifications for ARM (C) 1994-2001 Russell King * * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... */ #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/of_clk.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/sched_clock.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/timer.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/stacktrace.h> #include <asm/thread_info.h> #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) /* this needs a better home */ DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); #endif /* pc-style 'CMOS' RTC support */ /* change this if you have some constant time drift */ #define USECS_PER_JIFFY (1000000/HZ) #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { struct stackframe frame; if (!in_lock_functions(regs->ARM_pc)) return regs->ARM_pc; arm_get_current_stackframe(regs, &frame); do { int ret = unwind_frame(&frame); if (ret < 0) return 0; } while (in_lock_functions(frame.pc)); return frame.pc; } EXPORT_SYMBOL(profile_pc); #endif static void dummy_clock_access(struct timespec64 *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; } static clock_access_fn __read_persistent_clock = dummy_clock_access; void read_persistent_clock64(struct timespec64 *ts) { __read_persistent_clock(ts); } int __init register_persistent_clock(clock_access_fn read_persistent) { /* Only allow the clockaccess functions to be registered once */ if (__read_persistent_clock == dummy_clock_access) { if (read_persistent) __read_persistent_clock = read_persistent; return 0; } return -EINVAL; } void __init time_init(void) { if (machine_desc->init_time) { machine_desc->init_time(); } else { #ifdef CONFIG_COMMON_CLK of_clk_init(NULL); #endif timer_probe(); tick_setup_hrtimer_broadcast(); } }
linux-master
arch/arm/kernel/time.c
// SPDX-License-Identifier: GPL-2.0 /* * machine_kexec.c - handle transition of Linux booting another kernel */ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/memblock.h> #include <linux/of_fdt.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/kexec-internal.h> #include <asm/fncpy.h> #include <asm/mach-types.h> #include <asm/smp_plat.h> #include <asm/system_misc.h> #include <asm/set_memory.h> extern void relocate_new_kernel(void); extern const unsigned int relocate_new_kernel_size; static atomic_t waiting_for_crash_ipi; /* * Provide a dummy crash_notes definition while crash dump arrives to arm. * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. */ int machine_kexec_prepare(struct kimage *image) { struct kexec_segment *current_segment; __be32 header; int i, err; image->arch.kernel_r2 = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; /* * Validate that if the current HW supports SMP, then the SW supports * and implements CPU hotplug for the current HW. If not, we won't be * able to kexec reliably, so fail the prepare operation. */ if (num_possible_cpus() > 1 && platform_can_secondary_boot() && !platform_can_cpu_hotplug()) return -EINVAL; /* * No segment at default ATAGs address. try to locate * a dtb using magic. */ for (i = 0; i < image->nr_segments; i++) { current_segment = &image->segment[i]; if (!memblock_is_region_memory(idmap_to_phys(current_segment->mem), current_segment->memsz)) return -EINVAL; err = get_user(header, (__be32*)current_segment->buf); if (err) return err; if (header == cpu_to_be32(OF_DT_HEADER)) image->arch.kernel_r2 = current_segment->mem; } return 0; } void machine_kexec_cleanup(struct kimage *image) { } static void machine_crash_nonpanic_core(void *unused) { struct pt_regs regs; local_fiq_disable(); crash_setup_regs(&regs, get_irq_regs()); printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", smp_processor_id()); crash_save_cpu(&regs, smp_processor_id()); flush_cache_all(); set_cpu_online(smp_processor_id(), false); atomic_dec(&waiting_for_crash_ipi); while (1) { cpu_relax(); wfe(); } } static DEFINE_PER_CPU(call_single_data_t, cpu_stop_csd) = CSD_INIT(machine_crash_nonpanic_core, NULL); void crash_smp_send_stop(void) { static int cpus_stopped; unsigned long msecs; call_single_data_t *csd; int cpu, this_cpu = raw_smp_processor_id(); if (cpus_stopped) return; atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); for_each_online_cpu(cpu) { if (cpu == this_cpu) continue; csd = &per_cpu(cpu_stop_csd, cpu); smp_call_function_single_async(cpu, csd); } msecs = 1000; /* Wait at most a second for the other cpus to stop */ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { mdelay(1); msecs--; } if (atomic_read(&waiting_for_crash_ipi) > 0) pr_warn("Non-crashing CPUs did not react to IPI\n"); cpus_stopped = 1; } static void machine_kexec_mask_interrupts(void) { unsigned int i; struct irq_desc *desc; for_each_irq_desc(i, desc) { struct irq_chip *chip; chip = irq_desc_get_chip(desc); if (!chip) continue; if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) chip->irq_eoi(&desc->irq_data); if (chip->irq_mask) chip->irq_mask(&desc->irq_data); if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) chip->irq_disable(&desc->irq_data); } } void machine_crash_shutdown(struct pt_regs *regs) { local_irq_disable(); crash_smp_send_stop(); 
crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); pr_info("Loading crashdump kernel...\n"); } void machine_kexec(struct kimage *image) { unsigned long page_list, reboot_entry_phys; struct kexec_relocate_data *data; void (*reboot_entry)(void); void *reboot_code_buffer; /* * This can only happen if machine_shutdown() failed to disable some * CPU, and that can only happen if the checks in * machine_kexec_prepare() were not correct. If this fails, we can't * reliably kexec anyway, so BUG_ON is appropriate. */ BUG_ON(num_online_cpus() > 1); page_list = image->head & PAGE_MASK; reboot_code_buffer = page_address(image->control_code_page); /* copy our kernel relocation code to the control code page */ reboot_entry = fncpy(reboot_code_buffer, &relocate_new_kernel, relocate_new_kernel_size); data = reboot_code_buffer + relocate_new_kernel_size; data->kexec_start_address = image->start; data->kexec_indirection_page = page_list; data->kexec_mach_type = machine_arch_type; data->kexec_r2 = image->arch.kernel_r2; /* get the identity mapping physical address for the reboot code */ reboot_entry_phys = virt_to_idmap(reboot_entry); pr_info("Bye!\n"); soft_restart(reboot_entry_phys); } void arch_crash_save_vmcoreinfo(void) { #ifdef CONFIG_ARM_LPAE VMCOREINFO_CONFIG(ARM_LPAE); #endif }
linux-master
arch/arm/kernel/machine_kexec.c
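A small illustration of the DTB-detection step in machine_kexec_prepare() above: a kexec segment is treated as a device tree when its first 32-bit word is the flattened-device-tree magic (OF_DT_HEADER, 0xd00dfeed, stored big-endian). The sketch below is a hedged, userspace-only analogue; looks_like_dtb() and the sample buffers are made up for the example and are not part of the kernel API.

/*
 * Minimal userspace sketch of the DTB check done in machine_kexec_prepare():
 * a segment is a device tree if its first word is the FDT magic
 * 0xd00dfeed in big-endian byte order.  looks_like_dtb() is illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int looks_like_dtb(const void *buf, size_t len)
{
	const uint8_t *p = buf;

	if (len < 4)
		return 0;
	/* the magic is big-endian in the blob, so compare byte by byte */
	return p[0] == 0xd0 && p[1] == 0x0d && p[2] == 0xfe && p[3] == 0xed;
}

int main(void)
{
	uint8_t dtb[8]  = { 0xd0, 0x0d, 0xfe, 0xed, 0, 0, 0, 0 };
	uint8_t zimg[8] = { 0x00, 0x00, 0xa0, 0xe1, 0, 0, 0, 0 };

	printf("dtb segment:    %s\n", looks_like_dtb(dtb, 8)  ? "device tree" : "not a dtb");
	printf("kernel segment: %s\n", looks_like_dtb(zimg, 8) ? "device tree" : "not a dtb");
	return 0;
}

When a segment matches, the kernel records that segment's load address in image->arch.kernel_r2 so the rebooted kernel finds its DTB in r2.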
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/jump_label.h> #include <asm/patch.h> #include <asm/insn.h> static void __arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type, bool is_static) { void *addr = (void *)entry->code; unsigned int insn; if (type == JUMP_LABEL_JMP) insn = arm_gen_branch(entry->code, entry->target); else insn = arm_gen_nop(); if (is_static) __patch_text_early(addr, insn); else patch_text(addr, insn); } void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { __arch_jump_label_transform(entry, type, false); }
linux-master
arch/arm/kernel/jump_label.c
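The transform above patches either an unconditional branch (arm_gen_branch()) or a NOP into the jump-label site. The sketch below is not the kernel's encoder; it is a rough, hedged illustration of the classic ARM-mode B encoding (offset relative to PC+8, signed 24-bit word offset). The site/target addresses and the ARM_NOP constant are assumptions for the example.

/*
 * Illustrative sketch (not the kernel's arm_gen_branch()) of an ARM-mode
 * unconditional branch encoding for a jump-label site.  The offset is
 * relative to PC+8 and must fit in a signed 24-bit word offset (~+/-32MB).
 */
#include <stdint.h>
#include <stdio.h>

#define ARM_NOP 0xe320f000u	/* "nop" hint encoding; older cores use mov r0, r0 */

static uint32_t arm_branch_insn(uint32_t site, uint32_t target)
{
	int32_t offset = (int32_t)(target - site - 8);	/* PC reads as site + 8 */

	return 0xea000000u | (((uint32_t)offset >> 2) & 0x00ffffffu);
}

int main(void)
{
	/* hypothetical addresses of a static-key branch and its target */
	uint32_t site = 0xc0008000u, target = 0xc0008040u;

	printf("enabled:  %08x (branch forward 0x%x bytes)\n",
	       arm_branch_insn(site, target), target - site);
	printf("disabled: %08x (nop)\n", ARM_NOP);
	return 0;
}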
// SPDX-License-Identifier: GPL-2.0 /* * ARM callchain support * * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles * Copyright (C) 2010 ARM Ltd., Will Deacon <[email protected]> * * This code is based on the ARM OProfile backtrace code. */ #include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/stacktrace.h> /* * The registers we're interested in are at the end of the variable * length saved register structure. The fp points at the end of this * structure so the address of this struct is: * (struct frame_tail *)(xxx->fp)-1 * * This code has been adapted from the ARM OProfile support. */ struct frame_tail { struct frame_tail __user *fp; unsigned long sp; unsigned long lr; } __attribute__((packed)); /* * Get the return address for a single stackframe and return a pointer to the * next frame tail. */ static struct frame_tail __user * user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry_ctx *entry) { struct frame_tail buftail; unsigned long err; if (!access_ok(tail, sizeof(buftail))) return NULL; pagefault_disable(); err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); pagefault_enable(); if (err) return NULL; perf_callchain_store(entry, buftail.lr); /* * Frame pointers should strictly progress back up the stack * (towards higher addresses). */ if (tail + 1 >= buftail.fp) return NULL; return buftail.fp - 1; } void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { struct frame_tail __user *tail; perf_callchain_store(entry, regs->ARM_pc); if (!current->mm) return; tail = (struct frame_tail __user *)regs->ARM_fp - 1; while ((entry->nr < entry->max_stack) && tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } /* * Gets called by walk_stackframe() for every stackframe. This will be called * whist unwinding the stackframe and is like a subroutine return so we use * the PC. */ static bool callchain_trace(void *data, unsigned long pc) { struct perf_callchain_entry_ctx *entry = data; perf_callchain_store(entry, pc); return true; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { struct stackframe fr; arm_get_current_stackframe(regs, &fr); walk_stackframe(&fr, callchain_trace, entry); } unsigned long perf_instruction_pointer(struct pt_regs *regs) { return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { int misc = 0; if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; return misc; }
linux-master
arch/arm/kernel/perf_callchain.c
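The user-callchain code above walks struct frame_tail records, where each fp points one element past the caller's {fp, sp, lr} triple and frame pointers must strictly move to higher addresses. The sketch below reproduces that walk over a synthetic, host-side stack (the real code reads user memory with __copy_from_user_inatomic()); the fake frame contents are invented for the example.

/*
 * Host-side sketch of the frame_tail walk used for ARM user callchains.
 * A synthetic array stands in for user stack memory so the
 * "fp points one element past the tail" convention and the monotonicity
 * check can be seen in isolation.
 */
#include <stdio.h>

struct frame_tail {
	struct frame_tail *fp;	/* caller's frame pointer */
	unsigned long sp;
	unsigned long lr;	/* return address to record */
};

int main(void)
{
	/* three fake frames: frames[0] is the innermost */
	struct frame_tail frames[3] = {
		{ &frames[1] + 1, 0x1000, 0x8040 },
		{ &frames[2] + 1, 0x1020, 0x8100 },
		{ 0,              0x1040, 0x8200 },
	};
	struct frame_tail *tail = &frames[0];

	while (tail) {
		struct frame_tail t = *tail;

		printf("lr = 0x%lx\n", t.lr);
		/* frame pointers must progress towards higher addresses */
		if (!t.fp || tail + 1 >= t.fp)
			break;
		tail = t.fp - 1;
	}
	return 0;
}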
// SPDX-License-Identifier: GPL-2.0-only /* * Tag parsing. * * Copyright (C) 1995-2001 Russell King */ /* * This is the traditional way of passing data to the kernel at boot time. Rather * than passing a fixed inflexible structure to the kernel, we pass a list * of variable-sized tags to the kernel. The first tag must be a ATAG_CORE * tag for the list to be recognised (to distinguish the tagged list from * a param_struct). The list is terminated with a zero-length tag (this tag * is not parsed in any way). */ #include <linux/init.h> #include <linux/initrd.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/root_dev.h> #include <linux/screen_info.h> #include <linux/memblock.h> #include <uapi/linux/mount.h> #include <asm/setup.h> #include <asm/system_info.h> #include <asm/page.h> #include <asm/mach/arch.h> #include "atags.h" static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; #ifndef MEM_SIZE #define MEM_SIZE (16*1024*1024) #endif static struct { struct tag_header hdr1; struct tag_core core; struct tag_header hdr2; struct tag_mem32 mem; struct tag_header hdr3; } default_tags __initdata = { { tag_size(tag_core), ATAG_CORE }, { 1, PAGE_SIZE, 0xff }, { tag_size(tag_mem32), ATAG_MEM }, { MEM_SIZE }, { 0, ATAG_NONE } }; static int __init parse_tag_core(const struct tag *tag) { if (tag->hdr.size > 2) { if ((tag->u.core.flags & 1) == 0) root_mountflags &= ~MS_RDONLY; ROOT_DEV = old_decode_dev(tag->u.core.rootdev); } return 0; } __tagtable(ATAG_CORE, parse_tag_core); static int __init parse_tag_mem32(const struct tag *tag) { return arm_add_memory(tag->u.mem.start, tag->u.mem.size); } __tagtable(ATAG_MEM, parse_tag_mem32); #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) static int __init parse_tag_videotext(const struct tag *tag) { screen_info.orig_x = tag->u.videotext.x; screen_info.orig_y = tag->u.videotext.y; screen_info.orig_video_page = tag->u.videotext.video_page; screen_info.orig_video_mode = tag->u.videotext.video_mode; screen_info.orig_video_cols = tag->u.videotext.video_cols; screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx; screen_info.orig_video_lines = tag->u.videotext.video_lines; screen_info.orig_video_isVGA = tag->u.videotext.video_isvga; screen_info.orig_video_points = tag->u.videotext.video_points; return 0; } __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); #endif #ifdef CONFIG_BLK_DEV_RAM static int __init parse_tag_ramdisk(const struct tag *tag) { rd_image_start = tag->u.ramdisk.start; if (tag->u.ramdisk.size) rd_size = tag->u.ramdisk.size; return 0; } __tagtable(ATAG_RAMDISK, parse_tag_ramdisk); #endif static int __init parse_tag_serialnr(const struct tag *tag) { system_serial_low = tag->u.serialnr.low; system_serial_high = tag->u.serialnr.high; return 0; } __tagtable(ATAG_SERIAL, parse_tag_serialnr); static int __init parse_tag_revision(const struct tag *tag) { system_rev = tag->u.revision.rev; return 0; } __tagtable(ATAG_REVISION, parse_tag_revision); static int __init parse_tag_cmdline(const struct tag *tag) { #if defined(CONFIG_CMDLINE_EXTEND) strlcat(default_command_line, " ", COMMAND_LINE_SIZE); strlcat(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); #elif defined(CONFIG_CMDLINE_FORCE) pr_warn("Ignoring tag cmdline (using the default kernel command line)\n"); #else strscpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); #endif return 0; } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); /* * Scan the tag table for this tag, and call its parse function. 
* The tag table is built by the linker from all the __tagtable * declarations. */ static int __init parse_tag(const struct tag *tag) { extern struct tagtable __tagtable_begin, __tagtable_end; struct tagtable *t; for (t = &__tagtable_begin; t < &__tagtable_end; t++) if (tag->hdr.tag == t->tag) { t->parse(tag); break; } return t < &__tagtable_end; } /* * Parse all tags in the list, checking both the global and architecture * specific tag tables. */ static void __init parse_tags(const struct tag *t) { for (; t->hdr.size; t = tag_next(t)) if (!parse_tag(t)) pr_warn("Ignoring unrecognised tag 0x%08x\n", t->hdr.tag); } static void __init squash_mem_tags(struct tag *tag) { for (; tag->hdr.size; tag = tag_next(tag)) if (tag->hdr.tag == ATAG_MEM) tag->hdr.tag = ATAG_NONE; } const struct machine_desc * __init setup_machine_tags(void *atags_vaddr, unsigned int machine_nr) { struct tag *tags = (struct tag *)&default_tags; const struct machine_desc *mdesc = NULL, *p; char *from = default_command_line; default_tags.mem.start = PHYS_OFFSET; /* * locate machine in the list of supported machines. */ for_each_machine_desc(p) if (machine_nr == p->nr) { pr_info("Machine: %s\n", p->name); mdesc = p; break; } if (!mdesc) return NULL; if (atags_vaddr) tags = atags_vaddr; else if (mdesc->atag_offset) tags = (void *)(PAGE_OFFSET + mdesc->atag_offset); #if defined(CONFIG_DEPRECATED_PARAM_STRUCT) /* * If we have the old style parameters, convert them to * a tag list. */ if (tags->hdr.tag != ATAG_CORE) convert_to_tag_list(tags); #endif if (tags->hdr.tag != ATAG_CORE) { early_print("Warning: Neither atags nor dtb found\n"); tags = (struct tag *)&default_tags; } if (mdesc->fixup) mdesc->fixup(tags, &from); if (tags->hdr.tag == ATAG_CORE) { if (memblock_phys_mem_size()) squash_mem_tags(tags); save_atags(tags); parse_tags(tags); } /* parse_early_param needs a boot_command_line */ strscpy(boot_command_line, from, COMMAND_LINE_SIZE); return mdesc; }
linux-master
arch/arm/kernel/atags_parse.c
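The tag list parsed above is a sequence of {size-in-words, tag-id} headers, terminated by a zero-sized ATAG_NONE, with tag_next() being a plain word-pointer advance. The standalone sketch below rebuilds a tiny ATAG list in a word array and walks it; the structures are re-declared locally for the example and are not the kernel's definitions.

/*
 * Standalone sketch of ATAG list layout and walking: the header stores
 * the tag size in 32-bit words, so "next tag" is a word-pointer advance,
 * and the list ends with a zero-sized ATAG_NONE.
 */
#include <stdint.h>
#include <stdio.h>

#define ATAG_CORE 0x54410001u
#define ATAG_MEM  0x54410002u
#define ATAG_NONE 0x00000000u

struct tag_header {
	uint32_t size;	/* in words, including this header */
	uint32_t tag;
};

static const struct tag_header *tag_next(const struct tag_header *t)
{
	return (const struct tag_header *)((const uint32_t *)t + t->size);
}

int main(void)
{
	/* hand-built list: ATAG_CORE (5 words), ATAG_MEM (4 words), ATAG_NONE */
	uint32_t atags[] = {
		5, ATAG_CORE, 1, 4096, 0xff,		/* flags, pagesize, rootdev */
		4, ATAG_MEM,  0x01000000, 0x80000000,	/* size, start */
		0, ATAG_NONE,
	};
	const struct tag_header *t = (const struct tag_header *)atags;

	if (t->tag != ATAG_CORE) {
		puts("not a tagged list");
		return 1;
	}
	for (; t->size; t = tag_next(t))
		printf("tag 0x%08x, %u words\n", t->tag, t->size);
	return 0;
}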
// SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/kprobes.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/stacktrace.h> #include <asm/sections.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include "reboot.h" #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) /* * Unwind the current stack frame and store the new register values in the * structure passed as argument. Unwinding is equivalent to a function return, * hence the new PC value rather than LR should be used for backtrace. * * With framepointer enabled, a simple function prologue looks like this: * mov ip, sp * stmdb sp!, {fp, ip, lr, pc} * sub fp, ip, #4 * * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * * When compiled with clang, pc and sp are not pushed. A simple function * prologue looks like this when built with clang: * * stmdb {..., fp, lr} * add fp, sp, #x * sub sp, sp, #y * * A simple function epilogue looks like this when built with clang: * * sub sp, fp, #x * ldm {..., fp, pc} * * * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. */ extern unsigned long call_with_stack_end; static int frame_pointer_check(struct stackframe *frame) { unsigned long high, low; unsigned long fp = frame->fp; unsigned long pc = frame->pc; /* * call_with_stack() is the only place we allow SP to jump from one * stack to another, with FP and SP pointing to different stacks, * skipping the FP boundary check at this point. */ if (pc >= (unsigned long)&call_with_stack && pc < (unsigned long)&call_with_stack_end) return 0; /* only go to a higher address on the stack */ low = frame->sp; high = ALIGN(low, THREAD_SIZE); /* check current frame pointer is within bounds */ #ifdef CONFIG_CC_IS_CLANG if (fp < low + 4 || fp > high - 4) return -EINVAL; #else if (fp < low + 12 || fp > high - 4) return -EINVAL; #endif return 0; } int notrace unwind_frame(struct stackframe *frame) { unsigned long fp = frame->fp; if (frame_pointer_check(frame)) return -EINVAL; /* * When we unwind through an exception stack, include the saved PC * value into the stack trace. */ if (frame->ex_frame) { struct pt_regs *regs = (struct pt_regs *)frame->sp; /* * We check that 'regs + sizeof(struct pt_regs)' (that is, * &regs[1]) does not exceed the bottom of the stack to avoid * accessing data outside the task's stack. This may happen * when frame->ex_frame is a false positive. 
*/ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE)) return -EINVAL; frame->pc = regs->ARM_pc; frame->ex_frame = false; return 0; } /* restore the registers from the stack frame */ #ifdef CONFIG_CC_IS_CLANG frame->sp = frame->fp; frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4)); #else frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); #endif #ifdef CONFIG_KRETPROBES if (is_kretprobe_trampoline(frame->pc)) frame->pc = kretprobe_find_ret_addr(frame->tsk, (void *)frame->fp, &frame->kr_cur); #endif if (in_entry_text(frame->pc)) frame->ex_frame = true; return 0; } #endif void notrace walk_stackframe(struct stackframe *frame, bool (*fn)(void *, unsigned long), void *data) { while (1) { int ret; if (!fn(data, frame->pc)) break; ret = unwind_frame(frame); if (ret < 0) break; } } EXPORT_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE static void start_stack_trace(struct stackframe *frame, struct task_struct *task, unsigned long fp, unsigned long sp, unsigned long lr, unsigned long pc) { frame->fp = fp; frame->sp = sp; frame->lr = lr; frame->pc = pc; #ifdef CONFIG_KRETPROBES frame->kr_cur = NULL; frame->tsk = task; #endif #ifdef CONFIG_UNWINDER_FRAME_POINTER frame->ex_frame = in_entry_text(frame->pc); #endif } void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { struct stackframe frame; if (regs) { start_stack_trace(&frame, NULL, regs->ARM_fp, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc); } else if (task != current) { #ifdef CONFIG_SMP /* * What guarantees do we have here that 'tsk' is not * running on another CPU? For now, ignore it as we * can't guarantee we won't explode. */ return; #else start_stack_trace(&frame, task, thread_saved_fp(task), thread_saved_sp(task), 0, thread_saved_pc(task)); #endif } else { here: start_stack_trace(&frame, task, (unsigned long)__builtin_frame_address(0), current_stack_pointer, (unsigned long)__builtin_return_address(0), (unsigned long)&&here); /* skip this function */ if (unwind_frame(&frame)) return; } walk_stackframe(&frame, consume_entry, cookie); } #endif
linux-master
arch/arm/kernel/stacktrace.c
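With the GCC/APCS prologue described above, one unwind step reads the caller's fp, sp and return address from fp-12, fp-8 and fp-4. The sketch below fakes a single frame in a word array so the offsets can be seen on any host; the byte offsets quoted in the comments assume 32-bit ARM words, and the saved values are invented.

/*
 * Sketch of one GCC/APCS unwind step as in unwind_frame(): after
 *   mov ip, sp; stmdb sp!, {fp, ip, lr, pc}; sub fp, ip, #4
 * the caller's fp, sp and return address sit three, two and one word
 * below fp (fp-12, fp-8, fp-4 on 32-bit ARM).
 */
#include <stdio.h>

int main(void)
{
	/* fake stack slots: {saved fp, saved ip(=old sp), saved lr, saved pc} */
	unsigned long stack[4] = { 0xdead0000, 0xc0ffee00, 0x80123456, 0x80123470 };
	unsigned long *fp = &stack[3];		/* fp points at the saved pc slot */

	unsigned long new_fp = *(fp - 3);	/* caller's frame pointer */
	unsigned long new_sp = *(fp - 2);	/* old stack pointer (saved ip) */
	unsigned long new_pc = *(fp - 1);	/* saved lr, i.e. the return address */

	printf("caller fp=0x%lx sp=0x%lx pc=0x%lx\n", new_fp, new_sp, new_pc);
	return 0;
}

The clang layout handled by the CONFIG_CC_IS_CLANG branch differs: there fp points at the saved fp, with the return address one word above it.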
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/arch/arm/kernel/isa.c * * Copyright (C) 1999 Phil Blundell * * ISA shared memory and I/O port support, and is required to support * iopl, inb, outb and friends in userspace via glibc emulation. */ #include <linux/stddef.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/sysctl.h> #include <linux/init.h> #include <linux/io.h> static unsigned int isa_membase, isa_portbase, isa_portshift; static struct ctl_table ctl_isa_vars[4] = { { .procname = "membase", .data = &isa_membase, .maxlen = sizeof(isa_membase), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "portbase", .data = &isa_portbase, .maxlen = sizeof(isa_portbase), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "portshift", .data = &isa_portshift, .maxlen = sizeof(isa_portshift), .mode = 0444, .proc_handler = proc_dointvec, }, {} }; static struct ctl_table_header *isa_sysctl_header; void __init register_isa_ports(unsigned int membase, unsigned int portbase, unsigned int portshift) { isa_membase = membase; isa_portbase = portbase; isa_portshift = portshift; isa_sysctl_header = register_sysctl("bus/isa", ctl_isa_vars); }
linux-master
arch/arm/kernel/isa.c
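register_sysctl("bus/isa", ...) above exposes the three read-only values under /proc/sys/bus/isa/ on platforms whose machine code calls register_isa_ports(); elsewhere the directory is simply absent. A small hedged userspace sketch (show() is an illustrative helper, not a kernel interface):

/*
 * Userspace sketch: read the ISA sysctls, treating "not present" as a
 * normal condition on machines without ISA support.
 */
#include <stdio.h>

static void show(const char *name)
{
	char path[64], buf[32];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/bus/isa/%s", name);
	f = fopen(path, "r");
	if (!f) {
		printf("%-10s <not present>\n", name);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-10s %s", name, buf);
	fclose(f);
}

int main(void)
{
	show("membase");
	show("portbase");
	show("portshift");
	return 0;
}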
// SPDX-License-Identifier: GPL-2.0 #include <linux/slab.h> #include <linux/proc_fs.h> #include <asm/setup.h> #include <asm/types.h> #include <asm/page.h> struct buffer { size_t size; char data[]; }; static ssize_t atags_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct buffer *b = pde_data(file_inode(file)); return simple_read_from_buffer(buf, count, ppos, b->data, b->size); } static const struct proc_ops atags_proc_ops = { .proc_read = atags_read, .proc_lseek = default_llseek, }; #define BOOT_PARAMS_SIZE 1536 static char __initdata atags_copy[BOOT_PARAMS_SIZE]; void __init save_atags(const struct tag *tags) { memcpy(atags_copy, tags, sizeof(atags_copy)); } static int __init init_atags_procfs(void) { /* * This cannot go into save_atags() because kmalloc and proc don't work * yet when it is called. */ struct proc_dir_entry *tags_entry; struct tag *tag = (struct tag *)atags_copy; struct buffer *b; size_t size; if (tag->hdr.tag != ATAG_CORE) { pr_info("No ATAGs?\n"); return -EINVAL; } for (; tag->hdr.size; tag = tag_next(tag)) ; /* include the terminating ATAG_NONE */ size = (char *)tag - atags_copy + sizeof(struct tag_header); WARN_ON(tag->hdr.tag != ATAG_NONE); b = kmalloc(sizeof(*b) + size, GFP_KERNEL); if (!b) goto nomem; b->size = size; memcpy(b->data, atags_copy, size); tags_entry = proc_create_data("atags", 0400, NULL, &atags_proc_ops, b); if (!tags_entry) goto nomem; return 0; nomem: kfree(b); pr_err("Exporting ATAGs: not enough memory\n"); return -ENOMEM; } arch_initcall(init_atags_procfs);
linux-master
arch/arm/kernel/atags_proc.c
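init_atags_procfs() above publishes the saved boot tags as the binary file /proc/atags (only on ATAG-booted kernels that saved them). The hedged userspace sketch below reads that blob and walks it with the same size-in-words rule used by the in-kernel parser; the 1536-byte limit mirrors BOOT_PARAMS_SIZE from the file above.

/*
 * Userspace sketch: dump the tag headers found in /proc/atags, if the
 * file exists on this system.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static uint32_t buf[1536 / 4];	/* one BOOT_PARAMS_SIZE worth of words */
	FILE *f = fopen("/proc/atags", "rb");
	size_t words, i = 0;

	if (!f) {
		puts("/proc/atags not present (DT boot, or no ATAGs saved)");
		return 0;
	}
	words = fread(buf, 4, sizeof(buf) / 4, f);
	fclose(f);

	while (i + 1 < words && buf[i]) {	/* buf[i] = size, buf[i+1] = tag id */
		printf("tag 0x%08x, %u words\n", buf[i + 1], buf[i]);
		i += buf[i];
	}
	return 0;
}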
// SPDX-License-Identifier: GPL-2.0-only #include <linux/bpf.h> #include <linux/cpu.h> #include <linux/device.h> #include <asm/spectre.h> static bool _unprivileged_ebpf_enabled(void) { #ifdef CONFIG_BPF_SYSCALL return !sysctl_unprivileged_bpf_disabled; #else return false; #endif } ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "Mitigation: __user pointer sanitization\n"); } static unsigned int spectre_v2_state; static unsigned int spectre_v2_methods; void spectre_v2_update_state(unsigned int state, unsigned int method) { if (state > spectre_v2_state) spectre_v2_state = state; spectre_v2_methods |= method; } ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { const char *method; if (spectre_v2_state == SPECTRE_UNAFFECTED) return sprintf(buf, "%s\n", "Not affected"); if (spectre_v2_state != SPECTRE_MITIGATED) return sprintf(buf, "%s\n", "Vulnerable"); if (_unprivileged_ebpf_enabled()) return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); switch (spectre_v2_methods) { case SPECTRE_V2_METHOD_BPIALL: method = "Branch predictor hardening"; break; case SPECTRE_V2_METHOD_ICIALLU: method = "I-cache invalidation"; break; case SPECTRE_V2_METHOD_SMC: case SPECTRE_V2_METHOD_HVC: method = "Firmware call"; break; case SPECTRE_V2_METHOD_LOOP8: method = "History overwrite"; break; default: method = "Multiple mitigations"; break; } return sprintf(buf, "Mitigation: %s\n", method); }
linux-master
arch/arm/kernel/spectre.c
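The cpu_show_spectre_v1()/cpu_show_spectre_v2() handlers above back the generic sysfs files under /sys/devices/system/cpu/vulnerabilities/, where the mitigation string chosen from spectre_v2_methods becomes visible to userspace. A minimal sketch of reading them (show() is an illustrative helper):

/*
 * Userspace sketch: print the Spectre status strings produced by the
 * handlers above via the standard CPU vulnerabilities sysfs directory.
 */
#include <stdio.h>

static void show(const char *name)
{
	char path[128], buf[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/vulnerabilities/%s", name);
	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("%-12s %s", name, buf);
	else
		printf("%-12s <not available>\n", name);
	if (f)
		fclose(f);
}

int main(void)
{
	show("spectre_v1");
	show("spectre_v2");
	return 0;
}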
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/xscale-cp0.c * * XScale DSP and iWMMXt coprocessor context switching and handling */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/io.h> #include <asm/thread_notify.h> #include <asm/cputype.h> asm(" .arch armv5te\n"); static inline void dsp_save_state(u32 *state) { __asm__ __volatile__ ( "mrrc p0, 0, %0, %1, c0\n" : "=r" (state[0]), "=r" (state[1])); } static inline void dsp_load_state(u32 *state) { __asm__ __volatile__ ( "mcrr p0, 0, %0, %1, c0\n" : : "r" (state[0]), "r" (state[1])); } static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: thread->cpu_context.extra[0] = 0; thread->cpu_context.extra[1] = 0; break; case THREAD_NOTIFY_SWITCH: dsp_save_state(current_thread_info()->cpu_context.extra); dsp_load_state(thread->cpu_context.extra); break; } return NOTIFY_DONE; } static struct notifier_block dsp_notifier_block = { .notifier_call = dsp_do, }; #ifdef CONFIG_IWMMXT static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = t; switch (cmd) { case THREAD_NOTIFY_FLUSH: /* * flush_thread() zeroes thread->fpstate, so no need * to do anything here. * * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ case THREAD_NOTIFY_EXIT: iwmmxt_task_release(thread); break; case THREAD_NOTIFY_SWITCH: iwmmxt_task_switch(thread); break; } return NOTIFY_DONE; } static struct notifier_block iwmmxt_notifier_block = { .notifier_call = iwmmxt_do, }; #endif static u32 __init xscale_cp_access_read(void) { u32 value; __asm__ __volatile__ ( "mrc p15, 0, %0, c15, c1, 0\n\t" : "=r" (value)); return value; } static void __init xscale_cp_access_write(u32 value) { u32 temp; __asm__ __volatile__ ( "mcr p15, 0, %1, c15, c1, 0\n\t" "mrc p15, 0, %0, c15, c1, 0\n\t" "mov %0, %0\n\t" "sub pc, pc, #4\n\t" : "=r" (temp) : "r" (value)); } /* * Detect whether we have a MAC coprocessor (40 bit register) or an * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000 * into a coprocessor register and reading it back, and checking * whether the upper word survived intact. */ static int __init cpu_has_iwmmxt(void) { u32 lo; u32 hi; /* * This sequence is interpreted by the DSP coprocessor as: * mar acc0, %2, %3 * mra %0, %1, acc0 * * And by the iWMMXt coprocessor as: * tmcrr wR0, %2, %3 * tmrrc %0, %1, wR0 */ __asm__ __volatile__ ( "mcrr p0, 0, %2, %3, c0\n" "mrrc p0, 0, %0, %1, c0\n" : "=r" (lo), "=r" (hi) : "r" (0), "r" (0x100)); return !!hi; } /* * If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we * disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy * switch code handle iWMMXt context switching. If on the other * hand the CPU has a DSP coprocessor, we keep access to CP0 enabled * all the time, and save/restore acc0 on context switch in non-lazy * fashion. 
*/ static int __init xscale_cp0_init(void) { u32 cp_access; /* do not attempt to probe iwmmxt on non-xscale family CPUs */ if (!cpu_is_xscale_family()) return 0; cp_access = xscale_cp_access_read() & ~3; xscale_cp_access_write(cp_access | 1); if (cpu_has_iwmmxt()) { #ifndef CONFIG_IWMMXT pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n"); #else pr_info("XScale iWMMXt coprocessor detected.\n"); elf_hwcap |= HWCAP_IWMMXT; thread_register_notifier(&iwmmxt_notifier_block); register_iwmmxt_undef_handler(); #endif } else { pr_info("XScale DSP coprocessor detected.\n"); thread_register_notifier(&dsp_notifier_block); cp_access |= 1; } xscale_cp_access_write(cp_access); return 0; } late_initcall(xscale_cp0_init);
linux-master
arch/arm/kernel/xscale-cp0.c
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/return_address.c * * Copyright (C) 2009 Uwe Kleine-Koenig <[email protected]> * for Pengutronix */ #include <linux/export.h> #include <linux/ftrace.h> #include <linux/sched.h> #include <asm/stacktrace.h> struct return_address_data { unsigned int level; void *addr; }; static bool save_return_addr(void *d, unsigned long pc) { struct return_address_data *data = d; if (!data->level) { data->addr = (void *)pc; return false; } else { --data->level; return true; } } void *return_address(unsigned int level) { struct return_address_data data; struct stackframe frame; data.level = level + 2; data.addr = NULL; frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); here: frame.pc = (unsigned long)&&here; #ifdef CONFIG_KRETPROBES frame.kr_cur = NULL; frame.tsk = current; #endif frame.ex_frame = false; walk_stackframe(&frame, save_return_addr, &data); if (!data.level) return data.addr; else return NULL; } EXPORT_SYMBOL_GPL(return_address);
linux-master
arch/arm/kernel/return_address.c
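return_address(level) above counts frames from the caller's point of view, which is why it starts at level + 2 to skip the walker's own frames. For level 0 the compiler builtin used to seed the walk already answers the question directly; the hedged host-side sketch below shows just that builtin (deeper levels need a real frame walk as in the file above).

/*
 * Minimal analogue of return_address(0): __builtin_return_address(0)
 * yields the immediate caller.  Build with frame pointers enabled
 * (e.g. -O0 or -fno-omit-frame-pointer) for reliable results.
 */
#include <stdio.h>

static __attribute__((noinline)) void *who_called_me(void)
{
	return __builtin_return_address(0);	/* level 0: immediate caller */
}

int main(void)
{
	printf("called from %p (expect an address inside main)\n",
	       who_called_me());
	return 0;
}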
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/signal.c * * Copyright (C) 1995-2009 Russell King */ #include <linux/errno.h> #include <linux/random.h> #include <linux/signal.h> #include <linux/personality.h> #include <linux/uaccess.h> #include <linux/resume_user_mode.h> #include <linux/uprobes.h> #include <linux/syscalls.h> #include <asm/elf.h> #include <asm/cacheflush.h> #include <asm/traps.h> #include <asm/unistd.h> #include <asm/vfp.h> #include <asm/syscalls.h> #include "signal.h" extern const unsigned long sigreturn_codes[17]; static unsigned long signal_return_offset; #ifdef CONFIG_IWMMXT static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame) { char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; int err = 0; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (test_thread_flag(TIF_USING_IWMMXT)) { kframe->magic = IWMMXT_MAGIC; kframe->size = IWMMXT_STORAGE_SIZE; iwmmxt_task_copy(current_thread_info(), &kframe->storage); } else { /* * For bug-compatibility with older kernels, some space * has to be reserved for iWMMXt even if it's not used. * Set the magic and size appropriately so that properly * written userspace can skip it reliably: */ *kframe = (struct iwmmxt_sigframe) { .magic = DUMMY_MAGIC, .size = IWMMXT_STORAGE_SIZE, }; } err = __copy_to_user(frame, kframe, sizeof(*kframe)); return err; } static int restore_iwmmxt_context(char __user **auxp) { struct iwmmxt_sigframe __user *frame = (struct iwmmxt_sigframe __user *)*auxp; char kbuf[sizeof(*frame) + 8]; struct iwmmxt_sigframe *kframe; /* the iWMMXt context must be 64 bit aligned */ kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7); if (__copy_from_user(kframe, frame, sizeof(*frame))) return -1; /* * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy * block is discarded for compatibility with setup_sigframe() if * present, but we don't mandate its presence. If some other * magic is here, it's not for us: */ if (!test_thread_flag(TIF_USING_IWMMXT) && kframe->magic != DUMMY_MAGIC) return 0; if (kframe->size != IWMMXT_STORAGE_SIZE) return -1; if (test_thread_flag(TIF_USING_IWMMXT)) { if (kframe->magic != IWMMXT_MAGIC) return -1; iwmmxt_task_restore(current_thread_info(), &kframe->storage); } *auxp += IWMMXT_STORAGE_SIZE; return 0; } #endif #ifdef CONFIG_VFP static int preserve_vfp_context(struct vfp_sigframe __user *frame) { struct vfp_sigframe kframe; int err = 0; memset(&kframe, 0, sizeof(kframe)); kframe.magic = VFP_MAGIC; kframe.size = VFP_STORAGE_SIZE; err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc); if (err) return err; return __copy_to_user(frame, &kframe, sizeof(kframe)); } static int restore_vfp_context(char __user **auxp) { struct vfp_sigframe frame; int err; err = __copy_from_user(&frame, *auxp, sizeof(frame)); if (err) return err; if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE) return -EINVAL; *auxp += sizeof(frame); return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc); } #endif /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. 
*/ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) { struct sigcontext context; char __user *aux; sigset_t set; int err; err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); if (err == 0) set_current_blocked(&set); err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context)); if (err == 0) { regs->ARM_r0 = context.arm_r0; regs->ARM_r1 = context.arm_r1; regs->ARM_r2 = context.arm_r2; regs->ARM_r3 = context.arm_r3; regs->ARM_r4 = context.arm_r4; regs->ARM_r5 = context.arm_r5; regs->ARM_r6 = context.arm_r6; regs->ARM_r7 = context.arm_r7; regs->ARM_r8 = context.arm_r8; regs->ARM_r9 = context.arm_r9; regs->ARM_r10 = context.arm_r10; regs->ARM_fp = context.arm_fp; regs->ARM_ip = context.arm_ip; regs->ARM_sp = context.arm_sp; regs->ARM_lr = context.arm_lr; regs->ARM_pc = context.arm_pc; regs->ARM_cpsr = context.arm_cpsr; } err |= !valid_user_regs(regs); aux = (char __user *) sf->uc.uc_regspace; #ifdef CONFIG_IWMMXT if (err == 0) err |= restore_iwmmxt_context(&aux); #endif #ifdef CONFIG_VFP if (err == 0) err |= restore_vfp_context(&aux); #endif return err; } asmlinkage int sys_sigreturn(struct pt_regs *regs) { struct sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. */ if (regs->ARM_sp & 7) goto badframe; frame = (struct sigframe __user *)regs->ARM_sp; if (!access_ok(frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, frame)) goto badframe; return regs->ARM_r0; badframe: force_sig(SIGSEGV); return 0; } asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, * then 'sp' should be word aligned here. If it's * not, then the user is trying to mess with us. 
*/ if (regs->ARM_sp & 7) goto badframe; frame = (struct rt_sigframe __user *)regs->ARM_sp; if (!access_ok(frame, sizeof (*frame))) goto badframe; if (restore_sigframe(regs, &frame->sig)) goto badframe; if (restore_altstack(&frame->sig.uc.uc_stack)) goto badframe; return regs->ARM_r0; badframe: force_sig(SIGSEGV); return 0; } static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { struct aux_sigframe __user *aux; struct sigcontext context; int err = 0; context = (struct sigcontext) { .arm_r0 = regs->ARM_r0, .arm_r1 = regs->ARM_r1, .arm_r2 = regs->ARM_r2, .arm_r3 = regs->ARM_r3, .arm_r4 = regs->ARM_r4, .arm_r5 = regs->ARM_r5, .arm_r6 = regs->ARM_r6, .arm_r7 = regs->ARM_r7, .arm_r8 = regs->ARM_r8, .arm_r9 = regs->ARM_r9, .arm_r10 = regs->ARM_r10, .arm_fp = regs->ARM_fp, .arm_ip = regs->ARM_ip, .arm_sp = regs->ARM_sp, .arm_lr = regs->ARM_lr, .arm_pc = regs->ARM_pc, .arm_cpsr = regs->ARM_cpsr, .trap_no = current->thread.trap_no, .error_code = current->thread.error_code, .fault_address = current->thread.address, .oldmask = set->sig[0], }; err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context)); err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); aux = (struct aux_sigframe __user *) sf->uc.uc_regspace; #ifdef CONFIG_IWMMXT if (err == 0) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP if (err == 0) err |= preserve_vfp_context(&aux->vfp); #endif err |= __put_user(0, &aux->end_magic); return err; } static inline void __user * get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize) { unsigned long sp = sigsp(regs->ARM_sp, ksig); void __user *frame; /* * ATPCS B01 mandates 8-byte alignment */ frame = (void __user *)((sp - framesize) & ~7); /* * Check that we can actually write to the signal frame. */ if (!access_ok(frame, framesize)) frame = NULL; return frame; } static int setup_return(struct pt_regs *regs, struct ksignal *ksig, unsigned long __user *rc, void __user *frame) { unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; unsigned long handler_fdpic_GOT = 0; unsigned long retcode; unsigned int idx, thumb = 0; unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && (current->personality & FDPIC_FUNCPTRS); if (fdpic) { unsigned long __user *fdpic_func_desc = (unsigned long __user *)handler; if (__get_user(handler, &fdpic_func_desc[0]) || __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) return 1; } cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. */ if (ksig->ka.sa.sa_flags & SA_THIRTYTWO) cpsr = (cpsr & ~MODE_MASK) | USR_MODE; #ifdef CONFIG_ARM_THUMB if (elf_hwcap & HWCAP_THUMB) { /* * The LSB of the handler determines if we're going to * be using THUMB or ARM mode for this signal handler. */ thumb = handler & 1; /* * Clear the If-Then Thumb-2 execution state. ARM spec * requires this to be all 000s in ARM mode. Snapdragon * S4/Krait misbehaves on a Thumb=>ARM signal transition * without this. * * We must do this whenever we are running on a Thumb-2 * capable CPU, which includes ARMv6T2. However, we elect * to always do this to simplify the code; this field is * marked UNK/SBZP for older architectures. */ cpsr &= ~PSR_IT_MASK; if (thumb) { cpsr |= PSR_T_BIT; } else cpsr &= ~PSR_T_BIT; } #endif if (ksig->ka.sa.sa_flags & SA_RESTORER) { retcode = (unsigned long)ksig->ka.sa.sa_restorer; if (fdpic) { /* * We need code to load the function descriptor. 
* That code follows the standard sigreturn code * (6 words), and is made of 3 + 2 words for each * variant. The 4th copied word is the actual FD * address that the assembly code expects. */ idx = 6 + thumb * 3; if (ksig->ka.sa.sa_flags & SA_SIGINFO) idx += 5; if (__put_user(sigreturn_codes[idx], rc ) || __put_user(sigreturn_codes[idx+1], rc+1) || __put_user(sigreturn_codes[idx+2], rc+2) || __put_user(retcode, rc+3)) return 1; goto rc_finish; } } else { idx = thumb << 1; if (ksig->ka.sa.sa_flags & SA_SIGINFO) idx += 3; /* * Put the sigreturn code on the stack no matter which return * mechanism we use in order to remain ABI compliant */ if (__put_user(sigreturn_codes[idx], rc) || __put_user(sigreturn_codes[idx+1], rc+1)) return 1; rc_finish: #ifdef CONFIG_MMU if (cpsr & MODE32_BIT) { struct mm_struct *mm = current->mm; /* * 32-bit code can use the signal return page * except when the MPU has protected the vectors * page from PL0 */ retcode = mm->context.sigpage + signal_return_offset + (idx << 2) + thumb; } else #endif { /* * Ensure that the instruction cache sees * the return code written onto the stack. */ flush_icache_range((unsigned long)rc, (unsigned long)(rc + 3)); retcode = ((unsigned long)rc) + thumb; } } regs->ARM_r0 = ksig->sig; regs->ARM_sp = (unsigned long)frame; regs->ARM_lr = retcode; regs->ARM_pc = handler; if (fdpic) regs->ARM_r9 = handler_fdpic_GOT; regs->ARM_cpsr = cpsr; return 0; } static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; /* * Set uc.uc_flags to a value which sc.trap_no would never have. */ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags); err |= setup_sigframe(frame, regs, set); if (err == 0) err = setup_return(regs, ksig, frame->retcode, frame); return err; } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame)); int err = 0; if (!frame) return 1; err |= copy_siginfo_to_user(&frame->info, &ksig->info); err |= __put_user(0, &frame->sig.uc.uc_flags); err |= __put_user(NULL, &frame->sig.uc.uc_link); err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) err = setup_return(regs, ksig, frame->sig.retcode, frame); if (err == 0) { /* * For realtime signals we must also set the second and third * arguments for the signal handler. * -- Peter Maydell <[email protected]> 2000-12-06 */ regs->ARM_r1 = (unsigned long)&frame->info; regs->ARM_r2 = (unsigned long)&frame->sig.uc; } return err; } /* * OK, we're invoking a handler */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* * Perform fixup for the pre-signal frame. */ rseq_signal_deliver(ksig, regs); /* * Set up the stack frame */ if (ksig->ka.sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(ksig, oldset, regs); else ret = setup_frame(ksig, oldset, regs); /* * Check that the resulting registers are actually sane. */ ret |= !valid_user_regs(regs); signal_setup_done(ret, ksig, 0); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. 
*/ static int do_signal(struct pt_regs *regs, int syscall) { unsigned int retval = 0, continue_addr = 0, restart_addr = 0; struct ksignal ksig; int restart = 0; /* * If we were from a system call, check for system call restarting... */ if (syscall) { continue_addr = regs->ARM_pc; restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4); retval = regs->ARM_r0; /* * Prepare for system call restart. We do this here so that a * debugger will see the already changed PSW. */ switch (retval) { case -ERESTART_RESTARTBLOCK: restart -= 2; fallthrough; case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: restart++; regs->ARM_r0 = regs->ARM_ORIG_r0; regs->ARM_pc = restart_addr; break; } } /* * Get the signal to deliver. When running under ptrace, at this * point the debugger may change all our registers ... */ /* * Depending on the signal settings we may need to revert the * decision to restart the system call. But skip this if a * debugger has chosen to restart at a different PC. */ if (get_signal(&ksig)) { /* handler */ if (unlikely(restart) && regs->ARM_pc == restart_addr) { if (retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK || (retval == -ERESTARTSYS && !(ksig.ka.sa.sa_flags & SA_RESTART))) { regs->ARM_r0 = -EINTR; regs->ARM_pc = continue_addr; } } handle_signal(&ksig, regs); } else { /* no handler */ restore_saved_sigmask(); if (unlikely(restart) && regs->ARM_pc == restart_addr) { regs->ARM_pc = continue_addr; return restart; } } return 0; } asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { /* * The assembly code enters us with IRQs off, but it hasn't * informed the tracing code of that for efficiency reasons. * Update the trace code with the current status. */ trace_hardirqs_off(); do { if (likely(thread_flags & _TIF_NEED_RESCHED)) { schedule(); } else { if (unlikely(!user_mode(regs))) return 0; local_irq_enable(); if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { int restart = do_signal(regs, syscall); if (unlikely(restart)) { /* * Restart without handlers. * Deal with it without leaving * the kernel space. */ return restart; } syscall = 0; } else if (thread_flags & _TIF_UPROBE) { uprobe_notify_resume(regs); } else { resume_user_mode_work(regs); } } local_irq_disable(); thread_flags = read_thread_flags(); } while (thread_flags & _TIF_WORK_MASK); return 0; } struct page *get_signal_page(void) { unsigned long ptr; unsigned offset; struct page *page; void *addr; page = alloc_pages(GFP_KERNEL, 0); if (!page) return NULL; addr = page_address(page); /* Poison the entire page */ memset32(addr, __opcode_to_mem_arm(0xe7fddef1), PAGE_SIZE / sizeof(u32)); /* Give the signal return code some randomness */ offset = 0x200 + (get_random_u16() & 0x7fc); signal_return_offset = offset; /* Copy signal return handlers into the page */ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); /* Flush out all instructions in this page */ ptr = (unsigned long)addr; flush_icache_range(ptr, ptr + PAGE_SIZE); return page; } #ifdef CONFIG_DEBUG_RSEQ asmlinkage void do_rseq_syscall(struct pt_regs *regs) { rseq_syscall(regs); } #endif /* * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as * changes likely come with new fields that should be added below. 
*/ static_assert(NSIGILL == 11); static_assert(NSIGFPE == 15); static_assert(NSIGSEGV == 10); static_assert(NSIGBUS == 5); static_assert(NSIGTRAP == 6); static_assert(NSIGCHLD == 6); static_assert(NSIGSYS == 2); static_assert(sizeof(siginfo_t) == 128); static_assert(__alignof__(siginfo_t) == 4); static_assert(offsetof(siginfo_t, si_signo) == 0x00); static_assert(offsetof(siginfo_t, si_errno) == 0x04); static_assert(offsetof(siginfo_t, si_code) == 0x08); static_assert(offsetof(siginfo_t, si_pid) == 0x0c); static_assert(offsetof(siginfo_t, si_uid) == 0x10); static_assert(offsetof(siginfo_t, si_tid) == 0x0c); static_assert(offsetof(siginfo_t, si_overrun) == 0x10); static_assert(offsetof(siginfo_t, si_status) == 0x14); static_assert(offsetof(siginfo_t, si_utime) == 0x18); static_assert(offsetof(siginfo_t, si_stime) == 0x1c); static_assert(offsetof(siginfo_t, si_value) == 0x14); static_assert(offsetof(siginfo_t, si_int) == 0x14); static_assert(offsetof(siginfo_t, si_ptr) == 0x14); static_assert(offsetof(siginfo_t, si_addr) == 0x0c); static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x10); static_assert(offsetof(siginfo_t, si_lower) == 0x14); static_assert(offsetof(siginfo_t, si_upper) == 0x18); static_assert(offsetof(siginfo_t, si_pkey) == 0x14); static_assert(offsetof(siginfo_t, si_perf_data) == 0x10); static_assert(offsetof(siginfo_t, si_perf_type) == 0x14); static_assert(offsetof(siginfo_t, si_perf_flags) == 0x18); static_assert(offsetof(siginfo_t, si_band) == 0x0c); static_assert(offsetof(siginfo_t, si_fd) == 0x10); static_assert(offsetof(siginfo_t, si_call_addr) == 0x0c); static_assert(offsetof(siginfo_t, si_syscall) == 0x10); static_assert(offsetof(siginfo_t, si_arch) == 0x14);
linux-master
arch/arm/kernel/signal.c
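From userspace, the frame that setup_rt_frame()/setup_return() build is what makes the three-argument SA_SIGINFO handler work: the signal number, a siginfo_t pointer and a ucontext_t pointer arrive in r0/r1/r2, all pointing into the rt_sigframe written on the user stack, and the handler returns through the sigreturn trampoline. The sketch below is a portable illustration; it makes no ARM-specific register access, and printf in a handler is tolerated here only because the signal is raised synchronously.

/*
 * Userspace view of the rt signal frame: install an SA_SIGINFO handler
 * and observe the three arguments the kernel arranged for it.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void handler(int sig, siginfo_t *info, void *uc)
{
	/* uc points into the rt_sigframe the kernel wrote on our stack */
	printf("sig=%d si_code=%d ucontext at %p\n", sig, info->si_code, uc);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* request the three-argument form */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* handler returns via the sigreturn code */
	return 0;
}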
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/cpu.h> #include <asm/bugs.h> #include <asm/proc-fns.h> void check_other_bugs(void) { #ifdef MULTI_CPU if (cpu_check_bugs) cpu_check_bugs(); #endif } void __init arch_cpu_finalize_init(void) { check_writebuffer_bugs(); check_other_bugs(); }
linux-master
arch/arm/kernel/bugs.c
// SPDX-License-Identifier: GPL-2.0 /* * arch/arm/kernel/kgdb.c * * ARM KGDB support * * Copyright (c) 2002-2004 MontaVista Software, Inc * Copyright (c) 2008 Wind River Systems, Inc. * * Authors: George Davis <[email protected]> * Deepak Saxena <[email protected]> */ #include <linux/irq.h> #include <linux/kdebug.h> #include <linux/kgdb.h> #include <linux/uaccess.h> #include <asm/patch.h> #include <asm/traps.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, { "r1", 4, offsetof(struct pt_regs, ARM_r1)}, { "r2", 4, offsetof(struct pt_regs, ARM_r2)}, { "r3", 4, offsetof(struct pt_regs, ARM_r3)}, { "r4", 4, offsetof(struct pt_regs, ARM_r4)}, { "r5", 4, offsetof(struct pt_regs, ARM_r5)}, { "r6", 4, offsetof(struct pt_regs, ARM_r6)}, { "r7", 4, offsetof(struct pt_regs, ARM_r7)}, { "r8", 4, offsetof(struct pt_regs, ARM_r8)}, { "r9", 4, offsetof(struct pt_regs, ARM_r9)}, { "r10", 4, offsetof(struct pt_regs, ARM_r10)}, { "fp", 4, offsetof(struct pt_regs, ARM_fp)}, { "ip", 4, offsetof(struct pt_regs, ARM_ip)}, { "sp", 4, offsetof(struct pt_regs, ARM_sp)}, { "lr", 4, offsetof(struct pt_regs, ARM_lr)}, { "pc", 4, offsetof(struct pt_regs, ARM_pc)}, { "f0", 12, -1 }, { "f1", 12, -1 }, { "f2", 12, -1 }, { "f3", 12, -1 }, { "f4", 12, -1 }, { "f5", 12, -1 }, { "f6", 12, -1 }, { "f7", 12, -1 }, { "fps", 4, -1 }, { "cpsr", 4, offsetof(struct pt_regs, ARM_cpsr)}, }; char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); else memset(mem, 0, dbg_reg_def[regno].size); return dbg_reg_def[regno].name; } int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return -EINVAL; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { struct thread_info *ti; int regno; /* Just making sure... */ if (task == NULL) return; /* Initialize to zero */ for (regno = 0; regno < GDB_MAX_REGS; regno++) gdb_regs[regno] = 0; /* Otherwise, we have only some registers from switch_to() */ ti = task_thread_info(task); gdb_regs[_R4] = ti->cpu_context.r4; gdb_regs[_R5] = ti->cpu_context.r5; gdb_regs[_R6] = ti->cpu_context.r6; gdb_regs[_R7] = ti->cpu_context.r7; gdb_regs[_R8] = ti->cpu_context.r8; gdb_regs[_R9] = ti->cpu_context.r9; gdb_regs[_R10] = ti->cpu_context.sl; gdb_regs[_FP] = ti->cpu_context.fp; gdb_regs[_SPT] = ti->cpu_context.sp; gdb_regs[_PC] = ti->cpu_context.pc; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) { regs->ARM_pc = pc; } static int compiled_break; int kgdb_arch_handle_exception(int exception_vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *linux_regs) { unsigned long addr; char *ptr; switch (remcom_in_buffer[0]) { case 'D': case 'k': case 'c': /* * Try to read optional parameter, pc unchanged if no parm. * If this was a compiled breakpoint, we need to move * to the next instruction or we will just breakpoint * over and over again. 
*/ ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ARM_pc = addr; else if (compiled_break == 1) linux_regs->ARM_pc += 4; compiled_break = 0; return 0; } return -1; } static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr) { kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) { compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); return 0; } static struct undef_hook kgdb_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_BREAKINST, .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_brk_fn }; static struct undef_hook kgdb_brkpt_thumb_hook = { .instr_mask = 0xffff, .instr_val = KGDB_BREAKINST & 0xffff, .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = PSR_T_BIT | SVC_MODE, .fn = kgdb_brk_fn }; static struct undef_hook kgdb_compiled_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_COMPILED_BREAK, .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_compiled_brk_fn }; static struct undef_hook kgdb_compiled_brkpt_thumb_hook = { .instr_mask = 0xffff, .instr_val = KGDB_COMPILED_BREAK & 0xffff, .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = PSR_T_BIT | SVC_MODE, .fn = kgdb_compiled_brk_fn }; static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; if (kgdb_handle_exception(1, args->signr, cmd, regs)) return NOTIFY_DONE; return NOTIFY_STOP; } static int kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { unsigned long flags; int ret; local_irq_save(flags); ret = __kgdb_notify(ptr, cmd); local_irq_restore(flags); return ret; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_notify, .priority = -INT_MAX, }; /** * kgdb_arch_init - Perform any architecture specific initalization. * * This function will handle the initalization of any architecture * specific callbacks. */ int kgdb_arch_init(void) { int ret = register_die_notifier(&kgdb_notifier); if (ret != 0) return ret; register_undef_hook(&kgdb_brkpt_arm_hook); register_undef_hook(&kgdb_brkpt_thumb_hook); register_undef_hook(&kgdb_compiled_brkpt_arm_hook); register_undef_hook(&kgdb_compiled_brkpt_thumb_hook); return 0; } /** * kgdb_arch_exit - Perform any architecture specific uninitalization. * * This function will handle the uninitalization of any architecture * specific callbacks, for dynamic registration and unregistration. */ void kgdb_arch_exit(void) { unregister_undef_hook(&kgdb_brkpt_arm_hook); unregister_undef_hook(&kgdb_brkpt_thumb_hook); unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook); unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook); unregister_die_notifier(&kgdb_notifier); } int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; /* patch_text() only supports int-sized breakpoints */ BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; /* Machine is already stopped, so we can use __patch_text() directly */ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); return err; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { /* Machine is already stopped, so we can use __patch_text() directly */ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); return 0; } /* * Register our undef instruction hooks with ARM undef core. 
* We register a hook specifically looking for the KGB break inst * and we handle the normal undef case within the do_undefinstr * handler. */ const struct kgdb_arch arch_kgdb_ops = { #ifndef __ARMEB__ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7} #else /* ! __ARMEB__ */ .gdb_bpt_instr = {0xe7, 0xff, 0xde, 0xfe} #endif };
linux-master
arch/arm/kernel/kgdb.c
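The dbg_reg_def[] table above is a name/size/offset map into pt_regs, with offset -1 marking registers (the legacy FPA set) that the context does not carry and that therefore read back as zero. The sketch below reproduces that lookup pattern against a cut-down stand-in structure; fake_regs, reg_def and get_reg() are inventions for the example, not kernel interfaces.

/*
 * Sketch of the dbg_reg_def[] idea: map debugger register numbers to
 * offsets inside a register structure with offsetof(), returning zeroes
 * for registers the context does not hold (offset -1).
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_regs { unsigned long r0, r1, pc, cpsr; };

struct reg_def { const char *name; int size; int offset; };

static const struct reg_def reg_def[] = {
	{ "r0",   4, offsetof(struct fake_regs, r0)   },
	{ "r1",   4, offsetof(struct fake_regs, r1)   },
	{ "f0",  12, -1 },	/* legacy FPA register: always reported as 0 */
	{ "pc",   4, offsetof(struct fake_regs, pc)   },
	{ "cpsr", 4, offsetof(struct fake_regs, cpsr) },
};

static const char *get_reg(int regno, void *mem, const struct fake_regs *regs)
{
	if (regno < 0 || regno >= (int)(sizeof(reg_def) / sizeof(reg_def[0])))
		return NULL;
	if (reg_def[regno].offset != -1)
		memcpy(mem, (const char *)regs + reg_def[regno].offset,
		       reg_def[regno].size);
	else
		memset(mem, 0, reg_def[regno].size);
	return reg_def[regno].name;
}

int main(void)
{
	struct fake_regs regs = { 1, 2, 0x80008000, 0x600000d3 };
	unsigned long val = 0;
	const char *name = get_reg(3, &val, &regs);	/* "pc" in this table */

	printf("%s = 0x%lx\n", name, val);
	return 0;
}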
// SPDX-License-Identifier: GPL-2.0 /* * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. * * ARMv7 support: Jean Pihet <[email protected]> * 2010 (c) MontaVista Software, LLC. * * Copied from ARMv6 code, with the low level code inspired * by the ARMv7 Oprofile code. * * Cortex-A8 has up to 4 configurable performance counters and * a single cycle counter. * Cortex-A9 has up to 31 configurable performance counters and * a single cycle counter. * * All counters can be enabled/disabled and IRQ masked separately. The cycle * counter and all 4 performance counters together can be reset separately. */ #ifdef CONFIG_CPU_V7 #include <asm/cp15.h> #include <asm/cputype.h> #include <asm/irq_regs.h> #include <asm/vfp.h> #include "../vfp/vfpinstr.h" #include <linux/of.h> #include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> /* * Common ARMv7 event types * * Note: An implementation may not be able to count all of these events * but the encodings are considered to be `reserved' in the case that * they are not available. */ #define ARMV7_PERFCTR_PMNC_SW_INCR 0x00 #define ARMV7_PERFCTR_L1_ICACHE_REFILL 0x01 #define ARMV7_PERFCTR_ITLB_REFILL 0x02 #define ARMV7_PERFCTR_L1_DCACHE_REFILL 0x03 #define ARMV7_PERFCTR_L1_DCACHE_ACCESS 0x04 #define ARMV7_PERFCTR_DTLB_REFILL 0x05 #define ARMV7_PERFCTR_MEM_READ 0x06 #define ARMV7_PERFCTR_MEM_WRITE 0x07 #define ARMV7_PERFCTR_INSTR_EXECUTED 0x08 #define ARMV7_PERFCTR_EXC_TAKEN 0x09 #define ARMV7_PERFCTR_EXC_EXECUTED 0x0A #define ARMV7_PERFCTR_CID_WRITE 0x0B /* * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. * It counts: * - all (taken) branch instructions, * - instructions that explicitly write the PC, * - exception generating instructions. */ #define ARMV7_PERFCTR_PC_WRITE 0x0C #define ARMV7_PERFCTR_PC_IMM_BRANCH 0x0D #define ARMV7_PERFCTR_PC_PROC_RETURN 0x0E #define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F #define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED 0x10 #define ARMV7_PERFCTR_CLOCK_CYCLES 0x11 #define ARMV7_PERFCTR_PC_BRANCH_PRED 0x12 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). 
*/ #define ARMV7_PERFCTR_MEM_ACCESS 0x13 #define ARMV7_PERFCTR_L1_ICACHE_ACCESS 0x14 #define ARMV7_PERFCTR_L1_DCACHE_WB 0x15 #define ARMV7_PERFCTR_L2_CACHE_ACCESS 0x16 #define ARMV7_PERFCTR_L2_CACHE_REFILL 0x17 #define ARMV7_PERFCTR_L2_CACHE_WB 0x18 #define ARMV7_PERFCTR_BUS_ACCESS 0x19 #define ARMV7_PERFCTR_MEM_ERROR 0x1A #define ARMV7_PERFCTR_INSTR_SPEC 0x1B #define ARMV7_PERFCTR_TTBR_WRITE 0x1C #define ARMV7_PERFCTR_BUS_CYCLES 0x1D #define ARMV7_PERFCTR_CPU_CYCLES 0xFF /* ARMv7 Cortex-A8 specific event types */ #define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS 0x43 #define ARMV7_A8_PERFCTR_L2_CACHE_REFILL 0x44 #define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS 0x50 #define ARMV7_A8_PERFCTR_STALL_ISIDE 0x56 /* ARMv7 Cortex-A9 specific event types */ #define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME 0x68 #define ARMV7_A9_PERFCTR_STALL_ICACHE 0x60 #define ARMV7_A9_PERFCTR_STALL_DISPATCH 0x66 /* ARMv7 Cortex-A5 specific event types */ #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL 0xc2 #define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP 0xc3 /* ARMv7 Cortex-A15 specific event types */ #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 #define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ 0x42 #define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE 0x43 #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ 0x4C #define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE 0x4D #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ 0x50 #define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ 0x52 #define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE 0x53 #define ARMV7_A15_PERFCTR_PC_WRITE_SPEC 0x76 /* ARMv7 Cortex-A12 specific event types */ #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ 0x40 #define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41 #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ 0x50 #define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51 #define ARMV7_A12_PERFCTR_PC_WRITE_SPEC 0x76 #define ARMV7_A12_PERFCTR_PF_TLB_REFILL 0xe7 /* ARMv7 Krait specific event types */ #define KRAIT_PMRESR0_GROUP0 0xcc #define KRAIT_PMRESR1_GROUP0 0xd0 #define KRAIT_PMRESR2_GROUP0 0xd4 #define KRAIT_VPMRESR0_GROUP0 0xd8 #define KRAIT_PERFCTR_L1_ICACHE_ACCESS 0x10011 #define KRAIT_PERFCTR_L1_ICACHE_MISS 0x10010 #define KRAIT_PERFCTR_L1_ITLB_ACCESS 0x12222 #define KRAIT_PERFCTR_L1_DTLB_ACCESS 0x12210 /* ARMv7 Scorpion specific event types */ #define SCORPION_LPM0_GROUP0 0x4c #define SCORPION_LPM1_GROUP0 0x50 #define SCORPION_LPM2_GROUP0 0x54 #define SCORPION_L2LPM_GROUP0 0x58 #define SCORPION_VLPM_GROUP0 0x5c #define SCORPION_ICACHE_ACCESS 0x10053 #define SCORPION_ICACHE_MISS 0x10052 #define SCORPION_DTLB_ACCESS 0x12013 #define SCORPION_DTLB_MISS 0x12012 #define SCORPION_ITLB_MISS 0x12021 /* * Cortex-A8 HW events mapping * * The hardware events that we support. We do support cache operations but * we have harvard caches and no way to combine instruction and data * accesses/misses in hardware. 
*/ static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, }; static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. */ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Cortex-A9 HW events mapping */ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, }; static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. 
*/ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Cortex-A5 HW events mapping */ static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, /* * The prefetch counters don't differentiate between the I side and the * D side. 
*/ [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Cortex-A15 HW events mapping */ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, }; static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE, /* * Not all performance counters differentiate between read and write * accesses/misses so we're not always strictly correct, but it's the * best we can do. Writes and reads get combined in these cases. 
*/ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ, [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ, [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE, [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Cortex-A7 HW events mapping */ static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, }; static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. 
*/ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Cortex-A12 HW events mapping */ static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, }; static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, /* * Not all performance counters differentiate between read and write * accesses/misses so we're not always strictly correct, but it's the * best we can do. Writes and reads get combined in these cases. 
*/ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ, [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE, [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Krait HW events mapping */ static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, }; static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, }; static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. 
*/ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS, [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS, [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS, [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS, [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; /* * Scorpion HW events mapping */ static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = { PERF_MAP_ALL_UNSUPPORTED, [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, }; static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { PERF_CACHE_MAP_ALL_UNSUPPORTED, /* * The performance counters don't differentiate between read and write * accesses/misses so this isn't strictly correct, but it's the best we * can do. Writes and reads get combined. */ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS, [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS, /* * Only ITLB misses and DTLB refills are supported. If users want the * DTLB refills misses a raw counter must be used. 
*/ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS, [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS, [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS, [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, }; PMU_FORMAT_ATTR(event, "config:0-7"); static struct attribute *armv7_pmu_format_attrs[] = { &format_attr_event.attr, NULL, }; static struct attribute_group armv7_pmu_format_attr_group = { .name = "format", .attrs = armv7_pmu_format_attrs, }; #define ARMV7_EVENT_ATTR_RESOLVE(m) #m #define ARMV7_EVENT_ATTR(name, config) \ PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \ "event=" ARMV7_EVENT_ATTR_RESOLVE(config)) ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR); ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL); ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL); ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL); ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS); ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL); ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ); ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE); ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED); ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN); ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED); ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE); ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE); ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH); ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN); ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS); ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED); ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES); ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED); static struct attribute *armv7_pmuv1_event_attrs[] = { &armv7_event_attr_sw_incr.attr.attr, &armv7_event_attr_l1i_cache_refill.attr.attr, &armv7_event_attr_l1i_tlb_refill.attr.attr, &armv7_event_attr_l1d_cache_refill.attr.attr, &armv7_event_attr_l1d_cache.attr.attr, &armv7_event_attr_l1d_tlb_refill.attr.attr, &armv7_event_attr_ld_retired.attr.attr, &armv7_event_attr_st_retired.attr.attr, &armv7_event_attr_inst_retired.attr.attr, &armv7_event_attr_exc_taken.attr.attr, &armv7_event_attr_exc_return.attr.attr, &armv7_event_attr_cid_write_retired.attr.attr, &armv7_event_attr_pc_write_retired.attr.attr, &armv7_event_attr_br_immed_retired.attr.attr, &armv7_event_attr_br_return_retired.attr.attr, &armv7_event_attr_unaligned_ldst_retired.attr.attr, &armv7_event_attr_br_mis_pred.attr.attr, &armv7_event_attr_cpu_cycles.attr.attr, &armv7_event_attr_br_pred.attr.attr, NULL, }; static struct attribute_group armv7_pmuv1_events_attr_group = { .name = "events", .attrs = armv7_pmuv1_event_attrs, }; ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS); ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS); ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB); ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS); ARMV7_EVENT_ATTR(l2d_cache_refill, 
ARMV7_PERFCTR_L2_CACHE_REFILL); ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB); ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS); ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR); ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC); ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE); ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES); static struct attribute *armv7_pmuv2_event_attrs[] = { &armv7_event_attr_sw_incr.attr.attr, &armv7_event_attr_l1i_cache_refill.attr.attr, &armv7_event_attr_l1i_tlb_refill.attr.attr, &armv7_event_attr_l1d_cache_refill.attr.attr, &armv7_event_attr_l1d_cache.attr.attr, &armv7_event_attr_l1d_tlb_refill.attr.attr, &armv7_event_attr_ld_retired.attr.attr, &armv7_event_attr_st_retired.attr.attr, &armv7_event_attr_inst_retired.attr.attr, &armv7_event_attr_exc_taken.attr.attr, &armv7_event_attr_exc_return.attr.attr, &armv7_event_attr_cid_write_retired.attr.attr, &armv7_event_attr_pc_write_retired.attr.attr, &armv7_event_attr_br_immed_retired.attr.attr, &armv7_event_attr_br_return_retired.attr.attr, &armv7_event_attr_unaligned_ldst_retired.attr.attr, &armv7_event_attr_br_mis_pred.attr.attr, &armv7_event_attr_cpu_cycles.attr.attr, &armv7_event_attr_br_pred.attr.attr, &armv7_event_attr_mem_access.attr.attr, &armv7_event_attr_l1i_cache.attr.attr, &armv7_event_attr_l1d_cache_wb.attr.attr, &armv7_event_attr_l2d_cache.attr.attr, &armv7_event_attr_l2d_cache_refill.attr.attr, &armv7_event_attr_l2d_cache_wb.attr.attr, &armv7_event_attr_bus_access.attr.attr, &armv7_event_attr_memory_error.attr.attr, &armv7_event_attr_inst_spec.attr.attr, &armv7_event_attr_ttbr_write_retired.attr.attr, &armv7_event_attr_bus_cycles.attr.attr, NULL, }; static struct attribute_group armv7_pmuv2_events_attr_group = { .name = "events", .attrs = armv7_pmuv2_event_attrs, }; /* * Perf Events' indices */ #define ARMV7_IDX_CYCLE_COUNTER 0 #define ARMV7_IDX_COUNTER0 1 #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) #define ARMV7_MAX_COUNTERS 32 #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) /* * ARMv7 low level PMNC access */ /* * Perf Event to low level counters mapping */ #define ARMV7_IDX_TO_COUNTER(x) \ (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) /* * Per-CPU PMNC: config reg */ #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ #define ARMV7_PMNC_N_MASK 0x1f #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ /* * FLAG: counters overflow flag status reg */ #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK /* * PMXEVTYPER: Event selection reg */ #define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ /* * Event filters for PMUv2 */ #define ARMV7_EXCLUDE_PL1 BIT(31) #define ARMV7_EXCLUDE_USER BIT(30) #define ARMV7_INCLUDE_HYP BIT(27) /* * Secure debug enable reg */ #define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */ static inline u32 armv7_pmnc_read(void) { u32 val; asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); return val; } static inline void armv7_pmnc_write(u32 val) { val &= 
ARMV7_PMNC_MASK; isb(); asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); } static inline int armv7_pmnc_has_overflowed(u32 pmnc) { return pmnc & ARMV7_OVERFLOWED_MASK; } static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) { return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); } static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) { return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx)); } static inline void armv7_pmnc_select_counter(int idx) { u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); isb(); } static inline u64 armv7pmu_read_counter(struct perf_event *event) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; u32 value = 0; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { pr_err("CPU%u reading wrong counter %d\n", smp_processor_id(), idx); } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); } else { armv7_pmnc_select_counter(idx); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value)); } return value; } static inline void armv7pmu_write_counter(struct perf_event *event, u64 value) { struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { pr_err("CPU%u writing wrong counter %d\n", smp_processor_id(), idx); } else if (idx == ARMV7_IDX_CYCLE_COUNTER) { asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value)); } else { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value)); } } static inline void armv7_pmnc_write_evtsel(int idx, u32 val) { armv7_pmnc_select_counter(idx); val &= ARMV7_EVTYPE_MASK; asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); } static inline void armv7_pmnc_enable_counter(int idx) { u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); } static inline void armv7_pmnc_disable_counter(int idx) { u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); } static inline void armv7_pmnc_enable_intens(int idx) { u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); } static inline void armv7_pmnc_disable_intens(int idx) { u32 counter = ARMV7_IDX_TO_COUNTER(idx); asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); isb(); /* Clear the overflow flag in case an interrupt is pending. 
*/ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter))); isb(); } static inline u32 armv7_pmnc_getreset_flags(void) { u32 val; /* Read */ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); /* Write to clear flags */ val &= ARMV7_FLAG_MASK; asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); return val; } #ifdef DEBUG static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) { u32 val; unsigned int cnt; pr_info("PMNC registers dump:\n"); asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); pr_info("PMNC =0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); pr_info("CNTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); pr_info("INTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); pr_info("FLAGS =0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); pr_info("SELECT=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); pr_info("CCNT =0x%08x\n", val); for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); pr_info("CNT[%d] count =0x%08x\n", ARMV7_IDX_TO_COUNTER(cnt), val); asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); pr_info("CNT[%d] evtsel=0x%08x\n", ARMV7_IDX_TO_COUNTER(cnt), val); } } #endif static void armv7pmu_enable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", smp_processor_id(), idx); return; } /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter */ armv7_pmnc_disable_counter(idx); /* * Set event (if destined for PMNx counters) * We only need to set the event for the cycle counter if we * have the ability to perform event filtering. */ if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) armv7_pmnc_write_evtsel(idx, hwc->config_base); /* * Enable interrupt for this counter */ armv7_pmnc_enable_intens(idx); /* * Enable counter */ armv7_pmnc_enable_counter(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv7pmu_disable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); int idx = hwc->idx; if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) { pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", smp_processor_id(), idx); return; } /* * Disable counter and interrupt */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* * Disable counter */ armv7_pmnc_disable_counter(idx); /* * Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu) { u32 pmnc; struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events); struct pt_regs *regs; int idx; /* * Get and reset the IRQ flags */ pmnc = armv7_pmnc_getreset_flags(); /* * Did an overflow occur? 
*/ if (!armv7_pmnc_has_overflowed(pmnc)) return IRQ_NONE; /* * Handle the counter(s) overflow(s) */ regs = get_irq_regs(); for (idx = 0; idx < cpu_pmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; /* Ignore if we don't have an event. */ if (!event) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) continue; hwc = &event->hw; armpmu_event_update(event); perf_sample_data_init(&data, 0, hwc->last_period); if (!armpmu_event_set_period(event)) continue; if (perf_event_overflow(event, &data, regs)) cpu_pmu->disable(event); } /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ irq_work_run(); return IRQ_HANDLED; } static void armv7pmu_start(struct arm_pmu *cpu_pmu) { unsigned long flags; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Enable all counters */ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void armv7pmu_stop(struct arm_pmu *cpu_pmu) { unsigned long flags; struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable all counters */ armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. */ if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; return ARMV7_IDX_CYCLE_COUNTER; } /* * For anything other than a cycle counter, try and use * the events counters */ for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { if (!test_and_set_bit(idx, cpuc->used_mask)) return idx; } /* The counters are all in use. */ return -EAGAIN; } static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { clear_bit(event->hw.idx, cpuc->used_mask); } /* * Add an event filter to a given event. This will only work for PMUv2 PMUs. */ static int armv7pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) { unsigned long config_base = 0; if (attr->exclude_idle) return -EPERM; if (attr->exclude_user) config_base |= ARMV7_EXCLUDE_USER; if (attr->exclude_kernel) config_base |= ARMV7_EXCLUDE_PL1; if (!attr->exclude_hv) config_base |= ARMV7_INCLUDE_HYP; /* * Install the filter into config_base as this is used to * construct the event type. */ event->config_base = config_base; return 0; } static void armv7pmu_reset(void *info) { struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; u32 idx, nb_cnt = cpu_pmu->num_events, val; if (cpu_pmu->secure_access) { asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val)); val |= ARMV7_SDER_SUNIDEN; asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val)); } /* The counter and interrupt enable registers are unknown at reset. 
*/ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { armv7_pmnc_disable_counter(idx); armv7_pmnc_disable_intens(idx); } /* Initialize & Reset PMNC: C and P bits */ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); } static int armv7_a8_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a8_perf_map, &armv7_a8_perf_cache_map, 0xFF); } static int armv7_a9_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a9_perf_map, &armv7_a9_perf_cache_map, 0xFF); } static int armv7_a5_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a5_perf_map, &armv7_a5_perf_cache_map, 0xFF); } static int armv7_a15_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a15_perf_map, &armv7_a15_perf_cache_map, 0xFF); } static int armv7_a7_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a7_perf_map, &armv7_a7_perf_cache_map, 0xFF); } static int armv7_a12_map_event(struct perf_event *event) { return armpmu_map_event(event, &armv7_a12_perf_map, &armv7_a12_perf_cache_map, 0xFF); } static int krait_map_event(struct perf_event *event) { return armpmu_map_event(event, &krait_perf_map, &krait_perf_cache_map, 0xFFFFF); } static int krait_map_event_no_branch(struct perf_event *event) { return armpmu_map_event(event, &krait_perf_map_no_branch, &krait_perf_cache_map, 0xFFFFF); } static int scorpion_map_event(struct perf_event *event) { return armpmu_map_event(event, &scorpion_perf_map, &scorpion_perf_cache_map, 0xFFFFF); } static void armv7pmu_init(struct arm_pmu *cpu_pmu) { cpu_pmu->handle_irq = armv7pmu_handle_irq; cpu_pmu->enable = armv7pmu_enable_event; cpu_pmu->disable = armv7pmu_disable_event; cpu_pmu->read_counter = armv7pmu_read_counter; cpu_pmu->write_counter = armv7pmu_write_counter; cpu_pmu->get_event_idx = armv7pmu_get_event_idx; cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx; cpu_pmu->start = armv7pmu_start; cpu_pmu->stop = armv7pmu_stop; cpu_pmu->reset = armv7pmu_reset; }; static void armv7_read_num_pmnc_events(void *info) { int *nb_cnt = info; /* Read the nb of CNTx counters supported from PMNC */ *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; /* Add the CPU cycles counter */ *nb_cnt += 1; } static int armv7_probe_num_events(struct arm_pmu *arm_pmu) { return smp_call_function_any(&arm_pmu->supported_cpus, armv7_read_num_pmnc_events, &arm_pmu->num_events, 1); } static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a8"; cpu_pmu->map_event = armv7_a8_map_event; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv1_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a9"; cpu_pmu->map_event = armv7_a9_map_event; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv1_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a5"; cpu_pmu->map_event = armv7_a5_map_event; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv1_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a15_pmu_init(struct 
arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a15"; cpu_pmu->map_event = armv7_a15_map_event; cpu_pmu->set_event_filter = armv7pmu_set_event_filter; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv2_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a7"; cpu_pmu->map_event = armv7_a7_map_event; cpu_pmu->set_event_filter = armv7pmu_set_event_filter; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv2_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a12"; cpu_pmu->map_event = armv7_a12_map_event; cpu_pmu->set_event_filter = armv7pmu_set_event_filter; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv2_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return armv7_probe_num_events(cpu_pmu); } static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu) { int ret = armv7_a12_pmu_init(cpu_pmu); cpu_pmu->name = "armv7_cortex_a17"; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv7_pmuv2_events_attr_group; cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv7_pmu_format_attr_group; return ret; } /* * Krait Performance Monitor Region Event Selection Register (PMRESRn) * * 31 30 24 16 8 0 * +--------------------------------+ * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0 * +--------------------------------+ * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1 * +--------------------------------+ * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2 * +--------------------------------+ * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ? * +--------------------------------+ * EN | G=3 | G=2 | G=1 | G=0 * * Event Encoding: * * hwc->config_base = 0xNRCCG * * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR) * R = region register * CC = class of events the group G is choosing from * G = group or particular event * * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2 * * A region (R) corresponds to a piece of the CPU (execution unit, instruction * unit, etc.) while the event code (CC) corresponds to a particular class of * events (interrupts for example). An event code is broken down into * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for * example). 
*/ #define KRAIT_EVENT (1 << 16) #define VENUM_EVENT (2 << 16) #define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT) #define PMRESRn_EN BIT(31) #define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */ #define EVENT_GROUP(event) ((event) & 0xf) /* G */ #define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */ #define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */ #define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */ static u32 krait_read_pmresrn(int n) { u32 val; switch (n) { case 0: asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val)); break; case 1: asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val)); break; case 2: asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val)); break; default: BUG(); /* Should be validated in krait_pmu_get_event_idx() */ } return val; } static void krait_write_pmresrn(int n, u32 val) { switch (n) { case 0: asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val)); break; case 1: asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val)); break; case 2: asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val)); break; default: BUG(); /* Should be validated in krait_pmu_get_event_idx() */ } } static u32 venum_read_pmresr(void) { u32 val; asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val)); return val; } static void venum_write_pmresr(u32 val) { asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val)); } static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val) { u32 venum_new_val; u32 fp_new_val; BUG_ON(preemptible()); /* CPACR Enable CP10 and CP11 access */ *venum_orig_val = get_copro_access(); venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11); set_copro_access(venum_new_val); /* Enable FPEXC */ *fp_orig_val = fmrx(FPEXC); fp_new_val = *fp_orig_val | FPEXC_EN; fmxr(FPEXC, fp_new_val); } static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val) { BUG_ON(preemptible()); /* Restore FPEXC */ fmxr(FPEXC, fp_orig_val); isb(); /* Restore CPACR */ set_copro_access(venum_orig_val); } static u32 krait_get_pmresrn_event(unsigned int region) { static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0, KRAIT_PMRESR1_GROUP0, KRAIT_PMRESR2_GROUP0 }; return pmresrn_table[region]; } static void krait_evt_setup(int idx, u32 config_base) { u32 val; u32 mask; u32 vval, fval; unsigned int region = EVENT_REGION(config_base); unsigned int group = EVENT_GROUP(config_base); unsigned int code = EVENT_CODE(config_base); unsigned int group_shift; bool venum_event = EVENT_VENUM(config_base); group_shift = group * 8; mask = 0xff << group_shift; /* Configure evtsel for the region and group */ if (venum_event) val = KRAIT_VPMRESR0_GROUP0; else val = krait_get_pmresrn_event(region); val += group; /* Mix in mode-exclusion bits */ val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); armv7_pmnc_write_evtsel(idx, val); if (venum_event) { venum_pre_pmresr(&vval, &fval); val = venum_read_pmresr(); val &= ~mask; val |= code << group_shift; val |= PMRESRn_EN; venum_write_pmresr(val); venum_post_pmresr(vval, fval); } else { val = krait_read_pmresrn(region); val &= ~mask; val |= code << group_shift; val |= PMRESRn_EN; krait_write_pmresrn(region, val); } } static u32 clear_pmresrn_group(u32 val, int group) { u32 mask; int group_shift; group_shift = group * 8; mask = 0xff << group_shift; val &= ~mask; /* Don't clear enable bit if entire region isn't disabled */ if (val & ~PMRESRn_EN) return val |= PMRESRn_EN; return 0; } static void krait_clearpmu(u32 config_base) { u32 val; u32 vval, fval; unsigned int region = EVENT_REGION(config_base); 
unsigned int group = EVENT_GROUP(config_base); bool venum_event = EVENT_VENUM(config_base); if (venum_event) { venum_pre_pmresr(&vval, &fval); val = venum_read_pmresr(); val = clear_pmresrn_group(val, group); venum_write_pmresr(val); venum_post_pmresr(vval, fval); } else { val = krait_read_pmresrn(region); val = clear_pmresrn_group(val, group); krait_write_pmresrn(region, val); } } static void krait_pmu_disable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); /* * Clear pmresr code (if destined for PMNx counters) */ if (hwc->config_base & KRAIT_EVENT_MASK) krait_clearpmu(hwc->config_base); /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_enable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); /* * Set event (if destined for PMNx counters) * We set the event for the cycle counter because we * have the ability to perform event filtering. */ if (hwc->config_base & KRAIT_EVENT_MASK) krait_evt_setup(idx, hwc->config_base); else armv7_pmnc_write_evtsel(idx, hwc->config_base); /* Enable interrupt for this counter */ armv7_pmnc_enable_intens(idx); /* Enable counter */ armv7_pmnc_enable_counter(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void krait_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; u32 idx, nb_cnt = cpu_pmu->num_events; armv7pmu_reset(info); /* Clear all pmresrs */ krait_write_pmresrn(0, 0); krait_write_pmresrn(1, 0); krait_write_pmresrn(2, 0); venum_pre_pmresr(&vval, &fval); venum_write_pmresr(0); venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } } static int krait_event_to_bit(struct perf_event *event, unsigned int region, unsigned int group) { int bit; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); if (hwc->config_base & VENUM_EVENT) bit = KRAIT_VPMRESR0_GROUP0; else bit = krait_get_pmresrn_event(region); bit -= krait_get_pmresrn_event(0); bit += group; /* * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; return bit; } /* * We check for column exclusion constraints here. * Two events cant use the same group within a pmresr register. 
*/ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int idx; int bit = -1; struct hw_perf_event *hwc = &event->hw; unsigned int region = EVENT_REGION(hwc->config_base); unsigned int code = EVENT_CODE(hwc->config_base); unsigned int group = EVENT_GROUP(hwc->config_base); bool venum_event = EVENT_VENUM(hwc->config_base); bool krait_event = EVENT_CPU(hwc->config_base); if (venum_event || krait_event) { /* Ignore invalid events */ if (group > 3 || region > 2) return -EINVAL; if (venum_event && (code & 0xe0)) return -EINVAL; bit = krait_event_to_bit(event, region, group); if (test_and_set_bit(bit, cpuc->used_mask)) return -EAGAIN; } idx = armv7pmu_get_event_idx(cpuc, event); if (idx < 0 && bit >= 0) clear_bit(bit, cpuc->used_mask); return idx; } static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int bit; struct hw_perf_event *hwc = &event->hw; unsigned int region = EVENT_REGION(hwc->config_base); unsigned int group = EVENT_GROUP(hwc->config_base); bool venum_event = EVENT_VENUM(hwc->config_base); bool krait_event = EVENT_CPU(hwc->config_base); armv7pmu_clear_event_idx(cpuc, event); if (venum_event || krait_event) { bit = krait_event_to_bit(event, region, group); clear_bit(bit, cpuc->used_mask); } } static int krait_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_krait"; /* Some early versions of Krait don't support PC write events */ if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node, "qcom,no-pc-write")) cpu_pmu->map_event = krait_map_event_no_branch; else cpu_pmu->map_event = krait_map_event; cpu_pmu->set_event_filter = armv7pmu_set_event_filter; cpu_pmu->reset = krait_pmu_reset; cpu_pmu->enable = krait_pmu_enable_event; cpu_pmu->disable = krait_pmu_disable_event; cpu_pmu->get_event_idx = krait_pmu_get_event_idx; cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx; return armv7_probe_num_events(cpu_pmu); } /* * Scorpion Local Performance Monitor Register (LPMn) * * 31 30 24 16 8 0 * +--------------------------------+ * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0 * +--------------------------------+ * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1 * +--------------------------------+ * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2 * +--------------------------------+ * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3 * +--------------------------------+ * VLPM | EN | CC | CC | CC | CC | N = 2, R = ? * +--------------------------------+ * EN | G=3 | G=2 | G=1 | G=0 * * * Event Encoding: * * hwc->config_base = 0xNRCCG * * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM) * R = region register * CC = class of events the group G is choosing from * G = group or particular event * * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2 * * A region (R) corresponds to a piece of the CPU (execution unit, instruction * unit, etc.) while the event code (CC) corresponds to a particular class of * events (interrupts for example). An event code is broken down into * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for * example). 
*/ static u32 scorpion_read_pmresrn(int n) { u32 val; switch (n) { case 0: asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val)); break; case 1: asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val)); break; case 2: asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val)); break; case 3: asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val)); break; default: BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ } return val; } static void scorpion_write_pmresrn(int n, u32 val) { switch (n) { case 0: asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val)); break; case 1: asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val)); break; case 2: asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val)); break; case 3: asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val)); break; default: BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */ } } static u32 scorpion_get_pmresrn_event(unsigned int region) { static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0, SCORPION_LPM1_GROUP0, SCORPION_LPM2_GROUP0, SCORPION_L2LPM_GROUP0 }; return pmresrn_table[region]; } static void scorpion_evt_setup(int idx, u32 config_base) { u32 val; u32 mask; u32 vval, fval; unsigned int region = EVENT_REGION(config_base); unsigned int group = EVENT_GROUP(config_base); unsigned int code = EVENT_CODE(config_base); unsigned int group_shift; bool venum_event = EVENT_VENUM(config_base); group_shift = group * 8; mask = 0xff << group_shift; /* Configure evtsel for the region and group */ if (venum_event) val = SCORPION_VLPM_GROUP0; else val = scorpion_get_pmresrn_event(region); val += group; /* Mix in mode-exclusion bits */ val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1); armv7_pmnc_write_evtsel(idx, val); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); if (venum_event) { venum_pre_pmresr(&vval, &fval); val = venum_read_pmresr(); val &= ~mask; val |= code << group_shift; val |= PMRESRn_EN; venum_write_pmresr(val); venum_post_pmresr(vval, fval); } else { val = scorpion_read_pmresrn(region); val &= ~mask; val |= code << group_shift; val |= PMRESRn_EN; scorpion_write_pmresrn(region, val); } } static void scorpion_clearpmu(u32 config_base) { u32 val; u32 vval, fval; unsigned int region = EVENT_REGION(config_base); unsigned int group = EVENT_GROUP(config_base); bool venum_event = EVENT_VENUM(config_base); if (venum_event) { venum_pre_pmresr(&vval, &fval); val = venum_read_pmresr(); val = clear_pmresrn_group(val, group); venum_write_pmresr(val); venum_post_pmresr(vval, fval); } else { val = scorpion_read_pmresrn(region); val = clear_pmresrn_group(val, group); scorpion_write_pmresrn(region, val); } } static void scorpion_pmu_disable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events); /* Disable counter and interrupt */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); /* * Clear pmresr code (if destined for PMNx counters) */ if (hwc->config_base & KRAIT_EVENT_MASK) scorpion_clearpmu(hwc->config_base); /* Disable interrupt for this counter */ armv7_pmnc_disable_intens(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_enable_event(struct perf_event *event) { unsigned long flags; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); struct pmu_hw_events *events = 
this_cpu_ptr(cpu_pmu->hw_events); /* * Enable counter and interrupt, and set the counter to count * the event that we're interested in. */ raw_spin_lock_irqsave(&events->pmu_lock, flags); /* Disable counter */ armv7_pmnc_disable_counter(idx); /* * Set event (if destined for PMNx counters) * We don't set the event for the cycle counter because we * don't have the ability to perform event filtering. */ if (hwc->config_base & KRAIT_EVENT_MASK) scorpion_evt_setup(idx, hwc->config_base); else if (idx != ARMV7_IDX_CYCLE_COUNTER) armv7_pmnc_write_evtsel(idx, hwc->config_base); /* Enable interrupt for this counter */ armv7_pmnc_enable_intens(idx); /* Enable counter */ armv7_pmnc_enable_counter(idx); raw_spin_unlock_irqrestore(&events->pmu_lock, flags); } static void scorpion_pmu_reset(void *info) { u32 vval, fval; struct arm_pmu *cpu_pmu = info; u32 idx, nb_cnt = cpu_pmu->num_events; armv7pmu_reset(info); /* Clear all pmresrs */ scorpion_write_pmresrn(0, 0); scorpion_write_pmresrn(1, 0); scorpion_write_pmresrn(2, 0); scorpion_write_pmresrn(3, 0); venum_pre_pmresr(&vval, &fval); venum_write_pmresr(0); venum_post_pmresr(vval, fval); /* Reset PMxEVNCTCR to sane default */ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) { armv7_pmnc_select_counter(idx); asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0)); } } static int scorpion_event_to_bit(struct perf_event *event, unsigned int region, unsigned int group) { int bit; struct hw_perf_event *hwc = &event->hw; struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); if (hwc->config_base & VENUM_EVENT) bit = SCORPION_VLPM_GROUP0; else bit = scorpion_get_pmresrn_event(region); bit -= scorpion_get_pmresrn_event(0); bit += group; /* * Lower bits are reserved for use by the counters (see * armv7pmu_get_event_idx() for more info) */ bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1; return bit; } /* * We check for column exclusion constraints here. * Two events cant use the same group within a pmresr register. 
*/ static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int idx; int bit = -1; struct hw_perf_event *hwc = &event->hw; unsigned int region = EVENT_REGION(hwc->config_base); unsigned int group = EVENT_GROUP(hwc->config_base); bool venum_event = EVENT_VENUM(hwc->config_base); bool scorpion_event = EVENT_CPU(hwc->config_base); if (venum_event || scorpion_event) { /* Ignore invalid events */ if (group > 3 || region > 3) return -EINVAL; bit = scorpion_event_to_bit(event, region, group); if (test_and_set_bit(bit, cpuc->used_mask)) return -EAGAIN; } idx = armv7pmu_get_event_idx(cpuc, event); if (idx < 0 && bit >= 0) clear_bit(bit, cpuc->used_mask); return idx; } static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc, struct perf_event *event) { int bit; struct hw_perf_event *hwc = &event->hw; unsigned int region = EVENT_REGION(hwc->config_base); unsigned int group = EVENT_GROUP(hwc->config_base); bool venum_event = EVENT_VENUM(hwc->config_base); bool scorpion_event = EVENT_CPU(hwc->config_base); armv7pmu_clear_event_idx(cpuc, event); if (venum_event || scorpion_event) { bit = scorpion_event_to_bit(event, region, group); clear_bit(bit, cpuc->used_mask); } } static int scorpion_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_scorpion"; cpu_pmu->map_event = scorpion_map_event; cpu_pmu->reset = scorpion_pmu_reset; cpu_pmu->enable = scorpion_pmu_enable_event; cpu_pmu->disable = scorpion_pmu_disable_event; cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; return armv7_probe_num_events(cpu_pmu); } static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu) { armv7pmu_init(cpu_pmu); cpu_pmu->name = "armv7_scorpion_mp"; cpu_pmu->map_event = scorpion_map_event; cpu_pmu->reset = scorpion_pmu_reset; cpu_pmu->enable = scorpion_pmu_enable_event; cpu_pmu->disable = scorpion_pmu_disable_event; cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx; cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx; return armv7_probe_num_events(cpu_pmu); } static const struct of_device_id armv7_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init}, {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init}, {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init}, {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init}, {.compatible = "qcom,krait-pmu", .data = krait_pmu_init}, {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init}, {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init}, {}, }; static const struct pmu_probe_info armv7_pmu_probe_table[] = { ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init), ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init), { /* sentinel value */ } }; static int armv7_pmu_device_probe(struct platform_device *pdev) { return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, armv7_pmu_probe_table); } static struct platform_driver armv7_pmu_driver = { .driver = { .name = "armv7-pmu", .of_match_table = armv7_pmu_of_device_ids, .suppress_bind_attrs = true, }, .probe = armv7_pmu_device_probe, }; builtin_platform_driver(armv7_pmu_driver); #endif /* CONFIG_CPU_V7 */
linux-master
arch/arm/kernel/perf_event_v7.c
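A note on the file above: its PMRESRn/LPMn comment blocks describe the Qualcomm Krait/Scorpion 0xNRCCG event encoding (prefix N, region R, class code CC, group G). As a quick sanity check of that layout, here is a minimal standalone C sketch — not part of the kernel source, purely illustrative — that mirrors the driver's decoding macros and re-derives the 0x12021 example from the comment:

/*
 * Illustration only -- not part of perf_event_v7.c.  Decodes a Krait/Scorpion
 * 0xNRCCG event value using the same bit layout as the driver's macros.
 */
#include <stdio.h>

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R  */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G  */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */

int main(void)
{
	unsigned int event = 0x12021;	/* the example given in the driver comment */

	printf("event 0x%x: %s, region %u, code 0x%02x, group %u\n",
	       event,
	       EVENT_VENUM(event) ? "Venum VFP" :
	       EVENT_CPU(event) ? "CPU (PMRESRn/LPMn)" : "architected",
	       EVENT_REGION(event), EVENT_CODE(event), EVENT_GROUP(event));
	/* Prints: event 0x12021: CPU (PMRESRn/LPMn), region 2, code 0x02, group 1 */
	return 0;
}

Built as an ordinary userspace program, it reports region 2, code 0x02, group 1 — the same breakdown the comment gives for a Krait CPU event in PMRESR2.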
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/sched/task_stack.h>

#include <asm/perf_regs.h>
#include <asm/ptrace.h>

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
		return 0;

	return regs->uregs[idx];
}

#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
linux-master
arch/arm/kernel/perf_regs.c
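The perf_reg_validate() routine above rejects any perf_event_attr.sample_regs_user mask that is empty or that sets bits at or above PERF_REG_ARM_MAX. A small standalone sketch of that check follows; it assumes PERF_REG_ARM_MAX is 16 (r0 through pc), which matches asm/perf_regs.h but is restated here only for illustration:

/*
 * Illustration only.  Shows which sample_regs_user masks the kernel's
 * perf_reg_validate() accepts.  PERF_REG_ARM_MAX = 16 is an assumption
 * made for this sketch (r0..r15/pc).
 */
#include <stdio.h>
#include <stdint.h>

#define PERF_REG_ARM_MAX	16	/* assumption, see note above */
#define REG_RESERVED		(~((1ULL << PERF_REG_ARM_MAX) - 1))

static int validate(uint64_t mask)
{
	if (!mask || (mask & REG_RESERVED))
		return -1;		/* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	printf("%d\n", validate(0x0000ffff));	/*  0: all of r0..pc          */
	printf("%d\n", validate(1ULL << 15));	/*  0: just the pc            */
	printf("%d\n", validate(0));		/* -1: empty mask rejected    */
	printf("%d\n", validate(1ULL << 16));	/* -1: reserved bit rejected  */
	return 0;
}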
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp_scu.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 */
#include <linux/init.h>
#include <linux/io.h>

#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>

#define SCU_CTRL		0x00
#define SCU_ENABLE		(1 << 0)
#define SCU_STANDBY_ENABLE	(1 << 5)
#define SCU_CONFIG		0x04
#define SCU_CPU_STATUS		0x08
#define SCU_CPU_STATUS_MASK	GENMASK(1, 0)
#define SCU_INVALIDATE		0x0c
#define SCU_FPGA_REVISION	0x10

#ifdef CONFIG_SMP
/*
 * Get the number of CPU cores from the SCU configuration
 */
unsigned int __init scu_get_core_count(void __iomem *scu_base)
{
	unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
	return (ncores & 0x03) + 1;
}

/*
 * Enable the SCU
 */
void scu_enable(void __iomem *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/* Cortex-A9 only */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
		scu_ctrl = readl_relaxed(scu_base + 0x30);
		if (!(scu_ctrl & 1))
			writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
	}
#endif

	scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & SCU_ENABLE)
		return;

	scu_ctrl |= SCU_ENABLE;

	/* Cortex-A9 earlier than r2p0 has no standby bit in SCU */
	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090 &&
	    (read_cpuid_id() & 0x00f0000f) >= 0x00200000)
		scu_ctrl |= SCU_STANDBY_ENABLE;

	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	flush_cache_all();
}
#endif

static int scu_set_power_mode_internal(void __iomem *scu_base,
				       unsigned int logical_cpu,
				       unsigned int mode)
{
	unsigned int val;
	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);

	if (mode > 3 || mode == 1 || cpu > 3)
		return -EINVAL;

	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
	val &= ~SCU_CPU_STATUS_MASK;
	val |= mode;
	writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);

	return 0;
}

/*
 * Set the executing CPUs power mode as defined.  This will be in
 * preparation for it executing a WFI instruction.
 *
 * This function must be called with preemption disabled, and as it
 * has the side effect of disabling coherency, caches must have been
 * flushed.  Interrupts must also have been disabled.
 */
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
	return scu_set_power_mode_internal(scu_base, smp_processor_id(), mode);
}

/*
 * Set the given (logical) CPU's power mode to SCU_PM_NORMAL.
 */
int scu_cpu_power_enable(void __iomem *scu_base, unsigned int cpu)
{
	return scu_set_power_mode_internal(scu_base, cpu, SCU_PM_NORMAL);
}

int scu_get_cpu_power_mode(void __iomem *scu_base, unsigned int logical_cpu)
{
	unsigned int val;
	int cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(logical_cpu), 0);

	if (cpu > 3)
		return -EINVAL;

	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu);
	val &= SCU_CPU_STATUS_MASK;

	return val;
}
linux-master
arch/arm/kernel/smp_scu.c
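For smp_scu.c above, the core count is simply "(SCU_CONFIG & 3) + 1". The sketch below applies that same extraction to a hypothetical register value; the sample value and the sketch_ helper are made up for illustration, not reads from real hardware.

#include <stdint.h>
#include <stdio.h>

/* Same field extraction as scu_get_core_count(): bits [1:0] hold ncores - 1. */
static unsigned int sketch_core_count(uint32_t scu_config)
{
	return (scu_config & 0x03) + 1;
}

int main(void)
{
	uint32_t sample = 0x00000533;	/* hypothetical SCU_CONFIG value */

	/* 0x33 & 3 == 3, so this reports a 4-core cluster. */
	printf("cores: %u\n", sketch_core_count(sample));
	return 0;
}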
// SPDX-License-Identifier: GPL-2.0-only /* * arch/arm/kernel/sys_oabi-compat.c * * Compatibility wrappers for syscalls that are used from * old ABI user space binaries with an EABI kernel. * * Author: Nicolas Pitre * Created: Oct 7, 2005 * Copyright: MontaVista Software, Inc. */ #include <asm/syscalls.h> /* * The legacy ABI and the new ARM EABI have different rules making some * syscalls incompatible especially with structure arguments. * Most notably, Eabi says 64-bit members should be 64-bit aligned instead of * simply word aligned. EABI also pads structures to the size of the largest * member it contains instead of the invariant 32-bit. * * The following syscalls are affected: * * sys_stat64: * sys_lstat64: * sys_fstat64: * sys_fstatat64: * * struct stat64 has different sizes and some members are shifted * Compatibility wrappers are needed for them and provided below. * * sys_fcntl64: * * struct flock64 has different sizes and some members are shifted * A compatibility wrapper is needed and provided below. * * sys_statfs64: * sys_fstatfs64: * * struct statfs64 has extra padding with EABI growing its size from * 84 to 88. This struct is now __attribute__((packed,aligned(4))) * with a small assembly wrapper to force the sz argument to 84 if it is 88 * to avoid copying the extra padding over user space unexpecting it. * * sys_newuname: * * struct new_utsname has no padding with EABI. No problem there. * * sys_epoll_ctl: * sys_epoll_wait: * * struct epoll_event has its second member shifted also affecting the * structure size. Compatibility wrappers are needed and provided below. * * sys_ipc: * sys_semop: * sys_semtimedop: * * struct sembuf loses its padding with EABI. Since arrays of them are * used they have to be copyed to remove the padding. Compatibility wrappers * provided below. * * sys_bind: * sys_connect: * sys_sendmsg: * sys_sendto: * sys_socketcall: * * struct sockaddr_un loses its padding with EABI. Since the size of the * structure is used as a validation test in unix_mkname(), we need to * change the length argument to 110 whenever it is 112. Compatibility * wrappers provided below. 
*/ #include <linux/syscalls.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/filelock.h> #include <linux/cred.h> #include <linux/fcntl.h> #include <linux/eventpoll.h> #include <linux/sem.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/ipc.h> #include <linux/ipc_namespace.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <asm/syscall.h> struct oldabi_stat64 { unsigned long long st_dev; unsigned int __pad1; unsigned long __st_ino; unsigned int st_mode; unsigned int st_nlink; unsigned long st_uid; unsigned long st_gid; unsigned long long st_rdev; unsigned int __pad2; long long st_size; unsigned long st_blksize; unsigned long long st_blocks; unsigned long st_atime; unsigned long st_atime_nsec; unsigned long st_mtime; unsigned long st_mtime_nsec; unsigned long st_ctime; unsigned long st_ctime_nsec; unsigned long long st_ino; } __attribute__ ((packed,aligned(4))); static long cp_oldabi_stat64(struct kstat *stat, struct oldabi_stat64 __user *statbuf) { struct oldabi_stat64 tmp; tmp.st_dev = huge_encode_dev(stat->dev); tmp.__pad1 = 0; tmp.__st_ino = stat->ino; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.st_rdev = huge_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_blocks = stat->blocks; tmp.__pad2 = 0; tmp.st_blksize = stat->blksize; tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_ino = stat->ino; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } asmlinkage long sys_oabi_stat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_lstat64(const char __user * filename, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_fstat64(unsigned long fd, struct oldabi_stat64 __user * statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_oldabi_stat64(&stat, statbuf); return error; } asmlinkage long sys_oabi_fstatat64(int dfd, const char __user *filename, struct oldabi_stat64 __user *statbuf, int flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_oldabi_stat64(&stat, statbuf); } struct oabi_flock64 { short l_type; short l_whence; loff_t l_start; loff_t l_len; pid_t l_pid; } __attribute__ ((packed,aligned(4))); static int get_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) { struct oabi_flock64 user; if (copy_from_user(&user, (struct oabi_flock64 __user *)arg, sizeof(user))) return -EFAULT; kernel->l_type = user.l_type; kernel->l_whence = user.l_whence; kernel->l_start = user.l_start; kernel->l_len = user.l_len; kernel->l_pid = user.l_pid; return 0; } static int put_oabi_flock(struct flock64 *kernel, struct oabi_flock64 __user *arg) { struct oabi_flock64 user; user.l_type = kernel->l_type; user.l_whence = kernel->l_whence; user.l_start = kernel->l_start; user.l_len = kernel->l_len; user.l_pid = kernel->l_pid; if (copy_to_user((struct oabi_flock64 __user *)arg, &user, sizeof(user))) return 
-EFAULT; return 0; } asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct fd f = fdget_raw(fd); struct flock64 flock; long err = -EBADF; if (!f.file) goto out; switch (cmd) { case F_GETLK64: case F_OFD_GETLK: err = security_file_fcntl(f.file, cmd, arg); if (err) break; err = get_oabi_flock(&flock, argp); if (err) break; err = fcntl_getlk64(f.file, cmd, &flock); if (!err) err = put_oabi_flock(&flock, argp); break; case F_SETLK64: case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: err = security_file_fcntl(f.file, cmd, arg); if (err) break; err = get_oabi_flock(&flock, argp); if (err) break; err = fcntl_setlk64(fd, f.file, cmd, &flock); break; default: err = sys_fcntl64(fd, cmd, arg); break; } fdput(f); out: return err; } struct oabi_epoll_event { __poll_t events; __u64 data; } __attribute__ ((packed,aligned(4))); #ifdef CONFIG_EPOLL asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { struct oabi_epoll_event user; struct epoll_event kernel; if (ep_op_has_event(op) && copy_from_user(&user, event, sizeof(user))) return -EFAULT; kernel.events = user.events; kernel.data = user.data; return do_epoll_ctl(epfd, op, fd, &kernel, false); } #else asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd, struct oabi_epoll_event __user *event) { return -EINVAL; } #endif struct epoll_event __user * epoll_put_uevent(__poll_t revents, __u64 data, struct epoll_event __user *uevent) { if (in_oabi_syscall()) { struct oabi_epoll_event __user *oevent = (void __user *)uevent; if (__put_user(revents, &oevent->events) || __put_user(data, &oevent->data)) return NULL; return (void __user *)(oevent+1); } if (__put_user(revents, &uevent->events) || __put_user(data, &uevent->data)) return NULL; return uevent+1; } struct oabi_sembuf { unsigned short sem_num; short sem_op; short sem_flg; unsigned short __pad; }; #define sc_semopm sem_ctls[2] #ifdef CONFIG_SYSVIPC asmlinkage long sys_oabi_semtimedop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops, const struct old_timespec32 __user *timeout) { struct ipc_namespace *ns; struct sembuf *sops; long err; int i; ns = current->nsproxy->ipc_ns; if (nsops > ns->sc_semopm) return -E2BIG; if (nsops < 1 || nsops > SEMOPM) return -EINVAL; sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL); if (!sops) return -ENOMEM; err = 0; for (i = 0; i < nsops; i++) { struct oabi_sembuf osb; err |= copy_from_user(&osb, tsops, sizeof(osb)); sops[i].sem_num = osb.sem_num; sops[i].sem_op = osb.sem_op; sops[i].sem_flg = osb.sem_flg; tsops++; } if (err) { err = -EFAULT; goto out; } if (timeout) { struct timespec64 ts; err = get_old_timespec32(&ts, timeout); if (err) goto out; err = __do_semtimedop(semid, sops, nsops, &ts, ns); goto out; } err = __do_semtimedop(semid, sops, nsops, NULL, ns); out: kvfree(sops); return err; } asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops) { return sys_oabi_semtimedop(semid, tsops, nsops, NULL); } asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, void __user *ptr, long fifth) { switch (call & 0xffff) { case SEMOP: return sys_oabi_semtimedop(first, (struct oabi_sembuf __user *)ptr, second, NULL); case SEMTIMEDOP: return sys_oabi_semtimedop(first, (struct oabi_sembuf __user *)ptr, second, (const struct old_timespec32 __user *)fifth); default: return sys_ipc(call, first, second, third, ptr, fifth); } } #else asmlinkage long sys_oabi_semtimedop(int semid, 
struct oabi_sembuf __user *tsops, unsigned nsops, const struct old_timespec32 __user *timeout) { return -ENOSYS; } asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops) { return -ENOSYS; } asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, void __user *ptr, long fifth) { return -ENOSYS; } #endif asmlinkage long sys_oabi_bind(int fd, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_bind(fd, addr, addrlen); } asmlinkage long sys_oabi_connect(int fd, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_connect(fd, addr, addrlen); } asmlinkage long sys_oabi_sendto(int fd, void __user *buff, size_t len, unsigned flags, struct sockaddr __user *addr, int addrlen) { sa_family_t sa_family; if (addrlen == 112 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) addrlen = 110; return sys_sendto(fd, buff, len, flags, addr, addrlen); } asmlinkage long sys_oabi_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { struct sockaddr __user *addr; int msg_namelen; sa_family_t sa_family; if (msg && get_user(msg_namelen, &msg->msg_namelen) == 0 && msg_namelen == 112 && get_user(addr, &msg->msg_name) == 0 && get_user(sa_family, &addr->sa_family) == 0 && sa_family == AF_UNIX) { /* * HACK ALERT: there is a limit to how much backward bending * we should do for what is actually a transitional * compatibility layer. This already has known flaws with * a few ioctls that we don't intend to fix. Therefore * consider this blatent hack as another one... and take care * to run for cover. In most cases it will "just work fine". * If it doesn't, well, tough. */ put_user(110, &msg->msg_namelen); } return sys_sendmsg(fd, msg, flags); } asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args) { unsigned long r = -EFAULT, a[6]; switch (call) { case SYS_BIND: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_bind(a[0], (struct sockaddr __user *)a[1], a[2]); break; case SYS_CONNECT: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_connect(a[0], (struct sockaddr __user *)a[1], a[2]); break; case SYS_SENDTO: if (copy_from_user(a, args, 6 * sizeof(long)) == 0) r = sys_oabi_sendto(a[0], (void __user *)a[1], a[2], a[3], (struct sockaddr __user *)a[4], a[5]); break; case SYS_SENDMSG: if (copy_from_user(a, args, 3 * sizeof(long)) == 0) r = sys_oabi_sendmsg(a[0], (struct user_msghdr __user *)a[1], a[2]); break; default: r = sys_socketcall(call, args); } return r; }
linux-master
arch/arm/kernel/sys_oabi-compat.c
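The layout mismatch that motivates the wrappers in sys_oabi-compat.c can be reproduced entirely in userspace: under EABI a 64-bit member forces 8-byte alignment and padding, while the packed,aligned(4) attribute used by the compat code recovers the old word-aligned layout. A small sketch (the struct names here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* EABI-style layout: the u64 member is 8-byte aligned, so padding appears after 'events'. */
struct eabi_event {
	uint32_t events;
	uint64_t data;
};

/* OABI-style layout forced with the same attribute the compat code uses. */
struct oabi_event {
	uint32_t events;
	uint64_t data;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("eabi: size=%zu offsetof(data)=%zu\n",
	       sizeof(struct eabi_event), offsetof(struct eabi_event, data));
	printf("oabi: size=%zu offsetof(data)=%zu\n",
	       sizeof(struct oabi_event), offsetof(struct oabi_event, data));
	return 0;
}

On a typical EABI build this prints 16/8 for the first struct and 12/4 for the second, which is exactly the epoll_event shift described in the file's header comment.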
// SPDX-License-Identifier: GPL-2.0-only
/*
 * XIP kernel .data segment decompressor
 *
 * Created by: Nicolas Pitre, August 2017
 * Copyright: (C) 2017 Linaro Limited
 */

#include <linux/init.h>
#include <linux/zutil.h>
#include "head.h"

/* for struct inflate_state */
#include "../../../lib/zlib_inflate/inftrees.h"
#include "../../../lib/zlib_inflate/inflate.h"
#include "../../../lib/zlib_inflate/infutil.h"

/*
 * This code is called very early during the boot process to decompress
 * the .data segment stored compressed in ROM. Therefore none of the global
 * variables are valid yet, hence no kernel services such as memory
 * allocation are available. Everything must be allocated on the stack and
 * we must avoid any global data access. We use a temporary stack located
 * in the .bss area. The linker script makes sure the .bss is big enough
 * to hold our stack frame plus some room for called functions.
 *
 * We mimic the code in lib/decompress_inflate.c to use the smallest work
 * area possible. And because everything is statically allocated on the
 * stack, there is no need to clean up before returning.
 */

int __init __inflate_kernel_data(void)
{
	struct z_stream_s stream, *strm = &stream;
	struct inflate_state state;
	char *in = __data_loc;
	int rc;

	/* Check and skip gzip header (assume no filename) */
	if (in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08 || in[3] & ~3)
		return -1;
	in += 10;

	strm->workspace = &state;
	strm->next_in = in;
	strm->avail_in = _edata_loc - __data_loc;	/* upper bound */
	strm->next_out = _sdata;
	strm->avail_out = _edata_loc - __data_loc;
	zlib_inflateInit2(strm, -MAX_WBITS);
	WS(strm)->inflate_state.wsize = 0;
	WS(strm)->inflate_state.window = NULL;
	rc = zlib_inflate(strm, Z_FINISH);
	if (rc == Z_OK || rc == Z_STREAM_END)
		rc = strm->avail_out;	/* should be 0 */
	return rc;
}
linux-master
arch/arm/kernel/head-inflate-data.c
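The gzip header test in head-inflate-data.c can be exercised on its own: the stream must start with the magic bytes 0x1f 0x8b, method 0x08 (deflate), and carry no FEXTRA/FNAME/FCOMMENT flags before the fixed 10-byte header is skipped. A userspace sketch of that check follows; the sample buffer is made up.

#include <stdio.h>

/* Mirrors the header check in __inflate_kernel_data(): magic, deflate method,
 * flags limited to FTEXT/FHCRC, then a fixed 10-byte header to skip. */
static const unsigned char *sketch_skip_gzip_header(const unsigned char *in)
{
	if (in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08 || (in[3] & ~3))
		return NULL;
	return in + 10;
}

int main(void)
{
	/* Made-up minimal header: magic, deflate, no flags, zeroed mtime/XFL/OS. */
	static const unsigned char hdr[10] = { 0x1f, 0x8b, 0x08, 0x00 };

	printf("payload starts at offset %ld\n",
	       (long)(sketch_skip_gzip_header(hdr) - hdr));
	return 0;
}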
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include <linux/sched.h> #include <linux/personality.h> #include <linux/binfmts.h> #include <linux/elf.h> #include <linux/elf-fdpic.h> #include <asm/system_info.h> int elf_check_arch(const struct elf32_hdr *x) { unsigned int eflags; /* Make sure it's an ARM executable */ if (x->e_machine != EM_ARM) return 0; /* Make sure the entry address is reasonable */ if (x->e_entry & 1) { if (!(elf_hwcap & HWCAP_THUMB)) return 0; } else if (x->e_entry & 3) return 0; eflags = x->e_flags; if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { unsigned int flt_fmt; /* APCS26 is only allowed if the CPU supports it */ if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT)) return 0; flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT); /* VFP requires the supporting code */ if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP)) return 0; } return 1; } EXPORT_SYMBOL(elf_check_arch); void elf_set_personality(const struct elf32_hdr *x) { unsigned int eflags = x->e_flags; unsigned int personality = current->personality & ~PER_MASK; /* * We only support Linux ELF executables, so always set the * personality to LINUX. */ personality |= PER_LINUX; /* * APCS-26 is only valid for OABI executables */ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN && (eflags & EF_ARM_APCS_26)) personality &= ~ADDR_LIMIT_32BIT; else personality |= ADDR_LIMIT_32BIT; set_personality(personality); /* * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0 * and CP1, we only enable access to the iWMMXt coprocessor if the * binary is EABI or softfloat (and thus, guaranteed not to use * FPA instructions.) */ if (elf_hwcap & HWCAP_IWMMXT && eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) { set_thread_flag(TIF_USING_IWMMXT); } else { clear_thread_flag(TIF_USING_IWMMXT); } } EXPORT_SYMBOL(elf_set_personality); /* * An executable for which elf_read_implies_exec() returns TRUE will * have the READ_IMPLIES_EXEC personality flag set automatically. * * The decision process for determining the results are: * *              CPU: | lacks NX*  | has NX | * ELF:              |            |           | * ---------------------|------------|------------| * missing PT_GNU_STACK | exec-all   | exec-all  | * PT_GNU_STACK == RWX  | exec-all   | exec-stack | * PT_GNU_STACK == RW   | exec-all  | exec-none | * * exec-all : all PROT_READ user mappings are executable, except when * backed by files on a noexec-filesystem. * exec-none : only PROT_EXEC user mappings are executable. * exec-stack: only the stack and PROT_EXEC user mappings are executable. 
* * *this column has no architectural effect: NX markings are ignored by * hardware, but may have behavioral effects when "wants X" collides with * "cannot be X" constraints in memory permission flags, as in * https://lkml.kernel.org/r/[email protected] * */ int arm_elf_read_implies_exec(int executable_stack) { if (executable_stack == EXSTACK_DEFAULT) return 1; if (cpu_architecture() < CPU_ARCH_ARMv6) return 1; return 0; } EXPORT_SYMBOL(arm_elf_read_implies_exec); #if defined(CONFIG_MMU) && defined(CONFIG_BINFMT_ELF_FDPIC) void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, struct elf_fdpic_params *interp_params, unsigned long *start_stack, unsigned long *start_brk) { elf_set_personality(&exec_params->hdr); exec_params->load_addr = 0x8000; interp_params->load_addr = ELF_ET_DYN_BASE; *start_stack = TASK_SIZE - SZ_16M; if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_INDEPENDENT) { exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT; exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP; } } #endif
linux-master
arch/arm/kernel/elf.c
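The entry-point rule in elf_check_arch() above amounts to: an odd e_entry selects Thumb (allowed only when the CPU advertises Thumb support), and an even entry must be 4-byte aligned. The standalone sketch below reimplements just that rule; SKETCH_HWCAP_THUMB is a stand-in bit, not the real HWCAP value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_HWCAP_THUMB (1u << 2)	/* stand-in capability bit for the demo */

/* Mirrors the e_entry check: bit 0 selects Thumb, otherwise require 4-byte alignment. */
static bool sketch_entry_ok(uint32_t e_entry, uint32_t hwcap)
{
	if (e_entry & 1)
		return hwcap & SKETCH_HWCAP_THUMB;
	return (e_entry & 3) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       sketch_entry_ok(0x8000, 0),			/* 1: aligned ARM entry */
	       sketch_entry_ok(0x8001, SKETCH_HWCAP_THUMB),	/* 1: Thumb entry, Thumb capable */
	       sketch_entry_ok(0x8002, 0));			/* 0: misaligned ARM entry */
	return 0;
}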
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/atags_compat.c * * Copyright (C) 2001 Russell King * * We keep the old params compatibility cruft in one place (here) * so we don't end up with lots of mess around other places. * * NOTE: * The old struct param_struct is deprecated, but it will be kept in * the kernel for 5 years from now (2001). This will allow boot loaders * to convert to the new struct tag way. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/page.h> #include <asm/mach/arch.h> #include "atags.h" /* * Usage: * - do not go blindly adding fields, add them at the end * - when adding fields, don't rely on the address until * a patch from me has been released * - unused fields should be zero (for future expansion) * - this structure is relatively short-lived - only * guaranteed to contain useful data in setup_arch() * * This is the old deprecated way to pass parameters to the kernel */ struct param_struct { union { struct { unsigned long page_size; /* 0 */ unsigned long nr_pages; /* 4 */ unsigned long ramdisk_size; /* 8 */ unsigned long flags; /* 12 */ #define FLAG_READONLY 1 #define FLAG_RDLOAD 4 #define FLAG_RDPROMPT 8 unsigned long rootdev; /* 16 */ unsigned long video_num_cols; /* 20 */ unsigned long video_num_rows; /* 24 */ unsigned long video_x; /* 28 */ unsigned long video_y; /* 32 */ unsigned long memc_control_reg; /* 36 */ unsigned char sounddefault; /* 40 */ unsigned char adfsdrives; /* 41 */ unsigned char bytes_per_char_h; /* 42 */ unsigned char bytes_per_char_v; /* 43 */ unsigned long pages_in_bank[4]; /* 44 */ unsigned long pages_in_vram; /* 60 */ unsigned long initrd_start; /* 64 */ unsigned long initrd_size; /* 68 */ unsigned long rd_start; /* 72 */ unsigned long system_rev; /* 76 */ unsigned long system_serial_low; /* 80 */ unsigned long system_serial_high; /* 84 */ unsigned long mem_fclk_21285; /* 88 */ } s; char unused[256]; } u1; union { char paths[8][128]; struct { unsigned long magic; char n[1024 - sizeof(unsigned long)]; } s; } u2; char commandline[COMMAND_LINE_SIZE]; }; static struct tag * __init memtag(struct tag *tag, unsigned long start, unsigned long size) { tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = size; tag->u.mem.start = start; return tag; } static void __init build_tag_list(struct param_struct *params, void *taglist) { struct tag *tag = taglist; if (params->u1.s.page_size != PAGE_SIZE) { pr_warn("Warning: bad configuration page, trying to continue\n"); return; } printk(KERN_DEBUG "Converting old-style param struct to taglist\n"); #ifdef CONFIG_ARCH_NETWINDER if (params->u1.s.nr_pages != 0x02000 && params->u1.s.nr_pages != 0x04000 && params->u1.s.nr_pages != 0x08000 && params->u1.s.nr_pages != 0x10000) { pr_warn("Warning: bad NeTTrom parameters detected, using defaults\n"); params->u1.s.nr_pages = 0x1000; /* 16MB */ params->u1.s.ramdisk_size = 0; params->u1.s.flags = FLAG_READONLY; params->u1.s.initrd_start = 0; params->u1.s.initrd_size = 0; params->u1.s.rd_start = 0; } #endif tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = params->u1.s.flags & FLAG_READONLY; tag->u.core.pagesize = params->u1.s.page_size; tag->u.core.rootdev = params->u1.s.rootdev; tag = tag_next(tag); tag->hdr.tag = ATAG_RAMDISK; tag->hdr.size = tag_size(tag_ramdisk); tag->u.ramdisk.flags = (params->u1.s.flags & FLAG_RDLOAD ? 
1 : 0) | (params->u1.s.flags & FLAG_RDPROMPT ? 2 : 0); tag->u.ramdisk.size = params->u1.s.ramdisk_size; tag->u.ramdisk.start = params->u1.s.rd_start; tag = tag_next(tag); tag->hdr.tag = ATAG_INITRD; tag->hdr.size = tag_size(tag_initrd); tag->u.initrd.start = params->u1.s.initrd_start; tag->u.initrd.size = params->u1.s.initrd_size; tag = tag_next(tag); tag->hdr.tag = ATAG_SERIAL; tag->hdr.size = tag_size(tag_serialnr); tag->u.serialnr.low = params->u1.s.system_serial_low; tag->u.serialnr.high = params->u1.s.system_serial_high; tag = tag_next(tag); tag->hdr.tag = ATAG_REVISION; tag->hdr.size = tag_size(tag_revision); tag->u.revision.rev = params->u1.s.system_rev; #ifdef CONFIG_ARCH_ACORN if (machine_is_riscpc()) { int i; for (i = 0; i < 4; i++) tag = memtag(tag, PHYS_OFFSET + (i << 26), params->u1.s.pages_in_bank[i] * PAGE_SIZE); } else #endif tag = memtag(tag, PHYS_OFFSET, params->u1.s.nr_pages * PAGE_SIZE); #ifdef CONFIG_FOOTBRIDGE if (params->u1.s.mem_fclk_21285) { tag = tag_next(tag); tag->hdr.tag = ATAG_MEMCLK; tag->hdr.size = tag_size(tag_memclk); tag->u.memclk.fmemclk = params->u1.s.mem_fclk_21285; } #endif #ifdef CONFIG_ARCH_EBSA285 if (machine_is_ebsa285()) { tag = tag_next(tag); tag->hdr.tag = ATAG_VIDEOTEXT; tag->hdr.size = tag_size(tag_videotext); tag->u.videotext.x = params->u1.s.video_x; tag->u.videotext.y = params->u1.s.video_y; tag->u.videotext.video_page = 0; tag->u.videotext.video_mode = 0; tag->u.videotext.video_cols = params->u1.s.video_num_cols; tag->u.videotext.video_ega_bx = 0; tag->u.videotext.video_lines = params->u1.s.video_num_rows; tag->u.videotext.video_isvga = 1; tag->u.videotext.video_points = 8; } #endif #ifdef CONFIG_ARCH_ACORN tag = tag_next(tag); tag->hdr.tag = ATAG_ACORN; tag->hdr.size = tag_size(tag_acorn); tag->u.acorn.memc_control_reg = params->u1.s.memc_control_reg; tag->u.acorn.vram_pages = params->u1.s.pages_in_vram; tag->u.acorn.sounddefault = params->u1.s.sounddefault; tag->u.acorn.adfsdrives = params->u1.s.adfsdrives; #endif tag = tag_next(tag); tag->hdr.tag = ATAG_CMDLINE; tag->hdr.size = (strlen(params->commandline) + 3 + sizeof(struct tag_header)) >> 2; strcpy(tag->u.cmdline.cmdline, params->commandline); tag = tag_next(tag); tag->hdr.tag = ATAG_NONE; tag->hdr.size = 0; memmove(params, taglist, ((int)tag) - ((int)taglist) + sizeof(struct tag_header)); } void __init convert_to_tag_list(struct tag *tags) { struct param_struct *params = (struct param_struct *)tags; build_tag_list(params, &params->u2); }
linux-master
arch/arm/kernel/atags_compat.c
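In atags_compat.c, tag_next() steps by hdr.size, which is counted in 32-bit words, and the ATAG_CMDLINE size is computed as "(strlen + 3 + sizeof(struct tag_header)) >> 2". The sketch below reproduces only that size arithmetic, assuming an 8-byte tag header (two 32-bit words); both the constant and the sample command line are assumptions for the demo.

#include <stdio.h>
#include <string.h>

/* Assumed layout: an ATAG header is two 32-bit words (size, tag) = 8 bytes. */
#define SKETCH_TAG_HDR_BYTES 8

/* Same rounding as the ATAG_CMDLINE setup: result is in 32-bit words and
 * covers the header plus the NUL-terminated command line. */
static unsigned int sketch_cmdline_tag_words(const char *cmdline)
{
	return (strlen(cmdline) + 3 + SKETCH_TAG_HDR_BYTES) >> 2;
}

int main(void)
{
	const char *cmdline = "console=ttyS0 root=/dev/ram";	/* 27 characters */

	/* (27 + 3 + 8) >> 2 = 9 words, i.e. 36 bytes: 8-byte header + string + NUL. */
	printf("tag size: %u words\n", sketch_cmdline_tag_words(cmdline));
	return 0;
}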
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2013 Citrix Systems
 *
 * Author: Stefano Stabellini <[email protected]>
 */

#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
linux-master
arch/arm/kernel/paravirt.c
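paravirt.c above only provides a default steal-clock implementation that a hypervisor backend can replace through the static call. Stripped of the kernel's static_call machinery, the pattern is a replaceable function pointer with a zero-returning native default; the userspace sketch below illustrates the idea, and hyp_steal_clock plus its return values are entirely made up.

#include <stdint.h>
#include <stdio.h>

/* Default backend, like native_steal_clock(): no hypervisor, no stolen time. */
static uint64_t sketch_native_steal_clock(int cpu)
{
	(void)cpu;
	return 0;
}

/* Stand-in for DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock). */
static uint64_t (*sketch_pv_steal_clock)(int cpu) = sketch_native_steal_clock;

/* Hypothetical hypervisor backend that guest setup code would install. */
static uint64_t sketch_hyp_steal_clock(int cpu)
{
	return 1000ULL * (unsigned int)cpu;	/* made-up stolen time */
}

int main(void)
{
	printf("native: %llu\n", (unsigned long long)sketch_pv_steal_clock(2));
	sketch_pv_steal_clock = sketch_hyp_steal_clock;	/* what a guest init path would do */
	printf("hyp:    %llu\n", (unsigned long long)sketch_pv_steal_clock(2));
	return 0;
}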
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/arm/kernel/smp.c * * Copyright (C) 2002 ARM Limited, All Rights Reserved. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/sched/mm.h> #include <linux/sched/hotplug.h> #include <linux/sched/task_stack.h> #include <linux/interrupt.h> #include <linux/cache.h> #include <linux/profile.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/cpu.h> #include <linux/seq_file.h> #include <linux/irq.h> #include <linux/nmi.h> #include <linux/percpu.h> #include <linux/clockchips.h> #include <linux/completion.h> #include <linux/cpufreq.h> #include <linux/irq_work.h> #include <linux/kernel_stat.h> #include <linux/atomic.h> #include <asm/bugs.h> #include <asm/smp.h> #include <asm/cacheflush.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/exception.h> #include <asm/idmap.h> #include <asm/topology.h> #include <asm/mmu_context.h> #include <asm/procinfo.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> #include <asm/smp_plat.h> #include <asm/virt.h> #include <asm/mach/arch.h> #include <asm/mpu.h> #include <trace/events/ipi.h> /* * as from 2.5, kernels no longer have an init_tasks structure * so we need some other way of telling a new secondary core * where to place its SVC stack */ struct secondary_data secondary_data; enum ipi_msg_type { IPI_WAKEUP, IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_IRQ_WORK, IPI_COMPLETION, NR_IPI, /* * CPU_BACKTRACE is special and not included in NR_IPI * or tracable with trace_ipi_* */ IPI_CPU_BACKTRACE = NR_IPI, /* * SGI8-15 can be reserved by secure firmware, and thus may * not be usable by the kernel. Please keep the above limited * to at most 8 entries. */ MAX_IPI }; static int ipi_irq_base __read_mostly; static int nr_ipi __read_mostly = NR_IPI; static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly; static void ipi_setup(int cpu); static DECLARE_COMPLETION(cpu_running); static struct smp_operations smp_ops __ro_after_init; void __init smp_set_ops(const struct smp_operations *ops) { if (ops) smp_ops = *ops; }; static unsigned long get_arch_pgd(pgd_t *pgd) { #ifdef CONFIG_ARM_LPAE return __phys_to_pfn(virt_to_phys(pgd)); #else return virt_to_phys(pgd); #endif } #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) static int secondary_biglittle_prepare(unsigned int cpu) { if (!cpu_vtable[cpu]) cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); return cpu_vtable[cpu] ? 0 : -ENOMEM; } static void secondary_biglittle_init(void) { init_proc_vtable(lookup_processor(read_cpuid_id())->proc); } #else static int secondary_biglittle_prepare(unsigned int cpu) { return 0; } static void secondary_biglittle_init(void) { } #endif int __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret; if (!smp_ops.smp_boot_secondary) return -ENOSYS; ret = secondary_biglittle_prepare(cpu); if (ret) return ret; /* * We need to tell the secondary core where to find * its stack and the page tables. */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; #ifdef CONFIG_ARM_MPU secondary_data.mpu_rgn_info = &mpu_rgn_info; #endif #ifdef CONFIG_MMU secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir); #endif secondary_data.task = idle; sync_cache_w(&secondary_data); /* * Now bring the CPU into our world. 
*/ ret = smp_ops.smp_boot_secondary(cpu, idle); if (ret == 0) { /* * CPU was successfully started, wait for it * to come online or time out. */ wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000)); if (!cpu_online(cpu)) { pr_crit("CPU%u: failed to come online\n", cpu); ret = -EIO; } } else { pr_err("CPU%u: failed to boot: %d\n", cpu, ret); } memset(&secondary_data, 0, sizeof(secondary_data)); return ret; } /* platform specific SMP operations */ void __init smp_init_cpus(void) { if (smp_ops.smp_init_cpus) smp_ops.smp_init_cpus(); } int platform_can_secondary_boot(void) { return !!smp_ops.smp_boot_secondary; } int platform_can_cpu_hotplug(void) { #ifdef CONFIG_HOTPLUG_CPU if (smp_ops.cpu_kill) return 1; #endif return 0; } #ifdef CONFIG_HOTPLUG_CPU static int platform_cpu_kill(unsigned int cpu) { if (smp_ops.cpu_kill) return smp_ops.cpu_kill(cpu); return 1; } static int platform_cpu_disable(unsigned int cpu) { if (smp_ops.cpu_disable) return smp_ops.cpu_disable(cpu); return 0; } int platform_can_hotplug_cpu(unsigned int cpu) { /* cpu_die must be specified to support hotplug */ if (!smp_ops.cpu_die) return 0; if (smp_ops.cpu_can_disable) return smp_ops.cpu_can_disable(cpu); /* * By default, allow disabling all CPUs except the first one, * since this is special on a lot of platforms, e.g. because * of clock tick interrupts. */ return cpu != 0; } static void ipi_teardown(int cpu) { int i; if (WARN_ON_ONCE(!ipi_irq_base)) return; for (i = 0; i < nr_ipi; i++) disable_percpu_irq(ipi_irq_base + i); } /* * __cpu_disable runs on the processor to be shutdown. */ int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); int ret; ret = platform_cpu_disable(cpu); if (ret) return ret; #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY remove_cpu_topology(cpu); #endif /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); ipi_teardown(cpu); /* * OK - migrate IRQs away from this CPU */ irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU * from the vm mask set of all processes. * * Caches are flushed to the Level of Unification Inner Shareable * to write-back dirty lines to unified caches shared by all CPUs. */ flush_cache_louis(); local_flush_tlb_all(); return 0; } /* * called on the thread which is asking for a CPU to be shutdown after the * shutdown completed. */ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { pr_debug("CPU%u: shutdown\n", cpu); clear_tasks_mm_cpumask(cpu); /* * platform_cpu_kill() is generally expected to do the powering off * and/or cutting of clocks to the dying CPU. Optionally, this may * be done by the CPU which is dying in preference to supporting * this call, but that means there is _no_ synchronisation between * the requesting CPU and the dying CPU actually losing power. */ if (!platform_cpu_kill(cpu)) pr_err("CPU%u: unable to kill\n", cpu); } /* * Called from the idle thread for the CPU which has been shutdown. * * Note that we disable IRQs here, but do not re-enable them * before returning to the caller. This is also the behaviour * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ void __noreturn arch_cpu_idle_dead(void) { unsigned int cpu = smp_processor_id(); idle_task_exit(); local_irq_disable(); /* * Flush the data out of the L1 cache for this CPU. 
This must be * before the completion to ensure that data is safely written out * before platform_cpu_kill() gets called - which may disable * *this* CPU and power down its cache. */ flush_cache_louis(); /* * Tell cpuhp_bp_sync_dead() that this CPU is now safe to dispose * of. Once this returns, power and/or clocks can be removed at * any point from this CPU and its cache by platform_cpu_kill(). */ cpuhp_ap_report_dead(); /* * Ensure that the cache lines associated with that completion are * written out. This covers the case where _this_ CPU is doing the * powering down, to ensure that the completion is visible to the * CPU waiting for this one. */ flush_cache_louis(); /* * The actual CPU shutdown procedure is at least platform (if not * CPU) specific. This may remove power, or it may simply spin. * * Platforms are generally expected *NOT* to return from this call, * although there are some which do because they have no way to * power down the CPU. These platforms are the _only_ reason we * have a return path which uses the fragment of assembly below. * * The return path should not be used for platforms which can * power off the CPU. */ if (smp_ops.cpu_die) smp_ops.cpu_die(cpu); pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n", cpu); /* * Do not return to the idle loop - jump back to the secondary * cpu initialisation. There's some initialisation which needs * to be repeated to undo the effects of taking the CPU offline. */ __asm__("mov sp, %0\n" " mov fp, #0\n" " mov r0, %1\n" " b secondary_start_kernel" : : "r" (task_stack_page(current) + THREAD_SIZE - 8), "r" (current) : "r0"); unreachable(); } #endif /* CONFIG_HOTPLUG_CPU */ /* * Called by both boot and secondaries to move global data into * per-processor storage. */ static void smp_store_cpu_info(unsigned int cpuid) { struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); cpu_info->loops_per_jiffy = loops_per_jiffy; cpu_info->cpuid = read_cpuid_id(); store_cpu_topology(cpuid); check_cpu_icache_size(cpuid); } static void set_current(struct task_struct *cur) { /* Set TPIDRURO */ asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory"); } /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. */ asmlinkage void secondary_start_kernel(struct task_struct *task) { struct mm_struct *mm = &init_mm; unsigned int cpu; set_current(task); secondary_biglittle_init(); /* * The identity mapping is uncached (strongly ordered), so * switch away from it before attempting any exclusive accesses. */ cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); enter_lazy_tlb(mm, current); local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ cpu = smp_processor_id(); mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_init(); #ifndef CONFIG_MMU setup_vectors_base(); #endif pr_debug("CPU%u: Booted secondary processor\n", cpu); trace_hardirqs_off(); /* * Give the platform a chance to do its own initialisation. */ if (smp_ops.smp_secondary_init) smp_ops.smp_secondary_init(cpu); notify_cpu_starting(cpu); ipi_setup(cpu); calibrate_delay(); smp_store_cpu_info(cpu); /* * OK, now it's safe to let the boot CPU continue. Wait for * the CPU migration code to notice that the CPU is online * before we continue - which happens after __cpu_up returns. 
*/ set_cpu_online(cpu, true); check_other_bugs(); complete(&cpu_running); local_irq_enable(); local_fiq_enable(); local_abt_enable(); /* * OK, it's off to the idle thread for us */ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } void __init smp_cpus_done(unsigned int max_cpus) { int cpu; unsigned long bogosum = 0; for_each_online_cpu(cpu) bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; printk(KERN_INFO "SMP: Total of %d processors activated " "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), bogosum / (500000/HZ), (bogosum / (5000/HZ)) % 100); hyp_mode_check(); } void __init smp_prepare_boot_cpu(void) { set_my_cpu_offset(per_cpu_offset(smp_processor_id())); } void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int ncores = num_possible_cpus(); init_cpu_topology(); smp_store_cpu_info(smp_processor_id()); /* * are we trying to boot more cores than exist? */ if (max_cpus > ncores) max_cpus = ncores; if (ncores > 1 && max_cpus) { /* * Initialise the present map, which describes the set of CPUs * actually populated at the present time. A platform should * re-initialize the map in the platforms smp_prepare_cpus() * if present != possible (e.g. physical hotplug). */ init_cpu_present(cpu_possible_mask); /* * Initialise the SCU if there are more than one CPU * and let them know where to start. */ if (smp_ops.smp_prepare_cpus) smp_ops.smp_prepare_cpus(max_cpus); } } static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_WAKEUP] = "CPU wakeup interrupts", [IPI_TIMER] = "Timer broadcast interrupts", [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_COMPLETION] = "completion interrupts", }; static void smp_cross_call(const struct cpumask *target, unsigned int ipinr); void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < NR_IPI; i++) { if (!ipi_desc[i]) continue; seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); for_each_online_cpu(cpu) seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); seq_printf(p, " %s\n", ipi_types[i]); } } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { smp_cross_call(mask, IPI_CALL_FUNC); } void arch_send_wakeup_ipi_mask(const struct cpumask *mask) { smp_cross_call(mask, IPI_WAKEUP); } void arch_send_call_function_single_ipi(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); } #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) { if (arch_irq_work_has_interrupt()) smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); } #endif #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST void tick_broadcast(const struct cpumask *mask) { smp_cross_call(mask, IPI_TIMER); } #endif static DEFINE_RAW_SPINLOCK(stop_lock); /* * ipi_cpu_stop - handle IPI from smp_send_stop() */ static void ipi_cpu_stop(unsigned int cpu) { local_fiq_disable(); if (system_state <= SYSTEM_RUNNING) { raw_spin_lock(&stop_lock); pr_crit("CPU%u: stopping\n", cpu); dump_stack(); raw_spin_unlock(&stop_lock); } set_cpu_online(cpu, false); while (1) { cpu_relax(); wfe(); } } static DEFINE_PER_CPU(struct completion *, cpu_completion); int register_ipi_completion(struct completion *completion, int cpu) { per_cpu(cpu_completion, cpu) = completion; return IPI_COMPLETION; } static void ipi_complete(unsigned int cpu) { complete(per_cpu(cpu_completion, cpu)); } /* * Main handler for inter-processor interrupts */ static void do_handle_IPI(int ipinr) { unsigned int cpu = smp_processor_id(); if ((unsigned)ipinr < NR_IPI) 
trace_ipi_entry(ipi_types[ipinr]); switch (ipinr) { case IPI_WAKEUP: break; #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: tick_receive_broadcast(); break; #endif case IPI_RESCHEDULE: scheduler_ipi(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CPU_STOP: ipi_cpu_stop(cpu); break; #ifdef CONFIG_IRQ_WORK case IPI_IRQ_WORK: irq_work_run(); break; #endif case IPI_COMPLETION: ipi_complete(cpu); break; case IPI_CPU_BACKTRACE: printk_deferred_enter(); nmi_cpu_backtrace(get_irq_regs()); printk_deferred_exit(); break; default: pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); break; } if ((unsigned)ipinr < NR_IPI) trace_ipi_exit(ipi_types[ipinr]); } /* Legacy version, should go away once all irqchips have been converted */ void handle_IPI(int ipinr, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); do_handle_IPI(ipinr); irq_exit(); set_irq_regs(old_regs); } static irqreturn_t ipi_handler(int irq, void *data) { do_handle_IPI(irq - ipi_irq_base); return IRQ_HANDLED; } static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) { trace_ipi_raise(target, ipi_types[ipinr]); __ipi_send_mask(ipi_desc[ipinr], target); } static void ipi_setup(int cpu) { int i; if (WARN_ON_ONCE(!ipi_irq_base)) return; for (i = 0; i < nr_ipi; i++) enable_percpu_irq(ipi_irq_base + i, 0); } void __init set_smp_ipi_range(int ipi_base, int n) { int i; WARN_ON(n < MAX_IPI); nr_ipi = min(n, MAX_IPI); for (i = 0; i < nr_ipi; i++) { int err; err = request_percpu_irq(ipi_base + i, ipi_handler, "IPI", &irq_stat); WARN_ON(err); ipi_desc[i] = irq_to_desc(ipi_base + i); irq_set_status_flags(ipi_base + i, IRQ_HIDDEN); } ipi_irq_base = ipi_base; /* Setup the boot CPU immediately */ ipi_setup(smp_processor_id()); } void arch_smp_send_reschedule(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } void smp_send_stop(void) { unsigned long timeout; struct cpumask mask; cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); if (!cpumask_empty(&mask)) smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; while (num_online_cpus() > 1 && timeout--) udelay(1); if (num_online_cpus() > 1) pr_warn("SMP: failed to stop secondary CPUs\n"); } /* In case panic() and panic() called at the same time on CPU1 and CPU2, * and CPU 1 calls panic_smp_self_stop() before crash_smp_send_stop() * CPU1 can't receive the ipi irqs from CPU2, CPU1 will be always online, * kdump fails. So split out the panic_smp_self_stop() and add * set_cpu_online(smp_processor_id(), false). 
*/ void __noreturn panic_smp_self_stop(void) { pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n", smp_processor_id()); set_cpu_online(smp_processor_id(), false); while (1) cpu_relax(); } #ifdef CONFIG_CPU_FREQ static DEFINE_PER_CPU(unsigned long, l_p_j_ref); static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq); static unsigned long global_l_p_j_ref; static unsigned long global_l_p_j_ref_freq; static int cpufreq_callback(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct cpumask *cpus = freq->policy->cpus; int cpu, first = cpumask_first(cpus); unsigned int lpj; if (freq->flags & CPUFREQ_CONST_LOOPS) return NOTIFY_OK; if (!per_cpu(l_p_j_ref, first)) { for_each_cpu(cpu, cpus) { per_cpu(l_p_j_ref, cpu) = per_cpu(cpu_data, cpu).loops_per_jiffy; per_cpu(l_p_j_ref_freq, cpu) = freq->old; } if (!global_l_p_j_ref) { global_l_p_j_ref = loops_per_jiffy; global_l_p_j_ref_freq = freq->old; } } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { loops_per_jiffy = cpufreq_scale(global_l_p_j_ref, global_l_p_j_ref_freq, freq->new); lpj = cpufreq_scale(per_cpu(l_p_j_ref, first), per_cpu(l_p_j_ref_freq, first), freq->new); for_each_cpu(cpu, cpus) per_cpu(cpu_data, cpu).loops_per_jiffy = lpj; } return NOTIFY_OK; } static struct notifier_block cpufreq_notifier = { .notifier_call = cpufreq_callback, }; static int __init register_cpufreq_notifier(void) { return cpufreq_register_notifier(&cpufreq_notifier, CPUFREQ_TRANSITION_NOTIFIER); } core_initcall(register_cpufreq_notifier); #endif static void raise_nmi(cpumask_t *mask) { __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask); } void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) { nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_nmi); }
linux-master
arch/arm/kernel/smp.c
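The BogoMIPS summary printed by smp_cpus_done() in smp.c converts the summed loops_per_jiffy into a fixed-point "X.YY" value using the 500000/HZ and 5000/HZ divisors. The arithmetic is easy to check in isolation; the HZ value and per-CPU loops_per_jiffy below are assumptions for the demo.

#include <stdio.h>

#define SKETCH_HZ 100	/* assumed CONFIG_HZ */

int main(void)
{
	/* Assume 4 CPUs, each calibrated to loops_per_jiffy = 997376. */
	unsigned long bogosum = 4UL * 997376UL;

	/* Same formula as smp_cpus_done(): integer part and two decimals. */
	printf("SMP: Total of 4 processors activated (%lu.%02lu BogoMIPS).\n",
	       bogosum / (500000 / SKETCH_HZ),
	       (bogosum / (5000 / SKETCH_HZ)) % 100);
	return 0;
}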
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/opcodes.c
 *
 * A32 condition code lookup feature moved from nwfpe/fpopcode.c
 */

#include <linux/module.h>
#include <asm/opcodes.h>

#define ARM_OPCODE_CONDITION_UNCOND 0xf

/*
 * condition code lookup table
 * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
 *
 * bit position in short is condition code: NZCV
 */
static const unsigned short cc_map[16] = {
	0xF0F0,			/* EQ == Z set            */
	0x0F0F,			/* NE                     */
	0xCCCC,			/* CS == C set            */
	0x3333,			/* CC                     */
	0xFF00,			/* MI == N set            */
	0x00FF,			/* PL                     */
	0xAAAA,			/* VS == V set            */
	0x5555,			/* VC                     */
	0x0C0C,			/* HI == C set && Z clear */
	0xF3F3,			/* LS == C clear || Z set */
	0xAA55,			/* GE == (N==V)           */
	0x55AA,			/* LT == (N!=V)           */
	0x0A05,			/* GT == (!Z && (N==V))   */
	0xF5FA,			/* LE == (Z || (N!=V))    */
	0xFFFF,			/* AL always              */
	0			/* NV                     */
};

/*
 * Returns:
 * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
 * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
 * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
 *                              opcode space from v5 onwards
 *
 * Code that tests whether a conditional instruction would pass its condition
 * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
 *
 * Code that tests if a condition means that the instruction would be executed
 * (regardless of conditional or unconditional) should instead check that the
 * return value != ARM_OPCODE_CONDTEST_FAIL.
 */
asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
{
	u32 cc_bits = opcode >> 28;
	u32 psr_cond = psr >> 28;
	unsigned int ret;

	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
		if ((cc_map[cc_bits] >> (psr_cond)) & 1)
			ret = ARM_OPCODE_CONDTEST_PASS;
		else
			ret = ARM_OPCODE_CONDTEST_FAIL;
	} else {
		ret = ARM_OPCODE_CONDTEST_UNCOND;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(arm_check_condition);
linux-master
arch/arm/kernel/opcodes.c
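The cc_map table in opcodes.c encodes, for each of the 16 ARM condition codes, which NZCV states pass: the PSR's top four bits pick a bit out of the table entry. The standalone sketch below reuses the same table values to check two easy cases (EQ and NE with only the Z flag set); the sketch_ names are for the demo only.

#include <stdint.h>
#include <stdio.h>

/* Same values as the cc_map[] table above. */
static const unsigned short sketch_cc_map[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000
};

/* Same test as arm_check_condition() for conditional opcodes:
 * the bit number is NZCV, taken from the top four bits of the PSR. */
static int sketch_cond_passes(uint32_t opcode, uint32_t psr)
{
	uint32_t cc = opcode >> 28;	/* condition field */
	uint32_t nzcv = psr >> 28;	/* N=8, Z=4, C=2, V=1 */

	return (sketch_cc_map[cc] >> nzcv) & 1;
}

int main(void)
{
	uint32_t psr_z_set = 0x40000000;	/* only the Z flag set */

	printf("EQ with Z set: %d\n", sketch_cond_passes(0x00000000, psr_z_set));	/* 1 */
	printf("NE with Z set: %d\n", sketch_cond_passes(0x10000000, psr_z_set));	/* 0 */
	return 0;
}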
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2009, 2010 ARM Limited * * Author: Will Deacon <[email protected]> */ /* * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, * using the CPU's debug registers. */ #define pr_fmt(fmt) "hw-breakpoint: " fmt #include <linux/errno.h> #include <linux/hardirq.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <linux/smp.h> #include <linux/cpu_pm.h> #include <linux/coresight.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/current.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> /* Breakpoint currently in use for each BRP. */ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); /* Watchpoint currently in use for each WRP. */ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); /* Number of BRP/WRP registers on this CPU. */ static int core_num_brps __ro_after_init; static int core_num_wrps __ro_after_init; /* Debug architecture version. */ static u8 debug_arch __ro_after_init; /* Does debug architecture support OS Save and Restore? */ static bool has_ossr __ro_after_init; /* Maximum supported watchpoint length. */ static u8 max_watchpoint_len __ro_after_init; #define READ_WB_REG_CASE(OP2, M, VAL) \ case ((OP2 << 4) + M): \ ARM_DBG_READ(c0, c ## M, OP2, VAL); \ break #define WRITE_WB_REG_CASE(OP2, M, VAL) \ case ((OP2 << 4) + M): \ ARM_DBG_WRITE(c0, c ## M, OP2, VAL); \ break #define GEN_READ_WB_REG_CASES(OP2, VAL) \ READ_WB_REG_CASE(OP2, 0, VAL); \ READ_WB_REG_CASE(OP2, 1, VAL); \ READ_WB_REG_CASE(OP2, 2, VAL); \ READ_WB_REG_CASE(OP2, 3, VAL); \ READ_WB_REG_CASE(OP2, 4, VAL); \ READ_WB_REG_CASE(OP2, 5, VAL); \ READ_WB_REG_CASE(OP2, 6, VAL); \ READ_WB_REG_CASE(OP2, 7, VAL); \ READ_WB_REG_CASE(OP2, 8, VAL); \ READ_WB_REG_CASE(OP2, 9, VAL); \ READ_WB_REG_CASE(OP2, 10, VAL); \ READ_WB_REG_CASE(OP2, 11, VAL); \ READ_WB_REG_CASE(OP2, 12, VAL); \ READ_WB_REG_CASE(OP2, 13, VAL); \ READ_WB_REG_CASE(OP2, 14, VAL); \ READ_WB_REG_CASE(OP2, 15, VAL) #define GEN_WRITE_WB_REG_CASES(OP2, VAL) \ WRITE_WB_REG_CASE(OP2, 0, VAL); \ WRITE_WB_REG_CASE(OP2, 1, VAL); \ WRITE_WB_REG_CASE(OP2, 2, VAL); \ WRITE_WB_REG_CASE(OP2, 3, VAL); \ WRITE_WB_REG_CASE(OP2, 4, VAL); \ WRITE_WB_REG_CASE(OP2, 5, VAL); \ WRITE_WB_REG_CASE(OP2, 6, VAL); \ WRITE_WB_REG_CASE(OP2, 7, VAL); \ WRITE_WB_REG_CASE(OP2, 8, VAL); \ WRITE_WB_REG_CASE(OP2, 9, VAL); \ WRITE_WB_REG_CASE(OP2, 10, VAL); \ WRITE_WB_REG_CASE(OP2, 11, VAL); \ WRITE_WB_REG_CASE(OP2, 12, VAL); \ WRITE_WB_REG_CASE(OP2, 13, VAL); \ WRITE_WB_REG_CASE(OP2, 14, VAL); \ WRITE_WB_REG_CASE(OP2, 15, VAL) static u32 read_wb_reg(int n) { u32 val = 0; switch (n) { GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val); GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val); GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val); GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val); default: pr_warn("attempt to read from unknown breakpoint register %d\n", n); } return val; } static void write_wb_reg(int n, u32 val) { switch (n) { GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val); GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val); default: pr_warn("attempt to write to unknown breakpoint register %d\n", n); } isb(); } /* Determine debug architecture. */ static u8 get_debug_arch(void) { u32 didr; /* Do we implement the extended CPUID interface? */ if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { pr_warn_once("CPUID feature registers not supported. 
" "Assuming v6 debug is present.\n"); return ARM_DEBUG_ARCH_V6; } ARM_DBG_READ(c0, c0, 0, didr); return (didr >> 16) & 0xf; } u8 arch_get_debug_arch(void) { return debug_arch; } static int debug_arch_supported(void) { u8 arch = get_debug_arch(); /* We don't support the memory-mapped interface. */ return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) || arch >= ARM_DEBUG_ARCH_V7_1; } /* Can we determine the watchpoint access type from the fsr? */ static int debug_exception_updates_fsr(void) { return get_debug_arch() >= ARM_DEBUG_ARCH_V8; } /* Determine number of WRP registers available. */ static int get_num_wrp_resources(void) { u32 didr; ARM_DBG_READ(c0, c0, 0, didr); return ((didr >> 28) & 0xf) + 1; } /* Determine number of BRP registers available. */ static int get_num_brp_resources(void) { u32 didr; ARM_DBG_READ(c0, c0, 0, didr); return ((didr >> 24) & 0xf) + 1; } /* Does this core support mismatch breakpoints? */ static int core_has_mismatch_brps(void) { return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 && get_num_brp_resources() > 1); } /* Determine number of usable WRPs available. */ static int get_num_wrps(void) { /* * On debug architectures prior to 7.1, when a watchpoint fires, the * only way to work out which watchpoint it was is by disassembling * the faulting instruction and working out the address of the memory * access. * * Furthermore, we can only do this if the watchpoint was precise * since imprecise watchpoints prevent us from calculating register * based addresses. * * Providing we have more than 1 breakpoint register, we only report * a single watchpoint register for the time being. This way, we always * know which watchpoint fired. In the future we can either add a * disassembler and address generation emulator, or we can insert a * check to see if the DFAR is set on watchpoint exception entry * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows * that it is set on some implementations]. */ if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1) return 1; return get_num_wrp_resources(); } /* Determine number of usable BRPs available. */ static int get_num_brps(void) { int brps = get_num_brp_resources(); return core_has_mismatch_brps() ? brps - 1 : brps; } /* * In order to access the breakpoint/watchpoint control registers, * we must be running in debug monitor mode. Unfortunately, we can * be put into halting debug mode at any time by an external debugger * but there is nothing we can do to prevent that. */ static int monitor_mode_enabled(void) { u32 dscr; ARM_DBG_READ(c0, c1, 0, dscr); return !!(dscr & ARM_DSCR_MDBGEN); } static int enable_monitor_mode(void) { u32 dscr; ARM_DBG_READ(c0, c1, 0, dscr); /* If monitor mode is already enabled, just return. */ if (dscr & ARM_DSCR_MDBGEN) goto out; /* Write to the corresponding DSCR. */ switch (get_debug_arch()) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1: ARM_DBG_WRITE(c0, c1, 0, (dscr | ARM_DSCR_MDBGEN)); break; case ARM_DEBUG_ARCH_V7_ECP14: case ARM_DEBUG_ARCH_V7_1: case ARM_DEBUG_ARCH_V8: case ARM_DEBUG_ARCH_V8_1: case ARM_DEBUG_ARCH_V8_2: case ARM_DEBUG_ARCH_V8_4: ARM_DBG_WRITE(c0, c2, 2, (dscr | ARM_DSCR_MDBGEN)); isb(); break; default: return -ENODEV; } /* Check that the write made it through. 
*/ ARM_DBG_READ(c0, c1, 0, dscr); if (!(dscr & ARM_DSCR_MDBGEN)) { pr_warn_once("Failed to enable monitor mode on CPU %d.\n", smp_processor_id()); return -EPERM; } out: return 0; } int hw_breakpoint_slots(int type) { if (!debug_arch_supported()) return 0; /* * We can be called early, so don't rely on * our static variables being initialised. */ switch (type) { case TYPE_INST: return get_num_brps(); case TYPE_DATA: return get_num_wrps(); default: pr_warn("unknown slot type: %d\n", type); return 0; } } /* * Check if 8-bit byte-address select is available. * This clobbers WRP 0. */ static u8 get_max_wp_len(void) { u32 ctrl_reg; struct arch_hw_breakpoint_ctrl ctrl; u8 size = 4; if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14) goto out; memset(&ctrl, 0, sizeof(ctrl)); ctrl.len = ARM_BREAKPOINT_LEN_8; ctrl_reg = encode_ctrl_reg(ctrl); write_wb_reg(ARM_BASE_WVR, 0); write_wb_reg(ARM_BASE_WCR, ctrl_reg); if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg) size = 8; out: return size; } u8 arch_get_max_wp_len(void) { return max_watchpoint_len; } /* * Install a perf counter breakpoint. */ int arch_install_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); struct perf_event **slot, **slots; int i, max_slots, ctrl_base, val_base; u32 addr, ctrl; addr = info->address; ctrl = encode_ctrl_reg(info->ctrl) | 0x1; if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { /* Breakpoint */ ctrl_base = ARM_BASE_BCR; val_base = ARM_BASE_BVR; slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; } else { /* Watchpoint */ ctrl_base = ARM_BASE_WCR; val_base = ARM_BASE_WVR; slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; } for (i = 0; i < max_slots; ++i) { slot = &slots[i]; if (!*slot) { *slot = bp; break; } } if (i == max_slots) { pr_warn("Can't find any breakpoint slot\n"); return -EBUSY; } /* Override the breakpoint data with the step data. */ if (info->step_ctrl.enabled) { addr = info->trigger & ~0x3; ctrl = encode_ctrl_reg(info->step_ctrl); if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) { i = 0; ctrl_base = ARM_BASE_BCR + core_num_brps; val_base = ARM_BASE_BVR + core_num_brps; } } /* Setup the address register. */ write_wb_reg(val_base + i, addr); /* Setup the control register. */ write_wb_reg(ctrl_base + i, ctrl); return 0; } void arch_uninstall_hw_breakpoint(struct perf_event *bp) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); struct perf_event **slot, **slots; int i, max_slots, base; if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { /* Breakpoint */ base = ARM_BASE_BCR; slots = this_cpu_ptr(bp_on_reg); max_slots = core_num_brps; } else { /* Watchpoint */ base = ARM_BASE_WCR; slots = this_cpu_ptr(wp_on_reg); max_slots = core_num_wrps; } /* Remove the breakpoint. */ for (i = 0; i < max_slots; ++i) { slot = &slots[i]; if (*slot == bp) { *slot = NULL; break; } } if (i == max_slots) { pr_warn("Can't find any breakpoint slot\n"); return; } /* Ensure that we disable the mismatch breakpoint. */ if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE && info->step_ctrl.enabled) { i = 0; base = ARM_BASE_BCR + core_num_brps; } /* Reset the control register. */ write_wb_reg(base + i, 0); } static int get_hbp_len(u8 hbp_len) { unsigned int len_in_bytes = 0; switch (hbp_len) { case ARM_BREAKPOINT_LEN_1: len_in_bytes = 1; break; case ARM_BREAKPOINT_LEN_2: len_in_bytes = 2; break; case ARM_BREAKPOINT_LEN_4: len_in_bytes = 4; break; case ARM_BREAKPOINT_LEN_8: len_in_bytes = 8; break; } return len_in_bytes; } /* * Check whether bp virtual address is in kernel space. 
*/ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) { unsigned int len; unsigned long va; va = hw->address; len = get_hbp_len(hw->ctrl.len); return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); } /* * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. * Hopefully this will disappear when ptrace can bypass the conversion * to generic breakpoint descriptions. */ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, int *gen_len, int *gen_type) { /* Type */ switch (ctrl.type) { case ARM_BREAKPOINT_EXECUTE: *gen_type = HW_BREAKPOINT_X; break; case ARM_BREAKPOINT_LOAD: *gen_type = HW_BREAKPOINT_R; break; case ARM_BREAKPOINT_STORE: *gen_type = HW_BREAKPOINT_W; break; case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: *gen_type = HW_BREAKPOINT_RW; break; default: return -EINVAL; } /* Len */ switch (ctrl.len) { case ARM_BREAKPOINT_LEN_1: *gen_len = HW_BREAKPOINT_LEN_1; break; case ARM_BREAKPOINT_LEN_2: *gen_len = HW_BREAKPOINT_LEN_2; break; case ARM_BREAKPOINT_LEN_4: *gen_len = HW_BREAKPOINT_LEN_4; break; case ARM_BREAKPOINT_LEN_8: *gen_len = HW_BREAKPOINT_LEN_8; break; default: return -EINVAL; } return 0; } /* * Construct an arch_hw_breakpoint from a perf_event. */ static int arch_build_bp_info(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw) { /* Type */ switch (attr->bp_type) { case HW_BREAKPOINT_X: hw->ctrl.type = ARM_BREAKPOINT_EXECUTE; break; case HW_BREAKPOINT_R: hw->ctrl.type = ARM_BREAKPOINT_LOAD; break; case HW_BREAKPOINT_W: hw->ctrl.type = ARM_BREAKPOINT_STORE; break; case HW_BREAKPOINT_RW: hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; break; default: return -EINVAL; } /* Len */ switch (attr->bp_len) { case HW_BREAKPOINT_LEN_1: hw->ctrl.len = ARM_BREAKPOINT_LEN_1; break; case HW_BREAKPOINT_LEN_2: hw->ctrl.len = ARM_BREAKPOINT_LEN_2; break; case HW_BREAKPOINT_LEN_4: hw->ctrl.len = ARM_BREAKPOINT_LEN_4; break; case HW_BREAKPOINT_LEN_8: hw->ctrl.len = ARM_BREAKPOINT_LEN_8; if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE) && max_watchpoint_len >= 8) break; fallthrough; default: return -EINVAL; } /* * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes. * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported * by the hardware and must be aligned to the appropriate number of * bytes. */ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE && hw->ctrl.len != ARM_BREAKPOINT_LEN_2 && hw->ctrl.len != ARM_BREAKPOINT_LEN_4) return -EINVAL; /* Address */ hw->address = attr->bp_addr; /* Privilege */ hw->ctrl.privilege = ARM_BREAKPOINT_USER; if (arch_check_bp_in_kernelspace(hw)) hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV; /* Enabled? */ hw->ctrl.enabled = !attr->disabled; /* Mismatch */ hw->ctrl.mismatch = 0; return 0; } /* * Validate the arch-specific HW Breakpoint register settings. */ int hw_breakpoint_arch_parse(struct perf_event *bp, const struct perf_event_attr *attr, struct arch_hw_breakpoint *hw) { int ret = 0; u32 offset, alignment_mask = 0x3; /* Ensure that we are in monitor debug mode. */ if (!monitor_mode_enabled()) return -ENODEV; /* Build the arch_hw_breakpoint. */ ret = arch_build_bp_info(bp, attr, hw); if (ret) goto out; /* Check address alignment. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8) alignment_mask = 0x7; offset = hw->address & alignment_mask; switch (offset) { case 0: /* Aligned */ break; case 1: case 2: /* Allow halfword watchpoints and breakpoints. */ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2) break; fallthrough; case 3: /* Allow single byte watchpoint. 
*/ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1) break; fallthrough; default: ret = -EINVAL; goto out; } hw->address &= ~alignment_mask; hw->ctrl.len <<= offset; if (uses_default_overflow_handler(bp)) { /* * Mismatch breakpoints are required for single-stepping * breakpoints. */ if (!core_has_mismatch_brps()) return -EINVAL; /* We don't allow mismatch breakpoints in kernel space. */ if (arch_check_bp_in_kernelspace(hw)) return -EPERM; /* * Per-cpu breakpoints are not supported by our stepping * mechanism. */ if (!bp->hw.target) return -EINVAL; /* * We only support specific access types if the fsr * reports them. */ if (!debug_exception_updates_fsr() && (hw->ctrl.type == ARM_BREAKPOINT_LOAD || hw->ctrl.type == ARM_BREAKPOINT_STORE)) return -EINVAL; } out: return ret; } /* * Enable/disable single-stepping over the breakpoint bp at address addr. */ static void enable_single_step(struct perf_event *bp, u32 addr) { struct arch_hw_breakpoint *info = counter_arch_bp(bp); arch_uninstall_hw_breakpoint(bp); info->step_ctrl.mismatch = 1; info->step_ctrl.len = ARM_BREAKPOINT_LEN_4; info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE; info->step_ctrl.privilege = info->ctrl.privilege; info->step_ctrl.enabled = 1; info->trigger = addr; arch_install_hw_breakpoint(bp); } static void disable_single_step(struct perf_event *bp) { arch_uninstall_hw_breakpoint(bp); counter_arch_bp(bp)->step_ctrl.enabled = 0; arch_install_hw_breakpoint(bp); } /* * Arm32 hardware does not always report a watchpoint hit address that matches * one of the watchpoints set. It can also report an address "near" the * watchpoint if a single instruction access both watched and unwatched * addresses. There is no straight-forward way, short of disassembling the * offending instruction, to map that address back to the watchpoint. This * function computes the distance of the memory access from the watchpoint as a * heuristic for the likelyhood that a given access triggered the watchpoint. * * See this same function in the arm64 platform code, which has the same * problem. * * The function returns the distance of the address from the bytes watched by * the watchpoint. In case of an exact match, it returns 0. */ static u32 get_distance_from_watchpoint(unsigned long addr, u32 val, struct arch_hw_breakpoint_ctrl *ctrl) { u32 wp_low, wp_high; u32 lens, lene; lens = __ffs(ctrl->len); lene = __fls(ctrl->len); wp_low = val + lens; wp_high = val + lene; if (addr < wp_low) return wp_low - addr; else if (addr > wp_high) return addr - wp_high; else return 0; } static int watchpoint_fault_on_uaccess(struct pt_regs *regs, struct arch_hw_breakpoint *info) { return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; } static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { int i, access, closest_match = 0; u32 min_dist = -1, dist; u32 val, ctrl_reg; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(wp_on_reg); /* * Find all watchpoints that match the reported address. If no exact * match is found. Attribute the hit to the closest watchpoint. */ rcu_read_lock(); for (i = 0; i < core_num_wrps; ++i) { wp = slots[i]; if (wp == NULL) continue; /* * The DFAR is an unknown value on debug architectures prior * to 7.1. Since we only allow a single watchpoint on these * older CPUs, we can set the trigger to the lowest possible * faulting address. 
*/ if (debug_arch < ARM_DEBUG_ARCH_V7_1) { BUG_ON(i > 0); info = counter_arch_bp(wp); info->trigger = wp->attr.bp_addr; } else { /* Check that the access type matches. */ if (debug_exception_updates_fsr()) { access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : HW_BREAKPOINT_R; if (!(access & hw_breakpoint_type(wp))) continue; } val = read_wb_reg(ARM_BASE_WVR + i); ctrl_reg = read_wb_reg(ARM_BASE_WCR + i); decode_ctrl_reg(ctrl_reg, &ctrl); dist = get_distance_from_watchpoint(addr, val, &ctrl); if (dist < min_dist) { min_dist = dist; closest_match = i; } /* Is this an exact match? */ if (dist != 0) continue; /* We have a winner. */ info = counter_arch_bp(wp); info->trigger = addr; } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); /* * If we triggered a user watchpoint from a uaccess routine, * then handle the stepping ourselves since userspace really * can't help us with this. */ if (watchpoint_fault_on_uaccess(regs, info)) goto step; perf_bp_event(wp, regs); /* * Defer stepping to the overflow handler if one is installed. * Otherwise, insert a temporary mismatch breakpoint so that * we can single-step over the watchpoint trigger. */ if (!uses_default_overflow_handler(wp)) continue; step: enable_single_step(wp, instruction_pointer(regs)); } if (min_dist > 0 && min_dist != -1) { /* No exact match found. */ wp = slots[closest_match]; info = counter_arch_bp(wp); info->trigger = addr; pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); perf_bp_event(wp, regs); if (uses_default_overflow_handler(wp)) enable_single_step(wp, instruction_pointer(regs)); } rcu_read_unlock(); } static void watchpoint_single_step_handler(unsigned long pc) { int i; struct perf_event *wp, **slots; struct arch_hw_breakpoint *info; slots = this_cpu_ptr(wp_on_reg); for (i = 0; i < core_num_wrps; ++i) { rcu_read_lock(); wp = slots[i]; if (wp == NULL) goto unlock; info = counter_arch_bp(wp); if (!info->step_ctrl.enabled) goto unlock; /* * Restore the original watchpoint if we've completed the * single-step. */ if (info->trigger != pc) disable_single_step(wp); unlock: rcu_read_unlock(); } } static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs) { int i; u32 ctrl_reg, val, addr; struct perf_event *bp, **slots; struct arch_hw_breakpoint *info; struct arch_hw_breakpoint_ctrl ctrl; slots = this_cpu_ptr(bp_on_reg); /* The exception entry code places the amended lr in the PC. */ addr = regs->ARM_pc; /* Check the currently installed breakpoints first. */ for (i = 0; i < core_num_brps; ++i) { rcu_read_lock(); bp = slots[i]; if (bp == NULL) goto unlock; info = counter_arch_bp(bp); /* Check if the breakpoint value matches. */ val = read_wb_reg(ARM_BASE_BVR + i); if (val != (addr & ~0x3)) goto mismatch; /* Possible match, check the byte address select to confirm. */ ctrl_reg = read_wb_reg(ARM_BASE_BCR + i); decode_ctrl_reg(ctrl_reg, &ctrl); if ((1 << (addr & 0x3)) & ctrl.len) { info->trigger = addr; pr_debug("breakpoint fired: address = 0x%x\n", addr); perf_bp_event(bp, regs); if (uses_default_overflow_handler(bp)) enable_single_step(bp, addr); goto unlock; } mismatch: /* If we're stepping a breakpoint, it can now be restored. */ if (info->step_ctrl.enabled) disable_single_step(bp); unlock: rcu_read_unlock(); } /* Handle any pending watchpoint single-step breakpoints. */ watchpoint_single_step_handler(addr); } /* * Called from either the Data Abort Handler [watchpoint] or the * Prefetch Abort Handler [breakpoint] with interrupts disabled. 
*/ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { int ret = 0; u32 dscr; preempt_disable(); if (interrupts_enabled(regs)) local_irq_enable(); /* We only handle watchpoints and hardware breakpoints. */ ARM_DBG_READ(c0, c1, 0, dscr); /* Perform perf callbacks. */ switch (ARM_DSCR_MOE(dscr)) { case ARM_ENTRY_BREAKPOINT: breakpoint_handler(addr, regs); break; case ARM_ENTRY_ASYNC_WATCHPOINT: WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n"); fallthrough; case ARM_ENTRY_SYNC_WATCHPOINT: watchpoint_handler(addr, fsr, regs); break; default: ret = 1; /* Unhandled fault. */ } preempt_enable(); return ret; } #ifdef CONFIG_ARM_ERRATA_764319 static int oslsr_fault; static int debug_oslsr_trap(struct pt_regs *regs, unsigned int instr) { oslsr_fault = 1; instruction_pointer(regs) += 4; return 0; } static struct undef_hook debug_oslsr_hook = { .instr_mask = 0xffffffff, .instr_val = 0xee115e91, .fn = debug_oslsr_trap, }; #endif /* * One-time initialisation. */ static cpumask_t debug_err_mask; static int debug_reg_trap(struct pt_regs *regs, unsigned int instr) { int cpu = smp_processor_id(); pr_warn("Debug register access (0x%x) caused undefined instruction on CPU %d\n", instr, cpu); /* Set the error flag for this CPU and skip the faulting instruction. */ cpumask_set_cpu(cpu, &debug_err_mask); instruction_pointer(regs) += 4; return 0; } static struct undef_hook debug_reg_hook = { .instr_mask = 0x0fe80f10, .instr_val = 0x0e000e10, .fn = debug_reg_trap, }; /* Does this core support OS Save and Restore? */ static bool core_has_os_save_restore(void) { u32 oslsr; switch (get_debug_arch()) { case ARM_DEBUG_ARCH_V7_1: return true; case ARM_DEBUG_ARCH_V7_ECP14: #ifdef CONFIG_ARM_ERRATA_764319 oslsr_fault = 0; register_undef_hook(&debug_oslsr_hook); ARM_DBG_READ(c1, c1, 4, oslsr); unregister_undef_hook(&debug_oslsr_hook); if (oslsr_fault) return false; #else ARM_DBG_READ(c1, c1, 4, oslsr); #endif if (oslsr & ARM_OSLSR_OSLM0) return true; fallthrough; default: return false; } } static void reset_ctrl_regs(unsigned int cpu) { int i, raw_num_brps, err = 0; u32 val; /* * v7 debug contains save and restore registers so that debug state * can be maintained across low-power modes without leaving the debug * logic powered up. It is IMPLEMENTATION DEFINED whether we can access * the debug registers out of reset, so we must unlock the OS Lock * Access Register to avoid taking undefined instruction exceptions * later on. */ switch (debug_arch) { case ARM_DEBUG_ARCH_V6: case ARM_DEBUG_ARCH_V6_1: /* ARMv6 cores clear the registers out of reset. */ goto out_mdbgen; case ARM_DEBUG_ARCH_V7_ECP14: /* * Ensure sticky power-down is clear (i.e. debug logic is * powered up). */ ARM_DBG_READ(c1, c5, 4, val); if ((val & 0x1) == 0) err = -EPERM; if (!has_ossr) goto clear_vcr; break; case ARM_DEBUG_ARCH_V7_1: /* * Ensure the OS double lock is clear. */ ARM_DBG_READ(c1, c3, 4, val); if ((val & 0x1) == 1) err = -EPERM; break; } if (err) { pr_warn_once("CPU %d debug is powered down!\n", cpu); cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); return; } /* * Unconditionally clear the OS lock by writing a value * other than CS_LAR_KEY to the access register. */ ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK); isb(); /* * Clear any configured vector-catch events before * enabling monitor mode. 
*/ clear_vcr: ARM_DBG_WRITE(c0, c7, 0, 0); isb(); if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { pr_warn_once("CPU %d failed to disable vector catch\n", cpu); return; } /* * The control/value register pairs are UNKNOWN out of reset so * clear them to avoid spurious debug events. */ raw_num_brps = get_num_brp_resources(); for (i = 0; i < raw_num_brps; ++i) { write_wb_reg(ARM_BASE_BCR + i, 0UL); write_wb_reg(ARM_BASE_BVR + i, 0UL); } for (i = 0; i < core_num_wrps; ++i) { write_wb_reg(ARM_BASE_WCR + i, 0UL); write_wb_reg(ARM_BASE_WVR + i, 0UL); } if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); return; } /* * Have a crack at enabling monitor mode. We don't actually need * it yet, but reporting an error early is useful if it fails. */ out_mdbgen: if (enable_monitor_mode()) cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); } static int dbg_reset_online(unsigned int cpu) { local_irq_disable(); reset_ctrl_regs(cpu); local_irq_enable(); return 0; } #ifdef CONFIG_CPU_PM static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, void *v) { if (action == CPU_PM_EXIT) reset_ctrl_regs(smp_processor_id()); return NOTIFY_OK; } static struct notifier_block dbg_cpu_pm_nb = { .notifier_call = dbg_cpu_pm_notify, }; static void __init pm_init(void) { cpu_pm_register_notifier(&dbg_cpu_pm_nb); } #else static inline void pm_init(void) { } #endif static int __init arch_hw_breakpoint_init(void) { int ret; debug_arch = get_debug_arch(); if (!debug_arch_supported()) { pr_info("debug architecture 0x%x unsupported.\n", debug_arch); return 0; } /* * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD * whenever a WFI is issued, even if the core is not powered down, in * violation of the architecture. When DBGPRSR.SPD is set, accesses to * breakpoint and watchpoint registers are treated as undefined, so * this results in boot time and runtime failures when these are * accessed and we unexpectedly take a trap. * * It's not clear if/how this can be worked around, so we blacklist * Scorpion CPUs to avoid these issues. */ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) { pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); return 0; } has_ossr = core_has_os_save_restore(); /* Determine how many BRPs/WRPs are available. */ core_num_brps = get_num_brps(); core_num_wrps = get_num_wrps(); /* * We need to tread carefully here because DBGSWENABLE may be * driven low on this core and there isn't an architected way to * determine that. */ cpus_read_lock(); register_undef_hook(&debug_reg_hook); /* * Register CPU notifier which resets the breakpoint resources. We * assume that a halting debugger will leave the world in a nice state * for us. */ ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online", dbg_reset_online, NULL); unregister_undef_hook(&debug_reg_hook); if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) { core_num_brps = 0; core_num_wrps = 0; if (ret > 0) cpuhp_remove_state_nocalls_cpuslocked(ret); cpus_read_unlock(); return 0; } pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n", core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " : "", core_num_wrps); /* Work out the maximum supported watchpoint length. */ max_watchpoint_len = get_max_wp_len(); pr_info("maximum watchpoint size is %u bytes.\n", max_watchpoint_len); /* Register debug fault handler. 
*/ hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, "watchpoint debug exception"); hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, "breakpoint debug exception"); cpus_read_unlock(); /* Register PM notifiers. */ pm_init(); return 0; } arch_initcall(arch_hw_breakpoint_init); void hw_breakpoint_pmu_read(struct perf_event *bp) { } /* * Dummy function to register with die_notifier. */ int hw_breakpoint_exceptions_notify(struct notifier_block *unused, unsigned long val, void *data) { return NOTIFY_DONE; }
linux-master
arch/arm/kernel/hw_breakpoint.c
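The arch hooks in the file above (arch_install_hw_breakpoint, hw_breakpoint_arch_parse, the breakpoint/watchpoint handlers) are normally reached through the generic perf hw_breakpoint layer rather than called directly. The following is a minimal sketch, not taken from that file, of a kernel module that places a write watchpoint on one of its own variables via register_wide_hw_breakpoint(); the module structure, variable name and messages are illustrative assumptions.

/* Hypothetical sketch: exercise the generic hw_breakpoint API that ends up
 * calling the arch_* hooks defined in arch/arm/kernel/hw_breakpoint.c. */
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int watched_value;			/* variable we watch (illustrative) */
static struct perf_event * __percpu *sample_hbp;

/* Called by perf when the watchpoint fires. */
static void wp_handler(struct perf_event *bp, struct perf_sample_data *data,
		       struct pt_regs *regs)
{
	pr_info("watched_value written, pc=%pS\n",
		(void *)instruction_pointer(regs));
}

static int __init wp_sample_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&watched_value;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;	/* 4-byte watchpoint */
	attr.bp_type = HW_BREAKPOINT_W;		/* trigger on writes only */

	sample_hbp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	if (IS_ERR((void __force *)sample_hbp))
		return PTR_ERR((void __force *)sample_hbp);

	watched_value = 1;			/* should fire the handler */
	return 0;
}

static void __exit wp_sample_exit(void)
{
	unregister_wide_hw_breakpoint(sample_hbp);
}

module_init(wp_sample_init);
module_exit(wp_sample_exit);
MODULE_LICENSE("GPL");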
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013-2014 Linaro Ltd. * Copyright (c) 2013-2014 HiSilicon Limited. */ #include <linux/init.h> #include <linux/smp.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/memblock.h> #include <linux/of_address.h> #include <asm/cputype.h> #include <asm/cp15.h> #include <asm/cacheflush.h> #include <asm/smp.h> #include <asm/smp_plat.h> #include "core.h" /* bits definition in SC_CPU_RESET_REQ[x]/SC_CPU_RESET_DREQ[x] * 1 -- unreset; 0 -- reset */ #define CORE_RESET_BIT(x) (1 << x) #define NEON_RESET_BIT(x) (1 << (x + 4)) #define CORE_DEBUG_RESET_BIT(x) (1 << (x + 9)) #define CLUSTER_L2_RESET_BIT (1 << 8) #define CLUSTER_DEBUG_RESET_BIT (1 << 13) /* * bits definition in SC_CPU_RESET_STATUS[x] * 1 -- reset status; 0 -- unreset status */ #define CORE_RESET_STATUS(x) (1 << x) #define NEON_RESET_STATUS(x) (1 << (x + 4)) #define CORE_DEBUG_RESET_STATUS(x) (1 << (x + 9)) #define CLUSTER_L2_RESET_STATUS (1 << 8) #define CLUSTER_DEBUG_RESET_STATUS (1 << 13) #define CORE_WFI_STATUS(x) (1 << (x + 16)) #define CORE_WFE_STATUS(x) (1 << (x + 20)) #define CORE_DEBUG_ACK(x) (1 << (x + 24)) #define SC_CPU_RESET_REQ(x) (0x520 + (x << 3)) /* reset */ #define SC_CPU_RESET_DREQ(x) (0x524 + (x << 3)) /* unreset */ #define SC_CPU_RESET_STATUS(x) (0x1520 + (x << 3)) #define FAB_SF_MODE 0x0c #define FAB_SF_INVLD 0x10 /* bits definition in FB_SF_INVLD */ #define FB_SF_INVLD_START (1 << 8) #define HIP04_MAX_CLUSTERS 4 #define HIP04_MAX_CPUS_PER_CLUSTER 4 #define POLL_MSEC 10 #define TIMEOUT_MSEC 1000 static void __iomem *sysctrl, *fabric; static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; static DEFINE_SPINLOCK(boot_lock); static u32 fabric_phys_addr; /* * [0]: bootwrapper physical address * [1]: bootwrapper size * [2]: relocation address * [3]: relocation size */ static u32 hip04_boot_method[4]; static bool hip04_cluster_is_down(unsigned int cluster) { int i; for (i = 0; i < HIP04_MAX_CPUS_PER_CLUSTER; i++) if (hip04_cpu_table[cluster][i]) return false; return true; } static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on) { unsigned long data; if (!fabric) BUG(); data = readl_relaxed(fabric + FAB_SF_MODE); if (on) data |= 1 << cluster; else data &= ~(1 << cluster); writel_relaxed(data, fabric + FAB_SF_MODE); do { cpu_relax(); } while (data != readl_relaxed(fabric + FAB_SF_MODE)); } static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) { unsigned int mpidr, cpu, cluster; unsigned long data; void __iomem *sys_dreq, *sys_status; mpidr = cpu_logical_map(l_cpu); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); if (!sysctrl) return -ENODEV; if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) return -EINVAL; spin_lock_irq(&boot_lock); if (hip04_cpu_table[cluster][cpu]) goto out; sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster); sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster); if (hip04_cluster_is_down(cluster)) { data = CLUSTER_DEBUG_RESET_BIT; writel_relaxed(data, sys_dreq); do { cpu_relax(); data = readl_relaxed(sys_status); } while (data & CLUSTER_DEBUG_RESET_STATUS); hip04_set_snoop_filter(cluster, 1); } data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ CORE_DEBUG_RESET_BIT(cpu); writel_relaxed(data, sys_dreq); do { cpu_relax(); } while (data == readl_relaxed(sys_status)); /* * We may fail to power up core again without this delay. * It's not mentioned in document. It's found by test. 
*/ udelay(20); arch_send_wakeup_ipi_mask(cpumask_of(l_cpu)); out: hip04_cpu_table[cluster][cpu]++; spin_unlock_irq(&boot_lock); return 0; } #ifdef CONFIG_HOTPLUG_CPU static void hip04_cpu_die(unsigned int l_cpu) { unsigned int mpidr, cpu, cluster; bool last_man; mpidr = cpu_logical_map(l_cpu); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); spin_lock(&boot_lock); hip04_cpu_table[cluster][cpu]--; if (hip04_cpu_table[cluster][cpu] == 1) { /* A power_up request went ahead of us. */ spin_unlock(&boot_lock); return; } else if (hip04_cpu_table[cluster][cpu] > 1) { pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); BUG(); } last_man = hip04_cluster_is_down(cluster); spin_unlock(&boot_lock); if (last_man) { /* Since it's Cortex A15, disable L2 prefetching. */ asm volatile( "mcr p15, 1, %0, c15, c0, 3 \n\t" "isb \n\t" "dsb " : : "r" (0x400) ); v7_exit_coherency_flush(all); } else { v7_exit_coherency_flush(louis); } for (;;) wfi(); } static int hip04_cpu_kill(unsigned int l_cpu) { unsigned int mpidr, cpu, cluster; unsigned int data, tries, count; mpidr = cpu_logical_map(l_cpu); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); BUG_ON(cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER); count = TIMEOUT_MSEC / POLL_MSEC; spin_lock_irq(&boot_lock); for (tries = 0; tries < count; tries++) { if (hip04_cpu_table[cluster][cpu]) goto err; cpu_relax(); data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); if (data & CORE_WFI_STATUS(cpu)) break; spin_unlock_irq(&boot_lock); /* Wait for clean L2 when the whole cluster is down. */ msleep(POLL_MSEC); spin_lock_irq(&boot_lock); } if (tries >= count) goto err; data = CORE_RESET_BIT(cpu) | NEON_RESET_BIT(cpu) | \ CORE_DEBUG_RESET_BIT(cpu); writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster)); for (tries = 0; tries < count; tries++) { cpu_relax(); data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); if (data & CORE_RESET_STATUS(cpu)) break; } if (tries >= count) goto err; if (hip04_cluster_is_down(cluster)) hip04_set_snoop_filter(cluster, 0); spin_unlock_irq(&boot_lock); return 1; err: spin_unlock_irq(&boot_lock); return 0; } #endif static const struct smp_operations hip04_smp_ops __initconst = { .smp_boot_secondary = hip04_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = hip04_cpu_die, .cpu_kill = hip04_cpu_kill, #endif }; static bool __init hip04_cpu_table_init(void) { unsigned int mpidr, cpu, cluster; mpidr = read_cpuid_mpidr(); cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) { pr_err("%s: boot CPU is out of bound!\n", __func__); return false; } hip04_set_snoop_filter(cluster, 1); hip04_cpu_table[cluster][cpu] = 1; return true; } static int __init hip04_smp_init(void) { struct device_node *np, *np_sctl, *np_fab; struct resource fab_res; void __iomem *relocation; int ret = -ENODEV; np = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-bootwrapper"); if (!np) goto err; ret = of_property_read_u32_array(np, "boot-method", &hip04_boot_method[0], 4); if (ret) goto err; ret = -ENODEV; np_sctl = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl"); if (!np_sctl) goto err; np_fab = of_find_compatible_node(NULL, NULL, "hisilicon,hip04-fabric"); if (!np_fab) goto err; ret = memblock_reserve(hip04_boot_method[0], hip04_boot_method[1]); if (ret) goto err; relocation = ioremap(hip04_boot_method[2], hip04_boot_method[3]); if (!relocation) { 
pr_err("failed to map relocation space\n"); ret = -ENOMEM; goto err_reloc; } sysctrl = of_iomap(np_sctl, 0); if (!sysctrl) { pr_err("failed to get sysctrl base\n"); ret = -ENOMEM; goto err_sysctrl; } ret = of_address_to_resource(np_fab, 0, &fab_res); if (ret) { pr_err("failed to get fabric base phys\n"); goto err_fabric; } fabric_phys_addr = fab_res.start; sync_cache_w(&fabric_phys_addr); fabric = of_iomap(np_fab, 0); if (!fabric) { pr_err("failed to get fabric base\n"); ret = -ENOMEM; goto err_fabric; } if (!hip04_cpu_table_init()) { ret = -EINVAL; goto err_table; } /* * Fill the instruction address that is used after secondary core * out of reset. */ writel_relaxed(hip04_boot_method[0], relocation); writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */ writel_relaxed(__pa_symbol(secondary_startup), relocation + 8); writel_relaxed(0, relocation + 12); iounmap(relocation); smp_set_ops(&hip04_smp_ops); return ret; err_table: iounmap(fabric); err_fabric: iounmap(sysctrl); err_sysctrl: iounmap(relocation); err_reloc: memblock_phys_free(hip04_boot_method[0], hip04_boot_method[1]); err: return ret; } early_initcall(hip04_smp_init);
linux-master
arch/arm/mach-hisi/platmcpm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (HiSilicon's SoC based) flattened device tree enabled machine
 *
 * Copyright (c) 2012-2013 HiSilicon Ltd.
 * Copyright (c) 2012-2013 Linaro Ltd.
 *
 * Author: Haojian Zhuang <[email protected]>
 */

#include <linux/clocksource.h>
#include <linux/irqchip.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#define HI3620_SYSCTRL_PHYS_BASE	0xfc802000
#define HI3620_SYSCTRL_VIRT_BASE	0xfe802000

/*
 * This table is only an optimization: ioremap() can always share the same
 * mapping when it is defined as a static I/O mapping.
 *
 * The system also works without this table; the cost is some wasted virtual
 * address space, since ioremap() may be called multiple times for the same
 * I/O space.
 */
static struct map_desc hi3620_io_desc[] __initdata = {
	{
		/* sysctrl */
		.pfn		= __phys_to_pfn(HI3620_SYSCTRL_PHYS_BASE),
		.virtual	= HI3620_SYSCTRL_VIRT_BASE,
		.length		= 0x1000,
		.type		= MT_DEVICE,
	},
};

static void __init hi3620_map_io(void)
{
	debug_ll_io_init();
	iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
}

static const char *const hi3xxx_compat[] __initconst = {
	"hisilicon,hi3620-hi4511",
	NULL,
};

DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
	.map_io		= hi3620_map_io,
	.dt_compat	= hi3xxx_compat,
MACHINE_END
linux-master
arch/arm/mach-hisi/hisilicon.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Linaro Ltd. * Copyright (c) 2013 HiSilicon Limited. * Based on arch/arm/mach-vexpress/platsmp.c, Copyright (C) 2002 ARM Ltd. */ #include <linux/smp.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/delay.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include <asm/smp_scu.h> #include <asm/mach/map.h> #include "core.h" #define HIX5HD2_BOOT_ADDRESS 0xffff0000 static void __iomem *ctrl_base; void hi3xxx_set_cpu_jump(int cpu, void *jump_addr) { cpu = cpu_logical_map(cpu); if (!cpu || !ctrl_base) return; writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2)); } int hi3xxx_get_cpu_jump(int cpu) { cpu = cpu_logical_map(cpu); if (!cpu || !ctrl_base) return 0; return readl_relaxed(ctrl_base + ((cpu - 1) << 2)); } static void __init hisi_enable_scu_a9(void) { unsigned long base = 0; void __iomem *scu_base = NULL; if (scu_a9_has_base()) { base = scu_a9_get_base(); scu_base = ioremap(base, SZ_4K); if (!scu_base) { pr_err("ioremap(scu_base) failed\n"); return; } scu_enable(scu_base); iounmap(scu_base); } } static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus) { struct device_node *np = NULL; u32 offset = 0; hisi_enable_scu_a9(); if (!ctrl_base) { np = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl"); if (!np) { pr_err("failed to find hisilicon,sysctrl node\n"); return; } ctrl_base = of_iomap(np, 0); if (!ctrl_base) { of_node_put(np); pr_err("failed to map address\n"); return; } if (of_property_read_u32(np, "smp-offset", &offset) < 0) { of_node_put(np); pr_err("failed to find smp-offset property\n"); return; } ctrl_base += offset; of_node_put(np); } } static int hi3xxx_boot_secondary(unsigned int cpu, struct task_struct *idle) { hi3xxx_set_cpu(cpu, true); hi3xxx_set_cpu_jump(cpu, secondary_startup); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); return 0; } static const struct smp_operations hi3xxx_smp_ops __initconst = { .smp_prepare_cpus = hi3xxx_smp_prepare_cpus, .smp_boot_secondary = hi3xxx_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = hi3xxx_cpu_die, .cpu_kill = hi3xxx_cpu_kill, #endif }; static void __init hisi_common_smp_prepare_cpus(unsigned int max_cpus) { hisi_enable_scu_a9(); } static void hix5hd2_set_scu_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr) { void __iomem *virt; virt = ioremap(start_addr, PAGE_SIZE); writel_relaxed(0xe51ff004, virt); /* ldr pc, [pc, #-4] */ writel_relaxed(jump_addr, virt + 4); /* pc jump phy address */ iounmap(virt); } static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t jumpaddr; jumpaddr = __pa_symbol(secondary_startup); hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr); hix5hd2_set_cpu(cpu, true); arch_send_wakeup_ipi_mask(cpumask_of(cpu)); return 0; } static const struct smp_operations hix5hd2_smp_ops __initconst = { .smp_prepare_cpus = hisi_common_smp_prepare_cpus, .smp_boot_secondary = hix5hd2_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = hix5hd2_cpu_die, #endif }; #define SC_SCTL_REMAP_CLR 0x00000100 #define HIP01_BOOT_ADDRESS 0x80000000 #define REG_SC_CTRL 0x000 static void hip01_set_boot_addr(phys_addr_t start_addr, phys_addr_t jump_addr) { void __iomem *virt; virt = phys_to_virt(start_addr); writel_relaxed(0xe51ff004, virt); writel_relaxed(jump_addr, virt + 4); } static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle) { phys_addr_t jumpaddr; unsigned int remap_reg_value = 0; struct device_node *node; jumpaddr = 
__pa_symbol(secondary_startup); hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr); node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl"); if (WARN_ON(!node)) return -1; ctrl_base = of_iomap(node, 0); of_node_put(node); /* set the secondary core boot from DDR */ remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL); barrier(); remap_reg_value |= SC_SCTL_REMAP_CLR; barrier(); writel_relaxed(remap_reg_value, ctrl_base + REG_SC_CTRL); hip01_set_cpu(cpu, true); return 0; } static const struct smp_operations hip01_smp_ops __initconst = { .smp_prepare_cpus = hisi_common_smp_prepare_cpus, .smp_boot_secondary = hip01_boot_secondary, }; CPU_METHOD_OF_DECLARE(hi3xxx_smp, "hisilicon,hi3620-smp", &hi3xxx_smp_ops); CPU_METHOD_OF_DECLARE(hix5hd2_smp, "hisilicon,hix5hd2-smp", &hix5hd2_smp_ops); CPU_METHOD_OF_DECLARE(hip01_smp, "hisilicon,hip01-smp", &hip01_smp_ops);
linux-master
arch/arm/mach-hisi/platsmp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Linaro Ltd. * Copyright (c) 2013 HiSilicon Limited. */ #include <linux/cpu.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of_address.h> #include <asm/cacheflush.h> #include <asm/smp_plat.h> #include "core.h" /* Sysctrl registers in Hi3620 SoC */ #define SCISOEN 0xc0 #define SCISODIS 0xc4 #define SCPERPWREN 0xd0 #define SCPERPWRDIS 0xd4 #define SCCPUCOREEN 0xf4 #define SCCPUCOREDIS 0xf8 #define SCPERCTRL0 0x200 #define SCCPURSTEN 0x410 #define SCCPURSTDIS 0x414 /* * bit definition in SCISOEN/SCPERPWREN/... * * CPU2_ISO_CTRL (1 << 5) * CPU3_ISO_CTRL (1 << 6) * ... */ #define CPU2_ISO_CTRL (1 << 5) /* * bit definition in SCPERCTRL0 * * CPU0_WFI_MASK_CFG (1 << 28) * CPU1_WFI_MASK_CFG (1 << 29) * ... */ #define CPU0_WFI_MASK_CFG (1 << 28) /* * bit definition in SCCPURSTEN/... * * CPU0_SRST_REQ_EN (1 << 0) * CPU1_SRST_REQ_EN (1 << 1) * ... */ #define CPU0_HPM_SRST_REQ_EN (1 << 22) #define CPU0_DBG_SRST_REQ_EN (1 << 12) #define CPU0_NEON_SRST_REQ_EN (1 << 4) #define CPU0_SRST_REQ_EN (1 << 0) #define HIX5HD2_PERI_CRG20 0x50 #define CRG20_CPU1_RESET (1 << 17) #define HIX5HD2_PERI_PMC0 0x1000 #define PMC0_CPU1_WAIT_MTCOMS_ACK (1 << 8) #define PMC0_CPU1_PMC_ENABLE (1 << 7) #define PMC0_CPU1_POWERDOWN (1 << 3) #define HIP01_PERI9 0x50 #define PERI9_CPU1_RESET (1 << 1) enum { HI3620_CTRL, ERROR_CTRL, }; static void __iomem *ctrl_base; static int id; static void set_cpu_hi3620(int cpu, bool enable) { u32 val = 0; if (enable) { /* MTCMOS set */ if ((cpu == 2) || (cpu == 3)) writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), ctrl_base + SCPERPWREN); udelay(100); /* Enable core */ writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREEN); /* unreset */ val = CPU0_DBG_SRST_REQ_EN | CPU0_NEON_SRST_REQ_EN | CPU0_SRST_REQ_EN; writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); /* reset */ val |= CPU0_HPM_SRST_REQ_EN; writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); /* ISO disable */ if ((cpu == 2) || (cpu == 3)) writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), ctrl_base + SCISODIS); udelay(1); /* WFI Mask */ val = readl_relaxed(ctrl_base + SCPERCTRL0); val &= ~(CPU0_WFI_MASK_CFG << cpu); writel_relaxed(val, ctrl_base + SCPERCTRL0); /* Unreset */ val = CPU0_DBG_SRST_REQ_EN | CPU0_NEON_SRST_REQ_EN | CPU0_SRST_REQ_EN | CPU0_HPM_SRST_REQ_EN; writel_relaxed(val << cpu, ctrl_base + SCCPURSTDIS); } else { /* wfi mask */ val = readl_relaxed(ctrl_base + SCPERCTRL0); val |= (CPU0_WFI_MASK_CFG << cpu); writel_relaxed(val, ctrl_base + SCPERCTRL0); /* disable core*/ writel_relaxed(0x01 << cpu, ctrl_base + SCCPUCOREDIS); if ((cpu == 2) || (cpu == 3)) { /* iso enable */ writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), ctrl_base + SCISOEN); udelay(1); } /* reset */ val = CPU0_DBG_SRST_REQ_EN | CPU0_NEON_SRST_REQ_EN | CPU0_SRST_REQ_EN | CPU0_HPM_SRST_REQ_EN; writel_relaxed(val << cpu, ctrl_base + SCCPURSTEN); if ((cpu == 2) || (cpu == 3)) { /* MTCMOS unset */ writel_relaxed(CPU2_ISO_CTRL << (cpu - 2), ctrl_base + SCPERPWRDIS); udelay(100); } } } static int hi3xxx_hotplug_init(void) { struct device_node *node; node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl"); if (!node) { id = ERROR_CTRL; return -ENOENT; } ctrl_base = of_iomap(node, 0); of_node_put(node); if (!ctrl_base) { id = ERROR_CTRL; return -ENOMEM; } id = HI3620_CTRL; return 0; } void hi3xxx_set_cpu(int cpu, bool enable) { if (!ctrl_base) { if (hi3xxx_hotplug_init() < 0) return; } if (id == HI3620_CTRL) set_cpu_hi3620(cpu, enable); } static bool hix5hd2_hotplug_init(void) { struct device_node 
*np; np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl"); if (!np) return false; ctrl_base = of_iomap(np, 0); of_node_put(np); if (!ctrl_base) return false; return true; } void hix5hd2_set_cpu(int cpu, bool enable) { u32 val = 0; if (!ctrl_base) if (!hix5hd2_hotplug_init()) BUG(); if (enable) { /* power on cpu1 */ val = readl_relaxed(ctrl_base + HIX5HD2_PERI_PMC0); val &= ~(PMC0_CPU1_WAIT_MTCOMS_ACK | PMC0_CPU1_POWERDOWN); val |= PMC0_CPU1_PMC_ENABLE; writel_relaxed(val, ctrl_base + HIX5HD2_PERI_PMC0); /* unreset */ val = readl_relaxed(ctrl_base + HIX5HD2_PERI_CRG20); val &= ~CRG20_CPU1_RESET; writel_relaxed(val, ctrl_base + HIX5HD2_PERI_CRG20); } else { /* power down cpu1 */ val = readl_relaxed(ctrl_base + HIX5HD2_PERI_PMC0); val |= PMC0_CPU1_PMC_ENABLE | PMC0_CPU1_POWERDOWN; val &= ~PMC0_CPU1_WAIT_MTCOMS_ACK; writel_relaxed(val, ctrl_base + HIX5HD2_PERI_PMC0); /* reset */ val = readl_relaxed(ctrl_base + HIX5HD2_PERI_CRG20); val |= CRG20_CPU1_RESET; writel_relaxed(val, ctrl_base + HIX5HD2_PERI_CRG20); } } void hip01_set_cpu(int cpu, bool enable) { unsigned int temp; struct device_node *np; if (!ctrl_base) { np = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl"); BUG_ON(!np); ctrl_base = of_iomap(np, 0); of_node_put(np); BUG_ON(!ctrl_base); } if (enable) { /* reset on CPU1 */ temp = readl_relaxed(ctrl_base + HIP01_PERI9); temp |= PERI9_CPU1_RESET; writel_relaxed(temp, ctrl_base + HIP01_PERI9); udelay(50); /* unreset on CPU1 */ temp = readl_relaxed(ctrl_base + HIP01_PERI9); temp &= ~PERI9_CPU1_RESET; writel_relaxed(temp, ctrl_base + HIP01_PERI9); } } static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); /* * Turn off coherency and L1 D-cache */ asm volatile( " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x40\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, #0x04\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0) : "cc"); } #ifdef CONFIG_HOTPLUG_CPU void hi3xxx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); hi3xxx_set_cpu_jump(cpu, phys_to_virt(0)); cpu_do_idle(); /* We should have never returned from idle */ panic("cpu %d unexpectedly exit from shutdown\n", cpu); } int hi3xxx_cpu_kill(unsigned int cpu) { unsigned long timeout = jiffies + msecs_to_jiffies(50); while (hi3xxx_get_cpu_jump(cpu)) if (time_after(jiffies, timeout)) return 0; hi3xxx_set_cpu(cpu, false); return 1; } void hix5hd2_cpu_die(unsigned int cpu) { flush_cache_all(); hix5hd2_set_cpu(cpu, false); } #endif
linux-master
arch/arm/mach-hisi/hotplug.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device Tree board file for NXP LPC18xx/43xx
 *
 * Copyright (C) 2015 Joachim Eastwood <[email protected]>
 */

#include <asm/mach/arch.h>

static const char *const lpc18xx_43xx_compat[] __initconst = {
	"nxp,lpc1850",
	"nxp,lpc4350",
	"nxp,lpc4370",
	NULL
};

DT_MACHINE_START(LPC18XXDT, "NXP LPC18xx/43xx (Device Tree)")
	.dt_compat = lpc18xx_43xx_compat,
MACHINE_END
linux-master
arch/arm/mach-lpc18xx/board-dt.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/ptrace.h> #include <linux/uaccess.h> #include <abi/reg_ops.h> #define MTCR_MASK 0xFC00FFE0 #define MFCR_MASK 0xFC00FFE0 #define MTCR_DIST 0xC0006420 #define MFCR_DIST 0xC0006020 /* * fpu_libc_helper() is to help libc to excute: * - mfcr %a, cr<1, 2> * - mfcr %a, cr<2, 2> * - mtcr %a, cr<1, 2> * - mtcr %a, cr<2, 2> */ int fpu_libc_helper(struct pt_regs *regs) { int fault; unsigned long instrptr, regx = 0; unsigned long index = 0, tmp = 0; unsigned long tinstr = 0; u16 instr_hi, instr_low; instrptr = instruction_pointer(regs); if (instrptr & 1) return 0; fault = __get_user(instr_low, (u16 *)instrptr); if (fault) return 0; fault = __get_user(instr_hi, (u16 *)(instrptr + 2)); if (fault) return 0; tinstr = instr_hi | ((unsigned long)instr_low << 16); if (((tinstr >> 21) & 0x1F) != 2) return 0; if ((tinstr & MTCR_MASK) == MTCR_DIST) { index = (tinstr >> 16) & 0x1F; if (index > 13) return 0; tmp = tinstr & 0x1F; if (tmp > 2) return 0; regx = *(&regs->a0 + index); if (tmp == 1) mtcr("cr<1, 2>", regx); else if (tmp == 2) mtcr("cr<2, 2>", regx); else return 0; regs->pc += 4; return 1; } if ((tinstr & MFCR_MASK) == MFCR_DIST) { index = tinstr & 0x1F; if (index > 13) return 0; tmp = ((tinstr >> 16) & 0x1F); if (tmp > 2) return 0; if (tmp == 1) regx = mfcr("cr<1, 2>"); else if (tmp == 2) regx = mfcr("cr<2, 2>"); else return 0; *(&regs->a0 + index) = regx; regs->pc += 4; return 1; } return 0; } void fpu_fpe(struct pt_regs *regs) { int sig, code; unsigned int fesr; fesr = mfcr("cr<2, 2>"); sig = SIGFPE; code = FPE_FLTUNK; if (fesr & FPE_ILLE) { sig = SIGILL; code = ILL_ILLOPC; } else if (fesr & FPE_IDC) { sig = SIGILL; code = ILL_ILLOPN; } else if (fesr & FPE_FEC) { sig = SIGFPE; if (fesr & FPE_IOC) code = FPE_FLTINV; else if (fesr & FPE_DZC) code = FPE_FLTDIV; else if (fesr & FPE_UFC) code = FPE_FLTUND; else if (fesr & FPE_OFC) code = FPE_FLTOVF; else if (fesr & FPE_IXC) code = FPE_FLTRES; } force_sig_fault(sig, code, (void __user *)regs->pc); } #define FMFVR_FPU_REGS(vrx, vry) \ "fmfvrl %0, "#vrx"\n" \ "fmfvrh %1, "#vrx"\n" \ "fmfvrl %2, "#vry"\n" \ "fmfvrh %3, "#vry"\n" #define FMTVR_FPU_REGS(vrx, vry) \ "fmtvrl "#vrx", %0\n" \ "fmtvrh "#vrx", %1\n" \ "fmtvrl "#vry", %2\n" \ "fmtvrh "#vry", %3\n" #define STW_FPU_REGS(a, b, c, d) \ "stw %0, (%4, "#a")\n" \ "stw %1, (%4, "#b")\n" \ "stw %2, (%4, "#c")\n" \ "stw %3, (%4, "#d")\n" #define LDW_FPU_REGS(a, b, c, d) \ "ldw %0, (%4, "#a")\n" \ "ldw %1, (%4, "#b")\n" \ "ldw %2, (%4, "#c")\n" \ "ldw %3, (%4, "#d")\n" void save_to_user_fp(struct user_fp *user_fp) { unsigned long flg; unsigned long tmp1, tmp2; unsigned long *fpregs; local_irq_save(flg); tmp1 = mfcr("cr<1, 2>"); tmp2 = mfcr("cr<2, 2>"); user_fp->fcr = tmp1; user_fp->fesr = tmp2; fpregs = &user_fp->vr[0]; #ifdef CONFIG_CPU_HAS_FPUV2 #ifdef CONFIG_CPU_HAS_VDSP asm volatile( "vstmu.32 vr0-vr3, (%0)\n" "vstmu.32 vr4-vr7, (%0)\n" "vstmu.32 vr8-vr11, (%0)\n" "vstmu.32 vr12-vr15, (%0)\n" "fstmu.64 vr16-vr31, (%0)\n" : "+a"(fpregs) ::"memory"); #else asm volatile( "fstmu.64 vr0-vr31, (%0)\n" : "+a"(fpregs) ::"memory"); #endif #else { unsigned long tmp3, tmp4; asm volatile( FMFVR_FPU_REGS(vr0, vr1) STW_FPU_REGS(0, 4, 16, 20) FMFVR_FPU_REGS(vr2, vr3) STW_FPU_REGS(32, 36, 48, 52) FMFVR_FPU_REGS(vr4, vr5) STW_FPU_REGS(64, 68, 80, 84) FMFVR_FPU_REGS(vr6, vr7) STW_FPU_REGS(96, 100, 112, 116) "addi %4, 128\n" FMFVR_FPU_REGS(vr8, vr9) STW_FPU_REGS(0, 4, 16, 20) FMFVR_FPU_REGS(vr10, vr11) STW_FPU_REGS(32, 
36, 48, 52) FMFVR_FPU_REGS(vr12, vr13) STW_FPU_REGS(64, 68, 80, 84) FMFVR_FPU_REGS(vr14, vr15) STW_FPU_REGS(96, 100, 112, 116) : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), "=a"(tmp4), "+a"(fpregs) ::"memory"); } #endif local_irq_restore(flg); } void restore_from_user_fp(struct user_fp *user_fp) { unsigned long flg; unsigned long tmp1, tmp2; unsigned long *fpregs; local_irq_save(flg); tmp1 = user_fp->fcr; tmp2 = user_fp->fesr; mtcr("cr<1, 2>", tmp1); mtcr("cr<2, 2>", tmp2); fpregs = &user_fp->vr[0]; #ifdef CONFIG_CPU_HAS_FPUV2 #ifdef CONFIG_CPU_HAS_VDSP asm volatile( "vldmu.32 vr0-vr3, (%0)\n" "vldmu.32 vr4-vr7, (%0)\n" "vldmu.32 vr8-vr11, (%0)\n" "vldmu.32 vr12-vr15, (%0)\n" "fldmu.64 vr16-vr31, (%0)\n" : "+a"(fpregs) ::"memory"); #else asm volatile( "fldmu.64 vr0-vr31, (%0)\n" : "+a"(fpregs) ::"memory"); #endif #else { unsigned long tmp3, tmp4; asm volatile( LDW_FPU_REGS(0, 4, 16, 20) FMTVR_FPU_REGS(vr0, vr1) LDW_FPU_REGS(32, 36, 48, 52) FMTVR_FPU_REGS(vr2, vr3) LDW_FPU_REGS(64, 68, 80, 84) FMTVR_FPU_REGS(vr4, vr5) LDW_FPU_REGS(96, 100, 112, 116) FMTVR_FPU_REGS(vr6, vr7) "addi %4, 128\n" LDW_FPU_REGS(0, 4, 16, 20) FMTVR_FPU_REGS(vr8, vr9) LDW_FPU_REGS(32, 36, 48, 52) FMTVR_FPU_REGS(vr10, vr11) LDW_FPU_REGS(64, 68, 80, 84) FMTVR_FPU_REGS(vr12, vr13) LDW_FPU_REGS(96, 100, 112, 116) FMTVR_FPU_REGS(vr14, vr15) : "=a"(tmp1), "=a"(tmp2), "=a"(tmp3), "=a"(tmp4), "+a"(fpregs) ::"memory"); } #endif local_irq_restore(flg); }
linux-master
arch/csky/abiv2/fpu.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *pte, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*pte);
	struct folio *folio;
	unsigned int i;

	flush_tlb_page(vma, address);

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

	if (test_and_set_bit(PG_dcache_clean, &folio->flags))
		return;

	icache_inv_range(address, address + nr*PAGE_SIZE);

	for (i = 0; i < folio_nr_pages(folio); i++) {
		unsigned long addr = (unsigned long) kmap_local_folio(folio,
								i * PAGE_SIZE);

		dcache_wb_range(addr, addr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			icache_inv_range(addr, addr + PAGE_SIZE);
		kunmap_local((void *) addr);
	}
}

void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}
linux-master
arch/csky/abiv2/cacheflush.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_STRING_OPS
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
#endif
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
linux-master
arch/csky/abiv2/strksyms.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>

void __aligned(8) __delay(unsigned long loops)
{
	asm volatile (
		"mov r0, r0\n"
		"1:declt %0\n"
		"bf 1b"
		: "=r"(loops)
		: "0"(loops));
}
EXPORT_SYMBOL(__delay);

void __const_udelay(unsigned long xloops)
{
	unsigned long long loops;

	loops = (unsigned long long)xloops * loops_per_jiffy * HZ;

	__delay(loops >> 32);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
linux-master
arch/csky/lib/delay.c
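The 0x10C7 and 0x5 multipliers in __udelay()/__ndelay() above are 32.32 fixed-point scale factors (2^32 / 10^6 and 2^32 / 10^9, rounded up), so the final ">> 32" in __const_udelay() turns microseconds or nanoseconds into delay-loop counts. A stand-alone user-space check of those constants, with made-up loops_per_jiffy/HZ values purely for illustration:

/* Stand-alone check of the fixed-point constants used by __udelay/__ndelay. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* ceil(2^32 / 10^6) and ceil(2^32 / 10^9) */
	uint64_t us_scale = ((1ULL << 32) + 1000000 - 1) / 1000000;
	uint64_t ns_scale = ((1ULL << 32) + 1000000000 - 1) / 1000000000;

	printf("us scale = %#llx (kernel uses 0x10C7)\n",
	       (unsigned long long)us_scale);
	printf("ns scale = %#llx (kernel uses 0x5)\n",
	       (unsigned long long)ns_scale);

	/* Example: 10 us with illustrative loops_per_jiffy = 49920, HZ = 100. */
	uint64_t loops_per_jiffy = 49920, hz = 100;
	uint64_t loops = (10 * us_scale * loops_per_jiffy * hz) >> 32;

	printf("10 us -> %llu delay loops (illustrative lpj/HZ values)\n",
	       (unsigned long long)loops);
	return 0;
}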
// SPDX-License-Identifier: GPL-2.0-only /* * String functions optimized for hardware which doesn't * handle unaligned memory accesses efficiently. * * Copyright (C) 2021 Matteo Croce */ #include <linux/types.h> #include <linux/module.h> /* Minimum size for a word copy to be convenient */ #define BYTES_LONG sizeof(long) #define WORD_MASK (BYTES_LONG - 1) #define MIN_THRESHOLD (BYTES_LONG * 2) /* convenience union to avoid cast between different pointer types */ union types { u8 *as_u8; unsigned long *as_ulong; uintptr_t as_uptr; }; union const_types { const u8 *as_u8; unsigned long *as_ulong; uintptr_t as_uptr; }; void *memcpy(void *dest, const void *src, size_t count) { union const_types s = { .as_u8 = src }; union types d = { .as_u8 = dest }; int distance = 0; if (count < MIN_THRESHOLD) goto copy_remainder; /* Copy a byte at time until destination is aligned. */ for (; d.as_uptr & WORD_MASK; count--) *d.as_u8++ = *s.as_u8++; distance = s.as_uptr & WORD_MASK; if (distance) { unsigned long last, next; /* * s is distance bytes ahead of d, and d just reached * the alignment boundary. Move s backward to word align it * and shift data to compensate for distance, in order to do * word-by-word copy. */ s.as_u8 -= distance; next = s.as_ulong[0]; for (; count >= BYTES_LONG; count -= BYTES_LONG) { last = next; next = s.as_ulong[1]; d.as_ulong[0] = last >> (distance * 8) | next << ((BYTES_LONG - distance) * 8); d.as_ulong++; s.as_ulong++; } /* Restore s with the original offset. */ s.as_u8 += distance; } else { /* * If the source and dest lower bits are the same, do a simple * 32/64 bit wide copy. */ for (; count >= BYTES_LONG; count -= BYTES_LONG) *d.as_ulong++ = *s.as_ulong++; } copy_remainder: while (count--) *d.as_u8++ = *s.as_u8++; return dest; } EXPORT_SYMBOL(memcpy); /* * Simply check if the buffer overlaps an call memcpy() in case, * otherwise do a simple one byte at time backward copy. */ void *memmove(void *dest, const void *src, size_t count) { if (dest < src || src + count <= dest) return memcpy(dest, src, count); if (dest > src) { const char *s = src + count; char *tmp = dest + count; while (count--) *--tmp = *--s; } return dest; } EXPORT_SYMBOL(memmove); void *memset(void *s, int c, size_t count) { union types dest = { .as_u8 = s }; if (count >= MIN_THRESHOLD) { unsigned long cu = (unsigned long)c; /* Compose an ulong with 'c' repeated 4/8 times */ cu |= cu << 8; cu |= cu << 16; /* Suppress warning on 32 bit machines */ cu |= (cu << 16) << 16; for (; count && dest.as_uptr & WORD_MASK; count--) *dest.as_u8++ = c; /* Copy using the largest size allowed */ for (; count >= BYTES_LONG; count -= BYTES_LONG) *dest.as_ulong++ = cu; } /* copy the remainder */ while (count--) *dest.as_u8++ = c; return s; } EXPORT_SYMBOL(memset);
linux-master
arch/csky/lib/string.c
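memcpy() above only switches to word-at-a-time copies after aligning the destination, and falls back to a shift-and-merge loop when source and destination end up at different offsets within a word. A user-space harness of the kind such a routine needs is sketched below; it is independent of the kernel code and exercises the platform memcpy() as a stand-in, comparing every source/destination misalignment against a byte-by-byte reference copy.

/* Sketch: sweep source/destination misalignments and lengths, comparing a
 * word-oriented copy (here the platform memcpy(), standing in for the kernel
 * routine) against a byte-by-byte reference. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF 256

static void byte_copy(unsigned char *d, const unsigned char *s, size_t n)
{
	while (n--)
		*d++ = *s++;
}

int main(void)
{
	unsigned char src[BUF], got[BUF], want[BUF];
	size_t soff, doff, len, i;

	for (i = 0; i < BUF; i++)
		src[i] = (unsigned char)rand();

	for (soff = 0; soff < 8; soff++) {
		for (doff = 0; doff < 8; doff++) {
			for (len = 0; len < 64; len++) {
				memset(got, 0xaa, BUF);
				memset(want, 0xaa, BUF);
				memcpy(got + doff, src + soff, len);
				byte_copy(want + doff, src + soff, len);
				if (memcmp(got, want, BUF)) {
					printf("mismatch soff=%zu doff=%zu len=%zu\n",
					       soff, doff, len);
					return 1;
				}
			}
		}
	}
	printf("all misalignment combinations match\n");
	return 0;
}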
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/uaccess.h> #include <linux/types.h> unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n) { int tmp, nsave; __asm__ __volatile__( "0: cmpnei %1, 0 \n" " bf 7f \n" " mov %3, %1 \n" " or %3, %2 \n" " andi %3, 3 \n" " cmpnei %3, 0 \n" " bf 1f \n" " br 5f \n" "1: cmplti %0, 16 \n" " bt 3f \n" "2: ldw %3, (%2, 0) \n" "10: ldw %4, (%2, 4) \n" " stw %3, (%1, 0) \n" " stw %4, (%1, 4) \n" "11: ldw %3, (%2, 8) \n" "12: ldw %4, (%2, 12) \n" " stw %3, (%1, 8) \n" " stw %4, (%1, 12) \n" " addi %2, 16 \n" " addi %1, 16 \n" " subi %0, 16 \n" " br 1b \n" "3: cmplti %0, 4 \n" " bt 5f \n" "4: ldw %3, (%2, 0) \n" " stw %3, (%1, 0) \n" " addi %2, 4 \n" " addi %1, 4 \n" " subi %0, 4 \n" " br 3b \n" "5: cmpnei %0, 0 \n" " bf 7f \n" "6: ldb %3, (%2, 0) \n" " stb %3, (%1, 0) \n" " addi %2, 1 \n" " addi %1, 1 \n" " subi %0, 1 \n" " br 5b \n" "8: stw %3, (%1, 0) \n" " subi %0, 4 \n" " bf 7f \n" "9: subi %0, 8 \n" " bf 7f \n" "13: stw %3, (%1, 8) \n" " subi %0, 12 \n" " bf 7f \n" ".section __ex_table, \"a\" \n" ".align 2 \n" ".long 2b, 7f \n" ".long 4b, 7f \n" ".long 6b, 7f \n" ".long 10b, 8b \n" ".long 11b, 9b \n" ".long 12b,13b \n" ".previous \n" "7: \n" : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), "=r"(tmp) : "0"(n), "1"(to), "2"(from) : "memory"); return n; } EXPORT_SYMBOL(raw_copy_from_user); unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n) { int w0, w1, w2, w3; __asm__ __volatile__( "0: cmpnei %1, 0 \n" " bf 8f \n" " mov %3, %1 \n" " or %3, %2 \n" " andi %3, 3 \n" " cmpnei %3, 0 \n" " bf 1f \n" " br 5f \n" "1: cmplti %0, 16 \n" /* 4W */ " bt 3f \n" " ldw %3, (%2, 0) \n" " ldw %4, (%2, 4) \n" " ldw %5, (%2, 8) \n" " ldw %6, (%2, 12) \n" "2: stw %3, (%1, 0) \n" "9: stw %4, (%1, 4) \n" "10: stw %5, (%1, 8) \n" "11: stw %6, (%1, 12) \n" " addi %2, 16 \n" " addi %1, 16 \n" " subi %0, 16 \n" " br 1b \n" "3: cmplti %0, 4 \n" /* 1W */ " bt 5f \n" " ldw %3, (%2, 0) \n" "4: stw %3, (%1, 0) \n" " addi %2, 4 \n" " addi %1, 4 \n" " subi %0, 4 \n" " br 3b \n" "5: cmpnei %0, 0 \n" /* 1B */ " bf 13f \n" " ldb %3, (%2, 0) \n" "6: stb %3, (%1, 0) \n" " addi %2, 1 \n" " addi %1, 1 \n" " subi %0, 1 \n" " br 5b \n" "7: subi %0, 4 \n" "8: subi %0, 4 \n" "12: subi %0, 4 \n" " br 13f \n" ".section __ex_table, \"a\" \n" ".align 2 \n" ".long 2b, 13f \n" ".long 4b, 13f \n" ".long 6b, 13f \n" ".long 9b, 12b \n" ".long 10b, 8b \n" ".long 11b, 7b \n" ".previous \n" "13: \n" : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), "=r"(w1), "=r"(w2), "=r"(w3) : "0"(n), "1"(to), "2"(from) : "memory"); return n; } EXPORT_SYMBOL(raw_copy_to_user); /* * __clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. Caller must check * the specified block with access_ok() before calling this function. * * Returns number of bytes that could not be cleared. * On success, this will be zero. 
*/ unsigned long __clear_user(void __user *to, unsigned long n) { int data, value, tmp; __asm__ __volatile__( "0: cmpnei %1, 0 \n" " bf 7f \n" " mov %3, %1 \n" " andi %3, 3 \n" " cmpnei %3, 0 \n" " bf 1f \n" " br 5f \n" "1: cmplti %0, 32 \n" /* 4W */ " bt 3f \n" "8: stw %2, (%1, 0) \n" "10: stw %2, (%1, 4) \n" "11: stw %2, (%1, 8) \n" "12: stw %2, (%1, 12) \n" "13: stw %2, (%1, 16) \n" "14: stw %2, (%1, 20) \n" "15: stw %2, (%1, 24) \n" "16: stw %2, (%1, 28) \n" " addi %1, 32 \n" " subi %0, 32 \n" " br 1b \n" "3: cmplti %0, 4 \n" /* 1W */ " bt 5f \n" "4: stw %2, (%1, 0) \n" " addi %1, 4 \n" " subi %0, 4 \n" " br 3b \n" "5: cmpnei %0, 0 \n" /* 1B */ "9: bf 7f \n" "6: stb %2, (%1, 0) \n" " addi %1, 1 \n" " subi %0, 1 \n" " br 5b \n" ".section __ex_table,\"a\" \n" ".align 2 \n" ".long 8b, 9b \n" ".long 10b, 9b \n" ".long 11b, 9b \n" ".long 12b, 9b \n" ".long 13b, 9b \n" ".long 14b, 9b \n" ".long 15b, 9b \n" ".long 16b, 9b \n" ".long 4b, 9b \n" ".long 6b, 9b \n" ".previous \n" "7: \n" : "=r"(n), "=r" (data), "=r"(value), "=r"(tmp) : "0"(n), "1"(to), "2"(0) : "memory"); return n; } EXPORT_SYMBOL(__clear_user);
linux-master
arch/csky/lib/usercopy.c
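raw_copy_from_user()/raw_copy_to_user() above are not called directly by drivers; they sit underneath the generic copy_from_user()/copy_to_user() helpers, which add the access_ok() and size checks, with the __ex_table entries handling faulting user addresses. A typical call site looks like this sketch of a character-device write handler; the buffer name and size are made up for illustration.

/* Hypothetical driver write() path showing how the usercopy primitives
 * above are normally reached, via copy_from_user(). */
#include <linux/fs.h>
#include <linux/uaccess.h>

#define DEMO_BUF_SIZE 64			/* illustrative */

static char demo_buf[DEMO_BUF_SIZE];

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	if (count > sizeof(demo_buf))
		count = sizeof(demo_buf);

	/* copy_from_user() returns the number of bytes NOT copied. */
	if (copy_from_user(demo_buf, ubuf, count))
		return -EFAULT;

	return count;
}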
// SPDX-License-Identifier: GPL-2.0
#include <linux/error-injection.h>
#include <linux/kprobes.h>

void override_function_with_return(struct pt_regs *regs)
{
	instruction_pointer_set(regs, regs->lr);
}
NOKPROBE_SYMBOL(override_function_with_return);
linux-master
arch/csky/lib/error-inject.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/cache.h> #include <linux/dma-map-ops.h> #include <linux/genalloc.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/types.h> #include <asm/cache.h> static inline void cache_op(phys_addr_t paddr, size_t size, void (*fn)(unsigned long start, unsigned long end)) { struct page *page = phys_to_page(paddr); void *start = __va(page_to_phys(page)); unsigned long offset = offset_in_page(paddr); size_t left = size; do { size_t len = left; if (offset + len > PAGE_SIZE) len = PAGE_SIZE - offset; if (PageHighMem(page)) { start = kmap_atomic(page); fn((unsigned long)start + offset, (unsigned long)start + offset + len); kunmap_atomic(start); } else { fn((unsigned long)start + offset, (unsigned long)start + offset + len); } offset = 0; page++; start += PAGE_SIZE; left -= len; } while (left); } static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end) { memset((void *)start, 0, end - start); dma_wbinv_range(start, end); } void arch_dma_prep_coherent(struct page *page, size_t size) { cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range); } void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_TO_DEVICE: cache_op(paddr, size, dma_wb_range); break; case DMA_FROM_DEVICE: case DMA_BIDIRECTIONAL: cache_op(paddr, size, dma_wbinv_range); break; default: BUG(); } } void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_TO_DEVICE: return; case DMA_FROM_DEVICE: case DMA_BIDIRECTIONAL: cache_op(paddr, size, dma_inv_range); break; default: BUG(); } }
linux-master
arch/csky/mm/dma-mapping.c
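arch_sync_dma_for_device()/arch_sync_dma_for_cpu() above are invoked by the generic streaming-DMA code on non-coherent platforms; drivers never call them directly. The usual pattern that ends up in these hooks is dma_map_single()/dma_unmap_single() around a device transfer, roughly as in this sketch (the function name and arguments are placeholders):

/* Hypothetical streaming-DMA sequence that exercises the arch cache
 * maintenance hooks above on a non-coherent platform. */
#include <linux/dma-mapping.h>

static int demo_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Writes back the CPU cache for 'buf' (DMA_TO_DEVICE -> dma_wb_range). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* For DMA_TO_DEVICE the unmap side needs no extra invalidation. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}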
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/bug.h> #include <linux/module.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/swap.h> #include <linux/proc_fs.h> #include <linux/pfn.h> #include <linux/initrd.h> #include <asm/setup.h> #include <asm/cachectl.h> #include <asm/dma.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> #include <asm/sections.h> #include <asm/tlb.h> #include <asm/cacheflush.h> #define PTRS_KERN_TABLE \ ((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE) pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; pte_t kernel_pte_tables[PTRS_KERN_TABLE] __page_aligned_bss; EXPORT_SYMBOL(invalid_pte_table); unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); #ifdef CONFIG_BLK_DEV_INITRD static void __init setup_initrd(void) { unsigned long size; if (initrd_start >= initrd_end) { pr_err("initrd not found or empty"); goto disable; } if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { pr_err("initrd extends beyond end of memory"); goto disable; } size = initrd_end - initrd_start; if (memblock_is_region_reserved(__pa(initrd_start), size)) { pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", __pa(initrd_start), size); goto disable; } memblock_reserve(__pa(initrd_start), size); pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", (void *)(initrd_start), size); initrd_below_start_ok = 1; return; disable: initrd_start = initrd_end = 0; pr_err(" - disabling initrd\n"); } #endif void __init mem_init(void) { #ifdef CONFIG_HIGHMEM unsigned long tmp; set_max_mapnr(highend_pfn - ARCH_PFN_OFFSET); #else set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET); #endif high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); #ifdef CONFIG_BLK_DEV_INITRD setup_initrd(); #endif memblock_free_all(); #ifdef CONFIG_HIGHMEM for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { struct page *page = pfn_to_page(tmp); /* FIXME not sure about */ if (!memblock_is_reserved(tmp << PAGE_SHIFT)) free_highmem_page(page); } #endif } void free_initmem(void) { free_initmem_default(-1); } void pgd_init(unsigned long *p) { int i; for (i = 0; i < PTRS_PER_PGD; i++) p[i] = __pa(invalid_pte_table); flush_tlb_all(); local_icache_inv_all(NULL); } void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn) { int i; for (i = 0; i < USER_PTRS_PER_PGD; i++) swapper_pg_dir[i].pgd = __pa(invalid_pte_table); for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) swapper_pg_dir[i].pgd = __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD))); for (i = 0; i < PTRS_KERN_TABLE; i++) set_pte(&kernel_pte_tables[i], __pte(_PAGE_GLOBAL)); for (i = min_pfn; i < max_pfn; i++) set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL)); flush_tlb_all(); local_icache_inv_all(NULL); /* Setup page mask to 4k */ write_mmu_pagemask(0); setup_pgd(swapper_pg_dir, 0); } void __init fixrange_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int i, j, k; unsigned long vaddr; vaddr = start; i = pgd_index(vaddr); j = pud_index(vaddr); k = pmd_index(vaddr); pgd = pgd_base + 
i; for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { pud = (pud_t *)pgd; for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { pmd = (pmd_t *)pud; for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { if (pmd_none(*pmd)) { pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!pte) panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); set_pmd(pmd, __pmd(__pa(pte))); BUG_ON(pte != pte_offset_kernel(pmd, 0)); } vaddr += PMD_SIZE; } k = 0; } j = 0; } } void __init fixaddr_init(void) { unsigned long vaddr; vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); } static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READ, [VM_WRITE] = PAGE_READ, [VM_WRITE | VM_READ] = PAGE_READ, [VM_EXEC] = PAGE_READ, [VM_EXEC | VM_READ] = PAGE_READ, [VM_EXEC | VM_WRITE] = PAGE_READ, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_READ, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READ, [VM_SHARED | VM_WRITE] = PAGE_WRITE, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_WRITE, [VM_SHARED | VM_EXEC] = PAGE_READ, [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ, [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_WRITE, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_WRITE }; DECLARE_VM_GET_PAGE_PROT
linux-master
arch/csky/mm/init.c
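A minimal standalone sketch of the PTRS_KERN_TABLE sizing used in init.c above, with assumed two-level 4 KiB paging constants rather than the real csky header values:

/*
 * Illustration only: mirrors the PTRS_KERN_TABLE arithmetic from
 * arch/csky/mm/init.c with assumed constants (PTRS_PER_PGD,
 * USER_PTRS_PER_PGD and PTRS_PER_PTE are NOT read from the csky headers).
 */
#include <stdio.h>

#define PTRS_PER_PGD		1024u	/* assumed */
#define USER_PTRS_PER_PGD	 512u	/* assumed: lower half for user space */
#define PTRS_PER_PTE		1024u	/* assumed */

#define PTRS_KERN_TABLE \
	((PTRS_PER_PGD - USER_PTRS_PER_PGD) * PTRS_PER_PTE)

int main(void)
{
	/* One statically allocated PTE slot per kernel-half PGD entry. */
	printf("kernel pgd entries : %u\n", PTRS_PER_PGD - USER_PTRS_PER_PGD);
	printf("kernel pte slots   : %u\n", PTRS_KERN_TABLE);
	/* With 4 KiB pages each PTE maps one page, so the kernel half of
	 * the address space covered by kernel_pte_tables is: */
	printf("linear map covered : %u MiB\n", PTRS_KERN_TABLE * 4u / 1024u);
	return 0;
}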
// SPDX-License-Identifier: GPL-2.0 #include <linux/highmem.h> #include <linux/genalloc.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> #if (CONFIG_ITCM_RAM_BASE == 0xffffffff) #error "You should define ITCM_RAM_BASE" #endif #ifdef CONFIG_HAVE_DTCM #if (CONFIG_DTCM_RAM_BASE == 0xffffffff) #error "You should define DTCM_RAM_BASE" #endif #if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE) #error "You should define correct DTCM_RAM_BASE" #endif #endif extern char __tcm_start, __tcm_end, __dtcm_start; static struct gen_pool *tcm_pool; static void __init tcm_mapping_init(void) { pte_t *tcm_pte; unsigned long vaddr, paddr; int i; paddr = CONFIG_ITCM_RAM_BASE; if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE))) goto panic; #ifndef CONFIG_HAVE_DTCM for (i = 0; i < TCM_NR_PAGES; i++) { #else for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) { #endif vaddr = __fix_to_virt(FIX_TCM - i); tcm_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); flush_tlb_one(vaddr); paddr = paddr + PAGE_SIZE; } #ifdef CONFIG_HAVE_DTCM if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE))) goto panic; paddr = CONFIG_DTCM_RAM_BASE; for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) { vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i); tcm_pte = pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr); set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); flush_tlb_one(vaddr); paddr = paddr + PAGE_SIZE; } #endif #ifndef CONFIG_HAVE_DTCM memcpy((void *)__fix_to_virt(FIX_TCM), &__tcm_start, &__tcm_end - &__tcm_start); pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n", __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); pr_info("%s: __tcm_start va:0x%08lx size:%d\n", __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start); #else memcpy((void *)__fix_to_virt(FIX_TCM), &__tcm_start, &__dtcm_start - &__tcm_start); pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n", __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); pr_info("%s: __itcm_start va:0x%08lx size:%d\n", __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start); memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), &__dtcm_start, &__tcm_end - &__dtcm_start); pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n", __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), CONFIG_DTCM_RAM_BASE); pr_info("%s: __dtcm_start va:0x%08lx size:%d\n", __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start); #endif return; panic: panic("TCM init error"); } void *tcm_alloc(size_t len) { unsigned long vaddr; if (!tcm_pool) return NULL; vaddr = gen_pool_alloc(tcm_pool, len); if (!vaddr) return NULL; return (void *) vaddr; } EXPORT_SYMBOL(tcm_alloc); void tcm_free(void *addr, size_t len) { gen_pool_free(tcm_pool, (unsigned long) addr, len); } EXPORT_SYMBOL(tcm_free); static int __init tcm_setup_pool(void) { #ifndef CONFIG_HAVE_DTCM u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE) - (u32) (&__tcm_end - &__tcm_start); u32 tcm_pool_start = __fix_to_virt(FIX_TCM) + (u32) (&__tcm_end - &__tcm_start); #else u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE) - (u32) (&__tcm_end - &__dtcm_start); u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES) + (u32) (&__tcm_end - &__dtcm_start); #endif int ret; tcm_pool = gen_pool_create(2, -1); ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1); if (ret) { pr_err("%s: gen_pool add failed!\n", __func__); return ret; } pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n", __func__, pool_size, tcm_pool_start); 
return 0; } static int __init tcm_init(void) { tcm_mapping_init(); tcm_setup_pool(); return 0; } arch_initcall(tcm_init);
linux-master
arch/csky/mm/tcm.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/io.h>

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn)) {
		return pgprot_noncached(vma_prot);
	} else if (file->f_flags & O_SYNC) {
		return pgprot_writecombine(vma_prot);
	}

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
linux-master
arch/csky/mm/ioremap.c
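A hedged userspace sketch of the path that exercises phys_mem_access_prot() above: mmap()ing /dev/mem opened with O_SYNC. Running it needs root and CONFIG_DEVMEM, and PHYS_ADDR is a placeholder, not a real csky device address:

/*
 * Hedged usage sketch.  Opening /dev/mem with O_SYNC is what makes the
 * "file->f_flags & O_SYNC" branch above choose a write-combining mapping
 * for RAM-backed pfns; non-RAM pfns get a noncached mapping either way.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_ADDR	0x10000000UL	/* assumed example address */
#define MAP_LEN		4096UL

int main(void)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	void *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, PHYS_ADDR);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("mapped phys 0x%lx at %p\n", PHYS_ADDR, p);
	munmap(p, MAP_LEN);
	close(fd);
	return 0;
}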
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/syscalls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

SYSCALL_DEFINE3(cacheflush,
		void __user *, addr,
		unsigned long, bytes,
		int, cache)
{
	switch (cache) {
	case BCACHE:
	case DCACHE:
		dcache_wb_range((unsigned long)addr,
				(unsigned long)addr + bytes);
		if (cache != BCACHE)
			break;
		fallthrough;
	case ICACHE:
		flush_icache_mm_range(current->mm,
				(unsigned long)addr,
				(unsigned long)addr + bytes);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
linux-master
arch/csky/mm/syscache.c
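A hedged userspace sketch of calling the cacheflush syscall above, for example after emitting JIT code. It assumes a csky toolchain where <asm/cachectl.h> and <asm/unistd.h> provide BCACHE and __NR_cacheflush; on other architectures these names differ or do not exist:

/*
 * Hedged usage sketch only: header and syscall-number names are assumed
 * to come from the csky uapi headers.
 */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/cachectl.h>	/* BCACHE, ICACHE, DCACHE (csky uapi, assumed) */

static unsigned short code_buf[64];	/* pretend JIT output lives here */

int main(void)
{
	memset(code_buf, 0, sizeof(code_buf));	/* "emit" instructions */

	/* BCACHE writes back the D-cache and invalidates the I-cache
	 * over the buffer, matching the fallthrough in the syscall above. */
	long ret = syscall(__NR_cacheflush, code_buf, sizeof(code_buf),
			   BCACHE);
	if (ret != 0) {
		perror("cacheflush");
		return 1;
	}
	printf("flushed %zu bytes\n", sizeof(code_buf));
	return 0;
}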
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/mm.h> #include <asm/cache.h> #include <asm/barrier.h> /* for L1-cache */ #define INS_CACHE (1 << 0) #define DATA_CACHE (1 << 1) #define CACHE_INV (1 << 4) #define CACHE_CLR (1 << 5) #define CACHE_OMS (1 << 6) void local_icache_inv_all(void *priv) { mtcr("cr17", INS_CACHE|CACHE_INV); sync_is(); } #ifdef CONFIG_CPU_HAS_ICACHE_INS void icache_inv_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) asm volatile("icache.iva %0\n"::"r"(i):"memory"); sync_is(); } #else struct cache_range { unsigned long start; unsigned long end; }; static DEFINE_SPINLOCK(cache_lock); static inline void cache_op_line(unsigned long i, unsigned int val) { mtcr("cr22", i); mtcr("cr17", val); } void local_icache_inv_range(void *priv) { struct cache_range *param = priv; unsigned long i = param->start & ~(L1_CACHE_BYTES - 1); unsigned long flags; spin_lock_irqsave(&cache_lock, flags); for (; i < param->end; i += L1_CACHE_BYTES) cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS); spin_unlock_irqrestore(&cache_lock, flags); sync_is(); } void icache_inv_range(unsigned long start, unsigned long end) { struct cache_range param = { start, end }; if (irqs_disabled()) local_icache_inv_range(&param); else on_each_cpu(local_icache_inv_range, &param, 1); } #endif inline void dcache_wb_line(unsigned long start) { asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); sync_is(); } void dcache_wb_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); sync_is(); } void cache_wbinv_range(unsigned long start, unsigned long end) { dcache_wb_range(start, end); icache_inv_range(start, end); } EXPORT_SYMBOL(cache_wbinv_range); void dma_wbinv_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) asm volatile("dcache.civa %0\n"::"r"(i):"memory"); sync_is(); } void dma_inv_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) asm volatile("dcache.iva %0\n"::"r"(i):"memory"); sync_is(); } void dma_wb_range(unsigned long start, unsigned long end) { unsigned long i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) asm volatile("dcache.cva %0\n"::"r"(i):"memory"); sync_is(); }
linux-master
arch/csky/mm/cachev2.c
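An illustration of the cache-line rounding shared by the range helpers in cachev2.c above; the 32-byte line size is an assumption made only for this example:

/*
 * Illustration only: every range helper rounds the start address down to
 * a cache-line boundary and then issues one line operation per step.
 */
#include <stdio.h>

#define L1_CACHE_BYTES	32u	/* assumed for illustration */

int main(void)
{
	unsigned long start = 0x8000123cUL;
	unsigned long end   = 0x800012f1UL;
	unsigned long aligned = start & ~(unsigned long)(L1_CACHE_BYTES - 1);
	unsigned long i;
	unsigned int lines = 0;

	for (i = aligned; i < end; i += L1_CACHE_BYTES)
		lines++;	/* one dcache.cva/dcache.iva/... per line */

	printf("ops issued: %u (covers 0x%lx..0x%lx)\n", lines, aligned, end);
	return 0;
}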
// SPDX-License-Identifier: GPL-2.0 /* * Generic ASID allocator. * * Based on arch/arm/mm/context.c * * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. * Copyright (C) 2012 ARM Ltd. */ #include <linux/slab.h> #include <linux/mm_types.h> #include <asm/asid.h> #define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu) #define ASID_MASK(info) (~GENMASK((info)->bits - 1, 0)) #define ASID_FIRST_VERSION(info) (1UL << ((info)->bits)) #define asid2idx(info, asid) (((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift) #define idx2asid(info, idx) (((idx) << (info)->ctxt_shift) & ~ASID_MASK(info)) static void flush_context(struct asid_info *info) { int i; u64 asid; /* Update the list of reserved ASIDs and the ASID bitmap. */ bitmap_zero(info->map, NUM_CTXT_ASIDS(info)); for_each_possible_cpu(i) { asid = atomic64_xchg_relaxed(&active_asid(info, i), 0); /* * If this CPU has already been through a * rollover, but hasn't run another task in * the meantime, we must preserve its reserved * ASID, as this is the only trace we have of * the process it is still running. */ if (asid == 0) asid = reserved_asid(info, i); __set_bit(asid2idx(info, asid), info->map); reserved_asid(info, i) = asid; } /* * Queue a TLB invalidation for each CPU to perform on next * context-switch */ cpumask_setall(&info->flush_pending); } static bool check_update_reserved_asid(struct asid_info *info, u64 asid, u64 newasid) { int cpu; bool hit = false; /* * Iterate over the set of reserved ASIDs looking for a match. * If we find one, then we can update our mm to use newasid * (i.e. the same ASID in the current generation) but we can't * exit the loop early, since we need to ensure that all copies * of the old ASID are updated to reflect the mm. Failure to do * so could result in us missing the reserved ASID in a future * generation. */ for_each_possible_cpu(cpu) { if (reserved_asid(info, cpu) == asid) { hit = true; reserved_asid(info, cpu) = newasid; } } return hit; } static u64 new_context(struct asid_info *info, atomic64_t *pasid, struct mm_struct *mm) { static u32 cur_idx = 1; u64 asid = atomic64_read(pasid); u64 generation = atomic64_read(&info->generation); if (asid != 0) { u64 newasid = generation | (asid & ~ASID_MASK(info)); /* * If our current ASID was active during a rollover, we * can continue to use it and this was just a false alarm. */ if (check_update_reserved_asid(info, asid, newasid)) return newasid; /* * We had a valid ASID in a previous life, so try to re-use * it if possible. */ if (!__test_and_set_bit(asid2idx(info, asid), info->map)) return newasid; } /* * Allocate a free ASID. If we can't find one, take a note of the * currently active ASIDs and mark the TLBs as requiring flushes. We * always count from ASID #2 (index 1), as we use ASID #0 when setting * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd * pairs. */ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx); if (asid != NUM_CTXT_ASIDS(info)) goto set_asid; /* We're out of ASIDs, so increment the global generation count */ generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info), &info->generation); flush_context(info); /* We have more ASIDs than CPUs, so this will always succeed */ asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1); set_asid: __set_bit(asid, info->map); cur_idx = asid; cpumask_clear(mm_cpumask(mm)); return idx2asid(info, asid) | generation; } /* * Generate a new ASID for the context. * * @pasid: Pointer to the current ASID batch allocated. 
It will be updated * with the new ASID batch. * @cpu: current CPU ID. Must have been acquired through get_cpu() */ void asid_new_context(struct asid_info *info, atomic64_t *pasid, unsigned int cpu, struct mm_struct *mm) { unsigned long flags; u64 asid; raw_spin_lock_irqsave(&info->lock, flags); /* Check that our ASID belongs to the current generation. */ asid = atomic64_read(pasid); if ((asid ^ atomic64_read(&info->generation)) >> info->bits) { asid = new_context(info, pasid, mm); atomic64_set(pasid, asid); } if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending)) info->flush_cpu_ctxt_cb(); atomic64_set(&active_asid(info, cpu), asid); cpumask_set_cpu(cpu, mm_cpumask(mm)); raw_spin_unlock_irqrestore(&info->lock, flags); } /* * Initialize the ASID allocator * * @info: Pointer to the asid allocator structure * @bits: Number of ASIDs available * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are * allocated contiguously for a given context. This value should be a power of * 2. */ int asid_allocator_init(struct asid_info *info, u32 bits, unsigned int asid_per_ctxt, void (*flush_cpu_ctxt_cb)(void)) { info->bits = bits; info->ctxt_shift = ilog2(asid_per_ctxt); info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb; /* * Expect allocation after rollover to fail if we don't have at least * one more ASID than CPUs. ASID #0 is always reserved. */ WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus()); atomic64_set(&info->generation, ASID_FIRST_VERSION(info)); info->map = bitmap_zalloc(NUM_CTXT_ASIDS(info), GFP_KERNEL); if (!info->map) return -ENOMEM; raw_spin_lock_init(&info->lock); return 0; }
linux-master
arch/csky/mm/asid.c
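A standalone mirror of the generation/index packing and the rollover check used by the allocator above, with bits = 8 and one ASID per context (ctxt_shift = 0) chosen purely for the example:

/*
 * Illustration only: userspace mirror of ASID_MASK/idx2asid/asid2idx and
 * of the generation comparison performed in asid_new_context().
 */
#include <stdint.h>
#include <stdio.h>

#define BITS		8
#define CTXT_SHIFT	0
#define ASID_MASK	(~((1ULL << BITS) - 1))	/* generation bits */
#define FIRST_VERSION	(1ULL << BITS)

static uint64_t idx2asid(uint64_t idx)  { return (idx << CTXT_SHIFT) & ~ASID_MASK; }
static uint64_t asid2idx(uint64_t asid) { return (asid & ~ASID_MASK) >> CTXT_SHIFT; }

int main(void)
{
	uint64_t generation = FIRST_VERSION;	  /* as set at init time */
	uint64_t asid = generation | idx2asid(5); /* this context got index 5 */

	printf("asid 0x%llx -> idx %llu\n",
	       (unsigned long long)asid, (unsigned long long)asid2idx(asid));

	/* After a rollover the global generation moves on ... */
	generation += FIRST_VERSION;

	/* ... and the check from asid_new_context() flags the old value. */
	int stale = ((asid ^ generation) >> BITS) != 0;
	printf("stale after rollover? %s\n", stale ? "yes" : "no");
	return 0;
}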
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/asid.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);

struct asid_info asid_info;

void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
}

static void asid_flush_cpu_ctxt(void)
{
	local_tlb_invalid_all();
}

static int asids_init(void)
{
	BUG_ON(((1 << CONFIG_CPU_ASID_BITS) - 1) <= num_possible_cpus());

	if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
				asid_flush_cpu_ctxt))
		panic("Unable to initialize ASID allocator for %lu ASIDs\n",
		      NUM_ASIDS(&asid_info));

	asid_info.active = &active_asids;
	asid_info.reserved = &reserved_asids;

	pr_info("ASID allocator initialised with %lu entries\n",
		NUM_CTXT_ASIDS(&asid_info));

	return 0;
}
early_initcall(asids_init);
linux-master
arch/csky/mm/context.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

unsigned long highstart_pfn, highend_pfn;

void kmap_flush_tlb(unsigned long addr)
{
	flush_tlb_one(addr);
}
EXPORT_SYMBOL(kmap_flush_tlb);

void __init kmap_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = (pud_t *)pgd;
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
linux-master
arch/csky/mm/highmem.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/extable.h> #include <linux/kprobes.h> #include <linux/mmu_context.h> #include <linux/perf_event.h> int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; fixup = search_exception_tables(instruction_pointer(regs)); if (fixup) { regs->pc = fixup->fixup; return 1; } return 0; } static inline bool is_write(struct pt_regs *regs) { switch (trap_no(regs)) { case VEC_TLBINVALIDS: return true; case VEC_TLBMODIFIED: return true; } return false; } #ifdef CONFIG_CPU_HAS_LDSTEX static inline void csky_cmpxchg_fixup(struct pt_regs *regs) { return; } #else extern unsigned long csky_cmpxchg_ldw; extern unsigned long csky_cmpxchg_stw; static inline void csky_cmpxchg_fixup(struct pt_regs *regs) { if (trap_no(regs) != VEC_TLBMODIFIED) return; if (instruction_pointer(regs) == csky_cmpxchg_stw) instruction_pointer_set(regs, csky_cmpxchg_ldw); return; } #endif static inline void no_context(struct pt_regs *regs, unsigned long addr) { current->thread.trap_no = trap_no(regs); /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ bust_spinlocks(1); pr_alert("Unable to handle kernel paging request at virtual " "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc); die(regs, "Oops"); make_task_dead(SIGKILL); } static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault) { current->thread.trap_no = trap_no(regs); if (fault & VM_FAULT_OOM) { /* * We ran out of memory, call the OOM killer, and return the userspace * (which will retry the fault, or kill us if we got oom-killed). */ if (!user_mode(regs)) { no_context(regs, addr); return; } pagefault_out_of_memory(); return; } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) { no_context(regs, addr); return; } do_trap(regs, SIGBUS, BUS_ADRERR, addr); return; } BUG(); } static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) { /* * Something tried to access memory that isn't in our memory map. * Fix it, but check if it's kernel or user first. */ /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { do_trap(regs, SIGSEGV, code, addr); return; } no_context(regs, addr); } static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr) { pgd_t *pgd, *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; int offset; /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { do_trap(regs, SIGSEGV, code, addr); return; } /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "tsk" here. We might be inside * an interrupt in the middle of a task switch.. 
*/ offset = pgd_index(addr); pgd = get_pgd() + offset; pgd_k = init_mm.pgd + offset; if (!pgd_present(*pgd_k)) { no_context(regs, addr); return; } set_pgd(pgd, *pgd_k); pud = (pud_t *)pgd; pud_k = (pud_t *)pgd_k; if (!pud_present(*pud_k)) { no_context(regs, addr); return; } pmd = pmd_offset(pud, addr); pmd_k = pmd_offset(pud_k, addr); if (!pmd_present(*pmd_k)) { no_context(regs, addr); return; } set_pmd(pmd, *pmd_k); pte_k = pte_offset_kernel(pmd_k, addr); if (!pte_present(*pte_k)) { no_context(regs, addr); return; } flush_tlb_one(addr); } static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma) { if (is_write(regs)) { if (!(vma->vm_flags & VM_WRITE)) return true; } else { if (unlikely(!vma_is_accessible(vma))) return true; } return false; } /* * This routine handles page faults. It determines the address and the * problem, and then passes it off to one of the appropriate routines. */ asmlinkage void do_page_fault(struct pt_regs *regs) { struct task_struct *tsk; struct vm_area_struct *vma; struct mm_struct *mm; unsigned long addr = read_mmu_entryhi() & PAGE_MASK; unsigned int flags = FAULT_FLAG_DEFAULT; int code = SEGV_MAPERR; vm_fault_t fault; tsk = current; mm = tsk->mm; csky_cmpxchg_fixup(regs); if (kprobe_page_fault(regs, tsk->thread.trap_no)) return; /* * Fault-in kernel-space virtual memory on-demand. * The 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. */ if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) { vmalloc_fault(regs, code, addr); return; } /* Enable interrupts if they were enabled in the parent context. */ if (likely(regs->sr & BIT(6))) local_irq_enable(); /* * If we're in an interrupt, have no user context, or are running * in an atomic region, then we must not take the fault. */ if (unlikely(faulthandler_disabled() || !mm)) { no_context(regs, addr); return; } if (user_mode(regs)) flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); if (is_write(regs)) flags |= FAULT_FLAG_WRITE; retry: vma = lock_mm_and_find_vma(mm, addr, regs); if (unlikely(!vma)) { bad_area_nosemaphore(regs, mm, code, addr); return; } /* * Ok, we have a good vm_area for this memory access, so * we can handle it. */ code = SEGV_ACCERR; if (unlikely(access_error(regs, vma))) { mmap_read_unlock(mm); bad_area_nosemaphore(regs, mm, code, addr); return; } /* * If for any reason at all we could not handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, addr, flags, regs); /* * If we need to retry but a fatal signal is pending, handle the * signal first. We do not need to release the mmap_lock because it * would already be released in __lock_page_or_retry in mm/filemap.c. */ if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) no_context(regs, addr); return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) { flags |= FAULT_FLAG_TRIED; /* * No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, addr, fault); return; } return; }
linux-master
arch/csky/mm/fault.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <asm/mmu_context.h> #include <asm/setup.h> /* * One C-SKY MMU TLB entry contain two PFN/page entry, ie: * 1VPN -> 2PFN */ #define TLB_ENTRY_SIZE (PAGE_SIZE * 2) #define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1) void flush_tlb_all(void) { tlb_invalid_all(); } void flush_tlb_mm(struct mm_struct *mm) { #ifdef CONFIG_CPU_HAS_TLBI sync_is(); asm volatile( "tlbi.asids %0 \n" "sync.i \n" : : "r" (cpu_asid(mm)) : "memory"); #else tlb_invalid_all(); #endif } /* * MMU operation regs only could invalid tlb entry in jtlb and we * need change asid field to invalid I-utlb & D-utlb. */ #ifndef CONFIG_CPU_HAS_TLBI #define restore_asid_inv_utlb(oldpid, newpid) \ do { \ if (oldpid == newpid) \ write_mmu_entryhi(oldpid + 1); \ write_mmu_entryhi(oldpid); \ } while (0) #endif void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { unsigned long newpid = cpu_asid(vma->vm_mm); start &= TLB_ENTRY_SIZE_MASK; end += TLB_ENTRY_SIZE - 1; end &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI sync_is(); while (start < end) { asm volatile( "tlbi.vas %0 \n" : : "r" (start | newpid) : "memory"); start += 2*PAGE_SIZE; } asm volatile("sync.i\n"); #else { unsigned long flags, oldpid; local_irq_save(flags); oldpid = read_mmu_entryhi() & ASID_MASK; while (start < end) { int idx; write_mmu_entryhi(start | newpid); start += 2*PAGE_SIZE; tlb_probe(); idx = read_mmu_index(); if (idx >= 0) tlb_invalid_indexed(); } restore_asid_inv_utlb(oldpid, newpid); local_irq_restore(flags); } #endif } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= TLB_ENTRY_SIZE_MASK; end += TLB_ENTRY_SIZE - 1; end &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI sync_is(); while (start < end) { asm volatile( "tlbi.vaas %0 \n" : : "r" (start) : "memory"); start += 2*PAGE_SIZE; } asm volatile("sync.i\n"); #else { unsigned long flags, oldpid; local_irq_save(flags); oldpid = read_mmu_entryhi() & ASID_MASK; while (start < end) { int idx; write_mmu_entryhi(start | oldpid); start += 2*PAGE_SIZE; tlb_probe(); idx = read_mmu_index(); if (idx >= 0) tlb_invalid_indexed(); } restore_asid_inv_utlb(oldpid, oldpid); local_irq_restore(flags); } #endif } void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { int newpid = cpu_asid(vma->vm_mm); addr &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI sync_is(); asm volatile( "tlbi.vas %0 \n" "sync.i \n" : : "r" (addr | newpid) : "memory"); #else { int oldpid, idx; unsigned long flags; local_irq_save(flags); oldpid = read_mmu_entryhi() & ASID_MASK; write_mmu_entryhi(addr | newpid); tlb_probe(); idx = read_mmu_index(); if (idx >= 0) tlb_invalid_indexed(); restore_asid_inv_utlb(oldpid, newpid); local_irq_restore(flags); } #endif } void flush_tlb_one(unsigned long addr) { addr &= TLB_ENTRY_SIZE_MASK; #ifdef CONFIG_CPU_HAS_TLBI sync_is(); asm volatile( "tlbi.vaas %0 \n" "sync.i \n" : : "r" (addr) : "memory"); #else { int oldpid, idx; unsigned long flags; local_irq_save(flags); oldpid = read_mmu_entryhi() & ASID_MASK; write_mmu_entryhi(addr | oldpid); tlb_probe(); idx = read_mmu_index(); if (idx >= 0) tlb_invalid_indexed(); restore_asid_inv_utlb(oldpid, oldpid); local_irq_restore(flags); } #endif } EXPORT_SYMBOL(flush_tlb_one);
linux-master
arch/csky/mm/tlb.c
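A standalone illustration of the 2*PAGE_SIZE rounding that flush_tlb_range() and flush_tlb_kernel_range() apply above, since one jTLB entry maps a pair of pages; PAGE_SIZE = 4 KiB is assumed for the arithmetic:

/*
 * Illustration only: mirrors TLB_ENTRY_SIZE / TLB_ENTRY_SIZE_MASK and the
 * range rounding from arch/csky/mm/tlb.c.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

int main(void)
{
	unsigned long start = 0x00402abcUL;
	unsigned long end   = 0x00407123UL;

	start &= TLB_ENTRY_SIZE_MASK;
	end += TLB_ENTRY_SIZE - 1;
	end &= TLB_ENTRY_SIZE_MASK;

	printf("flush 0x%lx..0x%lx, %lu tlbi.vas ops (one per 2 pages)\n",
	       start, end, (end - start) / TLB_ENTRY_SIZE);
	return 0;
}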
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/spinlock.h> #include <asm/cache.h> #include <abi/reg_ops.h> /* for L1-cache */ #define INS_CACHE (1 << 0) #define DATA_CACHE (1 << 1) #define CACHE_INV (1 << 4) #define CACHE_CLR (1 << 5) #define CACHE_OMS (1 << 6) #define CACHE_ITS (1 << 7) #define CACHE_LICF (1 << 31) /* for L2-cache */ #define CR22_LEVEL_SHIFT (1) #define CR22_SET_SHIFT (7) #define CR22_WAY_SHIFT (30) #define CR22_WAY_SHIFT_L2 (29) static DEFINE_SPINLOCK(cache_lock); static inline void cache_op_line(unsigned long i, unsigned int val) { mtcr("cr22", i); mtcr("cr17", val); } #define CCR2_L2E (1 << 3) static void cache_op_all(unsigned int value, unsigned int l2) { mtcr("cr17", value | CACHE_CLR); mb(); if (l2 && (mfcr_ccr2() & CCR2_L2E)) { mtcr("cr24", value | CACHE_CLR); mb(); } } static void cache_op_range( unsigned int start, unsigned int end, unsigned int value, unsigned int l2) { unsigned long i, flags; unsigned int val = value | CACHE_CLR | CACHE_OMS; bool l2_sync; if (unlikely((end - start) >= PAGE_SIZE) || unlikely(start < PAGE_OFFSET) || unlikely(start >= PAGE_OFFSET + LOWMEM_LIMIT)) { cache_op_all(value, l2); return; } if ((mfcr_ccr2() & CCR2_L2E) && l2) l2_sync = 1; else l2_sync = 0; spin_lock_irqsave(&cache_lock, flags); i = start & ~(L1_CACHE_BYTES - 1); for (; i < end; i += L1_CACHE_BYTES) { cache_op_line(i, val); if (l2_sync) { mb(); mtcr("cr24", val); } } spin_unlock_irqrestore(&cache_lock, flags); mb(); } void dcache_wb_line(unsigned long start) { asm volatile("idly4\n":::"memory"); cache_op_line(start, DATA_CACHE|CACHE_CLR); mb(); } void icache_inv_range(unsigned long start, unsigned long end) { cache_op_range(start, end, INS_CACHE|CACHE_INV, 0); } void icache_inv_all(void) { cache_op_all(INS_CACHE|CACHE_INV, 0); } void local_icache_inv_all(void *priv) { cache_op_all(INS_CACHE|CACHE_INV, 0); } void dcache_wb_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0); } void dcache_wbinv_all(void) { cache_op_all(DATA_CACHE|CACHE_CLR|CACHE_INV, 0); } void cache_wbinv_range(unsigned long start, unsigned long end) { cache_op_range(start, end, INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); } EXPORT_SYMBOL(cache_wbinv_range); void cache_wbinv_all(void) { cache_op_all(INS_CACHE|DATA_CACHE|CACHE_CLR|CACHE_INV, 0); } void dma_wbinv_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); } void dma_inv_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); } void dma_wb_range(unsigned long start, unsigned long end) { cache_op_range(start, end, DATA_CACHE|CACHE_CLR|CACHE_INV, 1); }
linux-master
arch/csky/mm/cachev1.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/stop_machine.h> #include <asm/cacheflush.h> #ifdef CONFIG_DYNAMIC_FTRACE #define NOP 0x4000 #define NOP32_HI 0xc400 #define NOP32_LO 0x4820 #define PUSH_LR 0x14d0 #define MOVIH_LINK 0xea3a #define ORI_LINK 0xef5a #define JSR_LINK 0xe8fa #define BSR_LINK 0xe000 /* * Gcc-csky with -pg will insert stub in function prologue: * push lr * jbsr _mcount * nop32 * nop32 * * If the (callee - current_pc) is less then 64MB, we'll use bsr: * push lr * bsr _mcount * nop32 * nop32 * else we'll use (movih + ori + jsr): * push lr * movih r26, ... * ori r26, ... * jsr r26 * * (r26 is our reserved link-reg) * */ static inline void make_jbsr(unsigned long callee, unsigned long pc, uint16_t *call, bool nolr) { long offset; call[0] = nolr ? NOP : PUSH_LR; offset = (long) callee - (long) pc; if (unlikely(offset < -67108864 || offset > 67108864)) { call[1] = MOVIH_LINK; call[2] = callee >> 16; call[3] = ORI_LINK; call[4] = callee & 0xffff; call[5] = JSR_LINK; call[6] = 0; } else { offset = offset >> 1; call[1] = BSR_LINK | ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); call[2] = (uint16_t)((unsigned long) offset & 0xffff); call[3] = call[5] = NOP32_HI; call[4] = call[6] = NOP32_LO; } } static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO}; static int ftrace_check_current_nop(unsigned long hook) { uint16_t olds[7]; unsigned long hook_pos = hook - 2; if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos, sizeof(nops))) return -EFAULT; if (memcmp((void *)nops, (void *)olds, sizeof(nops))) { pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n", (void *)hook_pos, olds[0], olds[1], olds[2], olds[3], olds[4], olds[5], olds[6]); return -EINVAL; } return 0; } static int ftrace_modify_code(unsigned long hook, unsigned long target, bool enable, bool nolr) { uint16_t call[7]; unsigned long hook_pos = hook - 2; int ret = 0; make_jbsr(target, hook, call, nolr); ret = copy_to_kernel_nofault((void *)hook_pos, enable ? 
call : nops, sizeof(nops)); if (ret) return -EPERM; flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE); return 0; } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret = ftrace_check_current_nop(rec->ip); if (ret) return ret; return ftrace_modify_code(rec->ip, addr, true, false); } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { return ftrace_modify_code(rec->ip, addr, false, false); } int ftrace_update_ftrace_func(ftrace_func_t func) { int ret = ftrace_modify_code((unsigned long)&ftrace_call, (unsigned long)func, true, true); if (!ret) ret = ftrace_modify_code((unsigned long)&ftrace_regs_call, (unsigned long)func, true, true); return ret; } #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr) { return ftrace_modify_code(rec->ip, addr, true, true); } #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long)&return_to_handler; unsigned long old; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; old = *parent; if (!function_graph_enter(old, self_addr, *(unsigned long *)frame_pointer, parent)) { /* * For csky-gcc function has sub-call: * subi sp, sp, 8 * stw r8, (sp, 0) * mov r8, sp * st.w r15, (sp, 0x4) * push r15 * jl _mcount * We only need set *parent for resume * * For csky-gcc function has no sub-call: * subi sp, sp, 4 * stw r8, (sp, 0) * mov r8, sp * push r15 * jl _mcount * We need set *parent and *(frame_pointer + 4) for resume, * because lr is resumed twice. */ *parent = return_hooker; frame_pointer += 4; if (*(unsigned long *)frame_pointer == old) *(unsigned long *)frame_pointer = return_hooker; } } #ifdef CONFIG_DYNAMIC_FTRACE int ftrace_enable_ftrace_graph_caller(void) { return ftrace_modify_code((unsigned long)&ftrace_graph_call, (unsigned long)&ftrace_graph_caller, true, true); } int ftrace_disable_ftrace_graph_caller(void) { return ftrace_modify_code((unsigned long)&ftrace_graph_call, (unsigned long)&ftrace_graph_caller, false, true); } #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE #ifndef CONFIG_CPU_HAS_ICACHE_INS struct ftrace_modify_param { int command; atomic_t cpu_count; }; static int __ftrace_modify_code(void *data) { struct ftrace_modify_param *param = data; if (atomic_inc_return(&param->cpu_count) == 1) { ftrace_modify_all_code(param->command); atomic_inc(&param->cpu_count); } else { while (atomic_read(&param->cpu_count) <= num_online_cpus()) cpu_relax(); local_icache_inv_all(NULL); } return 0; } void arch_ftrace_update_code(int command) { struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; stop_machine(__ftrace_modify_code, &param, cpu_online_mask); } #endif #endif /* CONFIG_DYNAMIC_FTRACE */ /* _mcount is defined in abi's mcount.S */ EXPORT_SYMBOL(_mcount);
linux-master
arch/csky/kernel/ftrace.c
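A standalone mirror of the reachability test and bsr immediate split performed by make_jbsr() above; the call-site and callee addresses are made up, BSR_LINK is the same opcode constant used in the file:

/*
 * Illustration only: shows when the patched stub can use a 26-bit bsr and
 * how the halved offset is packed into the two instruction halfwords,
 * versus falling back to movih/ori/jsr through r26.
 */
#include <stdint.h>
#include <stdio.h>

#define BSR_LINK 0xe000

int main(void)
{
	unsigned long pc     = 0x80100000UL;	/* patched call site (assumed) */
	unsigned long callee = 0x80000010UL;	/* _mcount address (assumed) */
	long offset = (long)callee - (long)pc;

	if (offset < -67108864 || offset > 67108864) {
		/* Out of bsr range: movih/ori carry the full address. */
		printf("use movih/ori/jsr, hi16=0x%04lx lo16=0x%04lx\n",
		       callee >> 16, callee & 0xffff);
	} else {
		/* In range: encode the halved offset into the bsr pair. */
		offset >>= 1;
		uint16_t hi = BSR_LINK |
			((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
		uint16_t lo = (uint16_t)((unsigned long)offset & 0xffff);
		printf("use bsr, instruction words 0x%04x 0x%04x\n", hi, lo);
	}
	return 0;
}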
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/sched/debug.h> #include <linux/delay.h> #include <linux/kallsyms.h> #include <linux/uaccess.h> #include <linux/ptrace.h> #include <linux/elfcore.h> #include <asm/elf.h> #include <abi/reg_ops.h> struct cpuinfo_csky cpu_data[NR_CPUS]; #ifdef CONFIG_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); /* * Some archs flush debug and FPU info here */ void flush_thread(void){} int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long usp = args->stack; unsigned long tls = args->tls; struct switch_stack *childstack; struct pt_regs *childregs = task_pt_regs(p); #ifdef CONFIG_CPU_HAS_FPU save_to_user_fp(&p->thread.user_fp); #endif childstack = ((struct switch_stack *) childregs) - 1; memset(childstack, 0, sizeof(struct switch_stack)); /* setup thread.sp for switch_to !!! */ p->thread.sp = (unsigned long)childstack; if (unlikely(args->fn)) { memset(childregs, 0, sizeof(struct pt_regs)); childstack->r15 = (unsigned long) ret_from_kernel_thread; childstack->r10 = (unsigned long) args->fn_arg; childstack->r9 = (unsigned long) args->fn; childregs->sr = mfcr("psr"); } else { *childregs = *(current_pt_regs()); if (usp) childregs->usp = usp; if (clone_flags & CLONE_SETTLS) task_thread_info(p)->tp_value = childregs->tls = tls; childregs->a0 = 0; childstack->r15 = (unsigned long) ret_from_fork; } return 0; } /* Fill in the fpu structure for a core dump. */ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) { memcpy(fpu, &current->thread.user_fp, sizeof(*fpu)); return 1; } int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs) { struct pt_regs *regs = task_pt_regs(tsk); /* NOTE: usp is error value. */ ELF_CORE_COPY_REGS((*pr_regs), regs) return 1; } #ifndef CONFIG_CPU_PM_NONE void arch_cpu_idle(void) { #ifdef CONFIG_CPU_PM_WAIT asm volatile("wait\n"); #endif #ifdef CONFIG_CPU_PM_DOZE asm volatile("doze\n"); #endif #ifdef CONFIG_CPU_PM_STOP asm volatile("stop\n"); #endif } #endif
linux-master
arch/csky/kernel/process.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <asm/traps.h>
#include <asm/smp.h>

void __init init_IRQ(void)
{
	irqchip_init();
#ifdef CONFIG_SMP
	setup_smp_ipi();
#endif
}
linux-master
arch/csky/kernel/irq.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include <linux/types.h> #include <linux/io.h> /* * Copy data from IO memory space to "real" memory space. */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { while (count && !IS_ALIGNED((unsigned long)from, 4)) { *(u8 *)to = __raw_readb(from); from++; to++; count--; } while (count >= 4) { *(u32 *)to = __raw_readl(from); from += 4; to += 4; count -= 4; } while (count) { *(u8 *)to = __raw_readb(from); from++; to++; count--; } } EXPORT_SYMBOL(__memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { while (count && !IS_ALIGNED((unsigned long)to, 4)) { __raw_writeb(*(u8 *)from, to); from++; to++; count--; } while (count >= 4) { __raw_writel(*(u32 *)from, to); from += 4; to += 4; count -= 4; } while (count) { __raw_writeb(*(u8 *)from, to); from++; to++; count--; } } EXPORT_SYMBOL(__memcpy_toio); /* * "memset" on IO memory space. */ void __memset_io(volatile void __iomem *dst, int c, size_t count) { u32 qc = (u8)c; qc |= qc << 8; qc |= qc << 16; while (count && !IS_ALIGNED((unsigned long)dst, 4)) { __raw_writeb(c, dst); dst++; count--; } while (count >= 4) { __raw_writel(qc, dst); dst += 4; count -= 4; } while (count) { __raw_writeb(c, dst); dst++; count--; } } EXPORT_SYMBOL(__memset_io);
linux-master
arch/csky/kernel/io.c
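A standalone illustration of the unaligned-head / 32-bit-body / byte-tail split that __memcpy_fromio() uses, run here over ordinary memory so it compiles and runs anywhere:

/*
 * Illustration only: same loop structure as __memcpy_fromio(), but with
 * plain loads/stores instead of __raw_readb()/__raw_readl().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_like_fromio(void *to, const void *from, size_t count)
{
	const uint8_t *s = from;
	uint8_t *d = to;

	while (count && ((uintptr_t)s & 3)) {	/* byte copies until aligned */
		*d++ = *s++;
		count--;
	}
	while (count >= 4) {			/* aligned 32-bit copies */
		uint32_t w;
		memcpy(&w, s, 4);
		memcpy(d, &w, 4);
		s += 4;
		d += 4;
		count -= 4;
	}
	while (count) {				/* trailing bytes */
		*d++ = *s++;
		count--;
	}
}

int main(void)
{
	char src[32] = "csky memcpy_fromio alignment";
	char dst[32] = { 0 };

	copy_like_fromio(dst, src + 1, sizeof(src) - 1);	/* misaligned source */
	printf("%s\n", dst);
	return 0;
}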
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/audit.h> #include <linux/elf.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/uaccess.h> #include <linux/user.h> #include <asm/thread_info.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/asm-offsets.h> #include <abi/regdef.h> #include <abi/ckmmu.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> /* sets the trace bits. */ #define TRACE_MODE_SI (1 << 14) #define TRACE_MODE_RUN 0 #define TRACE_MODE_MASK ~(0x3 << 14) /* * Make sure the single step bit is not set. */ static void singlestep_disable(struct task_struct *tsk) { struct pt_regs *regs; regs = task_pt_regs(tsk); regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN; /* Enable irq */ regs->sr |= BIT(6); } static void singlestep_enable(struct task_struct *tsk) { struct pt_regs *regs; regs = task_pt_regs(tsk); regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI; /* Disable irq */ regs->sr &= ~BIT(6); } /* * Make sure the single step bit is set. */ void user_enable_single_step(struct task_struct *child) { singlestep_enable(child); } void user_disable_single_step(struct task_struct *child) { singlestep_disable(child); } enum csky_regset { REGSET_GPR, REGSET_FPR, }; static int gpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct pt_regs *regs = task_pt_regs(target); /* Abiv1 regs->tls is fake and we need sync here. */ regs->tls = task_thread_info(target)->tp_value; return membuf_write(&to, regs, sizeof(*regs)); } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct pt_regs regs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1); if (ret) return ret; /* BIT(0) of regs.sr is Condition Code/Carry bit */ regs.sr = (regs.sr & BIT(0)) | (task_pt_regs(target)->sr & ~BIT(0)); #ifdef CONFIG_CPU_HAS_HILO regs.dcsr = task_pt_regs(target)->dcsr; #endif task_thread_info(target)->tp_value = regs.tls; *task_pt_regs(target) = regs; return 0; } static int fpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; #if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) int i; struct user_fp tmp = *regs; for (i = 0; i < 16; i++) { tmp.vr[i*4] = regs->vr[i*2]; tmp.vr[i*4 + 1] = regs->vr[i*2 + 1]; } for (i = 0; i < 32; i++) tmp.vr[64 + i] = regs->vr[32 + i]; return membuf_write(&to, &tmp, sizeof(tmp)); #else return membuf_write(&to, regs, sizeof(*regs)); #endif } static int fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct user_fp *regs = (struct user_fp *)&target->thread.user_fp; #if defined(CONFIG_CPU_HAS_FPUV2) && !defined(CONFIG_CPU_HAS_VDSP) int i; struct user_fp tmp; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tmp, 0, -1); *regs = tmp; for (i = 0; i < 16; i++) { regs->vr[i*2] = tmp.vr[i*4]; regs->vr[i*2 + 1] = tmp.vr[i*4 + 1]; } for (i = 0; i < 32; i++) regs->vr[32 + i] = tmp.vr[64 + i]; #else ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); #endif return ret; } static 
const struct user_regset csky_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct pt_regs) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = gpr_get, .set = gpr_set, }, [REGSET_FPR] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_fp) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = fpr_get, .set = fpr_set, }, }; static const struct user_regset_view user_csky_view = { .name = "csky", .e_machine = ELF_ARCH, .regsets = csky_regsets, .n = ARRAY_SIZE(csky_regsets), }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_csky_view; } struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(tls), REG_OFFSET_NAME(lr), REG_OFFSET_NAME(pc), REG_OFFSET_NAME(sr), REG_OFFSET_NAME(usp), REG_OFFSET_NAME(orig_a0), REG_OFFSET_NAME(a0), REG_OFFSET_NAME(a1), REG_OFFSET_NAME(a2), REG_OFFSET_NAME(a3), REG_OFFSET_NAME(regs[0]), REG_OFFSET_NAME(regs[1]), REG_OFFSET_NAME(regs[2]), REG_OFFSET_NAME(regs[3]), REG_OFFSET_NAME(regs[4]), REG_OFFSET_NAME(regs[5]), REG_OFFSET_NAME(regs[6]), REG_OFFSET_NAME(regs[7]), REG_OFFSET_NAME(regs[8]), REG_OFFSET_NAME(regs[9]), #if defined(__CSKYABIV2__) REG_OFFSET_NAME(exregs[0]), REG_OFFSET_NAME(exregs[1]), REG_OFFSET_NAME(exregs[2]), REG_OFFSET_NAME(exregs[3]), REG_OFFSET_NAME(exregs[4]), REG_OFFSET_NAME(exregs[5]), REG_OFFSET_NAME(exregs[6]), REG_OFFSET_NAME(exregs[7]), REG_OFFSET_NAME(exregs[8]), REG_OFFSET_NAME(exregs[9]), REG_OFFSET_NAME(exregs[10]), REG_OFFSET_NAME(exregs[11]), REG_OFFSET_NAME(exregs[12]), REG_OFFSET_NAME(exregs[13]), REG_OFFSET_NAME(exregs[14]), REG_OFFSET_NAME(rhi), REG_OFFSET_NAME(rlo), REG_OFFSET_NAME(dcsr), #endif REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). * If @addr is within the kernel stack, it returns true. If not, returns false. */ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return (addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); } /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. 
*/ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) return *addr; else return 0; } void ptrace_disable(struct task_struct *child) { singlestep_disable(child); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { long ret = -EIO; switch (request) { default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage int syscall_trace_enter(struct pt_regs *regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) if (ptrace_report_syscall_entry(regs)) return -1; if (secure_computing() == -1) return -1; if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, syscall_get_nr(current, regs)); audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3); return 0; } asmlinkage void syscall_trace_exit(struct pt_regs *regs) { audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACE)) ptrace_report_syscall_exit(regs, 0); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, syscall_get_return_value(current, regs)); } #ifdef CONFIG_CPU_CK860 static void show_iutlb(void) { int entry, i; unsigned long flags; unsigned long oldpid; unsigned long entryhi[16], entrylo0[16], entrylo1[16]; oldpid = read_mmu_entryhi(); entry = 0x8000; local_irq_save(flags); for (i = 0; i < 16; i++) { write_mmu_index(entry); tlb_read(); entryhi[i] = read_mmu_entryhi(); entrylo0[i] = read_mmu_entrylo0(); entrylo1[i] = read_mmu_entrylo1(); entry++; } local_irq_restore(flags); write_mmu_entryhi(oldpid); printk("\n\n\n"); for (i = 0; i < 16; i++) printk("iutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" " entrylo1 - 0x%lx\n", i, entryhi[i], entrylo0[i], entrylo1[i]); printk("\n\n\n"); } static void show_dutlb(void) { int entry, i; unsigned long flags; unsigned long oldpid; unsigned long entryhi[16], entrylo0[16], entrylo1[16]; oldpid = read_mmu_entryhi(); entry = 0x4000; local_irq_save(flags); for (i = 0; i < 16; i++) { write_mmu_index(entry); tlb_read(); entryhi[i] = read_mmu_entryhi(); entrylo0[i] = read_mmu_entrylo0(); entrylo1[i] = read_mmu_entrylo1(); entry++; } local_irq_restore(flags); write_mmu_entryhi(oldpid); printk("\n\n\n"); for (i = 0; i < 16; i++) printk("dutlb[%d]: entryhi - 0x%lx; entrylo0 - 0x%lx;" " entrylo1 - 0x%lx\n", i, entryhi[i], entrylo0[i], entrylo1[i]); printk("\n\n\n"); } static unsigned long entryhi[1024], entrylo0[1024], entrylo1[1024]; static void show_jtlb(void) { int entry; unsigned long flags; unsigned long oldpid; oldpid = read_mmu_entryhi(); entry = 0; local_irq_save(flags); while (entry < 1024) { write_mmu_index(entry); tlb_read(); entryhi[entry] = read_mmu_entryhi(); entrylo0[entry] = read_mmu_entrylo0(); entrylo1[entry] = read_mmu_entrylo1(); entry++; } local_irq_restore(flags); write_mmu_entryhi(oldpid); printk("\n\n\n"); for (entry = 0; entry < 1024; entry++) printk("jtlb[%x]: entryhi - 0x%lx; entrylo0 - 0x%lx;" " entrylo1 - 0x%lx\n", entry, entryhi[entry], entrylo0[entry], entrylo1[entry]); printk("\n\n\n"); } static void show_tlb(void) { show_iutlb(); show_dutlb(); show_jtlb(); } #else static void show_tlb(void) { return; } #endif void show_regs(struct pt_regs *fp) { pr_info("\nCURRENT PROCESS:\n\n"); pr_info("COMM=%s PID=%d\n", current->comm, current->pid); if (current->mm) { pr_info("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", (int) current->mm->start_code, (int) current->mm->end_code, (int) 
current->mm->start_data, (int) current->mm->end_data, (int) current->mm->end_data, (int) current->mm->brk); pr_info("USER-STACK=%08x KERNEL-STACK=%08x\n\n", (int) current->mm->start_stack, (int) (((unsigned long) current) + 2 * PAGE_SIZE)); } pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc); pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr); pr_info("SP: 0x%08lx\n", (long)fp->usp); pr_info("PSR: 0x%08lx\n", (long)fp->sr); pr_info("orig_a0: 0x%08lx\n", fp->orig_a0); pr_info("PT_REGS: 0x%08lx\n", (long)fp); pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n", fp->a0, fp->a1, fp->a2, fp->a3); #if defined(__CSKYABIV2__) pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n", fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n", fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n", fp->regs[8], fp->regs[9], fp->lr); pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n", fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]); pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n", fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]); pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n", fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]); pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n", fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls); pr_info(" hi: 0x%08lx lo: 0x%08lx\n", fp->rhi, fp->rlo); #else pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n", fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]); pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n", fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]); pr_info("r14: 0x%08lx r1: 0x%08lx\n", fp->regs[8], fp->regs[9]); #endif show_tlb(); return; }
linux-master
arch/csky/kernel/ptrace.c
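A hedged tracer sketch showing how PTRACE_SINGLESTEP from userspace reaches user_enable_single_step()/singlestep_enable() above, which set the TRACE_MODE_SI bits in the child's saved psr; the tracer side is plain Linux ptrace API, nothing csky-specific:

/*
 * Hedged usage sketch: single-steps a freshly forked child a few times.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* hand control to the parent */
		for (volatile int i = 0; i < 5; i++)
			;			/* a few instructions to step over */
		_exit(0);
	}

	int status;
	waitpid(pid, &status, 0);		/* child stopped by SIGSTOP */

	for (int step = 0; step < 10 && !WIFEXITED(status); step++) {
		ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
		waitpid(pid, &status, 0);
		printf("step %d: child %s\n", step,
		       WIFEXITED(status) ? "exited" : "stopped");
	}

	if (!WIFEXITED(status)) {
		ptrace(PTRACE_CONT, pid, NULL, NULL);
		waitpid(pid, &status, 0);
	}
	return 0;
}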
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/sched.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/user.h> #include <linux/string.h> #include <linux/linkage.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/kallsyms.h> #include <linux/rtc.h> #include <linux/uaccess.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/sched/debug.h> #include <asm/setup.h> #include <asm/traps.h> #include <asm/pgalloc.h> #include <asm/siginfo.h> #include <asm/mmu_context.h> #ifdef CONFIG_CPU_HAS_FPU #include <abi/fpu.h> #endif int show_unhandled_signals = 1; /* Defined in entry.S */ asmlinkage void csky_trap(void); asmlinkage void csky_systemcall(void); asmlinkage void csky_cmpxchg(void); asmlinkage void csky_get_tls(void); asmlinkage void csky_irq(void); asmlinkage void csky_pagefault(void); /* Defined in head.S */ asmlinkage void _start_smp_secondary(void); void __init pre_trap_init(void) { int i; mtcr("vbr", vec_base); for (i = 1; i < 128; i++) VEC_INIT(i, csky_trap); } void __init trap_init(void) { VEC_INIT(VEC_AUTOVEC, csky_irq); /* setup trap0 trap2 trap3 */ VEC_INIT(VEC_TRAP0, csky_systemcall); VEC_INIT(VEC_TRAP2, csky_cmpxchg); VEC_INIT(VEC_TRAP3, csky_get_tls); /* setup MMU TLB exception */ VEC_INIT(VEC_TLBINVALIDL, csky_pagefault); VEC_INIT(VEC_TLBINVALIDS, csky_pagefault); VEC_INIT(VEC_TLBMODIFIED, csky_pagefault); #ifdef CONFIG_CPU_HAS_FPU init_fpu(); #endif #ifdef CONFIG_SMP mtcr("cr<28, 0>", virt_to_phys(vec_base)); VEC_INIT(VEC_RESET, (void *)virt_to_phys(_start_smp_secondary)); #endif } static DEFINE_SPINLOCK(die_lock); void die(struct pt_regs *regs, const char *str) { static int die_counter; int ret; oops_enter(); spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); pr_emerg("%s [#%d]\n", str, ++die_counter); print_modules(); show_regs(regs); show_stack(current, (unsigned long *)regs->regs[4], KERN_INFO); ret = notify_die(DIE_OOPS, str, regs, 0, trap_no(regs), SIGSEGV); bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die_lock); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) { struct task_struct *tsk = current; if (show_unhandled_signals && unhandled_signal(tsk, signo) && printk_ratelimit()) { pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x%08lx", tsk->comm, task_pid_nr(tsk), signo, code, addr); print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); show_regs(regs); } force_sig_fault(signo, code, (void __user *)addr); } static void do_trap_error(struct pt_regs *regs, int signo, int code, unsigned long addr, const char *str) { current->thread.trap_no = trap_no(regs); if (user_mode(regs)) { do_trap(regs, signo, code, addr); } else { if (!fixup_exception(regs)) die(regs, str); } } #define DO_ERROR_INFO(name, signo, code, str) \ asmlinkage __visible void name(struct pt_regs *regs) \ { \ do_trap_error(regs, signo, code, regs->pc, "Oops - " str); \ } DO_ERROR_INFO(do_trap_unknown, SIGILL, ILL_ILLTRP, "unknown exception"); DO_ERROR_INFO(do_trap_zdiv, SIGFPE, FPE_INTDIV, "error zero div exception"); DO_ERROR_INFO(do_trap_buserr, SIGSEGV, ILL_ILLADR, "error bus error exception"); asmlinkage void do_trap_misaligned(struct pt_regs *regs) { #ifdef 
CONFIG_CPU_NEED_SOFTALIGN csky_alignment(regs); #else current->thread.trap_no = trap_no(regs); do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->pc, "Oops - load/store address misaligned"); #endif } asmlinkage void do_trap_bkpt(struct pt_regs *regs) { #ifdef CONFIG_KPROBES if (kprobe_single_step_handler(regs)) return; #endif #ifdef CONFIG_UPROBES if (uprobe_single_step_handler(regs)) return; #endif if (user_mode(regs)) { send_sig(SIGTRAP, current, 0); return; } do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->pc, "Oops - illegal trap exception"); } asmlinkage void do_trap_illinsn(struct pt_regs *regs) { current->thread.trap_no = trap_no(regs); #ifdef CONFIG_KPROBES if (kprobe_breakpoint_handler(regs)) return; #endif #ifdef CONFIG_UPROBES if (uprobe_breakpoint_handler(regs)) return; #endif #ifndef CONFIG_CPU_NO_USER_BKPT if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT) { send_sig(SIGTRAP, current, 0); return; } #endif do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, "Oops - illegal instruction exception"); } asmlinkage void do_trap_fpe(struct pt_regs *regs) { #ifdef CONFIG_CPU_HAS_FPU return fpu_fpe(regs); #else do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc, "Oops - fpu instruction exception"); #endif } asmlinkage void do_trap_priv(struct pt_regs *regs) { #ifdef CONFIG_CPU_HAS_FPU if (user_mode(regs) && fpu_libc_helper(regs)) return; #endif do_trap_error(regs, SIGILL, ILL_PRVOPC, regs->pc, "Oops - illegal privileged exception"); } asmlinkage void trap_c(struct pt_regs *regs) { switch (trap_no(regs)) { case VEC_ZERODIV: do_trap_zdiv(regs); break; case VEC_TRACE: do_trap_bkpt(regs); break; case VEC_ILLEGAL: do_trap_illinsn(regs); break; case VEC_TRAP1: case VEC_BREAKPOINT: do_trap_bkpt(regs); break; case VEC_ACCESS: do_trap_buserr(regs); break; case VEC_ALIGN: do_trap_misaligned(regs); break; case VEC_FPE: do_trap_fpe(regs); break; case VEC_PRIV: do_trap_priv(regs); break; default: do_trap_unknown(regs); break; } }
linux-master
arch/csky/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/kbuild.h> #include <abi/regdef.h> int main(void) { /* offsets into the task struct */ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack)); DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags)); DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace)); DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); DEFINE(TASK_MM, offsetof(struct task_struct, mm)); DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); /* offsets into the thread struct */ DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp)); DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr)); DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr)); DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr)); /* offsets into the thread_info struct */ DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TINFO_TP_VALUE, offsetof(struct thread_info, tp_value)); DEFINE(TINFO_TASK, offsetof(struct thread_info, task)); /* offsets into the pt_regs */ DEFINE(PT_PC, offsetof(struct pt_regs, pc)); DEFINE(PT_ORIG_AO, offsetof(struct pt_regs, orig_a0)); DEFINE(PT_SR, offsetof(struct pt_regs, sr)); DEFINE(PT_A0, offsetof(struct pt_regs, a0)); DEFINE(PT_A1, offsetof(struct pt_regs, a1)); DEFINE(PT_A2, offsetof(struct pt_regs, a2)); DEFINE(PT_A3, offsetof(struct pt_regs, a3)); DEFINE(PT_REGS0, offsetof(struct pt_regs, regs[0])); DEFINE(PT_REGS1, offsetof(struct pt_regs, regs[1])); DEFINE(PT_REGS2, offsetof(struct pt_regs, regs[2])); DEFINE(PT_REGS3, offsetof(struct pt_regs, regs[3])); DEFINE(PT_REGS4, offsetof(struct pt_regs, regs[4])); DEFINE(PT_REGS5, offsetof(struct pt_regs, regs[5])); DEFINE(PT_REGS6, offsetof(struct pt_regs, regs[6])); DEFINE(PT_REGS7, offsetof(struct pt_regs, regs[7])); DEFINE(PT_REGS8, offsetof(struct pt_regs, regs[8])); DEFINE(PT_REGS9, offsetof(struct pt_regs, regs[9])); DEFINE(PT_R15, offsetof(struct pt_regs, lr)); #if defined(__CSKYABIV2__) DEFINE(PT_R16, offsetof(struct pt_regs, exregs[0])); DEFINE(PT_R17, offsetof(struct pt_regs, exregs[1])); DEFINE(PT_R18, offsetof(struct pt_regs, exregs[2])); DEFINE(PT_R19, offsetof(struct pt_regs, exregs[3])); DEFINE(PT_R20, offsetof(struct pt_regs, exregs[4])); DEFINE(PT_R21, offsetof(struct pt_regs, exregs[5])); DEFINE(PT_R22, offsetof(struct pt_regs, exregs[6])); DEFINE(PT_R23, offsetof(struct pt_regs, exregs[7])); DEFINE(PT_R24, offsetof(struct pt_regs, exregs[8])); DEFINE(PT_R25, offsetof(struct pt_regs, exregs[9])); DEFINE(PT_R26, offsetof(struct pt_regs, exregs[10])); DEFINE(PT_R27, offsetof(struct pt_regs, exregs[11])); DEFINE(PT_R28, offsetof(struct pt_regs, exregs[12])); DEFINE(PT_R29, offsetof(struct pt_regs, exregs[13])); DEFINE(PT_R30, offsetof(struct pt_regs, exregs[14])); DEFINE(PT_R31, offsetof(struct pt_regs, exregs[15])); DEFINE(PT_RHI, offsetof(struct pt_regs, rhi)); DEFINE(PT_RLO, offsetof(struct pt_regs, rlo)); #endif DEFINE(PT_USP, offsetof(struct pt_regs, usp)); DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs)); /* offsets into the irq_cpustat_t struct */ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); /* signal defines */ DEFINE(SIGSEGV, SIGSEGV); DEFINE(SIGTRAP, SIGTRAP); return 0; }
linux-master
arch/csky/kernel/asm-offsets.c
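The asm-offsets file above exists only for the constants it emits at build time. A minimal user-space sketch of the underlying idea follows: offsetof() is a compile-time constant, so a C file can print structure member offsets that assembly code later consumes as plain numbers. The struct and macro names below are hypothetical, not the kernel's.

/*
 * Hedged sketch, not kernel code: mirrors the DEFINE(SYM, offsetof(...))
 * pattern by emitting member offsets as preprocessor constants.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_regs {
	unsigned long pc;
	unsigned long sr;
	unsigned long usp;
	unsigned long a0;
};

int main(void)
{
	/* Each line corresponds to one DEFINE() entry in asm-offsets.c. */
	printf("#define DEMO_PT_PC  %zu\n", offsetof(struct demo_regs, pc));
	printf("#define DEMO_PT_SR  %zu\n", offsetof(struct demo_regs, sr));
	printf("#define DEMO_PT_USP %zu\n", offsetof(struct demo_regs, usp));
	printf("#define DEMO_PT_A0  %zu\n", offsetof(struct demo_regs, a0));
	return 0;
}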
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/spinlock.h> #ifdef CONFIG_CPU_CK810 #define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) #define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) #define CHANGE_JSRI_TO_LRW(addr) do { \ *(uint16_t *)(addr) = (*(uint16_t *)(addr) & 0xFF9F) | 0x001a; \ *((uint16_t *)(addr) + 1) = *((uint16_t *)(addr) + 1) & 0xFFFF; \ } while (0) #define SET_JSR32_R26(addr) do { \ *(uint16_t *)(addr) = 0xE8Fa; \ *((uint16_t *)(addr) + 1) = 0x0000; \ } while (0) static void jsri_2_lrw_jsr(uint32_t *location) { uint16_t *location_tmp = (uint16_t *)location; if (IS_BSR32(*location_tmp, *(location_tmp + 1))) return; if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) { /* jsri 0x... --> lrw r26, 0x... */ CHANGE_JSRI_TO_LRW(location); /* lsli r0, r0 --> jsr r26 */ SET_JSR32_R26(location + 1); } } #else static inline void jsri_2_lrw_jsr(uint32_t *location) { return; } #endif int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { unsigned int i; Elf32_Rela *rel = (void *) sechdrs[relsec].sh_addr; Elf32_Sym *sym; uint32_t *location; short *temp; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + ELF32_R_SYM(rel[i].r_info); switch (ELF32_R_TYPE(rel[i].r_info)) { case R_CSKY_32: /* We add the value into the location given */ *location = rel[i].r_addend + sym->st_value; break; case R_CSKY_PC32: /* Add the value, subtract its position */ *location = rel[i].r_addend + sym->st_value - (uint32_t)location; break; case R_CSKY_PCRELJSR_IMM11BY2: break; case R_CSKY_PCRELJSR_IMM26BY2: jsri_2_lrw_jsr(location); break; case R_CSKY_ADDR_HI16: temp = ((short *)location) + 1; *temp = (short) ((rel[i].r_addend + sym->st_value) >> 16); break; case R_CSKY_ADDR_LO16: temp = ((short *)location) + 1; *temp = (short) ((rel[i].r_addend + sym->st_value) & 0xffff); break; default: pr_err("module %s: Unknown relocation: %u\n", me->name, ELF32_R_TYPE(rel[i].r_info)); return -ENOEXEC; } } return 0; }
linux-master
arch/csky/kernel/module.c
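The R_CSKY_ADDR_HI16/LO16 cases in apply_relocate_add() above patch a 32-bit value into an instruction pair as two 16-bit halves. A hedged stand-alone sketch of that split, using made-up symbol and addend values:

/*
 * Hedged sketch (not kernel code): how a relocated value is split into
 * the halves written by the R_CSKY_ADDR_HI16 and R_CSKY_ADDR_LO16 cases.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sym_value = 0x80123456;	/* hypothetical st_value */
	uint32_t addend = 0x10;			/* hypothetical r_addend */
	uint32_t target = sym_value + addend;

	uint16_t hi16 = (uint16_t)(target >> 16);	/* R_CSKY_ADDR_HI16 */
	uint16_t lo16 = (uint16_t)(target & 0xffff);	/* R_CSKY_ADDR_LO16 */

	printf("target=0x%08x hi16=0x%04x lo16=0x%04x\n",
	       (unsigned int)target, hi16, lo16);

	/* Recombining the halves must give back the relocated value. */
	printf("recombined=0x%08x\n",
	       (unsigned int)(((uint32_t)hi16 << 16) | lo16));
	return 0;
}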
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/binfmts.h> #include <linux/elf.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/page.h> #ifdef GENERIC_TIME_VSYSCALL #include <vdso/datapage.h> #else #include <asm/vdso.h> #endif extern char vdso_start[], vdso_end[]; static unsigned int vdso_pages; static struct page **vdso_pagelist; /* * The vDSO data page. */ static union { struct vdso_data data; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; static int __init vdso_init(void) { unsigned int i; vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT; vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL); if (unlikely(vdso_pagelist == NULL)) { pr_err("vdso: pagelist allocation failed\n"); return -ENOMEM; } for (i = 0; i < vdso_pages; i++) { struct page *pg; pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); vdso_pagelist[i] = pg; } vdso_pagelist[i] = virt_to_page(vdso_data); return 0; } arch_initcall(vdso_init); int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long vdso_base, vdso_len; int ret; vdso_len = (vdso_pages + 1) << PAGE_SHIFT; mmap_write_lock(mm); vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = vdso_base; goto end; } /* * Put vDSO base into mm struct. We need to do this before calling * install_special_mapping or the perf counter mmap tracking code * will fail to recognise it as a vDSO (since arch_vma_name fails). */ mm->context.vdso = (void *)vdso_base; ret = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), vdso_pagelist); if (unlikely(ret)) { mm->context.vdso = NULL; goto end; } vdso_base += (vdso_pages << PAGE_SHIFT); ret = install_special_mapping(mm, vdso_base, PAGE_SIZE, (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]); if (unlikely(ret)) mm->context.vdso = NULL; end: mmap_write_unlock(mm); return ret; } const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso)) return "[vdso]"; if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso + PAGE_SIZE)) return "[vdso_data]"; return NULL; }
linux-master
arch/csky/kernel/vdso.c
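Because arch_vma_name() above labels the two special mappings, they appear as "[vdso]" and "[vdso_data]" in /proc/self/maps. A hedged user-space sketch that simply scans for those lines:

/*
 * Hedged user-space sketch: locate the mappings installed by
 * arch_setup_additional_pages() via their arch_vma_name() labels.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;

	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vdso_data]"))
			fputs(line, stdout);
	}

	fclose(maps);
	return 0;
}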
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/syscalls.h>

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);
	struct pt_regs *reg = current_pt_regs();

	reg->tls = addr;
	ti->tp_value = addr;

	return 0;
}

SYSCALL_DEFINE6(mmap2,
	unsigned long, addr,
	unsigned long, len,
	unsigned long, prot,
	unsigned long, flags,
	unsigned long, fd,
	off_t, offset)
{
	if (unlikely(offset & (~PAGE_MASK >> 12)))
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       offset >> (PAGE_SHIFT - 12));
}

/*
 * On ABIv1, 64-bit arguments must be passed in even/odd register pairs,
 * so the advice argument is moved ahead of the 64-bit offset and length.
 */
SYSCALL_DEFINE4(csky_fadvise64_64,
	int, fd,
	int, advice,
	loff_t, offset,
	loff_t, len)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}
linux-master
arch/csky/kernel/syscall.c
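The mmap2 wrapper above receives the file offset in 4096-byte units, rejects offsets that are not aligned to the configured page size, and converts the rest to page units. A hedged sketch of the same arithmetic, with PAGE_SHIFT standing in for the kernel configuration value:

/*
 * Hedged sketch (not kernel code) of the sys_mmap2 offset conversion.
 * With 4 KiB pages the alignment mask is zero, so any offset passes.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long offset_4k = 8;	/* hypothetical offset, in 4 KiB units */

	if (offset_4k & (~PAGE_MASK >> 12)) {
		printf("offset not page aligned\n");
		return 1;
	}

	printf("page offset passed to mmap core: %lu\n",
	       offset_4k >> (PAGE_SHIFT - 12));
	return 0;
}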
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/console.h> #include <linux/memblock.h> #include <linux/initrd.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/start_kernel.h> #include <linux/dma-map-ops.h> #include <linux/screen_info.h> #include <asm/sections.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #ifdef CONFIG_DUMMY_CONSOLE struct screen_info screen_info = { .orig_video_lines = 30, .orig_video_cols = 80, .orig_video_mode = 0, .orig_video_ega_bx = 0, .orig_video_isVGA = 1, .orig_video_points = 8 }; #endif static void __init csky_memblock_init(void) { unsigned long lowmem_size = PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET); unsigned long sseg_size = PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET); unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; signed long size; memblock_reserve(__pa(_start), _end - _start); early_init_fdt_reserve_self(); early_init_fdt_scan_reserved_mem(); memblock_dump_all(); min_low_pfn = PFN_UP(memblock_start_of_DRAM()); max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM()); size = max_pfn - min_low_pfn; if (size >= lowmem_size) { max_low_pfn = min_low_pfn + lowmem_size; #ifdef CONFIG_PAGE_OFFSET_80000000 write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE); #endif } else if (size > sseg_size) { max_low_pfn = min_low_pfn + sseg_size; } max_zone_pfn[ZONE_NORMAL] = max_low_pfn; mmu_init(min_low_pfn, max_low_pfn); #ifdef CONFIG_HIGHMEM max_zone_pfn[ZONE_HIGHMEM] = max_pfn; highstart_pfn = max_low_pfn; highend_pfn = max_pfn; #endif memblock_set_current_limit(PFN_PHYS(max_low_pfn)); dma_contiguous_reserve(0); free_area_init(max_zone_pfn); } void __init setup_arch(char **cmdline_p) { *cmdline_p = boot_command_line; console_verbose(); pr_info("Phys. mem: %ldMB\n", (unsigned long) memblock_phys_mem_size()/1024/1024); setup_initial_init_mm(_start, _etext, _edata, _end); parse_early_param(); csky_memblock_init(); unflatten_and_copy_device_tree(); #ifdef CONFIG_SMP setup_smp(); #endif sparse_init(); fixaddr_init(); #ifdef CONFIG_HIGHMEM kmap_init(); #endif } unsigned long va_pa_offset; EXPORT_SYMBOL(va_pa_offset); static inline unsigned long read_mmu_msa(void) { #ifdef CONFIG_PAGE_OFFSET_80000000 return read_mmu_msa0(); #endif #ifdef CONFIG_PAGE_OFFSET_A0000000 return read_mmu_msa1(); #endif } asmlinkage __visible void __init csky_start(unsigned int unused, void *dtb_start) { /* Clean up bss section */ memset(__bss_start, 0, __bss_stop - __bss_start); va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1); pre_trap_init(); if (dtb_start == NULL) early_init_dt_scan(__dtb_start); else early_init_dt_scan(dtb_start); start_kernel(); asm volatile("br .\n"); }
linux-master
arch/csky/kernel/setup.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/reboot.h>

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void machine_power_off(void)
{
	local_irq_disable();
	do_kernel_power_off();
	asm volatile ("bkpt");
}

void machine_halt(void)
{
	local_irq_disable();
	do_kernel_power_off();
	asm volatile ("bkpt");
}

void machine_restart(char *cmd)
{
	local_irq_disable();
	do_kernel_restart(cmd);
	asm volatile ("bkpt");
}
linux-master
arch/csky/kernel/power.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/clocksource.h>
#include <linux/of_clk.h>

void __init time_init(void)
{
	of_clk_init(NULL);
	timer_probe();
}
linux-master
arch/csky/kernel/time.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #define CSKY_PMU_MAX_EVENTS 32 #define DEFAULT_COUNT_WIDTH 48 #define HPCR "<0, 0x0>" /* PMU Control reg */ #define HPSPR "<0, 0x1>" /* Start PC reg */ #define HPEPR "<0, 0x2>" /* End PC reg */ #define HPSIR "<0, 0x3>" /* Soft Counter reg */ #define HPCNTENR "<0, 0x4>" /* Count Enable reg */ #define HPINTENR "<0, 0x5>" /* Interrupt Enable reg */ #define HPOFSR "<0, 0x6>" /* Interrupt Status reg */ /* The events for a given PMU register set. */ struct pmu_hw_events { /* * The events that are active on the PMU for the given index. */ struct perf_event *events[CSKY_PMU_MAX_EVENTS]; /* * A 1 bit for an index indicates that the counter is being used for * an event. A 0 means that the counter can be used. */ unsigned long used_mask[BITS_TO_LONGS(CSKY_PMU_MAX_EVENTS)]; }; static uint64_t (*hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])(void); static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); static struct csky_pmu_t { struct pmu pmu; struct pmu_hw_events __percpu *hw_events; struct platform_device *plat_device; uint32_t count_width; uint32_t hpcr; u64 max_period; } csky_pmu; static int csky_pmu_irq; #define to_csky_pmu(p) (container_of(p, struct csky_pmu, pmu)) #define cprgr(reg) \ ({ \ unsigned int tmp; \ asm volatile("cprgr %0, "reg"\n" \ : "=r"(tmp) \ : \ : "memory"); \ tmp; \ }) #define cpwgr(reg, val) \ ({ \ asm volatile( \ "cpwgr %0, "reg"\n" \ : \ : "r"(val) \ : "memory"); \ }) #define cprcr(reg) \ ({ \ unsigned int tmp; \ asm volatile("cprcr %0, "reg"\n" \ : "=r"(tmp) \ : \ : "memory"); \ tmp; \ }) #define cpwcr(reg, val) \ ({ \ asm volatile( \ "cpwcr %0, "reg"\n" \ : \ : "r"(val) \ : "memory"); \ }) /* cycle counter */ uint64_t csky_pmu_read_cc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x3>"); lo = cprgr("<0, 0x2>"); hi = cprgr("<0, 0x3>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_cc(uint64_t val) { cpwgr("<0, 0x2>", (uint32_t) val); cpwgr("<0, 0x3>", (uint32_t) (val >> 32)); } /* instruction counter */ static uint64_t csky_pmu_read_ic(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x5>"); lo = cprgr("<0, 0x4>"); hi = cprgr("<0, 0x5>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_ic(uint64_t val) { cpwgr("<0, 0x4>", (uint32_t) val); cpwgr("<0, 0x5>", (uint32_t) (val >> 32)); } /* l1 icache access counter */ static uint64_t csky_pmu_read_icac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x7>"); lo = cprgr("<0, 0x6>"); hi = cprgr("<0, 0x7>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_icac(uint64_t val) { cpwgr("<0, 0x6>", (uint32_t) val); cpwgr("<0, 0x7>", (uint32_t) (val >> 32)); } /* l1 icache miss counter */ static uint64_t csky_pmu_read_icmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x9>"); lo = cprgr("<0, 0x8>"); hi = cprgr("<0, 0x9>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_icmc(uint64_t val) { cpwgr("<0, 0x8>", (uint32_t) val); cpwgr("<0, 0x9>", (uint32_t) (val >> 32)); } /* l1 dcache access counter */ static uint64_t 
csky_pmu_read_dcac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0xb>"); lo = cprgr("<0, 0xa>"); hi = cprgr("<0, 0xb>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcac(uint64_t val) { cpwgr("<0, 0xa>", (uint32_t) val); cpwgr("<0, 0xb>", (uint32_t) (val >> 32)); } /* l1 dcache miss counter */ static uint64_t csky_pmu_read_dcmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0xd>"); lo = cprgr("<0, 0xc>"); hi = cprgr("<0, 0xd>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcmc(uint64_t val) { cpwgr("<0, 0xc>", (uint32_t) val); cpwgr("<0, 0xd>", (uint32_t) (val >> 32)); } /* l2 cache access counter */ static uint64_t csky_pmu_read_l2ac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0xf>"); lo = cprgr("<0, 0xe>"); hi = cprgr("<0, 0xf>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2ac(uint64_t val) { cpwgr("<0, 0xe>", (uint32_t) val); cpwgr("<0, 0xf>", (uint32_t) (val >> 32)); } /* l2 cache miss counter */ static uint64_t csky_pmu_read_l2mc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x11>"); lo = cprgr("<0, 0x10>"); hi = cprgr("<0, 0x11>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2mc(uint64_t val) { cpwgr("<0, 0x10>", (uint32_t) val); cpwgr("<0, 0x11>", (uint32_t) (val >> 32)); } /* I-UTLB miss counter */ static uint64_t csky_pmu_read_iutlbmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x15>"); lo = cprgr("<0, 0x14>"); hi = cprgr("<0, 0x15>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_iutlbmc(uint64_t val) { cpwgr("<0, 0x14>", (uint32_t) val); cpwgr("<0, 0x15>", (uint32_t) (val >> 32)); } /* D-UTLB miss counter */ static uint64_t csky_pmu_read_dutlbmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x17>"); lo = cprgr("<0, 0x16>"); hi = cprgr("<0, 0x17>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dutlbmc(uint64_t val) { cpwgr("<0, 0x16>", (uint32_t) val); cpwgr("<0, 0x17>", (uint32_t) (val >> 32)); } /* JTLB miss counter */ static uint64_t csky_pmu_read_jtlbmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x19>"); lo = cprgr("<0, 0x18>"); hi = cprgr("<0, 0x19>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_jtlbmc(uint64_t val) { cpwgr("<0, 0x18>", (uint32_t) val); cpwgr("<0, 0x19>", (uint32_t) (val >> 32)); } /* software counter */ static uint64_t csky_pmu_read_softc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x1b>"); lo = cprgr("<0, 0x1a>"); hi = cprgr("<0, 0x1b>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_softc(uint64_t val) { cpwgr("<0, 0x1a>", (uint32_t) val); cpwgr("<0, 0x1b>", (uint32_t) (val >> 32)); } /* conditional branch mispredict counter */ static uint64_t csky_pmu_read_cbmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x1d>"); lo = cprgr("<0, 0x1c>"); hi = cprgr("<0, 0x1d>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_cbmc(uint64_t val) { cpwgr("<0, 0x1c>", 
(uint32_t) val); cpwgr("<0, 0x1d>", (uint32_t) (val >> 32)); } /* conditional branch instruction counter */ static uint64_t csky_pmu_read_cbic(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x1f>"); lo = cprgr("<0, 0x1e>"); hi = cprgr("<0, 0x1f>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_cbic(uint64_t val) { cpwgr("<0, 0x1e>", (uint32_t) val); cpwgr("<0, 0x1f>", (uint32_t) (val >> 32)); } /* indirect branch mispredict counter */ static uint64_t csky_pmu_read_ibmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x21>"); lo = cprgr("<0, 0x20>"); hi = cprgr("<0, 0x21>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_ibmc(uint64_t val) { cpwgr("<0, 0x20>", (uint32_t) val); cpwgr("<0, 0x21>", (uint32_t) (val >> 32)); } /* indirect branch instruction counter */ static uint64_t csky_pmu_read_ibic(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x23>"); lo = cprgr("<0, 0x22>"); hi = cprgr("<0, 0x23>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_ibic(uint64_t val) { cpwgr("<0, 0x22>", (uint32_t) val); cpwgr("<0, 0x23>", (uint32_t) (val >> 32)); } /* LSU spec fail counter */ static uint64_t csky_pmu_read_lsfc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x25>"); lo = cprgr("<0, 0x24>"); hi = cprgr("<0, 0x25>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_lsfc(uint64_t val) { cpwgr("<0, 0x24>", (uint32_t) val); cpwgr("<0, 0x25>", (uint32_t) (val >> 32)); } /* store instruction counter */ static uint64_t csky_pmu_read_sic(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x27>"); lo = cprgr("<0, 0x26>"); hi = cprgr("<0, 0x27>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_sic(uint64_t val) { cpwgr("<0, 0x26>", (uint32_t) val); cpwgr("<0, 0x27>", (uint32_t) (val >> 32)); } /* dcache read access counter */ static uint64_t csky_pmu_read_dcrac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x29>"); lo = cprgr("<0, 0x28>"); hi = cprgr("<0, 0x29>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcrac(uint64_t val) { cpwgr("<0, 0x28>", (uint32_t) val); cpwgr("<0, 0x29>", (uint32_t) (val >> 32)); } /* dcache read miss counter */ static uint64_t csky_pmu_read_dcrmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x2b>"); lo = cprgr("<0, 0x2a>"); hi = cprgr("<0, 0x2b>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcrmc(uint64_t val) { cpwgr("<0, 0x2a>", (uint32_t) val); cpwgr("<0, 0x2b>", (uint32_t) (val >> 32)); } /* dcache write access counter */ static uint64_t csky_pmu_read_dcwac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x2d>"); lo = cprgr("<0, 0x2c>"); hi = cprgr("<0, 0x2d>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcwac(uint64_t val) { cpwgr("<0, 0x2c>", (uint32_t) val); cpwgr("<0, 0x2d>", (uint32_t) (val >> 32)); } /* dcache write miss counter */ static uint64_t csky_pmu_read_dcwmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x2f>"); lo = cprgr("<0, 0x2e>"); hi = 
cprgr("<0, 0x2f>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_dcwmc(uint64_t val) { cpwgr("<0, 0x2e>", (uint32_t) val); cpwgr("<0, 0x2f>", (uint32_t) (val >> 32)); } /* l2cache read access counter */ static uint64_t csky_pmu_read_l2rac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x31>"); lo = cprgr("<0, 0x30>"); hi = cprgr("<0, 0x31>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2rac(uint64_t val) { cpwgr("<0, 0x30>", (uint32_t) val); cpwgr("<0, 0x31>", (uint32_t) (val >> 32)); } /* l2cache read miss counter */ static uint64_t csky_pmu_read_l2rmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x33>"); lo = cprgr("<0, 0x32>"); hi = cprgr("<0, 0x33>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2rmc(uint64_t val) { cpwgr("<0, 0x32>", (uint32_t) val); cpwgr("<0, 0x33>", (uint32_t) (val >> 32)); } /* l2cache write access counter */ static uint64_t csky_pmu_read_l2wac(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x35>"); lo = cprgr("<0, 0x34>"); hi = cprgr("<0, 0x35>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2wac(uint64_t val) { cpwgr("<0, 0x34>", (uint32_t) val); cpwgr("<0, 0x35>", (uint32_t) (val >> 32)); } /* l2cache write miss counter */ static uint64_t csky_pmu_read_l2wmc(void) { uint32_t lo, hi, tmp; uint64_t result; do { tmp = cprgr("<0, 0x37>"); lo = cprgr("<0, 0x36>"); hi = cprgr("<0, 0x37>"); } while (hi != tmp); result = (uint64_t) (hi) << 32; result |= lo; return result; } static void csky_pmu_write_l2wmc(uint64_t val) { cpwgr("<0, 0x36>", (uint32_t) val); cpwgr("<0, 0x37>", (uint32_t) (val >> 32)); } #define HW_OP_UNSUPPORTED 0xffff static const int csky_pmu_hw_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x1, [PERF_COUNT_HW_INSTRUCTIONS] = 0x2, [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0xf, [PERF_COUNT_HW_BRANCH_MISSES] = 0xe, [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_REF_CPU_CYCLES] = HW_OP_UNSUPPORTED, }; #define C(_x) PERF_COUNT_HW_CACHE_##_x #define CACHE_OP_UNSUPPORTED 0xffff static const int csky_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { #ifdef CONFIG_CPU_CK810 [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = 0x5, [C(RESULT_MISS)] = 0x6, }, #else [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x14, [C(RESULT_MISS)] = 0x15, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = 0x16, [C(RESULT_MISS)] = 0x17, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, #endif }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x3, [C(RESULT_MISS)] = 0x4, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(LL)] = { #ifdef CONFIG_CPU_CK810 [C(OP_READ)] = { 
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = 0x7, [C(RESULT_MISS)] = 0x8, }, #else [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x18, [C(RESULT_MISS)] = 0x19, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = 0x1a, [C(RESULT_MISS)] = 0x1b, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, #endif }, [C(DTLB)] = { #ifdef CONFIG_CPU_CK810 [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, #else [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x14, [C(RESULT_MISS)] = 0xb, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = 0x16, [C(RESULT_MISS)] = 0xb, }, #endif [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(ITLB)] = { #ifdef CONFIG_CPU_CK810 [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, #else [C(OP_READ)] = { [C(RESULT_ACCESS)] = 0x3, [C(RESULT_MISS)] = 0xa, }, #endif [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(BPU)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, [C(NODE)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, }, }, }; int csky_pmu_event_set_period(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > (s64)csky_pmu.max_period) left = csky_pmu.max_period; /* * The hw event starts counting from this event offset, * mark it to be able to extract future "deltas": */ local64_set(&hwc->prev_count, (u64)(-left)); if (hw_raw_write_mapping[hwc->idx] != NULL) hw_raw_write_mapping[hwc->idx]((u64)(-left) & csky_pmu.max_period); cpwcr(HPOFSR, ~BIT(hwc->idx) & cprcr(HPOFSR)); perf_event_update_userpage(event); return ret; } static void csky_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc) { uint64_t prev_raw_count = local64_read(&hwc->prev_count); /* * Sign extend count value to 64bit, otherwise delta calculation * would be incorrect when overflow occurs. 
*/ uint64_t new_raw_count = sign_extend64( hw_raw_read_mapping[hwc->idx](), csky_pmu.count_width - 1); int64_t delta = new_raw_count - prev_raw_count; /* * We aren't afraid of hwc->prev_count changing beneath our feet * because there's no way for us to re-enter this function anytime. */ local64_set(&hwc->prev_count, new_raw_count); local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); } static void csky_pmu_reset(void *info) { cpwcr(HPCR, BIT(31) | BIT(30) | BIT(1)); } static void csky_pmu_read(struct perf_event *event) { csky_perf_event_update(event, &event->hw); } static int csky_pmu_cache_event(u64 config) { unsigned int cache_type, cache_op, cache_result; cache_type = (config >> 0) & 0xff; cache_op = (config >> 8) & 0xff; cache_result = (config >> 16) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; return csky_pmu_cache_map[cache_type][cache_op][cache_result]; } static int csky_pmu_event_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int ret; switch (event->attr.type) { case PERF_TYPE_HARDWARE: if (event->attr.config >= PERF_COUNT_HW_MAX) return -ENOENT; ret = csky_pmu_hw_map[event->attr.config]; if (ret == HW_OP_UNSUPPORTED) return -ENOENT; hwc->idx = ret; break; case PERF_TYPE_HW_CACHE: ret = csky_pmu_cache_event(event->attr.config); if (ret == CACHE_OP_UNSUPPORTED) return -ENOENT; hwc->idx = ret; break; case PERF_TYPE_RAW: if (hw_raw_read_mapping[event->attr.config] == NULL) return -ENOENT; hwc->idx = event->attr.config; break; default: return -ENOENT; } if (event->attr.exclude_user) csky_pmu.hpcr = BIT(2); else if (event->attr.exclude_kernel) csky_pmu.hpcr = BIT(3); else csky_pmu.hpcr = BIT(2) | BIT(3); csky_pmu.hpcr |= BIT(1) | BIT(0); return 0; } /* starts all counters */ static void csky_pmu_enable(struct pmu *pmu) { cpwcr(HPCR, csky_pmu.hpcr); } /* stops all counters */ static void csky_pmu_disable(struct pmu *pmu) { cpwcr(HPCR, BIT(1)); } static void csky_pmu_start(struct perf_event *event, int flags) { unsigned long flg; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (WARN_ON_ONCE(idx == -1)) return; if (flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; csky_pmu_event_set_period(event); local_irq_save(flg); cpwcr(HPINTENR, BIT(idx) | cprcr(HPINTENR)); cpwcr(HPCNTENR, BIT(idx) | cprcr(HPCNTENR)); local_irq_restore(flg); } static void csky_pmu_stop_event(struct perf_event *event) { unsigned long flg; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; local_irq_save(flg); cpwcr(HPINTENR, ~BIT(idx) & cprcr(HPINTENR)); cpwcr(HPCNTENR, ~BIT(idx) & cprcr(HPCNTENR)); local_irq_restore(flg); } static void csky_pmu_stop(struct perf_event *event, int flags) { if (!(event->hw.state & PERF_HES_STOPPED)) { csky_pmu_stop_event(event); event->hw.state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { csky_perf_event_update(event, &event->hw); event->hw.state |= PERF_HES_UPTODATE; } } static void csky_pmu_del(struct perf_event *event, int flags) { struct pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); struct hw_perf_event *hwc = &event->hw; csky_pmu_stop(event, PERF_EF_UPDATE); hw_events->events[hwc->idx] = NULL; perf_event_update_userpage(event); } /* allocate hardware counter and optionally start counting */ static int csky_pmu_add(struct perf_event *event, int flags) { struct 
pmu_hw_events *hw_events = this_cpu_ptr(csky_pmu.hw_events); struct hw_perf_event *hwc = &event->hw; hw_events->events[hwc->idx] = event; hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; if (flags & PERF_EF_START) csky_pmu_start(event, PERF_EF_RELOAD); perf_event_update_userpage(event); return 0; } static irqreturn_t csky_pmu_handle_irq(int irq_num, void *dev) { struct perf_sample_data data; struct pmu_hw_events *cpuc = this_cpu_ptr(csky_pmu.hw_events); struct pt_regs *regs; int idx; /* * Did an overflow occur? */ if (!cprcr(HPOFSR)) return IRQ_NONE; /* * Handle the counter(s) overflow(s) */ regs = get_irq_regs(); csky_pmu_disable(&csky_pmu.pmu); for (idx = 0; idx < CSKY_PMU_MAX_EVENTS; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; /* Ignore if we don't have an event. */ if (!event) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!(cprcr(HPOFSR) & BIT(idx))) continue; hwc = &event->hw; csky_perf_event_update(event, &event->hw); perf_sample_data_init(&data, 0, hwc->last_period); csky_pmu_event_set_period(event); if (perf_event_overflow(event, &data, regs)) csky_pmu_stop_event(event); } csky_pmu_enable(&csky_pmu.pmu); /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ irq_work_run(); return IRQ_HANDLED; } static int csky_pmu_request_irq(irq_handler_t handler) { int err, irqs; struct platform_device *pmu_device = csky_pmu.plat_device; if (!pmu_device) return -ENODEV; irqs = min(pmu_device->num_resources, num_possible_cpus()); if (irqs < 1) { pr_err("no irqs for PMUs defined\n"); return -ENODEV; } csky_pmu_irq = platform_get_irq(pmu_device, 0); if (csky_pmu_irq < 0) return -ENODEV; err = request_percpu_irq(csky_pmu_irq, handler, "csky-pmu", this_cpu_ptr(csky_pmu.hw_events)); if (err) { pr_err("unable to request IRQ%d for CSKY PMU counters\n", csky_pmu_irq); return err; } return 0; } static void csky_pmu_free_irq(void) { int irq; struct platform_device *pmu_device = csky_pmu.plat_device; irq = platform_get_irq(pmu_device, 0); if (irq >= 0) free_percpu_irq(irq, this_cpu_ptr(csky_pmu.hw_events)); } int init_hw_perf_events(void) { csky_pmu.hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL); if (!csky_pmu.hw_events) { pr_info("failed to allocate per-cpu PMU data.\n"); return -ENOMEM; } csky_pmu.pmu = (struct pmu) { .pmu_enable = csky_pmu_enable, .pmu_disable = csky_pmu_disable, .event_init = csky_pmu_event_init, .add = csky_pmu_add, .del = csky_pmu_del, .start = csky_pmu_start, .stop = csky_pmu_stop, .read = csky_pmu_read, }; memset((void *)hw_raw_read_mapping, 0, sizeof(hw_raw_read_mapping[CSKY_PMU_MAX_EVENTS])); hw_raw_read_mapping[0x1] = csky_pmu_read_cc; hw_raw_read_mapping[0x2] = csky_pmu_read_ic; hw_raw_read_mapping[0x3] = csky_pmu_read_icac; hw_raw_read_mapping[0x4] = csky_pmu_read_icmc; hw_raw_read_mapping[0x5] = csky_pmu_read_dcac; hw_raw_read_mapping[0x6] = csky_pmu_read_dcmc; hw_raw_read_mapping[0x7] = csky_pmu_read_l2ac; hw_raw_read_mapping[0x8] = csky_pmu_read_l2mc; hw_raw_read_mapping[0xa] = csky_pmu_read_iutlbmc; hw_raw_read_mapping[0xb] = csky_pmu_read_dutlbmc; hw_raw_read_mapping[0xc] = csky_pmu_read_jtlbmc; hw_raw_read_mapping[0xd] = csky_pmu_read_softc; hw_raw_read_mapping[0xe] = csky_pmu_read_cbmc; hw_raw_read_mapping[0xf] = csky_pmu_read_cbic; hw_raw_read_mapping[0x10] = csky_pmu_read_ibmc; 
hw_raw_read_mapping[0x11] = csky_pmu_read_ibic; hw_raw_read_mapping[0x12] = csky_pmu_read_lsfc; hw_raw_read_mapping[0x13] = csky_pmu_read_sic; hw_raw_read_mapping[0x14] = csky_pmu_read_dcrac; hw_raw_read_mapping[0x15] = csky_pmu_read_dcrmc; hw_raw_read_mapping[0x16] = csky_pmu_read_dcwac; hw_raw_read_mapping[0x17] = csky_pmu_read_dcwmc; hw_raw_read_mapping[0x18] = csky_pmu_read_l2rac; hw_raw_read_mapping[0x19] = csky_pmu_read_l2rmc; hw_raw_read_mapping[0x1a] = csky_pmu_read_l2wac; hw_raw_read_mapping[0x1b] = csky_pmu_read_l2wmc; memset((void *)hw_raw_write_mapping, 0, sizeof(hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])); hw_raw_write_mapping[0x1] = csky_pmu_write_cc; hw_raw_write_mapping[0x2] = csky_pmu_write_ic; hw_raw_write_mapping[0x3] = csky_pmu_write_icac; hw_raw_write_mapping[0x4] = csky_pmu_write_icmc; hw_raw_write_mapping[0x5] = csky_pmu_write_dcac; hw_raw_write_mapping[0x6] = csky_pmu_write_dcmc; hw_raw_write_mapping[0x7] = csky_pmu_write_l2ac; hw_raw_write_mapping[0x8] = csky_pmu_write_l2mc; hw_raw_write_mapping[0xa] = csky_pmu_write_iutlbmc; hw_raw_write_mapping[0xb] = csky_pmu_write_dutlbmc; hw_raw_write_mapping[0xc] = csky_pmu_write_jtlbmc; hw_raw_write_mapping[0xd] = csky_pmu_write_softc; hw_raw_write_mapping[0xe] = csky_pmu_write_cbmc; hw_raw_write_mapping[0xf] = csky_pmu_write_cbic; hw_raw_write_mapping[0x10] = csky_pmu_write_ibmc; hw_raw_write_mapping[0x11] = csky_pmu_write_ibic; hw_raw_write_mapping[0x12] = csky_pmu_write_lsfc; hw_raw_write_mapping[0x13] = csky_pmu_write_sic; hw_raw_write_mapping[0x14] = csky_pmu_write_dcrac; hw_raw_write_mapping[0x15] = csky_pmu_write_dcrmc; hw_raw_write_mapping[0x16] = csky_pmu_write_dcwac; hw_raw_write_mapping[0x17] = csky_pmu_write_dcwmc; hw_raw_write_mapping[0x18] = csky_pmu_write_l2rac; hw_raw_write_mapping[0x19] = csky_pmu_write_l2rmc; hw_raw_write_mapping[0x1a] = csky_pmu_write_l2wac; hw_raw_write_mapping[0x1b] = csky_pmu_write_l2wmc; return 0; } static int csky_pmu_starting_cpu(unsigned int cpu) { enable_percpu_irq(csky_pmu_irq, 0); return 0; } static int csky_pmu_dying_cpu(unsigned int cpu) { disable_percpu_irq(csky_pmu_irq); return 0; } int csky_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_table) { struct device_node *node = pdev->dev.of_node; int ret; ret = init_hw_perf_events(); if (ret) { pr_notice("[perf] failed to probe PMU!\n"); return ret; } if (of_property_read_u32(node, "count-width", &csky_pmu.count_width)) { csky_pmu.count_width = DEFAULT_COUNT_WIDTH; } csky_pmu.max_period = BIT_ULL(csky_pmu.count_width) - 1; csky_pmu.plat_device = pdev; /* Ensure the PMU has sane values out of reset. 
*/ on_each_cpu(csky_pmu_reset, &csky_pmu, 1); ret = csky_pmu_request_irq(csky_pmu_handle_irq); if (ret) { csky_pmu.pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; pr_notice("[perf] PMU request irq fail!\n"); } ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE", csky_pmu_starting_cpu, csky_pmu_dying_cpu); if (ret) { csky_pmu_free_irq(); free_percpu(csky_pmu.hw_events); return ret; } ret = perf_pmu_register(&csky_pmu.pmu, "cpu", PERF_TYPE_RAW); if (ret) { csky_pmu_free_irq(); free_percpu(csky_pmu.hw_events); } return ret; } static const struct of_device_id csky_pmu_of_device_ids[] = { {.compatible = "csky,csky-pmu"}, {}, }; static int csky_pmu_dev_probe(struct platform_device *pdev) { return csky_pmu_device_probe(pdev, csky_pmu_of_device_ids); } static struct platform_driver csky_pmu_driver = { .driver = { .name = "csky-pmu", .of_match_table = csky_pmu_of_device_ids, }, .probe = csky_pmu_dev_probe, }; static int __init csky_pmu_probe(void) { int ret; ret = platform_driver_register(&csky_pmu_driver); if (ret) pr_notice("[perf] PMU initialization failed\n"); else pr_notice("[perf] PMU initialization done\n"); return ret; } device_initcall(csky_pmu_probe);
linux-master
arch/csky/kernel/perf_event.c
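Every csky_pmu_read_*() helper above uses the same torn-read-avoidance loop: read the high word, then the low word, then the high word again, and retry until both high reads match, so a carry between the two 32-bit halves cannot be observed. A hedged, self-contained sketch of that pattern, where a plain volatile pair stands in for the cprgr() coprocessor accessors:

/*
 * Hedged sketch (not kernel code) of the 64-bit split-counter read loop.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical split counter standing in for the PMU register pair. */
static volatile uint32_t counter_hi, counter_lo;

static uint32_t read_hi(void) { return counter_hi; }
static uint32_t read_lo(void) { return counter_lo; }

static uint64_t read_counter64(void)
{
	uint32_t lo, hi, tmp;

	/* Retry until the high word is stable across the low-word read. */
	do {
		tmp = read_hi();
		lo  = read_lo();
		hi  = read_hi();
	} while (hi != tmp);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	counter_hi = 0x1;
	counter_lo = 0xfffffff0;
	printf("counter = 0x%llx\n", (unsigned long long)read_counter64());
	return 0;
}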
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/syscalls.h>
#include <asm/syscalls.h>

#undef __SYSCALL
#define __SYSCALL(nr, call)[nr] = (call),

#define sys_fadvise64_64 sys_csky_fadvise64_64
void * const sys_call_table[__NR_syscalls] __page_aligned_data = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
linux-master
arch/csky/kernel/syscall_table.c
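The syscall table above relies on a GCC range designator to fill every slot with sys_ni_syscall before the __SYSCALL entries override individual slots. A hedged sketch of the same initializer trick, with made-up handlers:

/*
 * Hedged sketch (not kernel code): range-designated default entries
 * overridden by later designated initializers, as in sys_call_table.
 */
#include <stdio.h>

#define NR_DEMO_CALLS 8

static int demo_ni_syscall(void) { return -38; /* -ENOSYS */ }
static int demo_getpid(void)     { return 1234; }

static int (*const demo_call_table[NR_DEMO_CALLS])(void) = {
	[0 ... NR_DEMO_CALLS - 1] = demo_ni_syscall,	/* default */
	[3] = demo_getpid,				/* override one slot */
};

int main(void)
{
	printf("slot 2 -> %d\n", demo_call_table[2]());	/* -38 */
	printf("slot 3 -> %d\n", demo_call_table[3]());	/* 1234 */
	return 0;
}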
// SPDX-License-Identifier: GPL-2.0-only #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/memory.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #define NOP32_HI 0xc400 #define NOP32_LO 0x4820 #define BSR_LINK 0xe000 void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { unsigned long addr = jump_entry_code(entry); u16 insn[2]; int ret = 0; if (type == JUMP_LABEL_JMP) { long offset = jump_entry_target(entry) - jump_entry_code(entry); if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864)) return; offset = offset >> 1; insn[0] = BSR_LINK | ((uint16_t)((unsigned long) offset >> 16) & 0x3ff); insn[1] = (uint16_t)((unsigned long) offset & 0xffff); } else { insn[0] = NOP32_HI; insn[1] = NOP32_LO; } ret = copy_to_kernel_nofault((void *)addr, insn, 4); WARN_ON(ret); flush_icache_range(addr, addr + 4); } void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { /* * We use the same instructions in the arch_static_branch and * arch_static_branch_jump inline functions, so there's no * need to patch them up here. * The core will call arch_jump_label_transform when those * instructions need to be replaced. */ arch_jump_label_transform(entry, type); }
linux-master
arch/csky/kernel/jump_label.c
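arch_jump_label_transform() above encodes a bsr32 by halving the byte offset and splitting it into a 10-bit high part OR'ed into the opcode halfword and a 16-bit low part. A hedged sketch of just that encoding step, with hypothetical source and target addresses:

/*
 * Hedged sketch (not kernel code) of the bsr32 offset encoding.
 */
#include <stdio.h>
#include <stdint.h>

#define BSR_LINK 0xe000

int main(void)
{
	unsigned long code   = 0x80002000;	/* hypothetical jump_entry_code */
	unsigned long target = 0x80004e36;	/* hypothetical jump_entry_target */
	long offset = (long)(target - code);
	uint16_t insn[2];

	if (offset & 1 || offset < -67108864 || offset >= 67108864) {
		printf("offset out of bsr32 range\n");
		return 1;
	}

	offset >>= 1;
	insn[0] = BSR_LINK | ((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
	insn[1] = (uint16_t)((unsigned long)offset & 0xffff);

	printf("insn = %04x %04x\n", insn[0], insn[1]);
	return 0;
}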
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/perf_event.h> #include <linux/uaccess.h> /* Kernel callchain */ struct stackframe { unsigned long fp; unsigned long lr; }; static int unwind_frame_kernel(struct stackframe *frame) { unsigned long low = (unsigned long)task_stack_page(current); unsigned long high = low + THREAD_SIZE; if (unlikely(frame->fp < low || frame->fp > high)) return -EPERM; if (kstack_end((void *)frame->fp) || frame->fp & 0x3) return -EPERM; *frame = *(struct stackframe *)frame->fp; if (__kernel_text_address(frame->lr)) { int graph = 0; frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr, NULL); } return 0; } static void notrace walk_stackframe(struct stackframe *fr, struct perf_callchain_entry_ctx *entry) { do { perf_callchain_store(entry, fr->lr); } while (unwind_frame_kernel(fr) >= 0); } /* * Get the return address for a single stackframe and return a pointer to the * next frame tail. */ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp, unsigned long reg_lr) { struct stackframe buftail; unsigned long lr = 0; unsigned long __user *user_frame_tail = (unsigned long __user *)fp; /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) return 0; if (__copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail))) return 0; if (reg_lr != 0) lr = reg_lr; else lr = buftail.lr; fp = buftail.fp; perf_callchain_store(entry, lr); return fp; } /* * This will be called when the target is in user mode * This function will only be called when we use * "PERF_SAMPLE_CALLCHAIN" in * kernel/events/core.c:perf_prepare_sample() * * How to trigger perf_callchain_[user/kernel] : * $ perf record -e cpu-clock --call-graph fp ./program * $ perf report --call-graph * * On C-SKY platform, the program being sampled and the C library * need to be compiled with * -mbacktrace, otherwise the user * stack will not contain function frame. */ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { unsigned long fp = 0; fp = regs->regs[4]; perf_callchain_store(entry, regs->pc); /* * While backtrace from leaf function, lr is normally * not saved inside frame on C-SKY, so get lr from pt_regs * at the sample point. However, lr value can be incorrect if * lr is used as temp register */ fp = user_backtrace(entry, fp, regs->lr); while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) fp = user_backtrace(entry, fp, 0); } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { struct stackframe fr; fr.fp = regs->regs[4]; fr.lr = regs->lr; walk_stackframe(&fr, entry); }
linux-master
arch/csky/kernel/perf_callchain.c
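Both unwinders above assume the -mbacktrace frame layout: each frame begins with a {fp, lr} pair, so following fp walks to the caller's frame and lr gives the return address. A hedged sketch of walking such a chain, built by hand purely for illustration:

/*
 * Hedged sketch (not kernel code) of a frame-pointer chain walk over
 * {fp, lr} frames like the ones perf_callchain_user() expects.
 */
#include <stdio.h>

struct demo_frame {
	struct demo_frame *fp;	/* caller's frame */
	unsigned long lr;	/* return address in the caller */
};

static void walk(const struct demo_frame *frame)
{
	while (frame) {
		printf("return address: 0x%lx\n", frame->lr);
		frame = frame->fp;
	}
}

int main(void)
{
	struct demo_frame outer = { .fp = NULL,   .lr = 0x80001000 };
	struct demo_frame inner = { .fp = &outer, .lr = 0x80002000 };

	walk(&inner);
	return 0;
}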
// SPDX-License-Identifier: GPL-2.0 #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/ftrace.h> #include <linux/ptrace.h> #ifdef CONFIG_FRAME_POINTER struct stackframe { unsigned long fp; unsigned long ra; }; void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) { unsigned long fp, sp, pc; if (regs) { fp = frame_pointer(regs); sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { const register unsigned long current_fp __asm__ ("r8"); fp = current_fp; sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ fp = thread_saved_fp(task); sp = thread_saved_sp(task); pc = thread_saved_lr(task); } for (;;) { unsigned long low, high; struct stackframe *frame; if (unlikely(!__kernel_text_address(pc) || fn(pc, arg))) break; /* Validate frame pointer */ low = sp; high = ALIGN(sp, THREAD_SIZE); if (unlikely(fp < low || fp > high || fp & 0x3)) break; /* Unwind stack frame */ frame = (struct stackframe *)fp; sp = fp; fp = frame->fp; pc = ftrace_graph_ret_addr(current, NULL, frame->ra, (unsigned long *)(fp - 8)); } } #else /* !CONFIG_FRAME_POINTER */ static void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg) { unsigned long sp, pc; unsigned long *ksp; if (regs) { sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ sp = thread_saved_sp(task); pc = thread_saved_lr(task); } if (unlikely(sp & 0x3)) return; ksp = (unsigned long *)sp; while (!kstack_end(ksp)) { if (__kernel_text_address(pc) && unlikely(fn(pc, arg))) break; pc = (*ksp++) - 0x4; } } #endif /* CONFIG_FRAME_POINTER */ static bool print_trace_address(unsigned long pc, void *arg) { print_ip_sym((const char *)arg, pc); return false; } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { pr_cont("Call Trace:\n"); walk_stackframe(task, NULL, print_trace_address, (void *)loglvl); } static bool save_wchan(unsigned long pc, void *arg) { if (!in_sched_functions(pc)) { unsigned long *p = arg; *p = pc; return true; } return false; } unsigned long __get_wchan(struct task_struct *task) { unsigned long pc = 0; walk_stackframe(task, NULL, save_wchan, &pc); return pc; } #ifdef CONFIG_STACKTRACE static bool __save_trace(unsigned long pc, void *arg, bool nosched) { struct stack_trace *trace = arg; if (unlikely(nosched && in_sched_functions(pc))) return false; if (unlikely(trace->skip > 0)) { trace->skip--; return false; } trace->entries[trace->nr_entries++] = pc; return (trace->nr_entries >= trace->max_entries); } static bool save_trace(unsigned long pc, void *arg) { return __save_trace(pc, arg, false); } /* * Save stack-backtrace addresses into a stack_trace buffer. */ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { walk_stackframe(tsk, NULL, save_trace, trace); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(NULL, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); #endif /* CONFIG_STACKTRACE */
linux-master
arch/csky/kernel/stacktrace.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #include <linux/of.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/memblock.h> #include <abi/reg_ops.h> static void percpu_print(void *arg) { struct seq_file *m = (struct seq_file *)arg; unsigned int cur, next, i; seq_printf(m, "processor : %d\n", smp_processor_id()); seq_printf(m, "C-SKY CPU model : %s\n", CSKYCPU_DEF_NAME); /* read processor id, max is 100 */ cur = mfcr("cr13"); for (i = 0; i < 100; i++) { seq_printf(m, "product info[%d] : 0x%08x\n", i, cur); next = mfcr("cr13"); /* some CPU only has one id reg */ if (cur == next) break; cur = next; /* cpid index is 31-28, reset */ if (!(next >> 28)) { while ((mfcr("cr13") >> 28) != i); break; } } /* CPU feature regs, setup by bootloader or gdbinit */ seq_printf(m, "hint (CPU funcs): 0x%08x\n", mfcr_hint()); seq_printf(m, "ccr (L1C & MMU): 0x%08x\n", mfcr("cr18")); seq_printf(m, "ccr2 (L2C) : 0x%08x\n", mfcr_ccr2()); seq_printf(m, "\n"); } static int c_show(struct seq_file *m, void *v) { int cpu; for_each_online_cpu(cpu) smp_call_function_single(cpu, percpu_print, m, true); #ifdef CSKY_ARCH_VERSION seq_printf(m, "arch-version : %s\n", CSKY_ARCH_VERSION); seq_printf(m, "\n"); #endif return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < 1 ? (void *)1 : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return NULL; } static void c_stop(struct seq_file *m, void *v) {} const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show, };
linux-master
arch/csky/kernel/cpu-probe.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/resume_user_mode.h> #include <asm/traps.h> #include <asm/ucontext.h> #include <asm/vdso.h> #include <abi/regdef.h> #ifdef CONFIG_CPU_HAS_FPU #include <abi/fpu.h> static int restore_fpu_state(struct sigcontext __user *sc) { int err = 0; struct user_fp user_fp; err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp)); restore_from_user_fp(&user_fp); return err; } static int save_fpu_state(struct sigcontext __user *sc) { struct user_fp user_fp; save_to_user_fp(&user_fp); return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp)); } #else #define restore_fpu_state(sigcontext) (0) #define save_fpu_state(sigcontext) (0) #endif struct rt_sigframe { /* * pad[3] is compatible with the same struct defined in * gcc/libgcc/config/csky/linux-unwind.h */ int pad[3]; struct siginfo info; struct ucontext uc; }; static long restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { int err = 0; unsigned long sr = regs->sr; /* sc_pt_regs is structured the same as the start of pt_regs */ err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs)); /* BIT(0) of regs->sr is Condition Code/Carry bit */ regs->sr = (sr & ~1) | (regs->sr & 1); /* Restore the floating-point state. */ err |= restore_fpu_state(sc); return err; } SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; sigset_t set; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *)regs->usp; if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->a0; badframe: force_sig(SIGSEGV); return 0; } static int setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs) { struct sigcontext __user *sc = &frame->uc.uc_mcontext; int err = 0; err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs)); err |= save_fpu_state(sc); return err; } static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t framesize) { unsigned long sp; /* Default to using normal stack */ sp = regs->usp; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) return (void __user __force *)(-1UL); /* This is the X/Open sanctioned signal stack switching. */ sp = sigsp(sp, ksig) - framesize; /* Align the stack frame. */ sp &= -8UL; return (void __user *)sp; } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->usp); err |= setup_sigcontext(frame, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up to return from userspace. 
*/ regs->lr = (unsigned long)VDSO_SYMBOL( current->mm->context.vdso, rt_sigreturn); /* * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. * We always pass siginfo and mcontext, regardless of SA_SIGINFO, * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = (unsigned long)ksig->ka.sa.sa_handler; regs->usp = (unsigned long)frame; regs->a0 = ksig->sig; /* a0: signal number */ regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */ regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */ return 0; } static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ if (in_syscall(regs)) { /* Avoid additional syscall restarting via ret_from_exception */ forget_syscall(regs); /* If so, check system call restarting.. */ switch (regs->a0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->a0 = -EINTR; break; case -ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->a0 = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->pc -= TRAP0_SIZE; break; } } /* Set up the stack frame */ ret = setup_rt_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, 0); } static void do_signal(struct pt_regs *regs) { struct ksignal ksig; if (get_signal(&ksig)) { /* Actually deliver the signal */ handle_signal(&ksig, regs); return; } /* Did we come from a system call? */ if (in_syscall(regs)) { /* Avoid additional syscall restarting via ret_from_exception */ forget_syscall(regs); /* Restart the system call - no handlers present */ switch (regs->a0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->pc -= TRAP0_SIZE; break; case -ERESTART_RESTARTBLOCK: regs->a0 = regs->orig_a0; regs_syscallid(regs) = __NR_restart_syscall; regs->pc -= TRAP0_SIZE; break; } } /* * If there is no signal to deliver, we just put the saved * sigmask back. */ restore_saved_sigmask(); } /* * notification of userspace execution resumption * - triggered by the _TIF_WORK_MASK flags */ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); /* Handle pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) resume_user_mode_work(regs); }
linux-master
arch/csky/kernel/signal.c
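get_sigframe() above carves the signal frame out below the current (or alternate) stack pointer and rounds it down to an 8-byte boundary. A hedged sketch of that placement arithmetic, with hypothetical addresses and frame size:

/*
 * Hedged sketch (not kernel code) of the sigframe stack placement:
 * sp = sigsp(sp) - framesize, then aligned down to 8 bytes.
 */
#include <stdio.h>

int main(void)
{
	unsigned long usp = 0x7fff1234;		/* hypothetical user sp */
	unsigned long framesize = 0x2c4;	/* hypothetical sizeof(rt_sigframe) */
	unsigned long sp = (usp - framesize) & ~7UL;

	printf("sigframe at 0x%lx (aligned, below 0x%lx)\n", sp, usp);
	return 0;
}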
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX))
		return 0;

	return (u64)*((u32 *)regs + idx);
}

#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
linux-master
arch/csky/kernel/perf_regs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/percpu.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/irq_work.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/seq_file.h> #include <linux/sched/task_stack.h> #include <linux/sched/mm.h> #include <linux/sched/hotplug.h> #include <asm/irq.h> #include <asm/traps.h> #include <asm/sections.h> #include <asm/mmu_context.h> #ifdef CONFIG_CPU_HAS_FPU #include <abi/fpu.h> #endif enum ipi_message_type { IPI_EMPTY, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_IRQ_WORK, IPI_MAX }; struct ipi_data_struct { unsigned long bits ____cacheline_aligned; unsigned long stats[IPI_MAX] ____cacheline_aligned; }; static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data); static irqreturn_t handle_ipi(int irq, void *dev) { unsigned long *stats = this_cpu_ptr(&ipi_data)->stats; while (true) { unsigned long ops; ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0); if (ops == 0) return IRQ_HANDLED; if (ops & (1 << IPI_RESCHEDULE)) { stats[IPI_RESCHEDULE]++; scheduler_ipi(); } if (ops & (1 << IPI_CALL_FUNC)) { stats[IPI_CALL_FUNC]++; generic_smp_call_function_interrupt(); } if (ops & (1 << IPI_IRQ_WORK)) { stats[IPI_IRQ_WORK]++; irq_work_run(); } BUG_ON((ops >> IPI_MAX) != 0); } return IRQ_HANDLED; } static void (*send_arch_ipi)(const struct cpumask *mask); static int ipi_irq; void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq) { if (send_arch_ipi) return; send_arch_ipi = func; ipi_irq = irq; } static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) { int i; for_each_cpu(i, to_whom) set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits); smp_mb(); send_arch_ipi(to_whom); } static const char * const ipi_names[] = { [IPI_EMPTY] = "Empty interrupts", [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_IRQ_WORK] = "Irq work interrupts", }; int arch_show_interrupts(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < IPI_MAX; i++) { seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? 
" " : ""); for_each_online_cpu(cpu) seq_printf(p, "%10lu ", per_cpu_ptr(&ipi_data, cpu)->stats[i]); seq_printf(p, " %s\n", ipi_names[i]); } return 0; } void arch_send_call_function_ipi_mask(struct cpumask *mask) { send_ipi_message(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } static void ipi_stop(void *unused) { while (1); } void smp_send_stop(void) { on_each_cpu(ipi_stop, NULL, 1); } void arch_smp_send_reschedule(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) { send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK); } #endif void __init smp_prepare_boot_cpu(void) { } void __init smp_prepare_cpus(unsigned int max_cpus) { } static int ipi_dummy_dev; void __init setup_smp_ipi(void) { int rc; if (ipi_irq == 0) return; rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", &ipi_dummy_dev); if (rc) panic("%s IRQ request failed\n", __func__); enable_percpu_irq(ipi_irq, 0); } void __init setup_smp(void) { struct device_node *node = NULL; unsigned int cpu; for_each_of_cpu_node(node) { if (!of_device_is_available(node)) continue; cpu = of_get_cpu_hwid(node, 0); if (cpu >= NR_CPUS) continue; set_cpu_possible(cpu, true); set_cpu_present(cpu, true); } } extern void _start_smp_secondary(void); volatile unsigned int secondary_hint; volatile unsigned int secondary_hint2; volatile unsigned int secondary_ccr; volatile unsigned int secondary_stack; volatile unsigned int secondary_msa1; volatile unsigned int secondary_pgd; int __cpu_up(unsigned int cpu, struct task_struct *tidle) { unsigned long mask = 1 << cpu; secondary_stack = (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); secondary_hint2 = mfcr("cr<21, 1>"); secondary_ccr = mfcr("cr18"); secondary_msa1 = read_mmu_msa1(); secondary_pgd = mfcr("cr<29, 15>"); /* * Because other CPUs are in reset status, we must flush data * from cache to out and secondary CPUs use them in * csky_start_secondary(void) */ mtcr("cr17", 0x22); if (mask & mfcr("cr<29, 0>")) { send_arch_ipi(cpumask_of(cpu)); } else { /* Enable cpu in SMP reset ctrl reg */ mask |= mfcr("cr<29, 0>"); mtcr("cr<29, 0>", mask); } /* Wait for the cpu online */ while (!cpu_online(cpu)); secondary_stack = 0; return 0; } void __init smp_cpus_done(unsigned int max_cpus) { } void csky_start_secondary(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); mtcr("cr31", secondary_hint); mtcr("cr<21, 1>", secondary_hint2); mtcr("cr18", secondary_ccr); mtcr("vbr", vec_base); flush_tlb_all(); write_mmu_pagemask(0); #ifdef CONFIG_CPU_HAS_FPU init_fpu(); #endif enable_percpu_irq(ipi_irq, 0); mmget(mm); mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); notify_cpu_starting(cpu); set_cpu_online(cpu, true); pr_info("CPU%u Online: %s...\n", cpu, __func__); local_irq_enable(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } #ifdef CONFIG_HOTPLUG_CPU int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); set_cpu_online(cpu, false); irq_migrate_all_off_this_cpu(); clear_tasks_mm_cpumask(cpu); return 0; } void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { pr_notice("CPU%u: shutdown\n", cpu); } void __noreturn arch_cpu_idle_dead(void) { idle_task_exit(); cpuhp_ap_report_dead(); while (!secondary_stack) arch_cpu_idle(); raw_local_irq_disable(); asm volatile( "mov sp, %0\n" "mov r8, %0\n" "jmpi csky_start_secondary" : : "r" (secondary_stack)); BUG(); } #endif
linux-master
arch/csky/kernel/smp.c
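/*
 * Editor's illustration for arch/csky/kernel/smp.c above -- not part of the
 * original file. A minimal sketch of how an interrupt-controller driver is
 * expected to hand its IPI-raising callback and per-cpu IRQ number to the
 * SMP code via set_send_ipi(); handle_ipi() is then requested on that IRQ by
 * setup_smp_ipi(). The demo_* names and the register poke are hypothetical
 * placeholders, not real csky driver code.
 */
#include <linux/cpumask.h>
#include <linux/init.h>

/* Prototype matches the definition in smp.c above. */
extern void set_send_ipi(void (*func)(const struct cpumask *mask), int irq);

static int demo_ipi_hwirq;		/* hypothetical per-cpu IRQ number */

static void demo_raise_ipi(const struct cpumask *mask)
{
	/* A real driver would write 'mask' to its software-interrupt register. */
}

static int __init demo_intc_register_ipi(void)
{
	/* A real driver would call this from its irqchip init path. */
	set_send_ipi(demo_raise_ipi, demo_ipi_hwirq);
	return 0;
}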
// SPDX-License-Identifier: GPL-2.0

#include <linux/kprobes.h>

/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	int bit;
	bool lr_saver = false;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!p) {
		p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
		if (unlikely(!p) || kprobe_disabled(p))
			goto out;
		lr_saver = true;
	}

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		if (lr_saver)
			ip -= MCOUNT_INSN_SIZE;

		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->pc)
			 * as if there is a nop
			 */
			instruction_pointer_set(regs,
				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}
		/*
		 * If pre_handler returned non-zero, it already changed
		 * regs->pc, so skip the post_handler emulation.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.api.insn = NULL;
	return 0;
}
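/*
 * Editor's illustration -- not part of the original ftrace.c. A minimal
 * kprobe whose pre-handler is dispatched through kprobe_ftrace_handler()
 * above when the probed symbol's entry is an ftrace-patched site.
 * "vfs_read" is only an example symbol; any ftrace-able function would do.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit %s, pc=%lx\n", p->symbol_name,
		instruction_pointer(regs));
	return 0;	/* let kprobe_ftrace_handler() emulate the skipped nop */
}

static struct kprobe demo_kp = {
	.symbol_name	= "vfs_read",
	.pre_handler	= demo_pre,
};

static int __init demo_kprobe_init(void)
{
	return register_kprobe(&demo_kp);	/* pair with unregister_kprobe() */
}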
linux-master
arch/csky/kernel/probes/ftrace.c
// SPDX-License-Identifier: GPL-2.0+

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

#include "decode-insn.h"
#include "simulate-insn.h"

static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
					 unsigned long index,
					 unsigned long *ptr)
{
	if (index < 14)
		*ptr = *(&regs->a0 + index);

	if (index > 15 && index < 31)
		*ptr = *(&regs->exregs[0] + index - 16);

	switch (index) {
	case 14:
		*ptr = regs->usp;
		break;
	case 15:
		*ptr = regs->lr;
		break;
	case 31:
		*ptr = regs->tls;
		break;
	default:
		goto fail;
	}

	return true;
fail:
	return false;
}

static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
					 unsigned long index,
					 unsigned long val)
{
	if (index < 14)
		*(&regs->a0 + index) = val;

	if (index > 15 && index < 31)
		*(&regs->exregs[0] + index - 16) = val;

	switch (index) {
	case 14:
		regs->usp = val;
		break;
	case 15:
		regs->lr = val;
		break;
	case 31:
		regs->tls = val;
		break;
	default:
		goto fail;
	}

	return true;
fail:
	return false;
}

void __kprobes
simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
{
	instruction_pointer_set(regs,
		addr + sign_extend32((opcode & 0x3ff) << 1, 9));
}

void __kprobes
simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
{
	instruction_pointer_set(regs,
		addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
}

void __kprobes
simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
{
	if (regs->sr & 1)
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0x3ff) << 1, 9));
	else
		instruction_pointer_set(regs, addr + 2);
}

void __kprobes
simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
{
	if (regs->sr & 1)
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
{
	if (!(regs->sr & 1))
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0x3ff) << 1, 9));
	else
		instruction_pointer_set(regs, addr + 2);
}

void __kprobes
simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
{
	if (!(regs->sr & 1))
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = (opcode >> 2) & 0xf;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	instruction_pointer_set(regs, tmp & 0xfffffffe);
}

void __kprobes
simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	instruction_pointer_set(regs, tmp & 0xfffffffe);
}

void __kprobes
simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = (opcode >> 2) & 0xf;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	regs->lr = addr + 2;

	instruction_pointer_set(regs, tmp & 0xfffffffe);
}

void __kprobes
simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	regs->lr = addr + 4;

	instruction_pointer_set(regs, tmp & 0xfffffffe);
}

void __kprobes
simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long val;
	unsigned long tmp = (opcode & 0x300) >> 3;
	unsigned long offset = ((opcode & 0x1f) | tmp) << 2;

	tmp = (opcode & 0xe0) >> 5;

	val = *(unsigned int *)(instruction_pointer(regs) + offset);

	csky_insn_reg_set_val(regs, tmp, val);
}

void __kprobes
simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long val;
	unsigned long offset = (opcode & 0xffff0000) >> 14;
	unsigned long tmp = opcode & 0x0000001f;

	val = *(unsigned int *)
		((instruction_pointer(regs) + offset) & 0xfffffffc);

	csky_insn_reg_set_val(regs, tmp, val);
}

void __kprobes
simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long *tmp = (unsigned long *)regs->usp;
	int i;

	for (i = 0; i < (opcode & 0xf); i++) {
		csky_insn_reg_set_val(regs, i + 4, *tmp);
		tmp += 1;
	}

	if (opcode & 0x10) {
		csky_insn_reg_set_val(regs, 15, *tmp);
		tmp += 1;
	}

	regs->usp = (unsigned long)tmp;

	instruction_pointer_set(regs, regs->lr);
}

void __kprobes
simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long *tmp = (unsigned long *)regs->usp;
	int i;

	for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
		csky_insn_reg_set_val(regs, i + 4, *tmp);
		tmp += 1;
	}

	if (opcode & 0x100000) {
		csky_insn_reg_set_val(regs, 15, *tmp);
		tmp += 1;
	}

	for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
		csky_insn_reg_set_val(regs, i + 16, *tmp);
		tmp += 1;
	}

	if (opcode & 0x1000000) {
		csky_insn_reg_set_val(regs, 29, *tmp);
		tmp += 1;
	}

	regs->usp = (unsigned long)tmp;

	instruction_pointer_set(regs, regs->lr);
}

void __kprobes
simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	if (tmp == 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;

	csky_insn_reg_get_val(regs, tmp, &tmp);

	if (tmp != 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;
	long val;

	csky_insn_reg_get_val(regs, tmp, (unsigned long *)&val);

	val -= 1;

	if (val > 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);

	csky_insn_reg_set_val(regs, tmp, (unsigned long)val);
}

void __kprobes
simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;
	unsigned long val;

	csky_insn_reg_get_val(regs, tmp, &val);

	if ((long) val >= 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;
	unsigned long val;

	csky_insn_reg_get_val(regs, tmp, &val);

	if ((long) val > 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;
	unsigned long val;

	csky_insn_reg_get_val(regs, tmp, &val);

	if ((long) val <= 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp = opcode & 0x1f;
	unsigned long val;

	csky_insn_reg_get_val(regs, tmp, &val);

	if ((long) val < 0) {
		instruction_pointer_set(regs,
			addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
	} else
		instruction_pointer_set(regs, addr + 4);
}

void __kprobes
simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long tmp;

	tmp = (opcode & 0xffff) << 16;
	tmp |= (opcode & 0xffff0000) >> 16;

	instruction_pointer_set(regs,
		addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));

	regs->lr = addr + 4;
}

void __kprobes
simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long val;
	unsigned long offset = ((opcode & 0xffff0000) >> 14);

	val = *(unsigned int *)
		((instruction_pointer(regs) + offset) & 0xfffffffc);

	instruction_pointer_set(regs, val);
}

void __kprobes
simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
{
	unsigned long val;
	unsigned long offset = ((opcode & 0xffff0000) >> 14);

	val = *(unsigned int *)
		((instruction_pointer(regs) + offset) & 0xfffffffc);

	regs->lr = addr + 4;

	instruction_pointer_set(regs, val);
}
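/*
 * Editor's illustration -- not part of the original simulate-insn.c. A quick
 * self-check of the br32 offset arithmetic used in simulate_br32() above:
 * for a 32-bit branch at 0x80001000 whose upper-halfword offset field is
 * 0x0008, (0x00080000 >> 15) = 0x10 and sign_extend32() leaves it positive,
 * so the simulated target is 0x80001010. The values are illustrative only.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

static void __maybe_unused demo_br32_offset_check(void)
{
	u32 opcode = 0x00080000;	/* offset field in the upper halfword */
	long addr = 0x80001000;
	long target = addr + sign_extend32((opcode & 0xffff0000) >> 15, 15);

	WARN_ON(target != 0x80001010);
}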
linux-master
arch/csky/kernel/probes/simulate-insn.c