Columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 values), file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive * Copyright (C) 2018 Christoph Hellwig */ #include <linux/interrupt.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/seq_file.h> #include <asm/sbi.h> #include <asm/smp.h> #include <asm/softirq_stack.h> #include <asm/stacktrace.h> static struct fwnode_handle *(*__get_intc_node)(void); void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)) { __get_intc_node = fn; } struct fwnode_handle *riscv_get_intc_hwnode(void) { if (__get_intc_node) return __get_intc_node(); return NULL; } EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode); #ifdef CONFIG_IRQ_STACKS #include <asm/irq_stack.h> DEFINE_PER_CPU(ulong *, irq_stack_ptr); #ifdef CONFIG_VMAP_STACK static void init_irq_stacks(void) { int cpu; ulong *p; for_each_possible_cpu(cpu) { p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu)); per_cpu(irq_stack_ptr, cpu) = p; } } #else /* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */ DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack); static void init_irq_stacks(void) { int cpu; for_each_possible_cpu(cpu) per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu); } #endif /* CONFIG_VMAP_STACK */ #ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK void do_softirq_own_stack(void) { #ifdef CONFIG_IRQ_STACKS if (on_thread_stack()) { ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id()) + IRQ_STACK_SIZE/sizeof(ulong); __asm__ __volatile( "addi sp, sp, -"RISCV_SZPTR "\n" REG_S" ra, (sp) \n" "addi sp, sp, -"RISCV_SZPTR "\n" REG_S" s0, (sp) \n" "addi s0, sp, 2*"RISCV_SZPTR "\n" "move sp, %[sp] \n" "call __do_softirq \n" "addi sp, s0, -2*"RISCV_SZPTR"\n" REG_L" s0, (sp) \n" "addi sp, sp, "RISCV_SZPTR "\n" REG_L" ra, (sp) \n" "addi sp, sp, "RISCV_SZPTR "\n" : : [sp] "r" (sp) : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", #ifndef CONFIG_FRAME_POINTER "s0", #endif "memory"); } else #endif __do_softirq(); } #endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */ #else static void init_irq_stacks(void) {} #endif /* CONFIG_IRQ_STACKS */ int arch_show_interrupts(struct seq_file *p, int prec) { show_ipi_stats(p, prec); return 0; } void __init init_IRQ(void) { init_irq_stacks(); irqchip_init(); if (!handle_arch_irq) panic("No interrupt controller found."); sbi_ipi_init(); }
linux-master
arch/riscv/kernel/irq.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California */ #include <linux/acpi.h> #include <linux/cpu.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/of.h> #include <asm/acpi.h> #include <asm/cpufeature.h> #include <asm/csr.h> #include <asm/hwcap.h> #include <asm/sbi.h> #include <asm/smp.h> #include <asm/pgtable.h> bool arch_match_cpu_phys_id(int cpu, u64 phys_id) { return phys_id == cpuid_to_hartid_map(cpu); } /* * Returns the hart ID of the given device tree node, or -ENODEV if the node * isn't an enabled and valid RISC-V hart node. */ int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart) { int cpu; *hart = (unsigned long)of_get_cpu_hwid(node, 0); if (*hart == ~0UL) { pr_warn("Found CPU without hart ID\n"); return -ENODEV; } cpu = riscv_hartid_to_cpuid(*hart); if (cpu < 0) return cpu; if (!cpu_possible(cpu)) return -ENODEV; return 0; } int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart) { const char *isa; if (!of_device_is_compatible(node, "riscv")) { pr_warn("Found incompatible CPU\n"); return -ENODEV; } *hart = (unsigned long)of_get_cpu_hwid(node, 0); if (*hart == ~0UL) { pr_warn("Found CPU without hart ID\n"); return -ENODEV; } if (!of_device_is_available(node)) { pr_info("CPU with hartid=%lu is not available\n", *hart); return -ENODEV; } if (of_property_read_string(node, "riscv,isa-base", &isa)) goto old_interface; if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) { pr_warn("CPU with hartid=%lu does not support rv32i", *hart); return -ENODEV; } if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) { pr_warn("CPU with hartid=%lu does not support rv64i", *hart); return -ENODEV; } if (!of_property_present(node, "riscv,isa-extensions")) return -ENODEV; if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 || of_property_match_string(node, "riscv,isa-extensions", "m") < 0 || of_property_match_string(node, "riscv,isa-extensions", "a") < 0) { pr_warn("CPU with hartid=%lu does not support ima", *hart); return -ENODEV; } return 0; old_interface: if (!riscv_isa_fallback) { pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"", *hart); return -ENODEV; } if (of_property_read_string(node, "riscv,isa", &isa)) { pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n", *hart); return -ENODEV; } if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) { pr_warn("CPU with hartid=%lu does not support rv32ima", *hart); return -ENODEV; } if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) { pr_warn("CPU with hartid=%lu does not support rv64ima", *hart); return -ENODEV; } return 0; } /* * Find hart ID of the CPU DT node under which given DT node falls. * * To achieve this, we walk up the DT tree until we find an active * RISC-V core (HART) node and extract the cpuid from it. 
*/ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid) { int rc; for (; node; node = node->parent) { if (of_device_is_compatible(node, "riscv")) { rc = riscv_of_processor_hartid(node, hartid); if (!rc) return 0; } } return -1; } DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); unsigned long riscv_cached_mvendorid(unsigned int cpu_id) { struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); return ci->mvendorid; } EXPORT_SYMBOL(riscv_cached_mvendorid); unsigned long riscv_cached_marchid(unsigned int cpu_id) { struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); return ci->marchid; } EXPORT_SYMBOL(riscv_cached_marchid); unsigned long riscv_cached_mimpid(unsigned int cpu_id) { struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); return ci->mimpid; } EXPORT_SYMBOL(riscv_cached_mimpid); static int riscv_cpuinfo_starting(unsigned int cpu) { struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo); #if IS_ENABLED(CONFIG_RISCV_SBI) ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid(); ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid(); ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid(); #elif IS_ENABLED(CONFIG_RISCV_M_MODE) ci->mvendorid = csr_read(CSR_MVENDORID); ci->marchid = csr_read(CSR_MARCHID); ci->mimpid = csr_read(CSR_MIMPID); #else ci->mvendorid = 0; ci->marchid = 0; ci->mimpid = 0; #endif return 0; } static int __init riscv_cpuinfo_init(void) { int ret; ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting", riscv_cpuinfo_starting, NULL); if (ret < 0) { pr_err("cpuinfo: failed to register hotplug callbacks.\n"); return ret; } return 0; } arch_initcall(riscv_cpuinfo_init); #ifdef CONFIG_PROC_FS static void print_isa(struct seq_file *f) { seq_puts(f, "isa\t\t: "); if (IS_ENABLED(CONFIG_32BIT)) seq_write(f, "rv32", 4); else seq_write(f, "rv64", 4); for (int i = 0; i < riscv_isa_ext_count; i++) { if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id)) continue; /* Only multi-letter extensions are split by underscores */ if (strnlen(riscv_isa_ext[i].name, 2) != 1) seq_puts(f, "_"); seq_printf(f, "%s", riscv_isa_ext[i].name); } seq_puts(f, "\n"); } static void print_mmu(struct seq_file *f) { const char *sv_type; #ifdef CONFIG_MMU #if defined(CONFIG_32BIT) sv_type = "sv32"; #elif defined(CONFIG_64BIT) if (pgtable_l5_enabled) sv_type = "sv57"; else if (pgtable_l4_enabled) sv_type = "sv48"; else sv_type = "sv39"; #endif #else sv_type = "none"; #endif /* CONFIG_MMU */ seq_printf(f, "mmu\t\t: %s\n", sv_type); } static void *c_start(struct seq_file *m, loff_t *pos) { if (*pos == nr_cpu_ids) return NULL; *pos = cpumask_next(*pos - 1, cpu_online_mask); if ((*pos) < nr_cpu_ids) return (void *)(uintptr_t)(1 + *pos); return NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; return c_start(m, pos); } static void c_stop(struct seq_file *m, void *v) { } static int c_show(struct seq_file *m, void *v) { unsigned long cpu_id = (unsigned long)v - 1; struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id); struct device_node *node; const char *compat; seq_printf(m, "processor\t: %lu\n", cpu_id); seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id)); print_isa(m); print_mmu(m); if (acpi_disabled) { node = of_get_cpu_node(cpu_id, NULL); if (!of_property_read_string(node, "compatible", &compat) && strcmp(compat, "riscv")) seq_printf(m, "uarch\t\t: %s\n", compat); of_node_put(node); } seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid); seq_printf(m, "marchid\t\t: 0x%lx\n", 
ci->marchid); seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid); seq_puts(m, "\n"); return 0; } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show }; #endif /* CONFIG_PROC_FS */
linux-master
arch/riscv/kernel/cpu.c
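Note: the c_show()/print_isa()/print_mmu() path in the cpu.c row above is what emits the "processor", "hart", "isa" and "mmu" lines of /proc/cpuinfo on RISC-V. A minimal userspace sketch that reads those fields back, assuming only a Linux host with procfs (it is illustrative and not part of the kernel sources):

/* Illustrative reader for the fields c_show() above prints to
 * /proc/cpuinfo. Assumes a Linux host; not kernel code. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/cpuinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "isa" and "mmu" come from print_isa()/print_mmu() */
		if (!strncmp(line, "processor", 9) ||
		    !strncmp(line, "hart", 4) ||
		    !strncmp(line, "isa", 3) ||
		    !strncmp(line, "mmu", 3))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}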
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Western Digital Corporation or its affiliates. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/irq.h> #include <linux/cpuhotplug.h> #include <linux/cpu.h> #include <linux/sched/hotplug.h> #include <asm/irq.h> #include <asm/cpu_ops.h> #include <asm/numa.h> #include <asm/smp.h> bool cpu_has_hotplug(unsigned int cpu) { if (cpu_ops[cpu]->cpu_stop) return true; return false; } /* * __cpu_disable runs on the processor to be shutdown. */ int __cpu_disable(void) { int ret = 0; unsigned int cpu = smp_processor_id(); if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop) return -EOPNOTSUPP; if (cpu_ops[cpu]->cpu_disable) ret = cpu_ops[cpu]->cpu_disable(cpu); if (ret) return ret; remove_cpu_topology(cpu); numa_remove_cpu(cpu); set_cpu_online(cpu, false); riscv_ipi_disable(); irq_migrate_all_off_this_cpu(); return ret; } #ifdef CONFIG_HOTPLUG_CPU /* * Called on the thread which is asking for a CPU to be shutdown, if the * CPU reported dead to the hotplug core. */ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { int ret = 0; pr_notice("CPU%u: off\n", cpu); /* Verify from the firmware if the cpu is really stopped*/ if (cpu_ops[cpu]->cpu_is_stopped) ret = cpu_ops[cpu]->cpu_is_stopped(cpu); if (ret) pr_warn("CPU%d may not have stopped: %d\n", cpu, ret); } /* * Called from the idle thread for the CPU which has been shutdown. */ void __noreturn arch_cpu_idle_dead(void) { idle_task_exit(); cpuhp_ap_report_dead(); cpu_ops[smp_processor_id()]->cpu_stop(); /* It should never reach here */ BUG(); } #endif
linux-master
arch/riscv/kernel/cpu-hotplug.c
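Note: __cpu_disable() and arch_cpu_idle_dead() in the cpu-hotplug.c row above implement the architecture side of the generic CPU hotplug path. A hedged sketch of driving that path from userspace through the standard sysfs interface; the CPU number is an arbitrary example and root privileges are assumed:

/* Illustrative only: take CPU 1 offline and bring it back through the
 * generic hotplug sysfs interface whose arch hooks appear above.
 * Requires root; the CPU number is an assumption. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu1/online";

	if (write_str(path, "0"))	/* offline: ends up in __cpu_disable() */
		return 1;
	if (write_str(path, "1"))	/* bring the CPU back online */
		return 1;
	return 0;
}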
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2010 Tilera Corporation. All Rights Reserved. * Copyright 2015 Regents of the University of California * Copyright 2017 SiFive * * Copied from arch/tile/kernel/ptrace.c */ #include <asm/vector.h> #include <asm/ptrace.h> #include <asm/syscall.h> #include <asm/thread_info.h> #include <asm/switch_to.h> #include <linux/audit.h> #include <linux/compat.h> #include <linux/ptrace.h> #include <linux/elf.h> #include <linux/regset.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> enum riscv_regset { REGSET_X, #ifdef CONFIG_FPU REGSET_F, #endif #ifdef CONFIG_RISCV_ISA_V REGSET_V, #endif }; static int riscv_gpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { return membuf_write(&to, task_pt_regs(target), sizeof(struct user_regs_struct)); } static int riscv_gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct pt_regs *regs; regs = task_pt_regs(target); return user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1); } #ifdef CONFIG_FPU static int riscv_fpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct __riscv_d_ext_state *fstate = &target->thread.fstate; if (target == current) fstate_save(current, task_pt_regs(current)); membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr)); membuf_store(&to, fstate->fcsr); return membuf_zero(&to, 4); // explicitly pad } static int riscv_fpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct __riscv_d_ext_state *fstate = &target->thread.fstate; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0, offsetof(struct __riscv_d_ext_state, fcsr)); if (!ret) { ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0, offsetof(struct __riscv_d_ext_state, fcsr) + sizeof(fstate->fcsr)); } return ret; } #endif #ifdef CONFIG_RISCV_ISA_V static int riscv_vr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct __riscv_v_ext_state *vstate = &target->thread.vstate; struct __riscv_v_regset_state ptrace_vstate; if (!riscv_v_vstate_query(task_pt_regs(target))) return -EINVAL; /* * Ensure the vector registers have been saved to the memory before * copying them to membuf. */ if (target == current) riscv_v_vstate_save(current, task_pt_regs(current)); ptrace_vstate.vstart = vstate->vstart; ptrace_vstate.vl = vstate->vl; ptrace_vstate.vtype = vstate->vtype; ptrace_vstate.vcsr = vstate->vcsr; ptrace_vstate.vlenb = vstate->vlenb; /* Copy vector header from vstate. */ membuf_write(&to, &ptrace_vstate, sizeof(struct __riscv_v_regset_state)); /* Copy all the vector registers from vstate. 
*/ return membuf_write(&to, vstate->datap, riscv_v_vsize); } static int riscv_vr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct __riscv_v_ext_state *vstate = &target->thread.vstate; struct __riscv_v_regset_state ptrace_vstate; if (!riscv_v_vstate_query(task_pt_regs(target))) return -EINVAL; /* Copy rest of the vstate except datap */ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ptrace_vstate, 0, sizeof(struct __riscv_v_regset_state)); if (unlikely(ret)) return ret; if (vstate->vlenb != ptrace_vstate.vlenb) return -EINVAL; vstate->vstart = ptrace_vstate.vstart; vstate->vl = ptrace_vstate.vl; vstate->vtype = ptrace_vstate.vtype; vstate->vcsr = ptrace_vstate.vcsr; /* Copy all the vector registers. */ pos = 0; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vstate->datap, 0, riscv_v_vsize); return ret; } #endif static const struct user_regset riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), .regset_get = riscv_gpr_get, .set = riscv_gpr_set, }, #ifdef CONFIG_FPU [REGSET_F] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), .regset_get = riscv_fpr_get, .set = riscv_fpr_set, }, #endif #ifdef CONFIG_RISCV_ISA_V [REGSET_V] = { .core_note_type = NT_RISCV_VECTOR, .align = 16, .n = ((32 * RISCV_MAX_VLENB) + sizeof(struct __riscv_v_regset_state)) / sizeof(__u32), .size = sizeof(__u32), .regset_get = riscv_vr_get, .set = riscv_vr_set, }, #endif }; static const struct user_regset_view riscv_user_native_view = { .name = "riscv", .e_machine = EM_RISCV, .regsets = riscv_user_regset, .n = ARRAY_SIZE(riscv_user_regset), }; struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { REG_OFFSET_NAME(epc), REG_OFFSET_NAME(ra), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(gp), REG_OFFSET_NAME(tp), REG_OFFSET_NAME(t0), REG_OFFSET_NAME(t1), REG_OFFSET_NAME(t2), REG_OFFSET_NAME(s0), REG_OFFSET_NAME(s1), REG_OFFSET_NAME(a0), REG_OFFSET_NAME(a1), REG_OFFSET_NAME(a2), REG_OFFSET_NAME(a3), REG_OFFSET_NAME(a4), REG_OFFSET_NAME(a5), REG_OFFSET_NAME(a6), REG_OFFSET_NAME(a7), REG_OFFSET_NAME(s2), REG_OFFSET_NAME(s3), REG_OFFSET_NAME(s4), REG_OFFSET_NAME(s5), REG_OFFSET_NAME(s6), REG_OFFSET_NAME(s7), REG_OFFSET_NAME(s8), REG_OFFSET_NAME(s9), REG_OFFSET_NAME(s10), REG_OFFSET_NAME(s11), REG_OFFSET_NAME(t3), REG_OFFSET_NAME(t4), REG_OFFSET_NAME(t5), REG_OFFSET_NAME(t6), REG_OFFSET_NAME(status), REG_OFFSET_NAME(badaddr), REG_OFFSET_NAME(cause), REG_OFFSET_NAME(orig_a0), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_within_kernel_stack() - check the address in the stack * @regs: pt_regs which contains kernel stack pointer. * @addr: address which is checked. * * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). 
* If @addr is within the kernel stack, it returns true. If not, returns false. */ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) { return (addr & ~(THREAD_SIZE - 1)) == (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)); } /** * regs_get_kernel_stack_nth() - get Nth entry of the stack * @regs: pt_regs which contains kernel stack pointer. * @n: stack entry number. * * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which * is specified by @regs. If the @n th entry is NOT in the kernel stack, * this returns 0. */ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) { unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); addr += n; if (regs_within_kernel_stack(regs, (unsigned long)addr)) return *addr; else return 0; } void ptrace_disable(struct task_struct *child) { } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { long ret = -EIO; switch (request) { default: ret = ptrace_request(child, request, addr, data); break; } return ret; } #ifdef CONFIG_COMPAT static int compat_riscv_gpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct compat_user_regs_struct cregs; regs_to_cregs(&cregs, task_pt_regs(target)); return membuf_write(&to, &cregs, sizeof(struct compat_user_regs_struct)); } static int compat_riscv_gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct compat_user_regs_struct cregs; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &cregs, 0, -1); cregs_to_regs(&cregs, task_pt_regs(target)); return ret; } static const struct user_regset compat_riscv_user_regset[] = { [REGSET_X] = { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(compat_elf_greg_t), .align = sizeof(compat_elf_greg_t), .regset_get = compat_riscv_gpr_get, .set = compat_riscv_gpr_set, }, #ifdef CONFIG_FPU [REGSET_F] = { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), .regset_get = riscv_fpr_get, .set = riscv_fpr_set, }, #endif }; static const struct user_regset_view compat_riscv_user_native_view = { .name = "riscv", .e_machine = EM_RISCV, .regsets = compat_riscv_user_regset, .n = ARRAY_SIZE(compat_riscv_user_regset), }; long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { long ret = -EIO; switch (request) { default: ret = compat_ptrace_request(child, request, caddr, cdata); break; } return ret; } #endif /* CONFIG_COMPAT */ const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_32BIT)) return &compat_riscv_user_native_view; else #endif return &riscv_user_native_view; }
linux-master
arch/riscv/kernel/ptrace.c
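Note: riscv_gpr_get() in the ptrace.c row above services PTRACE_GETREGSET requests for NT_PRSTATUS, returning the registers in the order listed in regoffset_table (pc, ra, sp, gp, tp, ...). A sketch of the userspace consumer side, assuming a RISC-V Linux host; it is illustrative, not kernel code:

/* Read a stopped child's general-purpose registers the way the regset
 * code above serves them: PTRACE_GETREGSET with NT_PRSTATUS. Assumes a
 * RISC-V Linux host where the GPR set is 32 unsigned longs, pc first. */
#include <elf.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	unsigned long regs[32];	/* pc, ra, sp, gp, tp, t0..t6, s0..s11, a0..a7 */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };
	pid_t pid = fork();

	if (pid == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* stop so the parent can inspect us */
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return 1;
	}
	printf("pc=%#lx sp=%#lx\n", regs[0], regs[2]);
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}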
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California */ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/signal.h> #include <linux/signal.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/kprobes.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/kexec.h> #include <linux/entry-common.h> #include <asm/asm-prototypes.h> #include <asm/bug.h> #include <asm/cfi.h> #include <asm/csr.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/syscall.h> #include <asm/thread_info.h> #include <asm/vector.h> #include <asm/irq_stack.h> int show_unhandled_signals = 1; static DEFINE_SPINLOCK(die_lock); static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs) { char str[sizeof("0000 ") * 12 + 2 + 1], *p = str; const u16 *insns = (u16 *)instruction_pointer(regs); long bad; u16 val; int i; for (i = -10; i < 2; i++) { bad = get_kernel_nofault(val, &insns[i]); if (!bad) { p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val); } else { printk("%sCode: Unable to access instruction at 0x%px.\n", loglvl, &insns[i]); return; } } printk("%sCode: %s\n", loglvl, str); } void die(struct pt_regs *regs, const char *str) { static int die_counter; int ret; long cause; unsigned long flags; oops_enter(); spin_lock_irqsave(&die_lock, flags); console_verbose(); bust_spinlocks(1); pr_emerg("%s [#%d]\n", str, ++die_counter); print_modules(); if (regs) { show_regs(regs); dump_kernel_instr(KERN_EMERG, regs); } cause = regs ? regs->cause : -1; ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV); if (kexec_should_crash(current)) crash_kexec(regs); bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irqrestore(&die_lock, flags); oops_exit(); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); if (ret != NOTIFY_STOP) make_task_dead(SIGSEGV); } void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) { struct task_struct *tsk = current; if (show_unhandled_signals && unhandled_signal(tsk, signo) && printk_ratelimit()) { pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT, tsk->comm, task_pid_nr(tsk), signo, code, addr); print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); __show_regs(regs); } force_sig_fault(signo, code, (void __user *)addr); } static void do_trap_error(struct pt_regs *regs, int signo, int code, unsigned long addr, const char *str) { current->thread.bad_cause = regs->cause; if (user_mode(regs)) { do_trap(regs, signo, code, addr); } else { if (!fixup_exception(regs)) die(regs, str); } } #if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE) #define __trap_section __noinstr_section(".xip.traps") #else #define __trap_section noinstr #endif #define DO_ERROR_INFO(name, signo, code, str) \ asmlinkage __visible __trap_section void name(struct pt_regs *regs) \ { \ if (user_mode(regs)) { \ irqentry_enter_from_user_mode(regs); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ irqentry_exit_to_user_mode(regs); \ } else { \ irqentry_state_t state = irqentry_nmi_enter(regs); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \ irqentry_nmi_exit(regs, state); \ } \ } DO_ERROR_INFO(do_trap_unknown, SIGILL, ILL_ILLTRP, "unknown exception"); DO_ERROR_INFO(do_trap_insn_misaligned, SIGBUS, BUS_ADRALN, "instruction address 
misaligned"); DO_ERROR_INFO(do_trap_insn_fault, SIGSEGV, SEGV_ACCERR, "instruction access fault"); asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs) { bool handled; if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); local_irq_enable(); handled = riscv_v_first_use_handler(regs); local_irq_disable(); if (!handled) do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, "Oops - illegal instruction"); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc, "Oops - illegal instruction"); irqentry_nmi_exit(regs, state); } } DO_ERROR_INFO(do_trap_load_fault, SIGSEGV, SEGV_ACCERR, "load access fault"); #ifndef CONFIG_RISCV_M_MODE DO_ERROR_INFO(do_trap_load_misaligned, SIGBUS, BUS_ADRALN, "Oops - load address misaligned"); DO_ERROR_INFO(do_trap_store_misaligned, SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned"); #else int handle_misaligned_load(struct pt_regs *regs); int handle_misaligned_store(struct pt_regs *regs); asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) { if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); if (handle_misaligned_load(regs)) do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, "Oops - load address misaligned"); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); if (handle_misaligned_load(regs)) do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, "Oops - load address misaligned"); irqentry_nmi_exit(regs, state); } } asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) { if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); if (handle_misaligned_store(regs)) do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, "Oops - store (or AMO) address misaligned"); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); if (handle_misaligned_store(regs)) do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, "Oops - store (or AMO) address misaligned"); irqentry_nmi_exit(regs, state); } } #endif DO_ERROR_INFO(do_trap_store_fault, SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault"); DO_ERROR_INFO(do_trap_ecall_s, SIGILL, ILL_ILLTRP, "environment call from S-mode"); DO_ERROR_INFO(do_trap_ecall_m, SIGILL, ILL_ILLTRP, "environment call from M-mode"); static inline unsigned long get_break_insn_length(unsigned long pc) { bug_insn_t insn; if (get_kernel_nofault(insn, (bug_insn_t *)pc)) return 0; return GET_INSN_LENGTH(insn); } void handle_break(struct pt_regs *regs) { #ifdef CONFIG_KPROBES if (kprobe_single_step_handler(regs)) return; if (kprobe_breakpoint_handler(regs)) return; #endif #ifdef CONFIG_UPROBES if (uprobe_single_step_handler(regs)) return; if (uprobe_breakpoint_handler(regs)) return; #endif current->thread.bad_cause = regs->cause; if (user_mode(regs)) force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc); #ifdef CONFIG_KGDB else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP) == NOTIFY_STOP) return; #endif else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN || handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) regs->epc += get_break_insn_length(regs->epc); else die(regs, "Kernel BUG"); } asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs) { if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); handle_break(regs); irqentry_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); 
handle_break(regs); irqentry_nmi_exit(regs, state); } } asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) { if (user_mode(regs)) { long syscall = regs->a7; regs->epc += 4; regs->orig_a0 = regs->a0; riscv_v_vstate_discard(regs); syscall = syscall_enter_from_user_mode(regs, syscall); if (syscall >= 0 && syscall < NR_syscalls) syscall_handler(regs, syscall); else if (syscall != -1) regs->a0 = -ENOSYS; syscall_exit_to_user_mode(regs); } else { irqentry_state_t state = irqentry_nmi_enter(regs); do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc, "Oops - environment call from U-mode"); irqentry_nmi_exit(regs, state); } } #ifdef CONFIG_MMU asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs) { irqentry_state_t state = irqentry_enter(regs); handle_page_fault(regs); local_irq_disable(); irqentry_exit(regs, state); } #endif static void noinstr handle_riscv_irq(struct pt_regs *regs) { struct pt_regs *old_regs; irq_enter_rcu(); old_regs = set_irq_regs(regs); handle_arch_irq(regs); set_irq_regs(old_regs); irq_exit_rcu(); } asmlinkage void noinstr do_irq(struct pt_regs *regs) { irqentry_state_t state = irqentry_enter(regs); #ifdef CONFIG_IRQ_STACKS if (on_thread_stack()) { ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id()) + IRQ_STACK_SIZE/sizeof(ulong); __asm__ __volatile( "addi sp, sp, -"RISCV_SZPTR "\n" REG_S" ra, (sp) \n" "addi sp, sp, -"RISCV_SZPTR "\n" REG_S" s0, (sp) \n" "addi s0, sp, 2*"RISCV_SZPTR "\n" "move sp, %[sp] \n" "move a0, %[regs] \n" "call handle_riscv_irq \n" "addi sp, s0, -2*"RISCV_SZPTR"\n" REG_L" s0, (sp) \n" "addi sp, sp, "RISCV_SZPTR "\n" REG_L" ra, (sp) \n" "addi sp, sp, "RISCV_SZPTR "\n" : : [sp] "r" (sp), [regs] "r" (regs) : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", #ifndef CONFIG_FRAME_POINTER "s0", #endif "memory"); } else #endif handle_riscv_irq(regs); irqentry_exit(regs, state); } #ifdef CONFIG_GENERIC_BUG int is_valid_bugaddr(unsigned long pc) { bug_insn_t insn; if (pc < VMALLOC_START) return 0; if (get_kernel_nofault(insn, (bug_insn_t *)pc)) return 0; if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) return (insn == __BUG_INSN_32); else return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16); } #endif /* CONFIG_GENERIC_BUG */ #ifdef CONFIG_VMAP_STACK /* * Extra stack space that allows us to provide panic messages when the kernel * has overflowed its stack. */ static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)__aligned(16); /* * A temporary stack for use by handle_kernel_stack_overflow. This is used so * we can call into C code to get the per-hart overflow stack. Usage of this * stack must be protected by spin_shadow_stack. */ long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16); /* * A pseudo spinlock to protect the shadow stack from being used by multiple * harts concurrently. This isn't a real spinlock because the lock side must * be taken without a valid stack and only a single register, it's only taken * while in the process of panicing anyway so the performance and error * checking a proper spinlock gives us doesn't matter. 
*/ unsigned long spin_shadow_stack; asmlinkage unsigned long get_overflow_stack(void) { return (unsigned long)this_cpu_ptr(overflow_stack) + OVERFLOW_STACK_SIZE; } asmlinkage void handle_bad_stack(struct pt_regs *regs) { unsigned long tsk_stk = (unsigned long)current->stack; unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack); /* * We're done with the shadow stack by this point, as we're on the * overflow stack. Tell any other concurrent overflowing harts that * they can proceed with panicing by releasing the pseudo-spinlock. * * This pairs with an amoswap.aq in handle_kernel_stack_overflow. */ smp_store_release(&spin_shadow_stack, 0); console_verbose(); pr_emerg("Insufficient stack space to handle exception!\n"); pr_emerg("Task stack: [0x%016lx..0x%016lx]\n", tsk_stk, tsk_stk + THREAD_SIZE); pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n", ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE); __show_regs(regs); panic("Kernel stack overflow"); for (;;) wait_for_interrupt(); } #endif
linux-master
arch/riscv/kernel/traps.c
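Note: get_break_insn_length() and is_valid_bugaddr() in the traps.c row above rely on the RISC-V encoding rule behind GET_INSN_LENGTH/__INSN_LENGTH_MASK: the two lowest bits of an instruction parcel are 11b for 32-bit instructions and anything else for 16-bit compressed ones. A standalone restatement of that decode, simplified to the 16-vs-32-bit distinction the break handling needs (it ignores longer encodings) and not the kernel macro itself:

/* Simplified restatement of the length rule used above: low two bits
 * equal to 0b11 means a 32-bit instruction, otherwise a 16-bit
 * compressed one. Illustrative only. */
#include <assert.h>
#include <stdint.h>

static unsigned int insn_length(uint32_t insn)
{
	return (insn & 0x3) == 0x3 ? 4 : 2;
}

int main(void)
{
	assert(insn_length(0x00100073) == 4);	/* ebreak */
	assert(insn_length(0x9002) == 2);	/* c.ebreak */
	return 0;
}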
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive */ #define GENERATING_ASM_OFFSETS #include <linux/kbuild.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/suspend.h> #include <asm/kvm_host.h> #include <asm/thread_info.h> #include <asm/ptrace.h> #include <asm/cpu_ops_sbi.h> #include <asm/suspend.h> void asm_offsets(void); void asm_offsets(void) { OFFSET(TASK_THREAD_RA, task_struct, thread.ra); OFFSET(TASK_THREAD_SP, task_struct, thread.sp); OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]); OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]); OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]); OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]); OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]); OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]); OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]); OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]); OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]); OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]); OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]); OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]); OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]); OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]); OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]); OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]); OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]); OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]); OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]); OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]); OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]); OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]); OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]); OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]); OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]); OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]); OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]); OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]); OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]); OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]); OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]); OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]); OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]); OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]); OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]); OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]); OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]); OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr); #ifdef CONFIG_STACKPROTECTOR OFFSET(TSK_STACK_CANARY, task_struct, stack_canary); #endif DEFINE(PT_SIZE, sizeof(struct pt_regs)); OFFSET(PT_EPC, pt_regs, epc); OFFSET(PT_RA, pt_regs, ra); OFFSET(PT_FP, pt_regs, s0); OFFSET(PT_S0, 
pt_regs, s0); OFFSET(PT_S1, pt_regs, s1); OFFSET(PT_S2, pt_regs, s2); OFFSET(PT_S3, pt_regs, s3); OFFSET(PT_S4, pt_regs, s4); OFFSET(PT_S5, pt_regs, s5); OFFSET(PT_S6, pt_regs, s6); OFFSET(PT_S7, pt_regs, s7); OFFSET(PT_S8, pt_regs, s8); OFFSET(PT_S9, pt_regs, s9); OFFSET(PT_S10, pt_regs, s10); OFFSET(PT_S11, pt_regs, s11); OFFSET(PT_SP, pt_regs, sp); OFFSET(PT_TP, pt_regs, tp); OFFSET(PT_A0, pt_regs, a0); OFFSET(PT_A1, pt_regs, a1); OFFSET(PT_A2, pt_regs, a2); OFFSET(PT_A3, pt_regs, a3); OFFSET(PT_A4, pt_regs, a4); OFFSET(PT_A5, pt_regs, a5); OFFSET(PT_A6, pt_regs, a6); OFFSET(PT_A7, pt_regs, a7); OFFSET(PT_T0, pt_regs, t0); OFFSET(PT_T1, pt_regs, t1); OFFSET(PT_T2, pt_regs, t2); OFFSET(PT_T3, pt_regs, t3); OFFSET(PT_T4, pt_regs, t4); OFFSET(PT_T5, pt_regs, t5); OFFSET(PT_T6, pt_regs, t6); OFFSET(PT_GP, pt_regs, gp); OFFSET(PT_ORIG_A0, pt_regs, orig_a0); OFFSET(PT_STATUS, pt_regs, status); OFFSET(PT_BADADDR, pt_regs, badaddr); OFFSET(PT_CAUSE, pt_regs, cause); OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs); OFFSET(HIBERN_PBE_ADDR, pbe, address); OFFSET(HIBERN_PBE_ORIG, pbe, orig_address); OFFSET(HIBERN_PBE_NEXT, pbe, next); OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero); OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra); OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp); OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp); OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp); OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0); OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1); OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2); OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0); OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1); OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0); OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1); OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2); OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3); OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4); OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5); OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6); OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7); OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2); OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3); OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4); OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5); OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6); OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7); OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8); OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, guest_context.s9); OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10); OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11); OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3); OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4); OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5); OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6); OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc); OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus); OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus); OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren); OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero); OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra); OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp); OFFSET(KVM_ARCH_HOST_GP, 
kvm_vcpu_arch, host_context.gp); OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp); OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0); OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1); OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2); OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0); OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1); OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0); OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1); OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2); OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3); OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4); OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5); OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6); OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7); OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2); OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3); OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4); OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5); OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6); OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7); OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8); OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9); OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10); OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11); OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3); OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4); OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5); OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6); OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc); OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus); OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus); OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch); OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec); OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren); OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc); OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause); OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval); OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval); OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst); /* F extension */ OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]); OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]); OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]); OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]); OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]); OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]); OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]); OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]); OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]); OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]); OFFSET(KVM_ARCH_FP_F_F10, kvm_cpu_context, fp.f.f[10]); OFFSET(KVM_ARCH_FP_F_F11, kvm_cpu_context, fp.f.f[11]); OFFSET(KVM_ARCH_FP_F_F12, kvm_cpu_context, fp.f.f[12]); OFFSET(KVM_ARCH_FP_F_F13, kvm_cpu_context, fp.f.f[13]); OFFSET(KVM_ARCH_FP_F_F14, kvm_cpu_context, fp.f.f[14]); OFFSET(KVM_ARCH_FP_F_F15, kvm_cpu_context, fp.f.f[15]); OFFSET(KVM_ARCH_FP_F_F16, kvm_cpu_context, fp.f.f[16]); OFFSET(KVM_ARCH_FP_F_F17, kvm_cpu_context, fp.f.f[17]); OFFSET(KVM_ARCH_FP_F_F18, kvm_cpu_context, fp.f.f[18]); OFFSET(KVM_ARCH_FP_F_F19, kvm_cpu_context, fp.f.f[19]); OFFSET(KVM_ARCH_FP_F_F20, kvm_cpu_context, fp.f.f[20]); OFFSET(KVM_ARCH_FP_F_F21, kvm_cpu_context, fp.f.f[21]); OFFSET(KVM_ARCH_FP_F_F22, 
kvm_cpu_context, fp.f.f[22]); OFFSET(KVM_ARCH_FP_F_F23, kvm_cpu_context, fp.f.f[23]); OFFSET(KVM_ARCH_FP_F_F24, kvm_cpu_context, fp.f.f[24]); OFFSET(KVM_ARCH_FP_F_F25, kvm_cpu_context, fp.f.f[25]); OFFSET(KVM_ARCH_FP_F_F26, kvm_cpu_context, fp.f.f[26]); OFFSET(KVM_ARCH_FP_F_F27, kvm_cpu_context, fp.f.f[27]); OFFSET(KVM_ARCH_FP_F_F28, kvm_cpu_context, fp.f.f[28]); OFFSET(KVM_ARCH_FP_F_F29, kvm_cpu_context, fp.f.f[29]); OFFSET(KVM_ARCH_FP_F_F30, kvm_cpu_context, fp.f.f[30]); OFFSET(KVM_ARCH_FP_F_F31, kvm_cpu_context, fp.f.f[31]); OFFSET(KVM_ARCH_FP_F_FCSR, kvm_cpu_context, fp.f.fcsr); /* D extension */ OFFSET(KVM_ARCH_FP_D_F0, kvm_cpu_context, fp.d.f[0]); OFFSET(KVM_ARCH_FP_D_F1, kvm_cpu_context, fp.d.f[1]); OFFSET(KVM_ARCH_FP_D_F2, kvm_cpu_context, fp.d.f[2]); OFFSET(KVM_ARCH_FP_D_F3, kvm_cpu_context, fp.d.f[3]); OFFSET(KVM_ARCH_FP_D_F4, kvm_cpu_context, fp.d.f[4]); OFFSET(KVM_ARCH_FP_D_F5, kvm_cpu_context, fp.d.f[5]); OFFSET(KVM_ARCH_FP_D_F6, kvm_cpu_context, fp.d.f[6]); OFFSET(KVM_ARCH_FP_D_F7, kvm_cpu_context, fp.d.f[7]); OFFSET(KVM_ARCH_FP_D_F8, kvm_cpu_context, fp.d.f[8]); OFFSET(KVM_ARCH_FP_D_F9, kvm_cpu_context, fp.d.f[9]); OFFSET(KVM_ARCH_FP_D_F10, kvm_cpu_context, fp.d.f[10]); OFFSET(KVM_ARCH_FP_D_F11, kvm_cpu_context, fp.d.f[11]); OFFSET(KVM_ARCH_FP_D_F12, kvm_cpu_context, fp.d.f[12]); OFFSET(KVM_ARCH_FP_D_F13, kvm_cpu_context, fp.d.f[13]); OFFSET(KVM_ARCH_FP_D_F14, kvm_cpu_context, fp.d.f[14]); OFFSET(KVM_ARCH_FP_D_F15, kvm_cpu_context, fp.d.f[15]); OFFSET(KVM_ARCH_FP_D_F16, kvm_cpu_context, fp.d.f[16]); OFFSET(KVM_ARCH_FP_D_F17, kvm_cpu_context, fp.d.f[17]); OFFSET(KVM_ARCH_FP_D_F18, kvm_cpu_context, fp.d.f[18]); OFFSET(KVM_ARCH_FP_D_F19, kvm_cpu_context, fp.d.f[19]); OFFSET(KVM_ARCH_FP_D_F20, kvm_cpu_context, fp.d.f[20]); OFFSET(KVM_ARCH_FP_D_F21, kvm_cpu_context, fp.d.f[21]); OFFSET(KVM_ARCH_FP_D_F22, kvm_cpu_context, fp.d.f[22]); OFFSET(KVM_ARCH_FP_D_F23, kvm_cpu_context, fp.d.f[23]); OFFSET(KVM_ARCH_FP_D_F24, kvm_cpu_context, fp.d.f[24]); OFFSET(KVM_ARCH_FP_D_F25, kvm_cpu_context, fp.d.f[25]); OFFSET(KVM_ARCH_FP_D_F26, kvm_cpu_context, fp.d.f[26]); OFFSET(KVM_ARCH_FP_D_F27, kvm_cpu_context, fp.d.f[27]); OFFSET(KVM_ARCH_FP_D_F28, kvm_cpu_context, fp.d.f[28]); OFFSET(KVM_ARCH_FP_D_F29, kvm_cpu_context, fp.d.f[29]); OFFSET(KVM_ARCH_FP_D_F30, kvm_cpu_context, fp.d.f[30]); OFFSET(KVM_ARCH_FP_D_F31, kvm_cpu_context, fp.d.f[31]); OFFSET(KVM_ARCH_FP_D_FCSR, kvm_cpu_context, fp.d.fcsr); /* * THREAD_{F,X}* might be larger than a S-type offset can handle, but * these are used in performance-sensitive assembly so we can't resort * to loading the long immediate every time. 
*/ DEFINE(TASK_THREAD_RA_RA, offsetof(struct task_struct, thread.ra) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_SP_RA, offsetof(struct task_struct, thread.sp) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S0_RA, offsetof(struct task_struct, thread.s[0]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S1_RA, offsetof(struct task_struct, thread.s[1]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S2_RA, offsetof(struct task_struct, thread.s[2]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S3_RA, offsetof(struct task_struct, thread.s[3]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S4_RA, offsetof(struct task_struct, thread.s[4]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S5_RA, offsetof(struct task_struct, thread.s[5]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S6_RA, offsetof(struct task_struct, thread.s[6]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S7_RA, offsetof(struct task_struct, thread.s[7]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S8_RA, offsetof(struct task_struct, thread.s[8]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S9_RA, offsetof(struct task_struct, thread.s[9]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S10_RA, offsetof(struct task_struct, thread.s[10]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_S11_RA, offsetof(struct task_struct, thread.s[11]) - offsetof(struct task_struct, thread.ra) ); DEFINE(TASK_THREAD_F0_F0, offsetof(struct task_struct, thread.fstate.f[0]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F1_F0, offsetof(struct task_struct, thread.fstate.f[1]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F2_F0, offsetof(struct task_struct, thread.fstate.f[2]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F3_F0, offsetof(struct task_struct, thread.fstate.f[3]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F4_F0, offsetof(struct task_struct, thread.fstate.f[4]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F5_F0, offsetof(struct task_struct, thread.fstate.f[5]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F6_F0, offsetof(struct task_struct, thread.fstate.f[6]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F7_F0, offsetof(struct task_struct, thread.fstate.f[7]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F8_F0, offsetof(struct task_struct, thread.fstate.f[8]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F9_F0, offsetof(struct task_struct, thread.fstate.f[9]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F10_F0, offsetof(struct task_struct, thread.fstate.f[10]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F11_F0, offsetof(struct task_struct, thread.fstate.f[11]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F12_F0, offsetof(struct task_struct, thread.fstate.f[12]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F13_F0, offsetof(struct task_struct, thread.fstate.f[13]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F14_F0, offsetof(struct task_struct, thread.fstate.f[14]) - offsetof(struct task_struct, thread.fstate.f[0]) ); 
DEFINE(TASK_THREAD_F15_F0, offsetof(struct task_struct, thread.fstate.f[15]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F16_F0, offsetof(struct task_struct, thread.fstate.f[16]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F17_F0, offsetof(struct task_struct, thread.fstate.f[17]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F18_F0, offsetof(struct task_struct, thread.fstate.f[18]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F19_F0, offsetof(struct task_struct, thread.fstate.f[19]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F20_F0, offsetof(struct task_struct, thread.fstate.f[20]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F21_F0, offsetof(struct task_struct, thread.fstate.f[21]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F22_F0, offsetof(struct task_struct, thread.fstate.f[22]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F23_F0, offsetof(struct task_struct, thread.fstate.f[23]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F24_F0, offsetof(struct task_struct, thread.fstate.f[24]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F25_F0, offsetof(struct task_struct, thread.fstate.f[25]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F26_F0, offsetof(struct task_struct, thread.fstate.f[26]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F27_F0, offsetof(struct task_struct, thread.fstate.f[27]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F28_F0, offsetof(struct task_struct, thread.fstate.f[28]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F29_F0, offsetof(struct task_struct, thread.fstate.f[29]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F30_F0, offsetof(struct task_struct, thread.fstate.f[30]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_F31_F0, offsetof(struct task_struct, thread.fstate.f[31]) - offsetof(struct task_struct, thread.fstate.f[0]) ); DEFINE(TASK_THREAD_FCSR_F0, offsetof(struct task_struct, thread.fstate.fcsr) - offsetof(struct task_struct, thread.fstate.f[0]) ); /* * We allocate a pt_regs on the stack when entering the kernel. This * ensures the alignment is sane. */ DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN)); OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr); OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr); OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr); }
linux-master
arch/riscv/kernel/asm-offsets.c
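Note: asm_offsets() in the row above uses kbuild's OFFSET()/DEFINE() macros, which reduce to offsetof() values that the build system turns into assembler-visible constants (asm-offsets.h) for the entry and context-switch code. A small standalone illustration of the underlying idea; the struct and printf output are made up for the example, and the real kernel emits special asm markers rather than printing:

/* Illustration of what OFFSET()/DEFINE() above compute: plain offsetof()
 * constants that assembly can use as immediates. The struct here is a
 * made-up stand-in, not the kernel's task_struct. */
#include <stddef.h>
#include <stdio.h>

struct thread_demo {
	unsigned long ra;
	unsigned long sp;
	unsigned long s[12];
};

int main(void)
{
	printf("#define DEMO_THREAD_RA %zu\n", offsetof(struct thread_demo, ra));
	printf("#define DEMO_THREAD_SP %zu\n", offsetof(struct thread_demo, sp));
	printf("#define DEMO_THREAD_S0 %zu\n", offsetof(struct thread_demo, s[0]));
	return 0;
}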
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021 Western Digital Corporation or its affiliates. * Copyright (c) 2022 Ventana Micro Systems Inc. */ #include <linux/ftrace.h> #include <asm/csr.h> #include <asm/suspend.h> void suspend_save_csrs(struct suspend_context *context) { context->scratch = csr_read(CSR_SCRATCH); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); /* * No need to save/restore IP CSR (i.e. MIP or SIP) because: * * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by * external devices (such as interrupt controller, timer, etc). * 2. For MMU (S-mode) kernel, the bits in SIP are set by * M-mode firmware and external devices (such as interrupt * controller, etc). */ #ifdef CONFIG_MMU context->satp = csr_read(CSR_SATP); #endif } void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, context->scratch); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); #ifdef CONFIG_MMU csr_write(CSR_SATP, context->satp); #endif } int cpu_suspend(unsigned long arg, int (*finish)(unsigned long arg, unsigned long entry, unsigned long context)) { int rc = 0; struct suspend_context context = { 0 }; /* Finisher should be non-NULL */ if (!finish) return -EINVAL; /* Save additional CSRs*/ suspend_save_csrs(&context); /* * Function graph tracer state gets incosistent when the kernel * calls functions that never return (aka finishers) hence disable * graph tracing during their execution. */ pause_graph_tracing(); /* Save context on stack */ if (__cpu_suspend_enter(&context)) { /* Call the finisher */ rc = finish(arg, __pa_symbol(__cpu_resume_enter), (ulong)&context); /* * Should never reach here, unless the suspend finisher * fails. Successful cpu_suspend() should return from * __cpu_resume_entry() */ if (!rc) rc = -EOPNOTSUPP; } /* Enable function graph tracer */ unpause_graph_tracing(); /* Restore additional CSRs */ suspend_restore_csrs(&context); return rc; }
linux-master
arch/riscv/kernel/suspend.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2023 SiFive * Author: Andy Chiu <[email protected]> */ #include <linux/export.h> #include <linux/sched/signal.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/prctl.h> #include <asm/thread_info.h> #include <asm/processor.h> #include <asm/insn.h> #include <asm/vector.h> #include <asm/csr.h> #include <asm/elf.h> #include <asm/ptrace.h> #include <asm/bug.h> static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE); unsigned long riscv_v_vsize __read_mostly; EXPORT_SYMBOL_GPL(riscv_v_vsize); int riscv_v_setup_vsize(void) { unsigned long this_vsize; /* There are 32 vector registers with vlenb length. */ riscv_v_enable(); this_vsize = csr_read(CSR_VLENB) * 32; riscv_v_disable(); if (!riscv_v_vsize) { riscv_v_vsize = this_vsize; return 0; } if (riscv_v_vsize != this_vsize) { WARN(1, "RISCV_ISA_V only supports one vlenb on SMP systems"); return -EOPNOTSUPP; } return 0; } static bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; u32 width, csr; /* * All V-related instructions, including CSR operations are 4-Byte. So, * do not handle if the instruction length is not 4-Byte. */ if (unlikely(GET_INSN_LENGTH(insn_buf) != 4)) return false; switch (opcode) { case RVV_OPCODE_VECTOR: return true; case RVV_OPCODE_VL: case RVV_OPCODE_VS: width = RVV_EXRACT_VL_VS_WIDTH(insn_buf); if (width == RVV_VL_VS_WIDTH_8 || width == RVV_VL_VS_WIDTH_16 || width == RVV_VL_VS_WIDTH_32 || width == RVV_VL_VS_WIDTH_64) return true; break; case RVG_OPCODE_SYSTEM: csr = RVG_EXTRACT_SYSTEM_CSR(insn_buf); if ((csr >= CSR_VSTART && csr <= CSR_VCSR) || (csr >= CSR_VL && csr <= CSR_VLENB)) return true; } return false; } static int riscv_v_thread_zalloc(void) { void *datap; datap = kzalloc(riscv_v_vsize, GFP_KERNEL); if (!datap) return -ENOMEM; current->thread.vstate.datap = datap; memset(&current->thread.vstate, 0, offsetof(struct __riscv_v_ext_state, datap)); return 0; } #define VSTATE_CTRL_GET_CUR(x) ((x) & PR_RISCV_V_VSTATE_CTRL_CUR_MASK) #define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2) #define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) #define VSTATE_CTRL_GET_INHERIT(x) (!!((x) & PR_RISCV_V_VSTATE_CTRL_INHERIT)) static inline int riscv_v_ctrl_get_cur(struct task_struct *tsk) { return VSTATE_CTRL_GET_CUR(tsk->thread.vstate_ctrl); } static inline int riscv_v_ctrl_get_next(struct task_struct *tsk) { return VSTATE_CTRL_GET_NEXT(tsk->thread.vstate_ctrl); } static inline bool riscv_v_ctrl_test_inherit(struct task_struct *tsk) { return VSTATE_CTRL_GET_INHERIT(tsk->thread.vstate_ctrl); } static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt, bool inherit) { unsigned long ctrl; ctrl = cur & PR_RISCV_V_VSTATE_CTRL_CUR_MASK; ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt); if (inherit) ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT; tsk->thread.vstate_ctrl = ctrl; } bool riscv_v_vstate_ctrl_user_allowed(void) { return riscv_v_ctrl_get_cur(current) == PR_RISCV_V_VSTATE_CTRL_ON; } EXPORT_SYMBOL_GPL(riscv_v_vstate_ctrl_user_allowed); bool riscv_v_first_use_handler(struct pt_regs *regs) { u32 __user *epc = (u32 __user *)regs->epc; u32 insn = (u32)regs->badaddr; /* Do not handle if V is not supported, or disabled */ if (!(ELF_HWCAP & COMPAT_HWCAP_ISA_V)) return false; /* If V has been enabled then it is not the first-use trap */ if (riscv_v_vstate_query(regs)) return false; /* Get the instruction 
*/ if (!insn) { if (__get_user(insn, epc)) return false; } /* Filter out non-V instructions */ if (!insn_is_vector(insn)) return false; /* Sanity check. datap should be null by the time of the first-use trap */ WARN_ON(current->thread.vstate.datap); /* * Now we sure that this is a V instruction. And it executes in the * context where VS has been off. So, try to allocate the user's V * context and resume execution. */ if (riscv_v_thread_zalloc()) { force_sig(SIGBUS); return true; } riscv_v_vstate_on(regs); riscv_v_vstate_restore(current, regs); return true; } void riscv_v_vstate_ctrl_init(struct task_struct *tsk) { bool inherit; int cur, next; if (!has_vector()) return; next = riscv_v_ctrl_get_next(tsk); if (!next) { if (READ_ONCE(riscv_v_implicit_uacc)) cur = PR_RISCV_V_VSTATE_CTRL_ON; else cur = PR_RISCV_V_VSTATE_CTRL_OFF; } else { cur = next; } /* Clear next mask if inherit-bit is not set */ inherit = riscv_v_ctrl_test_inherit(tsk); if (!inherit) next = PR_RISCV_V_VSTATE_CTRL_DEFAULT; riscv_v_ctrl_set(tsk, cur, next, inherit); } long riscv_v_vstate_ctrl_get_current(void) { if (!has_vector()) return -EINVAL; return current->thread.vstate_ctrl & PR_RISCV_V_VSTATE_CTRL_MASK; } long riscv_v_vstate_ctrl_set_current(unsigned long arg) { bool inherit; int cur, next; if (!has_vector()) return -EINVAL; if (arg & ~PR_RISCV_V_VSTATE_CTRL_MASK) return -EINVAL; cur = VSTATE_CTRL_GET_CUR(arg); switch (cur) { case PR_RISCV_V_VSTATE_CTRL_OFF: /* Do not allow user to turn off V if current is not off */ if (riscv_v_ctrl_get_cur(current) != PR_RISCV_V_VSTATE_CTRL_OFF) return -EPERM; break; case PR_RISCV_V_VSTATE_CTRL_ON: break; case PR_RISCV_V_VSTATE_CTRL_DEFAULT: cur = riscv_v_ctrl_get_cur(current); break; default: return -EINVAL; } next = VSTATE_CTRL_GET_NEXT(arg); inherit = VSTATE_CTRL_GET_INHERIT(arg); switch (next) { case PR_RISCV_V_VSTATE_CTRL_DEFAULT: case PR_RISCV_V_VSTATE_CTRL_OFF: case PR_RISCV_V_VSTATE_CTRL_ON: riscv_v_ctrl_set(current, cur, next, inherit); return 0; } return -EINVAL; } #ifdef CONFIG_SYSCTL static struct ctl_table riscv_v_default_vstate_table[] = { { .procname = "riscv_v_default_allow", .data = &riscv_v_implicit_uacc, .maxlen = sizeof(riscv_v_implicit_uacc), .mode = 0644, .proc_handler = proc_dobool, }, { } }; static int __init riscv_v_sysctl_init(void) { if (has_vector()) if (!register_sysctl("abi", riscv_v_default_vstate_table)) return -EINVAL; return 0; } #else /* ! CONFIG_SYSCTL */ static int __init riscv_v_sysctl_init(void) { return 0; } #endif /* ! CONFIG_SYSCTL */ static int riscv_v_init(void) { return riscv_v_sysctl_init(); } core_initcall(riscv_v_init);
linux-master
arch/riscv/kernel/vector.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright (C) 2017 Zihao Yu */ #include <linux/elf.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/moduleloader.h> #include <linux/vmalloc.h> #include <linux/sizes.h> #include <linux/pgtable.h> #include <asm/alternative.h> #include <asm/sections.h> /* * The auipc+jalr instruction pair can reach any PC-relative offset * in the range [-2^31 - 2^11, 2^31 - 2^11) */ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) { #ifdef CONFIG_32BIT return true; #else return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11)); #endif } static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { if (v != (u32)v) { pr_err("%s: value %016llx out of range for 32-bit field\n", me->name, (long long)v); return -EINVAL; } *location = v; return 0; } static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) { *(u64 *)location = v; return 0; } static int apply_r_riscv_branch_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u32 imm12 = (offset & 0x1000) << (31 - 12); u32 imm11 = (offset & 0x800) >> (11 - 7); u32 imm10_5 = (offset & 0x7e0) << (30 - 10); u32 imm4_1 = (offset & 0x1e) << (11 - 4); *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1; return 0; } static int apply_r_riscv_jal_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u32 imm20 = (offset & 0x100000) << (31 - 20); u32 imm19_12 = (offset & 0xff000); u32 imm11 = (offset & 0x800) << (20 - 11); u32 imm10_1 = (offset & 0x7fe) << (30 - 10); *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1; return 0; } static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u16 imm8 = (offset & 0x100) << (12 - 8); u16 imm7_6 = (offset & 0xc0) >> (6 - 5); u16 imm5 = (offset & 0x20) >> (5 - 2); u16 imm4_3 = (offset & 0x18) << (12 - 5); u16 imm2_1 = (offset & 0x6) << (12 - 10); *(u16 *)location = (*(u16 *)location & 0xe383) | imm8 | imm7_6 | imm5 | imm4_3 | imm2_1; return 0; } static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u16 imm11 = (offset & 0x800) << (12 - 11); u16 imm10 = (offset & 0x400) >> (10 - 8); u16 imm9_8 = (offset & 0x300) << (12 - 11); u16 imm7 = (offset & 0x80) >> (7 - 6); u16 imm6 = (offset & 0x40) << (12 - 11); u16 imm5 = (offset & 0x20) >> (5 - 2); u16 imm4 = (offset & 0x10) << (12 - 5); u16 imm3_1 = (offset & 0xe) << (12 - 10); *(u16 *)location = (*(u16 *)location & 0xe003) | imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1; return 0; } static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); return -EINVAL; } hi20 = (offset + 0x800) & 0xfffff000; *location = (*location & 0xfff) | hi20; return 0; } static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location, Elf_Addr v) { /* * v is the lo12 value to fill. It is calculated before calling this * handler. 
*/ *location = (*location & 0xfffff) | ((v & 0xfff) << 20); return 0; } static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location, Elf_Addr v) { /* * v is the lo12 value to fill. It is calculated before calling this * handler. */ u32 imm11_5 = (v & 0xfe0) << (31 - 11); u32 imm4_0 = (v & 0x1f) << (11 - 4); *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; return 0; } static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { s32 hi20; if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); return -EINVAL; } hi20 = ((s32)v + 0x800) & 0xfffff000; *location = (*location & 0xfff) | hi20; return 0; } static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location, Elf_Addr v) { /* Skip medlow checking because of filtering by HI20 already */ s32 hi20 = ((s32)v + 0x800) & 0xfffff000; s32 lo12 = ((s32)v - hi20); *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20); return 0; } static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, Elf_Addr v) { /* Skip medlow checking because of filtering by HI20 already */ s32 hi20 = ((s32)v + 0x800) & 0xfffff000; s32 lo12 = ((s32)v - hi20); u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11); u32 imm4_0 = (lo12 & 0x1f) << (11 - 4); *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; return 0; } static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; /* Always emit the got entry */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { offset = module_emit_got_entry(me, v); offset = (void *)offset - (void *)location; } else { pr_err( "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", me->name, (long long)v, location); return -EINVAL; } hi20 = (offset + 0x800) & 0xfffff000; *location = (*location & 0xfff) | hi20; return 0; } static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u32 hi20, lo12; if (!riscv_insn_valid_32bit_offset(offset)) { /* Only emit the plt entry if offset over 32-bit range */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { offset = module_emit_plt_entry(me, v); offset = (void *)offset - (void *)location; } else { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); return -EINVAL; } } hi20 = (offset + 0x800) & 0xfffff000; lo12 = (offset - hi20) & 0xfff; *location = (*location & 0xfff) | hi20; *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); return 0; } static int apply_r_riscv_call_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; u32 hi20, lo12; if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); return -EINVAL; } hi20 = (offset + 0x800) & 0xfffff000; lo12 = (offset - hi20) & 0xfff; *location = (*location & 0xfff) | hi20; *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); return 0; } static int apply_r_riscv_relax_rela(struct module *me, u32 *location, Elf_Addr v) { return 0; } static int apply_r_riscv_align_rela(struct module *me, u32 *location, Elf_Addr v) { pr_err( "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n", me->name, location); return -EINVAL; } static int apply_r_riscv_add16_rela(struct module *me, u32 *location, 
Elf_Addr v) { *(u16 *)location += (u16)v; return 0; } static int apply_r_riscv_add32_rela(struct module *me, u32 *location, Elf_Addr v) { *(u32 *)location += (u32)v; return 0; } static int apply_r_riscv_add64_rela(struct module *me, u32 *location, Elf_Addr v) { *(u64 *)location += (u64)v; return 0; } static int apply_r_riscv_sub16_rela(struct module *me, u32 *location, Elf_Addr v) { *(u16 *)location -= (u16)v; return 0; } static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, Elf_Addr v) { *(u32 *)location -= (u32)v; return 0; } static int apply_r_riscv_sub64_rela(struct module *me, u32 *location, Elf_Addr v) { *(u64 *)location -= (u64)v; return 0; } static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, Elf_Addr v) = { [R_RISCV_32] = apply_r_riscv_32_rela, [R_RISCV_64] = apply_r_riscv_64_rela, [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, [R_RISCV_JAL] = apply_r_riscv_jal_rela, [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela, [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela, [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela, [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela, [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela, [R_RISCV_HI20] = apply_r_riscv_hi20_rela, [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela, [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela, [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela, [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela, [R_RISCV_CALL] = apply_r_riscv_call_rela, [R_RISCV_RELAX] = apply_r_riscv_relax_rela, [R_RISCV_ALIGN] = apply_r_riscv_align_rela, [R_RISCV_ADD16] = apply_r_riscv_add16_rela, [R_RISCV_ADD32] = apply_r_riscv_add32_rela, [R_RISCV_ADD64] = apply_r_riscv_add64_rela, [R_RISCV_SUB16] = apply_r_riscv_sub16_rela, [R_RISCV_SUB32] = apply_r_riscv_sub32_rela, [R_RISCV_SUB64] = apply_r_riscv_sub64_rela, }; int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr; int (*handler)(struct module *me, u32 *location, Elf_Addr v); Elf_Sym *sym; u32 *location; unsigned int i, type; Elf_Addr v; int res; pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to */ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_RISCV_R_SYM(rel[i].r_info); if (IS_ERR_VALUE(sym->st_value)) { /* Ignore unresolved weak symbol */ if (ELF_ST_BIND(sym->st_info) == STB_WEAK) continue; pr_warn("%s: Unknown symbol %s\n", me->name, strtab + sym->st_name); return -ENOENT; } type = ELF_RISCV_R_TYPE(rel[i].r_info); if (type < ARRAY_SIZE(reloc_handlers_rela)) handler = reloc_handlers_rela[type]; else handler = NULL; if (!handler) { pr_err("%s: Unknown relocation type %u\n", me->name, type); return -EINVAL; } v = sym->st_value + rel[i].r_addend; if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) { unsigned int j; for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { unsigned long hi20_loc = sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info); /* Find the corresponding HI20 relocation entry */ if (hi20_loc == sym->st_value && (hi20_type == R_RISCV_PCREL_HI20 || hi20_type == R_RISCV_GOT_HI20)) { s32 hi20, lo12; Elf_Sym *hi20_sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_RISCV_R_SYM(rel[j].r_info); 
unsigned long hi20_sym_val = hi20_sym->st_value + rel[j].r_addend; /* Calculate lo12 */ size_t offset = hi20_sym_val - hi20_loc; if (IS_ENABLED(CONFIG_MODULE_SECTIONS) && hi20_type == R_RISCV_GOT_HI20) { offset = module_emit_got_entry( me, hi20_sym_val); offset = offset - hi20_loc; } hi20 = (offset + 0x800) & 0xfffff000; lo12 = offset - hi20; v = lo12; break; } } if (j == sechdrs[relsec].sh_size / sizeof(*rel)) { pr_err( "%s: Can not find HI20 relocation information\n", me->name); return -EINVAL; } } res = handler(me, location, v); if (res) return res; } return 0; } #if defined(CONFIG_MMU) && defined(CONFIG_64BIT) void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { const Elf_Shdr *s; s = find_section(hdr, sechdrs, ".alternative"); if (s) apply_module_alternatives((void *)s->sh_addr, s->sh_size); return 0; }
linux-master
arch/riscv/kernel/module.c
// SPDX-License-Identifier: GPL-2.0-only #define __SYSCALL_COMPAT #include <linux/compat.h> #include <linux/syscalls.h> #include <asm-generic/mman-common.h> #include <asm-generic/syscalls.h> #include <asm/syscall.h> #undef __SYSCALL #define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *); #include <asm/unistd.h> #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = __riscv_##call, asmlinkage long compat_sys_rt_sigreturn(void); void * const compat_sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall, #include <asm/unistd.h> };
linux-master
arch/riscv/kernel/compat_syscall_table.c
/* SPDX-License-Identifier: GPL-2.0 * * Copyright (C) 2014-2017 Linaro Ltd. <[email protected]> * * Copyright (C) 2018 Andes Technology Corporation <[email protected]> */ #include <linux/elf.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleloader.h> unsigned long module_emit_got_entry(struct module *mod, unsigned long val) { struct mod_section *got_sec = &mod->arch.got; int i = got_sec->num_entries; struct got_entry *got = get_got_entry(val, got_sec); if (got) return (unsigned long)got; /* There is no duplicate entry, create a new one */ got = (struct got_entry *)got_sec->shdr->sh_addr; got[i] = emit_got_entry(val); got_sec->num_entries++; BUG_ON(got_sec->num_entries > got_sec->max_entries); return (unsigned long)&got[i]; } unsigned long module_emit_plt_entry(struct module *mod, unsigned long val) { struct mod_section *got_plt_sec = &mod->arch.got_plt; struct got_entry *got_plt; struct mod_section *plt_sec = &mod->arch.plt; struct plt_entry *plt = get_plt_entry(val, plt_sec, got_plt_sec); int i = plt_sec->num_entries; if (plt) return (unsigned long)plt; /* There is no duplicate entry, create a new one */ got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr; got_plt[i] = emit_got_entry(val); plt = (struct plt_entry *)plt_sec->shdr->sh_addr; plt[i] = emit_plt_entry(val, (unsigned long)&plt[i], (unsigned long)&got_plt[i]); plt_sec->num_entries++; got_plt_sec->num_entries++; BUG_ON(plt_sec->num_entries > plt_sec->max_entries); return (unsigned long)&plt[i]; } static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y) { return x->r_info == y->r_info && x->r_addend == y->r_addend; } static bool duplicate_rela(const Elf_Rela *rela, int idx) { int i; for (i = 0; i < idx; i++) { if (is_rela_equal(&rela[i], &rela[idx])) return true; } return false; } static void count_max_entries(Elf_Rela *relas, int num, unsigned int *plts, unsigned int *gots) { unsigned int type, i; for (i = 0; i < num; i++) { type = ELF_RISCV_R_TYPE(relas[i].r_info); if (type == R_RISCV_CALL_PLT) { if (!duplicate_rela(relas, i)) (*plts)++; } else if (type == R_RISCV_GOT_HI20) { if (!duplicate_rela(relas, i)) (*gots)++; } } } int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { unsigned int num_plts = 0; unsigned int num_gots = 0; int i; /* * Find the empty .got and .plt sections. 
*/ for (i = 0; i < ehdr->e_shnum; i++) { if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt")) mod->arch.plt.shdr = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got")) mod->arch.got.shdr = sechdrs + i; else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got.plt")) mod->arch.got_plt.shdr = sechdrs + i; } if (!mod->arch.plt.shdr) { pr_err("%s: module PLT section(s) missing\n", mod->name); return -ENOEXEC; } if (!mod->arch.got.shdr) { pr_err("%s: module GOT section(s) missing\n", mod->name); return -ENOEXEC; } if (!mod->arch.got_plt.shdr) { pr_err("%s: module GOT.PLT section(s) missing\n", mod->name); return -ENOEXEC; } /* Calculate the maxinum number of entries */ for (i = 0; i < ehdr->e_shnum; i++) { Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela); Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; if (sechdrs[i].sh_type != SHT_RELA) continue; /* ignore relocations that operate on non-exec sections */ if (!(dst_sec->sh_flags & SHF_EXECINSTR)) continue; count_max_entries(relas, num_rela, &num_plts, &num_gots); } mod->arch.plt.shdr->sh_type = SHT_NOBITS; mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES; mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry); mod->arch.plt.num_entries = 0; mod->arch.plt.max_entries = num_plts; mod->arch.got.shdr->sh_type = SHT_NOBITS; mod->arch.got.shdr->sh_flags = SHF_ALLOC; mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES; mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry); mod->arch.got.num_entries = 0; mod->arch.got.max_entries = num_gots; mod->arch.got_plt.shdr->sh_type = SHT_NOBITS; mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC; mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES; mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry); mod->arch.got_plt.num_entries = 0; mod->arch.got_plt.max_entries = num_plts; return 0; }
linux-master
arch/riscv/kernel/module-sections.c
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/compat.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/linkage.h> #include <asm/csr.h> #include <asm/signal32.h> #include <asm/switch_to.h> #include <asm/ucontext.h> #include <asm/vdso.h> #define COMPAT_DEBUG_SIG 0 struct compat_sigcontext { struct compat_user_regs_struct sc_regs; union __riscv_fp_state sc_fpregs; }; struct compat_ucontext { compat_ulong_t uc_flags; struct compat_ucontext *uc_link; compat_stack_t uc_stack; sigset_t uc_sigmask; /* There's some padding here to allow sigset_t to be expanded in the * future. Though this is unlikely, other architectures put uc_sigmask * at the end of this structure and explicitly state it can be * expanded, so we didn't want to box ourselves in here. */ __u8 __unused[1024 / 8 - sizeof(sigset_t)]; /* We can't put uc_sigmask at the end of this structure because we need * to be able to expand sigcontext in the future. For example, the * vector ISA extension will almost certainly add ISA state. We want * to ensure all user-visible ISA state can be saved and restored via a * ucontext, so we're putting this at the end in order to allow for * infinite extensibility. Since we know this will be extended and we * assume sigset_t won't be extended an extreme amount, we're * prioritizing this. */ struct compat_sigcontext uc_mcontext; }; struct compat_rt_sigframe { struct compat_siginfo info; struct compat_ucontext uc; }; #ifdef CONFIG_FPU static long compat_restore_fp_state(struct pt_regs *regs, union __riscv_fp_state __user *sc_fpregs) { long err; struct __riscv_d_ext_state __user *state = &sc_fpregs->d; size_t i; err = __copy_from_user(&current->thread.fstate, state, sizeof(*state)); if (unlikely(err)) return err; fstate_restore(current, regs); /* We support no other extension state at this time. */ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { u32 value; err = __get_user(value, &sc_fpregs->q.reserved[i]); if (unlikely(err)) break; if (value != 0) return -EINVAL; } return err; } static long compat_save_fp_state(struct pt_regs *regs, union __riscv_fp_state __user *sc_fpregs) { long err; struct __riscv_d_ext_state __user *state = &sc_fpregs->d; size_t i; fstate_save(current, regs); err = __copy_to_user(state, &current->thread.fstate, sizeof(*state)); if (unlikely(err)) return err; /* We support no other extension state at this time. */ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) { err = __put_user(0, &sc_fpregs->q.reserved[i]); if (unlikely(err)) break; } return err; } #else #define compat_save_fp_state(task, regs) (0) #define compat_restore_fp_state(task, regs) (0) #endif static long compat_restore_sigcontext(struct pt_regs *regs, struct compat_sigcontext __user *sc) { long err; struct compat_user_regs_struct cregs; /* sc_regs is structured the same as the start of pt_regs */ err = __copy_from_user(&cregs, &sc->sc_regs, sizeof(sc->sc_regs)); cregs_to_regs(&cregs, regs); /* Restore the floating-point state. 
*/ if (has_fpu()) err |= compat_restore_fp_state(regs, &sc->sc_fpregs); return err; } COMPAT_SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct compat_rt_sigframe __user *frame; struct task_struct *task; sigset_t set; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; frame = (struct compat_rt_sigframe __user *)regs->sp; if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (compat_restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->a0; badframe: task = current; if (show_unhandled_signals) { pr_info_ratelimited( "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n", task->comm, task_pid_nr(task), __func__, frame, (void *)regs->epc, (void *)regs->sp); } force_sig(SIGSEGV); return 0; } static long compat_setup_sigcontext(struct compat_rt_sigframe __user *frame, struct pt_regs *regs) { struct compat_sigcontext __user *sc = &frame->uc.uc_mcontext; struct compat_user_regs_struct cregs; long err; regs_to_cregs(&cregs, regs); /* sc_regs is structured the same as the start of pt_regs */ err = __copy_to_user(&sc->sc_regs, &cregs, sizeof(sc->sc_regs)); /* Save the floating-point state. */ if (has_fpu()) err |= compat_save_fp_state(regs, &sc->sc_fpregs); return err; } static inline void __user *compat_get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t framesize) { unsigned long sp; /* Default to using normal stack */ sp = regs->sp; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) return (void __user __force *)(-1UL); /* This is the X/Open sanctioned signal stack switching. */ sp = sigsp(sp, ksig) - framesize; /* Align the stack frame. */ sp &= ~0xfUL; return (void __user *)sp; } int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct compat_rt_sigframe __user *frame; long err = 0; frame = compat_get_sigframe(ksig, regs, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user32(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); err |= compat_setup_sigcontext(frame, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; regs->ra = (unsigned long)COMPAT_VDSO_SYMBOL( current->mm->context.vdso, rt_sigreturn); /* * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. * We always pass siginfo and mcontext, regardless of SA_SIGINFO, * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->epc = (unsigned long)ksig->ka.sa.sa_handler; regs->sp = (unsigned long)frame; regs->a0 = ksig->sig; /* a0: signal number */ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */ #if COMPAT_DEBUG_SIG pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n", current->comm, task_pid_nr(current), ksig->sig, (void *)regs->epc, (void *)regs->ra, frame); #endif return 0; }
linux-master
arch/riscv/kernel/compat_signal.c
// SPDX-License-Identifier: GPL-2.0 /* * Clang Control Flow Integrity (CFI) support. * * Copyright (C) 2023 Google LLC */ #include <asm/cfi.h> #include <asm/insn.h> /* * Returns the target address and the expected type when regs->epc points * to a compiler-generated CFI trap. */ static bool decode_cfi_insn(struct pt_regs *regs, unsigned long *target, u32 *type) { unsigned long *regs_ptr = (unsigned long *)regs; int rs1_num; u32 insn; *target = *type = 0; /* * The compiler generates the following instruction sequence * for indirect call checks: * *   lw t1, -4(<reg>) * lui t2, <hi20> * addiw t2, t2, <lo12> * beq t1, t2, .Ltmp1 * ebreak ; <- regs->epc * .Ltmp1: * jalr <reg> * * We can read the expected type and the target address from the * registers passed to the beq/jalr instructions. */ if (get_kernel_nofault(insn, (void *)regs->epc - 4)) return false; if (!riscv_insn_is_beq(insn)) return false; *type = (u32)regs_ptr[RV_EXTRACT_RS1_REG(insn)]; if (get_kernel_nofault(insn, (void *)regs->epc) || get_kernel_nofault(insn, (void *)regs->epc + GET_INSN_LENGTH(insn))) return false; if (riscv_insn_is_jalr(insn)) rs1_num = RV_EXTRACT_RS1_REG(insn); else if (riscv_insn_is_c_jalr(insn)) rs1_num = RVC_EXTRACT_C2_RS1_REG(insn); else return false; *target = regs_ptr[rs1_num]; return true; } /* * Checks if the ebreak trap is because of a CFI failure, and handles the trap * if needed. Returns a bug_trap_type value similarly to report_bug. */ enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { unsigned long target; u32 type; if (!is_cfi_trap(regs->epc)) return BUG_TRAP_TYPE_NONE; if (!decode_cfi_insn(regs, &target, &type)) return report_cfi_failure_noaddr(regs, regs->epc); return report_cfi_failure(regs, regs->epc, &target, type); }
linux-master
arch/riscv/kernel/cfi.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. * <[email protected]> * Copyright (C) 2012 ARM Limited * Copyright (C) 2015 Regents of the University of California */ #include <linux/elf.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/binfmts.h> #include <linux/err.h> #include <asm/page.h> #include <asm/vdso.h> #include <linux/time_namespace.h> #include <vdso/datapage.h> #include <vdso/vsyscall.h> enum vvar_pages { VVAR_DATA_PAGE_OFFSET, VVAR_TIMENS_PAGE_OFFSET, VVAR_NR_PAGES, }; enum rv_vdso_map { RV_VDSO_MAP_VVAR, RV_VDSO_MAP_VDSO, }; #define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT) /* * The vDSO data page. */ static union { struct vdso_data data; u8 page[PAGE_SIZE]; } vdso_data_store __page_aligned_data; struct vdso_data *vdso_data = &vdso_data_store.data; struct __vdso_info { const char *name; const char *vdso_code_start; const char *vdso_code_end; unsigned long vdso_pages; /* Data Mapping */ struct vm_special_mapping *dm; /* Code Mapping */ struct vm_special_mapping *cm; }; static struct __vdso_info vdso_info; #ifdef CONFIG_COMPAT static struct __vdso_info compat_vdso_info; #endif static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { current->mm->context.vdso = (void *)new_vma->vm_start; return 0; } static void __init __vdso_init(struct __vdso_info *vdso_info) { unsigned int i; struct page **vdso_pagelist; unsigned long pfn; if (memcmp(vdso_info->vdso_code_start, "\177ELF", 4)) panic("vDSO is not a valid ELF object!\n"); vdso_info->vdso_pages = ( vdso_info->vdso_code_end - vdso_info->vdso_code_start) >> PAGE_SHIFT; vdso_pagelist = kcalloc(vdso_info->vdso_pages, sizeof(struct page *), GFP_KERNEL); if (vdso_pagelist == NULL) panic("vDSO kcalloc failed!\n"); /* Grab the vDSO code pages. */ pfn = sym_to_pfn(vdso_info->vdso_code_start); for (i = 0; i < vdso_info->vdso_pages; i++) vdso_pagelist[i] = pfn_to_page(pfn + i); vdso_info->cm->pages = vdso_pagelist; } #ifdef CONFIG_TIME_NS struct vdso_data *arch_get_vdso_data(void *vvar_page) { return (struct vdso_data *)(vvar_page); } /* * The vvar mapping contains data for a specific time namespace, so when a task * changes namespace we must unmap its vvar data for the old namespace. * Subsequent faults will map in data for the new namespace. * * For more details see timens_setup_vdso_data(). */ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) { struct mm_struct *mm = task->mm; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (vma_is_special_mapping(vma, vdso_info.dm)) zap_vma_pages(vma); #ifdef CONFIG_COMPAT if (vma_is_special_mapping(vma, compat_vdso_info.dm)) zap_vma_pages(vma); #endif } mmap_read_unlock(mm); return 0; } #endif static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *timens_page = find_timens_vvar_page(vma); unsigned long pfn; switch (vmf->pgoff) { case VVAR_DATA_PAGE_OFFSET: if (timens_page) pfn = page_to_pfn(timens_page); else pfn = sym_to_pfn(vdso_data); break; #ifdef CONFIG_TIME_NS case VVAR_TIMENS_PAGE_OFFSET: /* * If a task belongs to a time namespace then a namespace * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET * offset. * See also the comment near timens_setup_vdso_data(). 
*/ if (!timens_page) return VM_FAULT_SIGBUS; pfn = sym_to_pfn(vdso_data); break; #endif /* CONFIG_TIME_NS */ default: return VM_FAULT_SIGBUS; } return vmf_insert_pfn(vma, vmf->address, pfn); } static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = { [RV_VDSO_MAP_VVAR] = { .name = "[vvar]", .fault = vvar_fault, }, [RV_VDSO_MAP_VDSO] = { .name = "[vdso]", .mremap = vdso_mremap, }, }; static struct __vdso_info vdso_info __ro_after_init = { .name = "vdso", .vdso_code_start = vdso_start, .vdso_code_end = vdso_end, .dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR], .cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO], }; #ifdef CONFIG_COMPAT static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = { [RV_VDSO_MAP_VVAR] = { .name = "[vvar]", .fault = vvar_fault, }, [RV_VDSO_MAP_VDSO] = { .name = "[vdso]", .mremap = vdso_mremap, }, }; static struct __vdso_info compat_vdso_info __ro_after_init = { .name = "compat_vdso", .vdso_code_start = compat_vdso_start, .vdso_code_end = compat_vdso_end, .dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR], .cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO], }; #endif static int __init vdso_init(void) { __vdso_init(&vdso_info); #ifdef CONFIG_COMPAT __vdso_init(&compat_vdso_info); #endif return 0; } arch_initcall(vdso_init); static int __setup_additional_pages(struct mm_struct *mm, struct linux_binprm *bprm, int uses_interp, struct __vdso_info *vdso_info) { unsigned long vdso_base, vdso_text_len, vdso_mapping_len; void *ret; BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES); vdso_text_len = vdso_info->vdso_pages << PAGE_SHIFT; /* Be sure to map the data page */ vdso_mapping_len = vdso_text_len + VVAR_SIZE; vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); if (IS_ERR_VALUE(vdso_base)) { ret = ERR_PTR(vdso_base); goto up_fail; } ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE, (VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm); if (IS_ERR(ret)) goto up_fail; vdso_base += VVAR_SIZE; mm->context.vdso = (void *)vdso_base; ret = _install_special_mapping(mm, vdso_base, vdso_text_len, (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC), vdso_info->cm); if (IS_ERR(ret)) goto up_fail; return 0; up_fail: mm->context.vdso = NULL; return PTR_ERR(ret); } #ifdef CONFIG_COMPAT int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; int ret; if (mmap_write_lock_killable(mm)) return -EINTR; ret = __setup_additional_pages(mm, bprm, uses_interp, &compat_vdso_info); mmap_write_unlock(mm); return ret; } #endif int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; int ret; if (mmap_write_lock_killable(mm)) return -EINTR; ret = __setup_additional_pages(mm, bprm, uses_interp, &vdso_info); mmap_write_unlock(mm); return ret; }
linux-master
arch/riscv/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0-only /* * SBI initialilization and all extension implementation. * * Copyright (c) 2020 Western Digital Corporation or its affiliates. */ #include <linux/bits.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/reboot.h> #include <asm/sbi.h> #include <asm/smp.h> /* default SBI version is 0.1 */ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; EXPORT_SYMBOL(sbi_spec_version); static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init; static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init; static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) __ro_after_init; struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { struct sbiret ret; register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); asm volatile ("ecall" : "+r" (a0), "+r" (a1) : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) : "memory"); ret.error = a0; ret.value = a1; return ret; } EXPORT_SYMBOL(sbi_ecall); int sbi_err_map_linux_errno(int err) { switch (err) { case SBI_SUCCESS: return 0; case SBI_ERR_DENIED: return -EPERM; case SBI_ERR_INVALID_PARAM: return -EINVAL; case SBI_ERR_INVALID_ADDRESS: return -EFAULT; case SBI_ERR_NOT_SUPPORTED: case SBI_ERR_FAILURE: default: return -ENOTSUPP; }; } EXPORT_SYMBOL(sbi_err_map_linux_errno); #ifdef CONFIG_RISCV_SBI_V01 static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask) { unsigned long cpuid, hartid; unsigned long hmask = 0; /* * There is no maximum hartid concept in RISC-V and NR_CPUS must not be * associated with hartid. As SBI v0.1 is only kept for backward compatibility * and will be removed in the future, there is no point in supporting hartid * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2 * should be used for platforms with hartid greater than BITS_PER_LONG. */ for_each_cpu(cpuid, cpu_mask) { hartid = cpuid_to_hartid_map(cpuid); if (hartid >= BITS_PER_LONG) { pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n"); break; } hmask |= BIT(hartid); } return hmask; } /** * sbi_console_putchar() - Writes given character to the console device. * @ch: The data to be written to the console. * * Return: None */ void sbi_console_putchar(int ch) { sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0); } EXPORT_SYMBOL(sbi_console_putchar); /** * sbi_console_getchar() - Reads a byte from console device. * * Returns the value read from console. */ int sbi_console_getchar(void) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0); return ret.error; } EXPORT_SYMBOL(sbi_console_getchar); /** * sbi_shutdown() - Remove all the harts from executing supervisor code. * * Return: None */ void sbi_shutdown(void) { sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0); } EXPORT_SYMBOL(sbi_shutdown); /** * __sbi_set_timer_v01() - Program the timer for next timer event. 
* @stime_value: The value after which next timer event should fire. * * Return: None */ static void __sbi_set_timer_v01(uint64_t stime_value) { #if __riscv_xlen == 32 sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, stime_value >> 32, 0, 0, 0, 0); #else sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0); #endif } static void __sbi_send_ipi_v01(unsigned int cpu) { unsigned long hart_mask = __sbi_v01_cpumask_to_hartmask(cpumask_of(cpu)); sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask), 0, 0, 0, 0, 0); } static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) { int result = 0; unsigned long hart_mask; if (!cpu_mask || cpumask_empty(cpu_mask)) cpu_mask = cpu_online_mask; hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask); /* v0.2 function IDs are equivalent to v0.1 extension IDs */ switch (fid) { case SBI_EXT_RFENCE_REMOTE_FENCE_I: sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0, (unsigned long)&hart_mask, 0, 0, 0, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0, (unsigned long)&hart_mask, start, size, 0, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0, (unsigned long)&hart_mask, start, size, arg4, 0, 0); break; default: pr_err("SBI call [%d]not supported in SBI v0.1\n", fid); result = -EINVAL; } return result; } static void sbi_set_power_off(void) { pm_power_off = sbi_shutdown; } #else static void __sbi_set_timer_v01(uint64_t stime_value) { pr_warn("Timer extension is not available in SBI v%lu.%lu\n", sbi_major_version(), sbi_minor_version()); } static void __sbi_send_ipi_v01(unsigned int cpu) { pr_warn("IPI extension is not available in SBI v%lu.%lu\n", sbi_major_version(), sbi_minor_version()); } static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) { pr_warn("remote fence extension is not available in SBI v%lu.%lu\n", sbi_major_version(), sbi_minor_version()); return 0; } static void sbi_set_power_off(void) {} #endif /* CONFIG_RISCV_SBI_V01 */ static void __sbi_set_timer_v02(uint64_t stime_value) { #if __riscv_xlen == 32 sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, stime_value >> 32, 0, 0, 0, 0); #else sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0, 0, 0, 0, 0); #endif } static void __sbi_send_ipi_v02(unsigned int cpu) { int result; struct sbiret ret = {0}; ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI, 1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0); if (ret.error) { result = sbi_err_map_linux_errno(ret.error); pr_err("%s: hbase = [%lu] failed (error [%d])\n", __func__, cpuid_to_hartid_map(cpu), result); } } static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask, unsigned long hbase, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) { struct sbiret ret = {0}; int ext = SBI_EXT_RFENCE; int result = 0; switch (fid) { case SBI_EXT_RFENCE_REMOTE_FENCE_I: ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA: ret = sbi_ecall(ext, fid, hmask, hbase, start, size, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID: ret = sbi_ecall(ext, fid, hmask, hbase, start, size, arg4, 0); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA: ret = sbi_ecall(ext, fid, hmask, hbase, start, size, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID: ret = sbi_ecall(ext, fid, 
hmask, hbase, start, size, arg4, 0); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA: ret = sbi_ecall(ext, fid, hmask, hbase, start, size, 0, 0); break; case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID: ret = sbi_ecall(ext, fid, hmask, hbase, start, size, arg4, 0); break; default: pr_err("unknown function ID [%lu] for SBI extension [%d]\n", fid, ext); result = -EINVAL; } if (ret.error) { result = sbi_err_map_linux_errno(ret.error); pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n", __func__, hbase, hmask, result); } return result; } static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long arg4, unsigned long arg5) { unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0; int result; if (!cpu_mask || cpumask_empty(cpu_mask)) cpu_mask = cpu_online_mask; for_each_cpu(cpuid, cpu_mask) { hartid = cpuid_to_hartid_map(cpuid); if (hmask) { if (hartid + BITS_PER_LONG <= htop || hbase + BITS_PER_LONG <= hartid) { result = __sbi_rfence_v02_call(fid, hmask, hbase, start, size, arg4, arg5); if (result) return result; hmask = 0; } else if (hartid < hbase) { /* shift the mask to fit lower hartid */ hmask <<= hbase - hartid; hbase = hartid; } } if (!hmask) { hbase = hartid; htop = hartid; } else if (hartid > htop) { htop = hartid; } hmask |= BIT(hartid - hbase); } if (hmask) { result = __sbi_rfence_v02_call(fid, hmask, hbase, start, size, arg4, arg5); if (result) return result; } return 0; } /** * sbi_set_timer() - Program the timer for next timer event. * @stime_value: The value after which next timer event should fire. * * Return: None. */ void sbi_set_timer(uint64_t stime_value) { __sbi_set_timer(stime_value); } /** * sbi_send_ipi() - Send an IPI to any hart. * @cpu: Logical id of the target CPU. */ void sbi_send_ipi(unsigned int cpu) { __sbi_send_ipi(cpu); } EXPORT_SYMBOL(sbi_send_ipi); /** * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts. * @cpu_mask: A cpu mask containing all the target harts. * * Return: 0 on success, appropriate linux error code otherwise. */ int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I, cpu_mask, 0, 0, 0, 0); } EXPORT_SYMBOL(sbi_remote_fence_i); /** * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote * harts for the specified virtual address range. * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the virtual address * @size: Total size of the virtual address range. * * Return: 0 on success, appropriate linux error code otherwise. */ int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, unsigned long start, unsigned long size) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, cpu_mask, start, size, 0, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma); /** * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given * remote harts for a virtual address range belonging to a specific ASID. * * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the virtual address * @size: Total size of the virtual address range. * @asid: The value of address space identifier (ASID). * * Return: 0 on success, appropriate linux error code otherwise. 
*/ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long asid) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, cpu_mask, start, size, asid, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma_asid); /** * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote * harts for the specified guest physical address range. * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the guest physical address * @size: Total size of the guest physical address range. * * Return: None */ int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask, unsigned long start, unsigned long size) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, cpu_mask, start, size, 0, 0); } EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma); /** * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given * remote harts for a guest physical address range belonging to a specific VMID. * * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the guest physical address * @size: Total size of the guest physical address range. * @vmid: The value of guest ID (VMID). * * Return: 0 if success, Error otherwise. */ int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long vmid) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID, cpu_mask, start, size, vmid, 0); } EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid); /** * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote * harts for the current guest virtual address range. * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the current guest virtual address * @size: Total size of the current guest virtual address range. * * Return: None */ int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask, unsigned long start, unsigned long size) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, cpu_mask, start, size, 0, 0); } EXPORT_SYMBOL(sbi_remote_hfence_vvma); /** * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given * remote harts for current guest virtual address range belonging to a specific * ASID. * * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the current guest virtual address * @size: Total size of the current guest virtual address range. * @asid: The value of address space identifier (ASID). * * Return: None */ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long asid) { return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID, cpu_mask, start, size, asid, 0); } EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid); static void sbi_srst_reset(unsigned long type, unsigned long reason) { sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason, 0, 0, 0, 0); pr_warn("%s: type=0x%lx reason=0x%lx failed\n", __func__, type, reason); } static int sbi_srst_reboot(struct notifier_block *this, unsigned long mode, void *cmd) { sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ? SBI_SRST_RESET_TYPE_WARM_REBOOT : SBI_SRST_RESET_TYPE_COLD_REBOOT, SBI_SRST_RESET_REASON_NONE); return NOTIFY_DONE; } static struct notifier_block sbi_srst_reboot_nb; static void sbi_srst_power_off(void) { sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN, SBI_SRST_RESET_REASON_NONE); } /** * sbi_probe_extension() - Check if an SBI extension ID is supported or not. * @extid: The extension ID to be probed. 
* * Return: 1 or an extension specific nonzero value if yes, 0 otherwise. */ long sbi_probe_extension(int extid) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid, 0, 0, 0, 0, 0); if (!ret.error) return ret.value; return 0; } EXPORT_SYMBOL(sbi_probe_extension); static long __sbi_base_ecall(int fid) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0); if (!ret.error) return ret.value; else return sbi_err_map_linux_errno(ret.error); } static inline long sbi_get_spec_version(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION); } static inline long sbi_get_firmware_id(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID); } static inline long sbi_get_firmware_version(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); } long sbi_get_mvendorid(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID); } EXPORT_SYMBOL_GPL(sbi_get_mvendorid); long sbi_get_marchid(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID); } EXPORT_SYMBOL_GPL(sbi_get_marchid); long sbi_get_mimpid(void) { return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID); } EXPORT_SYMBOL_GPL(sbi_get_mimpid); void __init sbi_init(void) { int ret; sbi_set_power_off(); ret = sbi_get_spec_version(); if (ret > 0) sbi_spec_version = ret; pr_info("SBI specification v%lu.%lu detected\n", sbi_major_version(), sbi_minor_version()); if (!sbi_spec_is_0_1()) { pr_info("SBI implementation ID=0x%lx Version=0x%lx\n", sbi_get_firmware_id(), sbi_get_firmware_version()); if (sbi_probe_extension(SBI_EXT_TIME)) { __sbi_set_timer = __sbi_set_timer_v02; pr_info("SBI TIME extension detected\n"); } else { __sbi_set_timer = __sbi_set_timer_v01; } if (sbi_probe_extension(SBI_EXT_IPI)) { __sbi_send_ipi = __sbi_send_ipi_v02; pr_info("SBI IPI extension detected\n"); } else { __sbi_send_ipi = __sbi_send_ipi_v01; } if (sbi_probe_extension(SBI_EXT_RFENCE)) { __sbi_rfence = __sbi_rfence_v02; pr_info("SBI RFENCE extension detected\n"); } else { __sbi_rfence = __sbi_rfence_v01; } if ((sbi_spec_version >= sbi_mk_version(0, 3)) && sbi_probe_extension(SBI_EXT_SRST)) { pr_info("SBI SRST extension detected\n"); pm_power_off = sbi_srst_power_off; sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot; sbi_srst_reboot_nb.priority = 192; register_restart_handler(&sbi_srst_reboot_nb); } } else { __sbi_set_timer = __sbi_set_timer_v01; __sbi_send_ipi = __sbi_send_ipi_v01; __sbi_rfence = __sbi_rfence_v01; } }
linux-master
arch/riscv/kernel/sbi.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Zihao Yu */ #include <linux/export.h> #include <linux/uaccess.h> /* * Assembly functions that may be used (directly or indirectly) by modules */ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(__memset); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(__memmove);
linux-master
arch/riscv/kernel/riscv_ksyms.c
// SPDX-License-Identifier: GPL-2.0 /* * This code comes from arch/arm64/kernel/crash_dump.c * Created by: AKASHI Takahiro <[email protected]> * Copyright (C) 2017 Linaro Limited */ #include <linux/crash_dump.h> #include <linux/io.h> #include <linux/uio.h> ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, unsigned long offset) { void *vaddr; if (!csize) return 0; vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB); if (!vaddr) return -ENOMEM; csize = copy_to_iter(vaddr + offset, csize, iter); memunmap(vaddr); return csize; }
linux-master
arch/riscv/kernel/crash_dump.c
// SPDX-License-Identifier: GPL-2.0-only /* * alternative runtime patching * inspired by the ARM64 and x86 version * * Copyright (C) 2021 Sifive. */ #include <linux/init.h> #include <linux/module.h> #include <linux/cpu.h> #include <linux/uaccess.h> #include <asm/alternative.h> #include <asm/module.h> #include <asm/sections.h> #include <asm/vdso.h> #include <asm/vendorid_list.h> #include <asm/sbi.h> #include <asm/csr.h> #include <asm/insn.h> #include <asm/patch.h> struct cpu_manufacturer_info_t { unsigned long vendor_id; unsigned long arch_id; unsigned long imp_id; void (*patch_func)(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage); }; static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info) { #ifdef CONFIG_RISCV_M_MODE cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID); cpu_mfr_info->arch_id = csr_read(CSR_MARCHID); cpu_mfr_info->imp_id = csr_read(CSR_MIMPID); #else cpu_mfr_info->vendor_id = sbi_get_mvendorid(); cpu_mfr_info->arch_id = sbi_get_marchid(); cpu_mfr_info->imp_id = sbi_get_mimpid(); #endif switch (cpu_mfr_info->vendor_id) { #ifdef CONFIG_ERRATA_ANDES case ANDESTECH_VENDOR_ID: cpu_mfr_info->patch_func = andes_errata_patch_func; break; #endif #ifdef CONFIG_ERRATA_SIFIVE case SIFIVE_VENDOR_ID: cpu_mfr_info->patch_func = sifive_errata_patch_func; break; #endif #ifdef CONFIG_ERRATA_THEAD case THEAD_VENDOR_ID: cpu_mfr_info->patch_func = thead_errata_patch_func; break; #endif default: cpu_mfr_info->patch_func = NULL; } } static u32 riscv_instruction_at(void *p) { u16 *parcel = p; return (u32)parcel[0] | (u32)parcel[1] << 16; } static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn, u32 jalr_insn, int patch_offset) { u32 call[2] = { auipc_insn, jalr_insn }; s32 imm; /* get and adjust new target address */ imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn); imm -= patch_offset; /* update instructions */ riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm); /* patch the call place again */ patch_text_nosync(ptr, call, sizeof(u32) * 2); } static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset) { s32 imm; /* get and adjust new target address */ imm = riscv_insn_extract_jtype_imm(jal_insn); imm -= patch_offset; /* update instruction */ riscv_insn_insert_jtype_imm(&jal_insn, imm); /* patch the call place again */ patch_text_nosync(ptr, &jal_insn, sizeof(u32)); } void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len, int patch_offset) { int num_insn = len / sizeof(u32); int i; for (i = 0; i < num_insn; i++) { u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32)); /* * May be the start of an auipc + jalr pair * Needs to check that at least one more instruction * is in the list. 
*/ if (riscv_insn_is_auipc(insn) && i < num_insn - 1) { u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32)); if (!riscv_insn_is_jalr(insn2)) continue; /* if instruction pair is a call, it will use the ra register */ if (RV_EXTRACT_RD_REG(insn) != 1) continue; riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32), insn, insn2, patch_offset); i++; } if (riscv_insn_is_jal(insn)) { s32 imm = riscv_insn_extract_jtype_imm(insn); /* Don't modify jumps inside the alternative block */ if ((alt_ptr + i * sizeof(u32) + imm) >= alt_ptr && (alt_ptr + i * sizeof(u32) + imm) < (alt_ptr + len)) continue; riscv_alternative_fix_jal(alt_ptr + i * sizeof(u32), insn, patch_offset); } } } /* * This is called very early in the boot process (directly after we run * a feature detect on the boot CPU). No need to worry about other CPUs * here. */ static void __init_or_module _apply_alternatives(struct alt_entry *begin, struct alt_entry *end, unsigned int stage) { struct cpu_manufacturer_info_t cpu_mfr_info; riscv_fill_cpu_mfr_info(&cpu_mfr_info); riscv_cpufeature_patch_func(begin, end, stage); if (!cpu_mfr_info.patch_func) return; cpu_mfr_info.patch_func(begin, end, cpu_mfr_info.arch_id, cpu_mfr_info.imp_id, stage); } #ifdef CONFIG_MMU static void __init apply_vdso_alternatives(void) { const Elf_Ehdr *hdr; const Elf_Shdr *shdr; const Elf_Shdr *alt; struct alt_entry *begin, *end; hdr = (Elf_Ehdr *)vdso_start; shdr = (void *)hdr + hdr->e_shoff; alt = find_section(hdr, shdr, ".alternative"); if (!alt) return; begin = (void *)hdr + alt->sh_offset, end = (void *)hdr + alt->sh_offset + alt->sh_size, _apply_alternatives((struct alt_entry *)begin, (struct alt_entry *)end, RISCV_ALTERNATIVES_BOOT); } #else static void __init apply_vdso_alternatives(void) { } #endif void __init apply_boot_alternatives(void) { /* If called on non-boot cpu things could go wrong */ WARN_ON(smp_processor_id() != 0); _apply_alternatives((struct alt_entry *)__alt_start, (struct alt_entry *)__alt_end, RISCV_ALTERNATIVES_BOOT); apply_vdso_alternatives(); } /* * apply_early_boot_alternatives() is called from setup_vm() with MMU-off. * * Following requirements should be honoured for it to work correctly: * 1) It should use PC-relative addressing for accessing kernel symbols. * To achieve this we always use GCC cmodel=medany. * 2) The compiler instrumentation for FTRACE will not work for setup_vm() * so disable compiler instrumentation when FTRACE is enabled. * * Currently, the above requirements are honoured by using custom CFLAGS * for alternative.o in kernel/Makefile. */ void __init apply_early_boot_alternatives(void) { #ifdef CONFIG_RISCV_ALTERNATIVE_EARLY _apply_alternatives((struct alt_entry *)__alt_start, (struct alt_entry *)__alt_end, RISCV_ALTERNATIVES_EARLY_BOOT); #endif } #ifdef CONFIG_MODULES void apply_module_alternatives(void *start, size_t length) { _apply_alternatives((struct alt_entry *)start, (struct alt_entry *)(start + length), RISCV_ALTERNATIVES_MODULE); } #endif
linux-master
arch/riscv/kernel/alternative.c
// SPDX-License-Identifier: GPL-2.0-only /* * Load ELF vmlinux file for the kexec_file_load syscall. * * Copyright (C) 2021 Huawei Technologies Co, Ltd. * * Author: Liao Chang ([email protected]) * * Based on kexec-tools' kexec-elf-riscv.c, heavily modified * for kernel. */ #define pr_fmt(fmt) "kexec_image: " fmt #include <linux/elf.h> #include <linux/kexec.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/libfdt.h> #include <linux/types.h> #include <linux/memblock.h> #include <asm/setup.h> int arch_kimage_file_post_load_cleanup(struct kimage *image) { kvfree(image->arch.fdt); image->arch.fdt = NULL; vfree(image->elf_headers); image->elf_headers = NULL; image->elf_headers_sz = 0; return kexec_image_post_load_cleanup_default(image); } static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_elf_info *elf_info, unsigned long old_pbase, unsigned long new_pbase) { int i; int ret = 0; size_t size; struct kexec_buf kbuf; const struct elf_phdr *phdr; kbuf.image = image; for (i = 0; i < ehdr->e_phnum; i++) { phdr = &elf_info->proghdrs[i]; if (phdr->p_type != PT_LOAD) continue; size = phdr->p_filesz; if (size > phdr->p_memsz) size = phdr->p_memsz; kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset; kbuf.bufsz = size; kbuf.buf_align = phdr->p_align; kbuf.mem = phdr->p_paddr - old_pbase + new_pbase; kbuf.memsz = phdr->p_memsz; kbuf.top_down = false; ret = kexec_add_buffer(&kbuf); if (ret) break; } return ret; } /* * Go through the available phsyical memory regions and find one that hold * an image of the specified size. */ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len, struct elfhdr *ehdr, struct kexec_elf_info *elf_info, unsigned long *old_pbase, unsigned long *new_pbase) { int i; int ret; struct kexec_buf kbuf; const struct elf_phdr *phdr; unsigned long lowest_paddr = ULONG_MAX; unsigned long lowest_vaddr = ULONG_MAX; for (i = 0; i < ehdr->e_phnum; i++) { phdr = &elf_info->proghdrs[i]; if (phdr->p_type != PT_LOAD) continue; if (lowest_paddr > phdr->p_paddr) lowest_paddr = phdr->p_paddr; if (lowest_vaddr > phdr->p_vaddr) lowest_vaddr = phdr->p_vaddr; } kbuf.image = image; kbuf.buf_min = lowest_paddr; kbuf.buf_max = ULONG_MAX; /* * Current riscv boot protocol requires 2MB alignment for * RV64 and 4MB alignment for RV32 * */ kbuf.buf_align = PMD_SIZE; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE); kbuf.top_down = false; ret = arch_kexec_locate_mem_hole(&kbuf); if (!ret) { *old_pbase = lowest_paddr; *new_pbase = kbuf.mem; image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem; } return ret; } static int get_nr_ram_ranges_callback(struct resource *res, void *arg) { unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; } static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg) { struct crash_mem *cmem = arg; cmem->ranges[cmem->nr_ranges].start = res->start; cmem->ranges[cmem->nr_ranges].end = res->end; cmem->nr_ranges++; return 0; } static int prepare_elf_headers(void **addr, unsigned long *sz) { struct crash_mem *cmem; unsigned int nr_ranges; int ret; nr_ranges = 1; /* For exclusion of crashkernel region */ walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); if (!cmem) return -ENOMEM; cmem->max_nr_ranges = nr_ranges; cmem->nr_ranges = 0; ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback); if (ret) goto out; /* Exclude crashkernel region */ ret = 
crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); if (!ret) ret = crash_prepare_elf64_headers(cmem, true, addr, sz); out: kfree(cmem); return ret; } static char *setup_kdump_cmdline(struct kimage *image, char *cmdline, unsigned long cmdline_len) { int elfcorehdr_strlen; char *cmdline_ptr; cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); if (!cmdline_ptr) return NULL; elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", image->elf_load_addr); if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) { pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n"); kfree(cmdline_ptr); return NULL; } memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len); /* Ensure it's nul terminated */ cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0'; return cmdline_ptr; } static void *elf_kexec_load(struct kimage *image, char *kernel_buf, unsigned long kernel_len, char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len) { int ret; unsigned long old_kernel_pbase = ULONG_MAX; unsigned long new_kernel_pbase = 0UL; unsigned long initrd_pbase = 0UL; unsigned long headers_sz; unsigned long kernel_start; void *fdt, *headers; struct elfhdr ehdr; struct kexec_buf kbuf; struct kexec_elf_info elf_info; char *modified_cmdline = NULL; ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); if (ret) return ERR_PTR(ret); ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info, &old_kernel_pbase, &new_kernel_pbase); if (ret) goto out; kernel_start = image->start; pr_notice("The entry point of kernel at 0x%lx\n", image->start); /* Add the kernel binary to the image */ ret = riscv_kexec_elf_load(image, &ehdr, &elf_info, old_kernel_pbase, new_kernel_pbase); if (ret) goto out; kbuf.image = image; kbuf.buf_min = new_kernel_pbase + kernel_len; kbuf.buf_max = ULONG_MAX; /* Add elfcorehdr */ if (image->type == KEXEC_TYPE_CRASH) { ret = prepare_elf_headers(&headers, &headers_sz); if (ret) { pr_err("Preparing elf core header failed\n"); goto out; } kbuf.buffer = headers; kbuf.bufsz = headers_sz; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = headers_sz; kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.top_down = true; ret = kexec_add_buffer(&kbuf); if (ret) { vfree(headers); goto out; } image->elf_headers = headers; image->elf_load_addr = kbuf.mem; image->elf_headers_sz = headers_sz; pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", image->elf_load_addr, kbuf.bufsz, kbuf.memsz); /* Setup cmdline for kdump kernel case */ modified_cmdline = setup_kdump_cmdline(image, cmdline, cmdline_len); if (!modified_cmdline) { pr_err("Setting up cmdline for kdump kernel failed\n"); ret = -EINVAL; goto out; } cmdline = modified_cmdline; } #ifdef CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY /* Add purgatory to the image */ kbuf.top_down = true; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_load_purgatory(image, &kbuf); if (ret) { pr_err("Error loading purgatory ret=%d\n", ret); goto out; } ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry", &kernel_start, sizeof(kernel_start), 0); if (ret) pr_err("Error update purgatory ret=%d\n", ret); #endif /* CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY */ /* Add the initrd to the image */ if (initrd != NULL) { kbuf.buffer = initrd; kbuf.bufsz = kbuf.memsz = initrd_len; kbuf.buf_align = PAGE_SIZE; kbuf.top_down = true; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) goto out; initrd_pbase = kbuf.mem; pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase); } /* Add the DTB to the image */ fdt = 
of_kexec_alloc_and_setup_fdt(image, initrd_pbase, initrd_len, cmdline, 0); if (!fdt) { pr_err("Error setting up the new device tree.\n"); ret = -EINVAL; goto out; } fdt_pack(fdt); kbuf.buffer = fdt; kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt); kbuf.buf_align = PAGE_SIZE; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.top_down = true; ret = kexec_add_buffer(&kbuf); if (ret) { pr_err("Error add DTB kbuf ret=%d\n", ret); goto out_free_fdt; } /* Cache the fdt buffer address for memory cleanup */ image->arch.fdt = fdt; pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); goto out; out_free_fdt: kvfree(fdt); out: kfree(modified_cmdline); kexec_free_elf_info(&elf_info); return ret ? ERR_PTR(ret) : NULL; } #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) #define RISCV_IMM_BITS 12 #define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS) #define RISCV_CONST_HIGH_PART(x) \ (((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1)) #define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x)) #define ENCODE_ITYPE_IMM(x) \ (RV_X(x, 0, 12) << 20) #define ENCODE_BTYPE_IMM(x) \ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \ (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31)) #define ENCODE_UTYPE_IMM(x) \ (RV_X(x, 12, 20) << 12) #define ENCODE_JTYPE_IMM(x) \ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \ (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31)) #define ENCODE_CBTYPE_IMM(x) \ ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \ (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12)) #define ENCODE_CJTYPE_IMM(x) \ ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \ (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \ (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12)) #define ENCODE_UJTYPE_IMM(x) \ (ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \ (ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32)) #define ENCODE_UITYPE_IMM(x) \ (ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32)) #define CLEAN_IMM(type, x) \ ((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x)) int arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, const Elf_Shdr *symtab) { const char *strtab, *name, *shstrtab; const Elf_Shdr *sechdrs; Elf64_Rela *relas; int i, r_type; /* String & section header string table */ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset; shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset; relas = (void *)pi->ehdr + relsec->sh_offset; for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { const Elf_Sym *sym; /* symbol to relocate */ unsigned long addr; /* final location after relocation */ unsigned long val; /* relocated symbol value */ unsigned long sec_base; /* relocated symbol value */ void *loc; /* tmp location to modify */ sym = (void *)pi->ehdr + symtab->sh_offset; sym += ELF64_R_SYM(relas[i].r_info); if (sym->st_name) name = strtab + sym->st_name; else name = shstrtab + sechdrs[sym->st_shndx].sh_name; loc = pi->purgatory_buf; loc += section->sh_offset; loc += relas[i].r_offset; if (sym->st_shndx == SHN_ABS) sec_base = 0; else if (sym->st_shndx >= pi->ehdr->e_shnum) { pr_err("Invalid section %d for symbol %s\n", sym->st_shndx, name); return -ENOEXEC; } else sec_base = pi->sechdrs[sym->st_shndx].sh_addr; val = sym->st_value; val += sec_base; val += relas[i].r_addend; addr = section->sh_addr + relas[i].r_offset; r_type = ELF64_R_TYPE(relas[i].r_info); switch (r_type) { case R_RISCV_BRANCH: *(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) | 
ENCODE_BTYPE_IMM(val - addr); break; case R_RISCV_JAL: *(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) | ENCODE_JTYPE_IMM(val - addr); break; /* * With no R_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_I * sym is expected to be next to R_RISCV_PCREL_HI20 * in purgatory relsec. Handle it like R_RISCV_CALL * sym, instead of searching the whole relsec. */ case R_RISCV_PCREL_HI20: case R_RISCV_CALL_PLT: case R_RISCV_CALL: *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) | ENCODE_UJTYPE_IMM(val - addr); break; case R_RISCV_RVC_BRANCH: *(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) | ENCODE_CBTYPE_IMM(val - addr); break; case R_RISCV_RVC_JUMP: *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) | ENCODE_CJTYPE_IMM(val - addr); break; case R_RISCV_ADD32: *(u32 *)loc += val; break; case R_RISCV_SUB32: *(u32 *)loc -= val; break; /* It has been applied by R_RISCV_PCREL_HI20 sym */ case R_RISCV_PCREL_LO12_I: case R_RISCV_ALIGN: case R_RISCV_RELAX: break; default: pr_err("Unknown rela relocation: %d\n", r_type); return -ENOEXEC; } } return 0; } const struct kexec_file_ops elf_kexec_ops = { .probe = kexec_elf_probe, .load = elf_kexec_load, };
linux-master
arch/riscv/kernel/elf_kexec.c
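Illustrative note on the relocation handling in elf_kexec.c above: the RV_X()/ENCODE_*_IMM() macros scatter a PC-relative offset into the split immediate fields of RISC-V instruction words. The standalone sketch below re-uses the B-type and J-type macro definitions verbatim and applies them to a hypothetical 0x100 offset; the bare beq/jal base opcodes and the offset are illustrative values, not taken from the file.

/*
 * Standalone sketch (not kernel code): how ENCODE_BTYPE_IMM()/ENCODE_JTYPE_IMM()
 * place an offset into a RISC-V branch or jump instruction word.
 */
#include <stdio.h>
#include <stdint.h>

#define RV_X(x, s, n)  (((x) >> (s)) & ((1 << (n)) - 1))

/* imm[4:1]->insn[11:8], imm[10:5]->insn[30:25], imm[11]->insn[7], imm[12]->insn[31] */
#define ENCODE_BTYPE_IMM(x) \
	((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \
	 (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))

/* imm[10:1]->insn[30:21], imm[11]->insn[20], imm[19:12]->insn[19:12], imm[20]->insn[31] */
#define ENCODE_JTYPE_IMM(x) \
	((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \
	 (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))

int main(void)
{
	uint32_t offset = 0x100;           /* illustrative branch distance */
	uint32_t beq_x0_x0 = 0x00000063;   /* beq x0,x0 with empty immediate */
	uint32_t jal_x0    = 0x0000006f;   /* jal x0 (j) with empty immediate */

	printf("beq +0x%x -> 0x%08x\n", offset,
	       beq_x0_x0 | (uint32_t)ENCODE_BTYPE_IMM(offset));
	printf("jal +0x%x -> 0x%08x\n", offset,
	       jal_x0 | (uint32_t)ENCODE_JTYPE_IMM(offset));
	return 0;
}

Compiling and running this prints the fully encoded instruction words, which is a convenient way to sanity-check the bit placement against the RISC-V instruction formats.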
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California */ #include <linux/reboot.h> #include <linux/pm.h> static void default_power_off(void) { while (1) wait_for_interrupt(); } void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); void machine_restart(char *cmd) { do_kernel_restart(cmd); while (1); } void machine_halt(void) { do_kernel_power_off(); default_power_off(); } void machine_power_off(void) { do_kernel_power_off(); default_power_off(); }
linux-master
arch/riscv/kernel/reset.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 Western Digital Corporation or its affiliates. * Adapted from arch/arm64/kernel/efi.c */ #include <linux/efi.h> #include <linux/init.h> #include <asm/efi.h> #include <asm/pgtable.h> #include <asm/pgtable-bits.h> /* * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be * executable, everything else can be mapped with the XN bits * set. Also take the new (optional) RO/XP bits into account. */ static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md) { u64 attr = md->attribute; u32 type = md->type; if (type == EFI_MEMORY_MAPPED_IO) return PAGE_KERNEL; /* R-- */ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) == (EFI_MEMORY_XP | EFI_MEMORY_RO)) return PAGE_KERNEL_READ; /* R-X */ if (attr & EFI_MEMORY_RO) return PAGE_KERNEL_READ_EXEC; /* RW- */ if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) == EFI_MEMORY_XP) || type != EFI_RUNTIME_SERVICES_CODE) return PAGE_KERNEL; /* RWX */ return PAGE_KERNEL_EXEC; } int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) { pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) & ~(_PAGE_GLOBAL)); int i; /* RISC-V maps one page at a time */ for (i = 0; i < md->num_pages; i++) create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE, md->phys_addr + i * PAGE_SIZE, PAGE_SIZE, prot); return 0; } static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { efi_memory_desc_t *md = data; pte_t pte = READ_ONCE(*ptep); unsigned long val; if (md->attribute & EFI_MEMORY_RO) { val = pte_val(pte) & ~_PAGE_WRITE; val |= _PAGE_READ; pte = __pte(val); } if (md->attribute & EFI_MEMORY_XP) { val = pte_val(pte) & ~_PAGE_EXEC; pte = __pte(val); } set_pte(ptep, pte); return 0; } int __init efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool ignored) { BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE && md->type != EFI_RUNTIME_SERVICES_DATA); /* * Calling apply_to_page_range() is only safe on regions that are * guaranteed to be mapped down to pages. Since we are only called * for regions that have been mapped using efi_create_mapping() above * (and this is checked by the generic Memory Attributes table parsing * routines), there is no need to check that again here. */ return apply_to_page_range(mm, md->virt_addr, md->num_pages << EFI_PAGE_SHIFT, set_permissions, md); }
linux-master
arch/riscv/kernel/efi.c
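Illustrative note on efimem_to_pgprot_map() in efi.c above: the attribute checks boil down to a small permission decision table. The sketch below reduces that decision to strings so it can run in userspace; the EFI_MEMORY_* bit values are re-declared here as commonly defined in the UEFI spec and the is_runtime_code flag stands in for the md->type comparison, so treat this as an assumption-laden illustration rather than the kernel's mapping code.

#include <stdio.h>
#include <stdint.h>

#define EFI_MEMORY_WP 0x1000ULL   /* write-protected   */
#define EFI_MEMORY_RP 0x2000ULL   /* read-protected    */
#define EFI_MEMORY_XP 0x4000ULL   /* execute-protected */
#define EFI_MEMORY_RO 0x20000ULL  /* read-only         */

/* Mirrors the branch order of efimem_to_pgprot_map(), minus the MMIO case. */
static const char *efi_perm(uint64_t attr, int is_runtime_code)
{
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return "R--";
	if (attr & EFI_MEMORY_RO)
		return "R-X";
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) || !is_runtime_code)
		return "RW-";
	return "RWX";
}

int main(void)
{
	printf("runtime code, no attrs : %s\n", efi_perm(0, 1));
	printf("runtime data, XP       : %s\n", efi_perm(EFI_MEMORY_XP, 0));
	printf("runtime code, RO       : %s\n", efi_perm(EFI_MEMORY_RO, 1));
	printf("runtime code, RO|XP    : %s\n", efi_perm(EFI_MEMORY_RO | EFI_MEMORY_XP, 1));
	return 0;
}

Running it prints the resulting permission string for four sample attribute combinations.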
// SPDX-License-Identifier: GPL-2.0-only #include <linux/crash_core.h> #include <linux/pagemap.h> void arch_crash_save_vmcoreinfo(void) { VMCOREINFO_NUMBER(VA_BITS); VMCOREINFO_NUMBER(phys_ram_base); vmcoreinfo_append_str("NUMBER(PAGE_OFFSET)=0x%lx\n", PAGE_OFFSET); vmcoreinfo_append_str("NUMBER(VMALLOC_START)=0x%lx\n", VMALLOC_START); vmcoreinfo_append_str("NUMBER(VMALLOC_END)=0x%lx\n", VMALLOC_END); vmcoreinfo_append_str("NUMBER(VMEMMAP_START)=0x%lx\n", VMEMMAP_START); vmcoreinfo_append_str("NUMBER(VMEMMAP_END)=0x%lx\n", VMEMMAP_END); #ifdef CONFIG_64BIT vmcoreinfo_append_str("NUMBER(MODULES_VADDR)=0x%lx\n", MODULES_VADDR); vmcoreinfo_append_str("NUMBER(MODULES_END)=0x%lx\n", MODULES_END); #endif vmcoreinfo_append_str("NUMBER(KERNEL_LINK_ADDR)=0x%lx\n", KERNEL_LINK_ADDR); vmcoreinfo_append_str("NUMBER(va_kernel_pa_offset)=0x%lx\n", kernel_map.va_kernel_pa_offset); }
linux-master
arch/riscv/kernel/crash_core.c
// SPDX-License-Identifier: GPL-2.0-only /* * HSM extension and cpu_ops implementation. * * Copyright (c) 2020 Western Digital Corporation or its affiliates. */ #include <linux/init.h> #include <linux/mm.h> #include <linux/sched/task_stack.h> #include <asm/cpu_ops.h> #include <asm/cpu_ops_sbi.h> #include <asm/sbi.h> #include <asm/smp.h> extern char secondary_start_sbi[]; const struct cpu_operations cpu_ops_sbi; /* * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can * be invoked from multiple threads in parallel. Define a per cpu data * to handle that. */ static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data); static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr, unsigned long priv) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START, hartid, saddr, priv, 0, 0, 0); if (ret.error) return sbi_err_map_linux_errno(ret.error); else return 0; } #ifdef CONFIG_HOTPLUG_CPU static int sbi_hsm_hart_stop(void) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0); if (ret.error) return sbi_err_map_linux_errno(ret.error); else return 0; } static int sbi_hsm_hart_get_status(unsigned long hartid) { struct sbiret ret; ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS, hartid, 0, 0, 0, 0, 0); if (ret.error) return sbi_err_map_linux_errno(ret.error); else return ret.value; } #endif static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) { unsigned long boot_addr = __pa_symbol(secondary_start_sbi); unsigned long hartid = cpuid_to_hartid_map(cpuid); unsigned long hsm_data; struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid); /* Make sure tidle is updated */ smp_mb(); bdata->task_ptr = tidle; bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE; /* Make sure boot data is updated */ smp_mb(); hsm_data = __pa(bdata); return sbi_hsm_hart_start(hartid, boot_addr, hsm_data); } static int sbi_cpu_prepare(unsigned int cpuid) { if (!cpu_ops_sbi.cpu_start) { pr_err("cpu start method not defined for CPU [%d]\n", cpuid); return -ENODEV; } return 0; } #ifdef CONFIG_HOTPLUG_CPU static int sbi_cpu_disable(unsigned int cpuid) { if (!cpu_ops_sbi.cpu_stop) return -EOPNOTSUPP; return 0; } static void sbi_cpu_stop(void) { int ret; ret = sbi_hsm_hart_stop(); pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), ret); } static int sbi_cpu_is_stopped(unsigned int cpuid) { int rc; unsigned long hartid = cpuid_to_hartid_map(cpuid); rc = sbi_hsm_hart_get_status(hartid); if (rc == SBI_HSM_STATE_STOPPED) return 0; return rc; } #endif const struct cpu_operations cpu_ops_sbi = { .name = "sbi", .cpu_prepare = sbi_cpu_prepare, .cpu_start = sbi_cpu_start, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = sbi_cpu_disable, .cpu_stop = sbi_cpu_stop, .cpu_is_stopped = sbi_cpu_is_stopped, #endif };
linux-master
arch/riscv/kernel/cpu_ops_sbi.c
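Illustrative note on sbi_cpu_start() in cpu_ops_sbi.c above: the smp_mb() calls implement a publish pattern, i.e. the boot data must be fully written and visible before its address is handed to the firmware for the secondary hart. The userspace sketch below shows the same idiom with C11 release/acquire atomics and a pthread standing in for the secondary hart; it is a generic illustration of the ordering, not SBI- or RISC-V-specific code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct boot_data {
	void *task_ptr;
	void *stack_ptr;
};

static struct boot_data bdata;
static _Atomic(struct boot_data *) published;

static void *secondary(void *arg)
{
	struct boot_data *bd;

	/* Spin until the "boot hart" publishes the pointer. */
	while (!(bd = atomic_load_explicit(&published, memory_order_acquire)))
		;
	printf("secondary sees task=%p stack=%p\n", bd->task_ptr, bd->stack_ptr);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int dummy_task, dummy_stack;

	pthread_create(&t, NULL, secondary, NULL);

	bdata.task_ptr = &dummy_task;
	bdata.stack_ptr = &dummy_stack;
	/* Publish only after the data is written, like the second smp_mb(). */
	atomic_store_explicit(&published, &bdata, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}

The release store plays the role of the barrier: everything written to bdata before it is guaranteed to be visible to the consumer that acquires the pointer.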
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <[email protected]> * Lennox Wu <[email protected]> * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2020 FORTH-ICS/CARV * Nick Kossifidis <[email protected]> */ #include <linux/acpi.h> #include <linux/cpu.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/screen_info.h> #include <linux/of_fdt.h> #include <linux/sched/task.h> #include <linux/smp.h> #include <linux/efi.h> #include <linux/crash_dump.h> #include <linux/panic_notifier.h> #include <asm/acpi.h> #include <asm/alternative.h> #include <asm/cacheflush.h> #include <asm/cpu_ops.h> #include <asm/early_ioremap.h> #include <asm/pgtable.h> #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/sections.h> #include <asm/sbi.h> #include <asm/tlbflush.h> #include <asm/thread_info.h> #include <asm/kasan.h> #include <asm/efi.h> #include "head.h" #if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI) struct screen_info screen_info __section(".data") = { .orig_video_lines = 30, .orig_video_cols = 80, .orig_video_mode = 0, .orig_video_ega_bx = 0, .orig_video_isVGA = 1, .orig_video_points = 8 }; #endif /* * The lucky hart to first increment this variable will boot the other cores. * This is used before the kernel initializes the BSS so it can't be in the * BSS. */ atomic_t hart_lottery __section(".sdata") #ifdef CONFIG_XIP_KERNEL = ATOMIC_INIT(0xC001BEEF) #endif ; unsigned long boot_cpu_hartid; static DEFINE_PER_CPU(struct cpu, cpu_devices); /* * Place kernel memory regions on the resource tree so that * kexec-tools can retrieve them from /proc/iomem. While there * also add "System RAM" regions for compatibility with other * archs, and the rest of the known regions for completeness. */ static struct resource kimage_res = { .name = "Kernel image", }; static struct resource code_res = { .name = "Kernel code", }; static struct resource data_res = { .name = "Kernel data", }; static struct resource rodata_res = { .name = "Kernel rodata", }; static struct resource bss_res = { .name = "Kernel bss", }; #ifdef CONFIG_CRASH_DUMP static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; #endif static int __init add_resource(struct resource *parent, struct resource *res) { int ret = 0; ret = insert_resource(parent, res); if (ret < 0) { pr_err("Failed to add a %s resource at %llx\n", res->name, (unsigned long long) res->start); return ret; } return 1; } static int __init add_kernel_resources(void) { int ret = 0; /* * The memory region of the kernel image is continuous and * was reserved on setup_bootmem, register it here as a * resource, with the various segments of the image as * child nodes. 
*/ code_res.start = __pa_symbol(_text); code_res.end = __pa_symbol(_etext) - 1; code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; rodata_res.start = __pa_symbol(__start_rodata); rodata_res.end = __pa_symbol(__end_rodata) - 1; rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; data_res.start = __pa_symbol(_data); data_res.end = __pa_symbol(_edata) - 1; data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; bss_res.start = __pa_symbol(__bss_start); bss_res.end = __pa_symbol(__bss_stop) - 1; bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; kimage_res.start = code_res.start; kimage_res.end = bss_res.end; kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; ret = add_resource(&iomem_resource, &kimage_res); if (ret < 0) return ret; ret = add_resource(&kimage_res, &code_res); if (ret < 0) return ret; ret = add_resource(&kimage_res, &rodata_res); if (ret < 0) return ret; ret = add_resource(&kimage_res, &data_res); if (ret < 0) return ret; ret = add_resource(&kimage_res, &bss_res); return ret; } static void __init init_resources(void) { struct memblock_region *region = NULL; struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; int num_resources = 0, res_idx = 0; int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1; res_idx = num_resources - 1; mem_res_sz = num_resources * sizeof(*mem_res); mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); if (!mem_res) panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz); /* * Start by adding the reserved regions, if they overlap * with /memory regions, insert_resource later on will take * care of it. */ ret = add_kernel_resources(); if (ret < 0) goto error; #ifdef CONFIG_KEXEC_CORE if (crashk_res.start != crashk_res.end) { ret = add_resource(&iomem_resource, &crashk_res); if (ret < 0) goto error; } if (crashk_low_res.start != crashk_low_res.end) { ret = add_resource(&iomem_resource, &crashk_low_res); if (ret < 0) goto error; } #endif #ifdef CONFIG_CRASH_DUMP if (elfcorehdr_size > 0) { elfcorehdr_res.start = elfcorehdr_addr; elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1; elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; add_resource(&iomem_resource, &elfcorehdr_res); } #endif for_each_reserved_mem_region(region) { res = &mem_res[res_idx--]; res->name = "Reserved"; res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE; res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1; /* * Ignore any other reserved regions within * system memory. 
*/ if (memblock_is_memory(res->start)) { /* Re-use this pre-allocated resource */ res_idx++; continue; } ret = add_resource(&iomem_resource, res); if (ret < 0) goto error; } /* Add /memory regions to the resource tree */ for_each_mem_region(region) { res = &mem_res[res_idx--]; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; res->flags = IORESOURCE_MEM | IORESOURCE_EXCLUSIVE; } else { res->name = "System RAM"; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; } res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; ret = add_resource(&iomem_resource, res); if (ret < 0) goto error; } /* Clean-up any unused pre-allocated resources */ if (res_idx >= 0) memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res)); return; error: /* Better an empty resource tree than an inconsistent one */ release_child_resources(&iomem_resource); memblock_free(mem_res, mem_res_sz); } static void __init parse_dtb(void) { /* Early scan of device tree from init memory */ if (early_init_dt_scan(dtb_early_va)) { const char *name = of_flat_dt_get_machine_name(); if (name) { pr_info("Machine model: %s\n", name); dump_stack_set_arch_desc("%s (DT)", name); } } else { pr_err("No DTB passed to the kernel\n"); } #ifdef CONFIG_CMDLINE_FORCE strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); pr_info("Forcing kernel command line to: %s\n", boot_command_line); #endif } extern void __init init_rt_signal_env(void); void __init setup_arch(char **cmdline_p) { parse_dtb(); setup_initial_init_mm(_stext, _etext, _edata, _end); *cmdline_p = boot_command_line; early_ioremap_setup(); sbi_init(); jump_label_init(); parse_early_param(); efi_init(); paging_init(); /* Parse the ACPI tables for possible boot-time configuration */ acpi_boot_table_init(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); #else unflatten_device_tree(); #endif misc_mem_init(); init_resources(); #ifdef CONFIG_KASAN kasan_init(); #endif #ifdef CONFIG_SMP setup_smp(); #endif if (!acpi_disabled) acpi_init_rintc_map(); riscv_init_cbo_blocksizes(); riscv_fill_hwcap(); init_rt_signal_env(); apply_boot_alternatives(); if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && riscv_isa_extension_available(NULL, ZICBOM)) riscv_noncoherent_supported(); riscv_set_dma_cache_alignment(); } static int __init topology_init(void) { int i, ret; for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_devices, i); cpu->hotpluggable = cpu_has_hotplug(i); ret = register_cpu(cpu, i); if (unlikely(ret)) pr_warn("Warning: %s: register_cpu %d failed (%d)\n", __func__, i, ret); } return 0; } subsys_initcall(topology_init); void free_initmem(void) { if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) { set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end), set_memory_rw_nx); if (IS_ENABLED(CONFIG_64BIT)) set_kernel_memory(__init_begin, __init_end, set_memory_nx); } free_initmem_default(POISON_FREE_INITMEM); } static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n", kernel_map.virt_offset, KERNEL_LINK_ADDR); return 0; } static struct notifier_block kernel_offset_notifier = { .notifier_call = dump_kernel_offset }; static int __init register_kernel_offset_dumper(void) { if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) atomic_notifier_chain_register(&panic_notifier_list, &kernel_offset_notifier); return 0; } device_initcall(register_kernel_offset_dumper);
linux-master
arch/riscv/kernel/setup.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 SiFive */ #include <linux/cpu.h> #include <linux/of.h> #include <asm/cacheinfo.h> static struct riscv_cacheinfo_ops *rv_cache_ops; void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops) { rv_cache_ops = ops; } EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops); const struct attribute_group * cache_get_priv_group(struct cacheinfo *this_leaf) { if (rv_cache_ops && rv_cache_ops->get_priv_group) return rv_cache_ops->get_priv_group(this_leaf); return NULL; } static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type) { /* * Using raw_smp_processor_id() elides a preemptability check, but this * is really indicative of a larger problem: the cacheinfo UABI assumes * that cores have a homonogenous view of the cache hierarchy. That * happens to be the case for the current set of RISC-V systems, but * likely won't be true in general. Since there's no way to provide * correct information for these systems via the current UABI we're * just eliding the check for now. */ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id()); struct cacheinfo *this_leaf; int index; for (index = 0; index < this_cpu_ci->num_leaves; index++) { this_leaf = this_cpu_ci->info_list + index; if (this_leaf->level == level && this_leaf->type == type) return this_leaf; } return NULL; } uintptr_t get_cache_size(u32 level, enum cache_type type) { struct cacheinfo *this_leaf = get_cacheinfo(level, type); return this_leaf ? this_leaf->size : 0; } uintptr_t get_cache_geometry(u32 level, enum cache_type type) { struct cacheinfo *this_leaf = get_cacheinfo(level, type); return this_leaf ? (this_leaf->ways_of_associativity << 16 | this_leaf->coherency_line_size) : 0; } static void ci_leaf_init(struct cacheinfo *this_leaf, struct device_node *node, enum cache_type type, unsigned int level) { this_leaf->level = level; this_leaf->type = type; } int populate_cache_leaves(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf = this_cpu_ci->info_list; struct device_node *np = of_cpu_device_node_get(cpu); struct device_node *prev = NULL; int levels = 1, level = 1; if (of_property_read_bool(np, "cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); if (of_property_read_bool(np, "i-cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); if (of_property_read_bool(np, "d-cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); prev = np; while ((np = of_find_next_cache_node(np))) { of_node_put(prev); prev = np; if (!of_device_is_compatible(np, "cache")) break; if (of_property_read_u32(np, "cache-level", &level)) break; if (level <= levels) break; if (of_property_read_bool(np, "cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); if (of_property_read_bool(np, "i-cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); if (of_property_read_bool(np, "d-cache-size")) ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); levels = level; } of_node_put(np); return 0; }
linux-master
arch/riscv/kernel/cacheinfo.c
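Illustrative note on get_cache_geometry() in cacheinfo.c above: it packs associativity and line size into one word as ways << 16 | coherency_line_size. A minimal decode, with made-up 8-way/64-byte numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uintptr_t geometry = (8u << 16) | 64u;   /* ways << 16 | line size */
	unsigned int ways = geometry >> 16;
	unsigned int line = geometry & 0xffff;

	printf("ways=%u line_size=%u\n", ways, line);
	return 0;
}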
// SPDX-License-Identifier: GPL-2.0-only /* * Hibernation support for RISCV * * Copyright (C) 2023 StarFive Technology Co., Ltd. * * Author: Jee Heng Sia <[email protected]> */ #include <asm/barrier.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/sections.h> #include <asm/set_memory.h> #include <asm/smp.h> #include <asm/suspend.h> #include <linux/cpu.h> #include <linux/memblock.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/suspend.h> #include <linux/utsname.h> /* The logical cpu number we should resume on, initialised to a non-cpu number. */ static int sleep_cpu = -EINVAL; /* Pointer to the temporary resume page table. */ static pgd_t *resume_pg_dir; /* CPU context to be saved. */ struct suspend_context *hibernate_cpu_context; EXPORT_SYMBOL_GPL(hibernate_cpu_context); unsigned long relocated_restore_code; EXPORT_SYMBOL_GPL(relocated_restore_code); /** * struct arch_hibernate_hdr_invariants - container to store kernel build version. * @uts_version: to save the build number and date so that we do not resume with * a different kernel. */ struct arch_hibernate_hdr_invariants { char uts_version[__NEW_UTS_LEN + 1]; }; /** * struct arch_hibernate_hdr - helper parameters that help us to restore the image. * @invariants: container to store kernel build version. * @hartid: to make sure same boot_cpu executes the hibernate/restore code. * @saved_satp: original page table used by the hibernated image. * @restore_cpu_addr: the kernel's image address to restore the CPU context. */ static struct arch_hibernate_hdr { struct arch_hibernate_hdr_invariants invariants; unsigned long hartid; unsigned long saved_satp; unsigned long restore_cpu_addr; } resume_hdr; static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) { memset(i, 0, sizeof(*i)); memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); } /* * Check if the given pfn is in the 'nosave' section. */ int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin); unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1); return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)); } void notrace save_processor_state(void) { } void notrace restore_processor_state(void) { } /* * Helper parameters need to be saved to the hibernation image header. */ int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct arch_hibernate_hdr *hdr = addr; if (max_size < sizeof(*hdr)) return -EOVERFLOW; arch_hdr_invariants(&hdr->invariants); hdr->hartid = cpuid_to_hartid_map(sleep_cpu); hdr->saved_satp = csr_read(CSR_SATP); hdr->restore_cpu_addr = (unsigned long)__hibernate_cpu_resume; return 0; } EXPORT_SYMBOL_GPL(arch_hibernation_header_save); /* * Retrieve the helper parameters from the hibernation image header. 
*/ int arch_hibernation_header_restore(void *addr) { struct arch_hibernate_hdr_invariants invariants; struct arch_hibernate_hdr *hdr = addr; int ret = 0; arch_hdr_invariants(&invariants); if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { pr_crit("Hibernate image not generated by this kernel!\n"); return -EINVAL; } sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid); if (sleep_cpu < 0) { pr_crit("Hibernated on a CPU not known to this kernel!\n"); sleep_cpu = -EINVAL; return -EINVAL; } #ifdef CONFIG_SMP ret = bringup_hibernate_cpu(sleep_cpu); if (ret) { sleep_cpu = -EINVAL; return ret; } #endif resume_hdr = *hdr; return ret; } EXPORT_SYMBOL_GPL(arch_hibernation_header_restore); int swsusp_arch_suspend(void) { int ret = 0; if (__cpu_suspend_enter(hibernate_cpu_context)) { sleep_cpu = smp_processor_id(); suspend_save_csrs(hibernate_cpu_context); ret = swsusp_save(); } else { suspend_restore_csrs(hibernate_cpu_context); flush_tlb_all(); flush_icache_all(); /* * Tell the hibernation core that we've just restored the memory. */ in_suspend = 0; sleep_cpu = -EINVAL; } return ret; } static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start, unsigned long end, pgprot_t prot) { pte_t *src_ptep; pte_t *dst_ptep; if (pmd_none(READ_ONCE(*dst_pmdp))) { dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC); if (!dst_ptep) return -ENOMEM; pmd_populate_kernel(NULL, dst_pmdp, dst_ptep); } dst_ptep = pte_offset_kernel(dst_pmdp, start); src_ptep = pte_offset_kernel(src_pmdp, start); do { pte_t pte = READ_ONCE(*src_ptep); if (pte_present(pte)) set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot))); } while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end); return 0; } static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start, unsigned long end, pgprot_t prot) { unsigned long next; unsigned long ret; pmd_t *src_pmdp; pmd_t *dst_pmdp; if (pud_none(READ_ONCE(*dst_pudp))) { dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!dst_pmdp) return -ENOMEM; pud_populate(NULL, dst_pudp, dst_pmdp); } dst_pmdp = pmd_offset(dst_pudp, start); src_pmdp = pmd_offset(src_pudp, start); do { pmd_t pmd = READ_ONCE(*src_pmdp); next = pmd_addr_end(start, end); if (pmd_none(pmd)) continue; if (pmd_leaf(pmd)) { set_pmd(dst_pmdp, __pmd(pmd_val(pmd) | pgprot_val(prot))); } else { ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, next, prot); if (ret) return -ENOMEM; } } while (dst_pmdp++, src_pmdp++, start = next, start != end); return 0; } static int temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start, unsigned long end, pgprot_t prot) { unsigned long next; unsigned long ret; pud_t *dst_pudp; pud_t *src_pudp; if (p4d_none(READ_ONCE(*dst_p4dp))) { dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC); if (!dst_pudp) return -ENOMEM; p4d_populate(NULL, dst_p4dp, dst_pudp); } dst_pudp = pud_offset(dst_p4dp, start); src_pudp = pud_offset(src_p4dp, start); do { pud_t pud = READ_ONCE(*src_pudp); next = pud_addr_end(start, end); if (pud_none(pud)) continue; if (pud_leaf(pud)) { set_pud(dst_pudp, __pud(pud_val(pud) | pgprot_val(prot))); } else { ret = temp_pgtable_map_pmd(dst_pudp, src_pudp, start, next, prot); if (ret) return -ENOMEM; } } while (dst_pudp++, src_pudp++, start = next, start != end); return 0; } static int temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start, unsigned long end, pgprot_t prot) { unsigned long next; unsigned long ret; p4d_t *dst_p4dp; p4d_t *src_p4dp; if (pgd_none(READ_ONCE(*dst_pgdp))) { dst_p4dp = (p4d_t 
*)get_safe_page(GFP_ATOMIC); if (!dst_p4dp) return -ENOMEM; pgd_populate(NULL, dst_pgdp, dst_p4dp); } dst_p4dp = p4d_offset(dst_pgdp, start); src_p4dp = p4d_offset(src_pgdp, start); do { p4d_t p4d = READ_ONCE(*src_p4dp); next = p4d_addr_end(start, end); if (p4d_none(p4d)) continue; if (p4d_leaf(p4d)) { set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot))); } else { ret = temp_pgtable_map_pud(dst_p4dp, src_p4dp, start, next, prot); if (ret) return -ENOMEM; } } while (dst_p4dp++, src_p4dp++, start = next, start != end); return 0; } static int temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot) { pgd_t *dst_pgdp = pgd_offset_pgd(pgdp, start); pgd_t *src_pgdp = pgd_offset_k(start); unsigned long next; unsigned long ret; do { pgd_t pgd = READ_ONCE(*src_pgdp); next = pgd_addr_end(start, end); if (pgd_none(pgd)) continue; if (pgd_leaf(pgd)) { set_pgd(dst_pgdp, __pgd(pgd_val(pgd) | pgprot_val(prot))); } else { ret = temp_pgtable_map_p4d(dst_pgdp, src_pgdp, start, next, prot); if (ret) return -ENOMEM; } } while (dst_pgdp++, src_pgdp++, start = next, start != end); return 0; } static unsigned long relocate_restore_code(void) { void *page = (void *)get_safe_page(GFP_ATOMIC); if (!page) return -ENOMEM; copy_page(page, hibernate_core_restore_code); /* Make the page containing the relocated code executable. */ set_memory_x((unsigned long)page, 1); return (unsigned long)page; } int swsusp_arch_resume(void) { unsigned long end = (unsigned long)pfn_to_virt(max_low_pfn); unsigned long start = PAGE_OFFSET; int ret; /* * Memory allocated by get_safe_page() will be dealt with by the hibernation core, * we don't need to free it here. */ resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!resume_pg_dir) return -ENOMEM; /* * Create a temporary page table and map the whole linear region as executable and * writable. */ ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC)); if (ret) return ret; /* Move the restore code to a new page so that it doesn't get overwritten by itself. */ relocated_restore_code = relocate_restore_code(); if (relocated_restore_code == -ENOMEM) return -ENOMEM; /* * Map the __hibernate_cpu_resume() address to the temporary page table so that the * restore code can jumps to it after finished restore the image. The next execution * code doesn't find itself in a different address space after switching over to the * original page table used by the hibernated image. * The __hibernate_cpu_resume() mapping is unnecessary for RV32 since the kernel and * linear addresses are identical, but different for RV64. To ensure consistency, we * map it for both RV32 and RV64 kernels. * Additionally, we should ensure that the page is writable before restoring the image. */ start = (unsigned long)resume_hdr.restore_cpu_addr; end = start + PAGE_SIZE; ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE)); if (ret) return ret; hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode), resume_hdr.restore_cpu_addr); return 0; } #ifdef CONFIG_PM_SLEEP_SMP int hibernate_resume_nonboot_cpu_disable(void) { if (sleep_cpu < 0) { pr_err("Failing to resume from hibernate on an unknown CPU\n"); return -ENODEV; } return freeze_secondary_cpus(sleep_cpu); } #endif static int __init riscv_hibernate_init(void) { hibernate_cpu_context = kzalloc(sizeof(*hibernate_cpu_context), GFP_KERNEL); if (WARN_ON(!hibernate_cpu_context)) return -ENOMEM; return 0; } early_initcall(riscv_hibernate_init);
linux-master
arch/riscv/kernel/hibernate.c
// SPDX-License-Identifier: GPL-2.0-only /* * Multiplex several IPIs over a single HW IPI. * * Copyright (c) 2022 Ventana Micro Systems Inc. */ #define pr_fmt(fmt) "riscv: " fmt #include <linux/cpu.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <asm/sbi.h> static int sbi_ipi_virq; static void sbi_ipi_handle(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); csr_clear(CSR_IP, IE_SIE); ipi_mux_process(); chained_irq_exit(chip, desc); } static int sbi_ipi_starting_cpu(unsigned int cpu) { enable_percpu_irq(sbi_ipi_virq, irq_get_trigger_type(sbi_ipi_virq)); return 0; } void __init sbi_ipi_init(void) { int virq; struct irq_domain *domain; if (riscv_ipi_have_virq_range()) return; domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); if (!domain) { pr_err("unable to find INTC IRQ domain\n"); return; } sbi_ipi_virq = irq_create_mapping(domain, RV_IRQ_SOFT); if (!sbi_ipi_virq) { pr_err("unable to create INTC IRQ mapping\n"); return; } virq = ipi_mux_create(BITS_PER_BYTE, sbi_send_ipi); if (virq <= 0) { pr_err("unable to create muxed IPIs\n"); irq_dispose_mapping(sbi_ipi_virq); return; } irq_set_chained_handler(sbi_ipi_virq, sbi_ipi_handle); /* * Don't disable IPI when CPU goes offline because * the masking/unmasking of virtual IPIs is done * via generic IPI-Mux */ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irqchip/sbi-ipi:starting", sbi_ipi_starting_cpu, NULL); riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false); pr_info("providing IPIs using SBI IPI extension\n"); }
linux-master
arch/riscv/kernel/sbi-ipi.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2020 Western Digital Corporation or its affiliates. */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/string.h> #include <linux/sched.h> #include <asm/cpu_ops.h> #include <asm/cpu_ops_sbi.h> #include <asm/sbi.h> #include <asm/smp.h> const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; extern const struct cpu_operations cpu_ops_sbi; #ifndef CONFIG_RISCV_BOOT_SPINWAIT const struct cpu_operations cpu_ops_spinwait = { .name = "", .cpu_prepare = NULL, .cpu_start = NULL, }; #endif void __init cpu_set_ops(int cpuid) { #if IS_ENABLED(CONFIG_RISCV_SBI) if (sbi_probe_extension(SBI_EXT_HSM)) { if (!cpuid) pr_info("SBI HSM extension detected\n"); cpu_ops[cpuid] = &cpu_ops_sbi; } else #endif cpu_ops[cpuid] = &cpu_ops_spinwait; }
linux-master
arch/riscv/kernel/cpu_ops.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2020 Western Digital Corporation or its affiliates. */ #include <linux/errno.h> #include <linux/of.h> #include <linux/string.h> #include <linux/sched/task_stack.h> #include <asm/cpu_ops.h> #include <asm/sbi.h> #include <asm/smp.h> #include "head.h" const struct cpu_operations cpu_ops_spinwait; void *__cpu_spinwait_stack_pointer[NR_CPUS] __section(".data"); void *__cpu_spinwait_task_pointer[NR_CPUS] __section(".data"); static void cpu_update_secondary_bootdata(unsigned int cpuid, struct task_struct *tidle) { unsigned long hartid = cpuid_to_hartid_map(cpuid); /* * The hartid must be less than NR_CPUS to avoid out-of-bound access * errors for __cpu_spinwait_stack/task_pointer. That is not always possible * for platforms with discontiguous hartid numbering scheme. That's why * spinwait booting is not the recommended approach for any platforms * booting Linux in S-mode and can be disabled in the future. */ if (hartid == INVALID_HARTID || hartid >= (unsigned long) NR_CPUS) return; /* Make sure tidle is updated */ smp_mb(); WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_stack_page(tidle) + THREAD_SIZE); WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle); } static int spinwait_cpu_prepare(unsigned int cpuid) { if (!cpu_ops_spinwait.cpu_start) { pr_err("cpu start method not defined for CPU [%d]\n", cpuid); return -ENODEV; } return 0; } static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) { /* * In this protocol, all cpus boot on their own accord. _start * selects the first cpu to boot the kernel and causes the remainder * of the cpus to spin in a loop waiting for their stack pointer to be * setup by that main cpu. Writing to bootdata * (i.e __cpu_spinwait_stack_pointer) signals to the spinning cpus that they * can continue the boot process. */ cpu_update_secondary_bootdata(cpuid, tidle); return 0; } const struct cpu_operations cpu_ops_spinwait = { .name = "spinwait", .cpu_prepare = spinwait_cpu_prepare, .cpu_start = spinwait_cpu_start, };
linux-master
arch/riscv/kernel/cpu_ops_spinwait.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2017 SiFive */ #include <linux/acpi.h> #include <linux/of_clk.h> #include <linux/clockchips.h> #include <linux/clocksource.h> #include <linux/delay.h> #include <asm/sbi.h> #include <asm/processor.h> #include <asm/timex.h> unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); void __init time_init(void) { struct device_node *cpu; struct acpi_table_rhct *rhct; acpi_status status; u32 prop; if (acpi_disabled) { cpu = of_find_node_by_path("/cpus"); if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop)) panic("RISC-V system with no 'timebase-frequency' in DTS\n"); of_node_put(cpu); riscv_timebase = prop; of_clk_init(NULL); } else { status = acpi_get_table(ACPI_SIG_RHCT, 0, (struct acpi_table_header **)&rhct); if (ACPI_FAILURE(status)) panic("RISC-V ACPI system with no RHCT table\n"); riscv_timebase = rhct->time_base_freq; acpi_put_table((struct acpi_table_header *)rhct); } lpj_fine = riscv_timebase / HZ; timer_probe(); tick_setup_hrtimer_broadcast(); }
linux-master
arch/riscv/kernel/time.c
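Illustrative note on time_init() in time.c above: lpj_fine is simply the number of timer ticks per jiffy, timebase / HZ. A worked example with hypothetical values (10 MHz timebase and HZ=250, neither taken from a real platform):

#include <stdio.h>

int main(void)
{
	unsigned long riscv_timebase = 10000000UL; /* 10 MHz, illustrative */
	unsigned long hz = 250;                    /* illustrative CONFIG_HZ */
	unsigned long lpj_fine = riscv_timebase / hz;

	printf("lpj_fine = %lu ticks per jiffy\n", lpj_fine); /* 40000 */
	return 0;
}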
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 FORTH-ICS/CARV * Nick Kossifidis <[email protected]> */ #include <linux/kexec.h> #include <asm/kexec.h> /* For riscv_kexec_* symbol defines */ #include <linux/smp.h> /* For smp_send_stop () */ #include <asm/cacheflush.h> /* For local_flush_icache_all() */ #include <asm/barrier.h> /* For smp_wmb() */ #include <asm/page.h> /* For PAGE_MASK */ #include <linux/libfdt.h> /* For fdt_check_header() */ #include <asm/set_memory.h> /* For set_memory_x() */ #include <linux/compiler.h> /* For unreachable() */ #include <linux/cpu.h> /* For cpu_down() */ #include <linux/reboot.h> #include <linux/interrupt.h> #include <linux/irq.h> /* * kexec_image_info - Print received image details */ static void kexec_image_info(const struct kimage *image) { unsigned long i; pr_debug("Kexec image info:\n"); pr_debug("\ttype: %d\n", image->type); pr_debug("\tstart: %lx\n", image->start); pr_debug("\thead: %lx\n", image->head); pr_debug("\tnr_segments: %lu\n", image->nr_segments); for (i = 0; i < image->nr_segments; i++) { pr_debug("\t segment[%lu]: %016lx - %016lx", i, image->segment[i].mem, image->segment[i].mem + image->segment[i].memsz); pr_debug("\t\t0x%lx bytes, %lu pages\n", (unsigned long) image->segment[i].memsz, (unsigned long) image->segment[i].memsz / PAGE_SIZE); } } /* * machine_kexec_prepare - Initialize kexec * * This function is called from do_kexec_load, when the user has * provided us with an image to be loaded. Its goal is to validate * the image and prepare the control code buffer as needed. * Note that kimage_alloc_init has already been called and the * control buffer has already been allocated. */ int machine_kexec_prepare(struct kimage *image) { struct kimage_arch *internal = &image->arch; struct fdt_header fdt = {0}; void *control_code_buffer = NULL; unsigned int control_code_buffer_sz = 0; int i = 0; kexec_image_info(image); /* Find the Flattened Device Tree and save its physical address */ for (i = 0; i < image->nr_segments; i++) { if (image->segment[i].memsz <= sizeof(fdt)) continue; if (image->file_mode) memcpy(&fdt, image->segment[i].buf, sizeof(fdt)); else if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt))) continue; if (fdt_check_header(&fdt)) continue; internal->fdt_addr = (unsigned long) image->segment[i].mem; break; } if (!internal->fdt_addr) { pr_err("Device tree not included in the provided image\n"); return -EINVAL; } /* Copy the assembler code for relocation to the control page */ if (image->type != KEXEC_TYPE_CRASH) { control_code_buffer = page_address(image->control_code_page); control_code_buffer_sz = page_size(image->control_code_page); if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) { pr_err("Relocation code doesn't fit within a control page\n"); return -EINVAL; } memcpy(control_code_buffer, riscv_kexec_relocate, riscv_kexec_relocate_size); /* Mark the control page executable */ set_memory_x((unsigned long) control_code_buffer, 1); } return 0; } /* * machine_kexec_cleanup - Cleanup any leftovers from * machine_kexec_prepare * * This function is called by kimage_free to handle any arch-specific * allocations done on machine_kexec_prepare. Since we didn't do any * allocations there, this is just an empty function. Note that the * control buffer is freed by kimage_free. */ void machine_kexec_cleanup(struct kimage *image) { } /* * machine_shutdown - Prepare for a kexec reboot * * This function is called by kernel_kexec just before machine_kexec * below. 
Its goal is to prepare the rest of the system (the other * harts and possibly devices etc) for a kexec reboot. */ void machine_shutdown(void) { /* * No more interrupts on this hart * until we are back up. */ local_irq_disable(); #if defined(CONFIG_HOTPLUG_CPU) smp_shutdown_nonboot_cpus(smp_processor_id()); #endif } static void machine_kexec_mask_interrupts(void) { unsigned int i; struct irq_desc *desc; for_each_irq_desc(i, desc) { struct irq_chip *chip; int ret; chip = irq_desc_get_chip(desc); if (!chip) continue; /* * First try to remove the active state. If this * fails, try to EOI the interrupt. */ ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false); if (ret && irqd_irq_inprogress(&desc->irq_data) && chip->irq_eoi) chip->irq_eoi(&desc->irq_data); if (chip->irq_mask) chip->irq_mask(&desc->irq_data); if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) chip->irq_disable(&desc->irq_data); } } /* * machine_crash_shutdown - Prepare to kexec after a kernel crash * * This function is called by crash_kexec just before machine_kexec * and its goal is to shutdown non-crashing cpus and save registers. */ void machine_crash_shutdown(struct pt_regs *regs) { local_irq_disable(); /* shutdown non-crashing cpus */ crash_smp_send_stop(); crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); pr_info("Starting crashdump kernel...\n"); } /* * machine_kexec - Jump to the loaded kimage * * This function is called by kernel_kexec which is called by the * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC, * or by crash_kernel which is called by the kernel's arch-specific * trap handler in case of a kernel panic. It's the final stage of * the kexec process where the pre-loaded kimage is ready to be * executed. We assume at this point that all other harts are * suspended and this hart will be the new boot hart. */ void __noreturn machine_kexec(struct kimage *image) { struct kimage_arch *internal = &image->arch; unsigned long jump_addr = (unsigned long) image->start; unsigned long first_ind_entry = (unsigned long) &image->head; unsigned long this_cpu_id = __smp_processor_id(); unsigned long this_hart_id = cpuid_to_hartid_map(this_cpu_id); unsigned long fdt_addr = internal->fdt_addr; void *control_code_buffer = page_address(image->control_code_page); riscv_kexec_method kexec_method = NULL; #ifdef CONFIG_SMP WARN(smp_crash_stop_failed(), "Some CPUs may be stale, kdump will be unreliable.\n"); #endif if (image->type != KEXEC_TYPE_CRASH) kexec_method = control_code_buffer; else kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate; pr_notice("Will call new kernel at %08lx from hart id %lx\n", jump_addr, this_hart_id); pr_notice("FDT image at %08lx\n", fdt_addr); /* Make sure the relocation code is visible to the hart */ local_flush_icache_all(); /* Jump to the relocation code */ pr_notice("Bye...\n"); kexec_method(first_ind_entry, jump_addr, fdt_addr, this_hart_id, kernel_map.va_pa_offset); unreachable(); }
linux-master
arch/riscv/kernel/machine_kexec.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 Arnd Bergmann <[email protected]> * Copyright (C) 2012 Regents of the University of California */ #include <linux/linkage.h> #include <linux/syscalls.h> #include <asm-generic/syscalls.h> #include <asm/syscall.h> #undef __SYSCALL #define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *); #include <asm/unistd.h> #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = __riscv_##call, void * const sys_call_table[__NR_syscalls] = { [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall, #include <asm/unistd.h> };
linux-master
arch/riscv/kernel/syscall_table.c
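Illustrative note on sys_call_table in syscall_table.c above: the table relies on the GNU C range-designator extension to default every slot to the not-implemented handler and then lets the generated __SYSCALL entries override individual slots. A self-contained sketch of the same trick, with an invented table size and handlers:

#include <stdio.h>

#define NR_CALLS 8

static long ni_call(void)   { return -38; /* -ENOSYS */ }
static long real_call(void) { return 0; }

static long (*const call_table[NR_CALLS])(void) = {
	[0 ... NR_CALLS - 1] = ni_call,  /* default for every slot */
	[3] = real_call,                 /* later designators override */
};

int main(void)
{
	printf("call 3 -> %ld, call 5 -> %ld\n", call_table[3](), call_table[5]());
	return 0;
}

GCC fills all eight slots with ni_call first, then the later [3] designator wins, so call_table[3]() returns 0 while the rest return -ENOSYS.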
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 Emil Renner Berthing * * Based on arch/arm64/kernel/jump_label.c */ #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/memory.h> #include <linux/mutex.h> #include <asm/bug.h> #include <asm/patch.h> #define RISCV_INSN_NOP 0x00000013U #define RISCV_INSN_JAL 0x0000006fU void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { void *addr = (void *)jump_entry_code(entry); u32 insn; if (type == JUMP_LABEL_JMP) { long offset = jump_entry_target(entry) - jump_entry_code(entry); if (WARN_ON(offset & 1 || offset < -524288 || offset >= 524288)) return; insn = RISCV_INSN_JAL | (((u32)offset & GENMASK(19, 12)) << (12 - 12)) | (((u32)offset & GENMASK(11, 11)) << (20 - 11)) | (((u32)offset & GENMASK(10, 1)) << (21 - 1)) | (((u32)offset & GENMASK(20, 20)) << (31 - 20)); } else { insn = RISCV_INSN_NOP; } mutex_lock(&text_mutex); patch_text_nosync(addr, &insn, sizeof(insn)); mutex_unlock(&text_mutex); }
linux-master
arch/riscv/kernel/jump_label.c
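Illustrative note on arch_jump_label_transform() in jump_label.c above: enabling a static key patches in a jal whose signed 21-bit offset is split across four fields of the instruction word. The standalone sketch below reproduces that bit scattering with a local GENMASK() definition and an example 0x800 offset; both are for illustration only.

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)  (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define RISCV_INSN_JAL 0x0000006fU

static uint32_t encode_jal(long offset)
{
	return RISCV_INSN_JAL |
	       (((uint32_t)offset & GENMASK(19, 12)) << (12 - 12)) |
	       (((uint32_t)offset & GENMASK(11, 11)) << (20 - 11)) |
	       (((uint32_t)offset & GENMASK(10, 1))  << (21 - 1))  |
	       (((uint32_t)offset & GENMASK(20, 20)) << (31 - 20));
}

int main(void)
{
	printf("jal +0x800 -> 0x%08x\n", encode_jal(0x800));
	return 0;
}

For the 0x800 offset only imm[11] is set, so the printed word differs from the bare jal opcode exactly in bit 20.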
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */ #include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/stacktrace.h> /* * Get the return address for a single stackframe and return a pointer to the * next frame tail. */ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp, unsigned long reg_ra) { struct stackframe buftail; unsigned long ra = 0; unsigned long __user *user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stackframe)); /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) return 0; if (__copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail))) return 0; if (reg_ra != 0) ra = reg_ra; else ra = buftail.ra; fp = buftail.fp; if (ra != 0) perf_callchain_store(entry, ra); else return 0; return fp; } /* * This will be called when the target is in user mode * This function will only be called when we use * "PERF_SAMPLE_CALLCHAIN" in * kernel/events/core.c:perf_prepare_sample() * * How to trigger perf_callchain_[user/kernel] : * $ perf record -e cpu-clock --call-graph fp ./program * $ perf report --call-graph * * On RISC-V platform, the program being sampled and the C library * need to be compiled with -fno-omit-frame-pointer, otherwise * the user stack will not contain function frame. */ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { unsigned long fp = 0; fp = regs->s0; perf_callchain_store(entry, regs->epc); fp = user_backtrace(entry, fp, regs->ra); while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) fp = user_backtrace(entry, fp, 0); } static bool fill_callchain(void *entry, unsigned long pc) { return perf_callchain_store(entry, pc) == 0; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { walk_stackframe(NULL, regs, fill_callchain, entry); }
linux-master
arch/riscv/kernel/perf_callchain.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 ARM Limited * Copyright (C) 2014 Regents of the University of California */ #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> #include <linux/ftrace.h> #include <asm/stacktrace.h> #ifdef CONFIG_FRAME_POINTER extern asmlinkage void ret_from_exception(void); void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg) { unsigned long fp, sp, pc; int level = 0; if (regs) { fp = frame_pointer(regs); sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { fp = (unsigned long)__builtin_frame_address(0); sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; level = -1; } else { /* task blocked in __switch_to */ fp = task->thread.s[0]; sp = task->thread.sp; pc = task->thread.ra; } for (;;) { unsigned long low, high; struct stackframe *frame; if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc)))) break; /* Validate frame pointer */ low = sp + sizeof(struct stackframe); high = ALIGN(sp, THREAD_SIZE); if (unlikely(fp < low || fp > high || fp & 0x7)) break; /* Unwind stack frame */ frame = (struct stackframe *)fp - 1; sp = fp; if (regs && (regs->epc == pc) && (frame->fp & 0x7)) { fp = frame->ra; pc = regs->ra; } else { fp = frame->fp; pc = ftrace_graph_ret_addr(current, NULL, frame->ra, &frame->ra); if (pc == (unsigned long)ret_from_exception) { if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc))) break; pc = ((struct pt_regs *)sp)->epc; fp = ((struct pt_regs *)sp)->s0; } } } } #else /* !CONFIG_FRAME_POINTER */ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg) { unsigned long sp, pc; unsigned long *ksp; if (regs) { sp = user_stack_pointer(regs); pc = instruction_pointer(regs); } else if (task == NULL || task == current) { sp = current_stack_pointer; pc = (unsigned long)walk_stackframe; } else { /* task blocked in __switch_to */ sp = task->thread.sp; pc = task->thread.ra; } if (unlikely(sp & 0x7)) return; ksp = (unsigned long *)sp; while (!kstack_end(ksp)) { if (__kernel_text_address(pc) && unlikely(!fn(arg, pc))) break; pc = READ_ONCE_NOCHECK(*ksp++) - 0x4; } } #endif /* CONFIG_FRAME_POINTER */ static bool print_trace_address(void *arg, unsigned long pc) { const char *loglvl = arg; print_ip_sym(loglvl, pc); return true; } noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task, const char *loglvl) { walk_stackframe(task, regs, print_trace_address, (void *)loglvl); } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { pr_cont("%sCall Trace:\n", loglvl); dump_backtrace(NULL, task, loglvl); } static bool save_wchan(void *arg, unsigned long pc) { if (!in_sched_functions(pc)) { unsigned long *p = arg; *p = pc; return false; } return true; } unsigned long __get_wchan(struct task_struct *task) { unsigned long pc = 0; if (!try_get_task_stack(task)) return 0; walk_stackframe(task, NULL, save_wchan, &pc); put_task_stack(task); return pc; } noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { walk_stackframe(task, regs, consume_entry, cookie); }
linux-master
arch/riscv/kernel/stacktrace.c
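Illustrative note on the CONFIG_FRAME_POINTER path of walk_stackframe() in stacktrace.c above: each frame pointer points just past a saved {fp, ra} pair, so the unwinder reads the pair at fp - sizeof(struct stackframe) and then follows the saved fp. The userspace sketch below walks a hand-built two-frame chain rather than a real stack, which keeps it portable while showing the same loop shape.

#include <stdio.h>

struct stackframe {
	unsigned long fp;
	unsigned long ra;
};

int main(void)
{
	struct stackframe frames[2];
	unsigned long fp;

	/* Outermost frame ("B"): end of the chain. */
	frames[0].fp = 0;
	frames[0].ra = 0x2222;
	/* Its callee ("A"): saved fp points just past frame B's pair. */
	frames[1].fp = (unsigned long)&frames[1];
	frames[1].ra = 0x1111;

	/* The "current" fp points just past frame A's pair. */
	fp = (unsigned long)&frames[2];
	while (fp) {
		struct stackframe *frame = (struct stackframe *)fp - 1;
		printf("ra = 0x%lx\n", frame->ra);
		fp = frame->fp;
	}
	return 0;
}

It prints ra = 0x1111 followed by ra = 0x2222 and stops when the saved fp is zero.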
// SPDX-License-Identifier: GPL-2.0-only /* * kexec_file for riscv, use vmlinux as the dump-capture kernel image. * * Copyright (C) 2021 Huawei Technologies Co, Ltd. * * Author: Liao Chang ([email protected]) */ #include <linux/kexec.h> const struct kexec_file_ops * const kexec_file_loaders[] = { &elf_kexec_ops, NULL };
linux-master
arch/riscv/kernel/machine_kexec_file.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009 Sunplus Core Technology Co., Ltd. * Chen Liqin <[email protected]> * Lennox Wu <[email protected]> * Copyright (C) 2012 Regents of the University of California */ #include <linux/compat.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/syscalls.h> #include <linux/resume_user_mode.h> #include <linux/linkage.h> #include <linux/entry-common.h> #include <asm/ucontext.h> #include <asm/vdso.h> #include <asm/signal.h> #include <asm/signal32.h> #include <asm/switch_to.h> #include <asm/vector.h> #include <asm/csr.h> #include <asm/cacheflush.h> unsigned long signal_minsigstksz __ro_after_init; extern u32 __user_rt_sigreturn[2]; static size_t riscv_v_sc_size __ro_after_init; #define DEBUG_SIG 0 struct rt_sigframe { struct siginfo info; struct ucontext uc; #ifndef CONFIG_MMU u32 sigreturn_code[2]; #endif }; #ifdef CONFIG_FPU static long restore_fp_state(struct pt_regs *regs, union __riscv_fp_state __user *sc_fpregs) { long err; struct __riscv_d_ext_state __user *state = &sc_fpregs->d; err = __copy_from_user(&current->thread.fstate, state, sizeof(*state)); if (unlikely(err)) return err; fstate_restore(current, regs); return 0; } static long save_fp_state(struct pt_regs *regs, union __riscv_fp_state __user *sc_fpregs) { long err; struct __riscv_d_ext_state __user *state = &sc_fpregs->d; fstate_save(current, regs); err = __copy_to_user(state, &current->thread.fstate, sizeof(*state)); return err; } #else #define save_fp_state(task, regs) (0) #define restore_fp_state(task, regs) (0) #endif #ifdef CONFIG_RISCV_ISA_V static long save_v_state(struct pt_regs *regs, void __user **sc_vec) { struct __riscv_ctx_hdr __user *hdr; struct __sc_riscv_v_state __user *state; void __user *datap; long err; hdr = *sc_vec; /* Place state to the user's signal context space after the hdr */ state = (struct __sc_riscv_v_state __user *)(hdr + 1); /* Point datap right after the end of __sc_riscv_v_state */ datap = state + 1; /* datap is designed to be 16 byte aligned for better performance */ WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16))); riscv_v_vstate_save(current, regs); /* Copy everything of vstate but datap. */ err = __copy_to_user(&state->v_state, &current->thread.vstate, offsetof(struct __riscv_v_ext_state, datap)); /* Copy the pointer datap itself. */ err |= __put_user(datap, &state->v_state.datap); /* Copy the whole vector content to user space datap. */ err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize); /* Copy magic to the user space after saving all vector conetext */ err |= __put_user(RISCV_V_MAGIC, &hdr->magic); err |= __put_user(riscv_v_sc_size, &hdr->size); if (unlikely(err)) return err; /* Only progress the sv_vec if everything has done successfully */ *sc_vec += riscv_v_sc_size; return 0; } /* * Restore Vector extension context from the user's signal frame. This function * assumes a valid extension header. So magic and size checking must be done by * the caller. */ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) { long err; struct __sc_riscv_v_state __user *state = sc_vec; void __user *datap; /* Copy everything of __sc_riscv_v_state except datap. */ err = __copy_from_user(&current->thread.vstate, &state->v_state, offsetof(struct __riscv_v_ext_state, datap)); if (unlikely(err)) return err; /* Copy the pointer datap itself. */ err = __get_user(datap, &state->v_state.datap); if (unlikely(err)) return err; /* * Copy the whole vector content from user space datap. 
Use * copy_from_user to prevent information leak. */ err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); if (unlikely(err)) return err; riscv_v_vstate_restore(current, regs); return err; } #else #define save_v_state(task, regs) (0) #define __restore_v_state(task, regs) (0) #endif static long restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { void __user *sc_ext_ptr = &sc->sc_extdesc.hdr; __u32 rsvd; long err; /* sc_regs is structured the same as the start of pt_regs */ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs)); if (unlikely(err)) return err; /* Restore the floating-point state. */ if (has_fpu()) { err = restore_fp_state(regs, &sc->sc_fpregs); if (unlikely(err)) return err; } /* Check the reserved word before extensions parsing */ err = __get_user(rsvd, &sc->sc_extdesc.reserved); if (unlikely(err)) return err; if (unlikely(rsvd)) return -EINVAL; while (!err) { __u32 magic, size; struct __riscv_ctx_hdr __user *head = sc_ext_ptr; err |= __get_user(magic, &head->magic); err |= __get_user(size, &head->size); if (unlikely(err)) return err; sc_ext_ptr += sizeof(*head); switch (magic) { case END_MAGIC: if (size != END_HDR_SIZE) return -EINVAL; return 0; case RISCV_V_MAGIC: if (!has_vector() || !riscv_v_vstate_query(regs) || size != riscv_v_sc_size) return -EINVAL; err = __restore_v_state(regs, sc_ext_ptr); break; default: return -EINVAL; } sc_ext_ptr = (void __user *)head + size; } return err; } static size_t get_rt_frame_size(bool cal_all) { struct rt_sigframe __user *frame; size_t frame_size; size_t total_context_size = 0; frame_size = sizeof(*frame); if (has_vector()) { if (cal_all || riscv_v_vstate_query(task_pt_regs(current))) total_context_size += riscv_v_sc_size; } /* * Preserved a __riscv_ctx_hdr for END signal context header if an * extension uses __riscv_extra_ext_header */ if (total_context_size) total_context_size += sizeof(struct __riscv_ctx_hdr); frame_size += total_context_size; frame_size = round_up(frame_size, 16); return frame_size; } SYSCALL_DEFINE0(rt_sigreturn) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; struct task_struct *task; sigset_t set; size_t frame_size = get_rt_frame_size(false); /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *)regs->sp; if (!access_ok(frame, frame_size)) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; regs->cause = -1UL; return regs->a0; badframe: task = current; if (show_unhandled_signals) { pr_info_ratelimited( "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n", task->comm, task_pid_nr(task), __func__, frame, (void *)regs->epc, (void *)regs->sp); } force_sig(SIGSEGV); return 0; } static long setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs) { struct sigcontext __user *sc = &frame->uc.uc_mcontext; struct __riscv_ctx_hdr __user *sc_ext_ptr = &sc->sc_extdesc.hdr; long err; /* sc_regs is structured the same as the start of pt_regs */ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs)); /* Save the floating-point state. */ if (has_fpu()) err |= save_fp_state(regs, &sc->sc_fpregs); /* Save the vector state. 
*/ if (has_vector() && riscv_v_vstate_query(regs)) err |= save_v_state(regs, (void __user **)&sc_ext_ptr); /* Write zero to fp-reserved space and check it on restore_sigcontext */ err |= __put_user(0, &sc->sc_extdesc.reserved); /* And put END __riscv_ctx_hdr at the end. */ err |= __put_user(END_MAGIC, &sc_ext_ptr->magic); err |= __put_user(END_HDR_SIZE, &sc_ext_ptr->size); return err; } static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t framesize) { unsigned long sp; /* Default to using normal stack */ sp = regs->sp; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) return (void __user __force *)(-1UL); /* This is the X/Open sanctioned signal stack switching. */ sp = sigsp(sp, ksig) - framesize; /* Align the stack frame. */ sp &= ~0xfUL; /* * Fail if the size of the altstack is not large enough for the * sigframe construction. */ if (current->sas_ss_size && sp < current->sas_ss_sp) return (void __user __force *)-1UL; return (void __user *)sp; } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; long err = 0; unsigned long __maybe_unused addr; size_t frame_size = get_rt_frame_size(false); frame = get_sigframe(ksig, regs, frame_size); if (!access_ok(frame, frame_size)) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(NULL, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->sp); err |= setup_sigcontext(frame, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up to return from userspace. */ #ifdef CONFIG_MMU regs->ra = (unsigned long)VDSO_SYMBOL( current->mm->context.vdso, rt_sigreturn); #else /* * For the nommu case we don't have a VDSO. Instead we push two * instructions to call the rt_sigreturn syscall onto the user stack. */ if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn, sizeof(frame->sigreturn_code))) return -EFAULT; addr = (unsigned long)&frame->sigreturn_code; /* Make sure the two instructions are pushed to icache. */ flush_icache_range(addr, addr + sizeof(frame->sigreturn_code)); regs->ra = addr; #endif /* CONFIG_MMU */ /* * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. * We always pass siginfo and mcontext, regardless of SA_SIGINFO, * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->epc = (unsigned long)ksig->ka.sa.sa_handler; regs->sp = (unsigned long)frame; regs->a0 = ksig->sig; /* a0: signal number */ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */ #if DEBUG_SIG pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n", current->comm, task_pid_nr(current), ksig->sig, (void *)regs->epc, (void *)regs->ra, frame); #endif return 0; } static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ if (regs->cause == EXC_SYSCALL) { /* Avoid additional syscall restarting via ret_from_exception */ regs->cause = -1UL; /* If so, check system call restarting.. 
*/ switch (regs->a0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->a0 = -EINTR; break; case -ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->a0 = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->epc -= 0x4; break; } } rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ if (is_compat_task()) ret = compat_setup_rt_frame(ksig, oldset, regs); else ret = setup_rt_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, 0); } void arch_do_signal_or_restart(struct pt_regs *regs) { struct ksignal ksig; if (get_signal(&ksig)) { /* Actually deliver the signal */ handle_signal(&ksig, regs); return; } /* Did we come from a system call? */ if (regs->cause == EXC_SYSCALL) { /* Avoid additional syscall restarting via ret_from_exception */ regs->cause = -1UL; /* Restart the system call - no handlers present */ switch (regs->a0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->a0 = regs->orig_a0; regs->epc -= 0x4; break; case -ERESTART_RESTARTBLOCK: regs->a0 = regs->orig_a0; regs->a7 = __NR_restart_syscall; regs->epc -= 0x4; break; } } /* * If there is no signal to deliver, we just put the saved * sigmask back. */ restore_saved_sigmask(); } void init_rt_signal_env(void); void __init init_rt_signal_env(void) { riscv_v_sc_size = sizeof(struct __riscv_ctx_hdr) + sizeof(struct __sc_riscv_v_state) + riscv_v_vsize; /* * Determine the stack space required for guaranteed signal delivery. * The signal_minsigstksz will be populated into the AT_MINSIGSTKSZ entry * in the auxiliary array at process startup. */ signal_minsigstksz = get_rt_frame_size(true); } #ifdef CONFIG_DYNAMIC_SIGFRAME bool sigaltstack_size_valid(size_t ss_size) { return ss_size > get_rt_frame_size(false); } #endif /* CONFIG_DYNAMIC_SIGFRAME */
linux-master
arch/riscv/kernel/signal.c
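/*
 * Illustrative userspace sketch (not part of signal.c): the rt_sigframe that
 * the kernel code above constructs is what a handler installed with SA_SIGINFO
 * receives as its ucontext argument. The program below only prints the
 * interrupted pc. The __gregs[0] index assumes the riscv64 glibc mcontext
 * layout (pc is slot 0); that assumption, and using printf inside a signal
 * handler, are shortcuts of this example rather than anything the kernel
 * defines.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	ucontext_t *uc = ucontext;

	/* Slot 0 of __gregs holds the pc on riscv glibc (assumption). */
	printf("signal %d delivered, interrupted pc = 0x%lx\n",
	       sig, (unsigned long)uc->uc_mcontext.__gregs[0]);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}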
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 SiFive */ #include <linux/ptrace.h> #include <linux/kdebug.h> #include <linux/bug.h> #include <linux/kgdb.h> #include <linux/irqflags.h> #include <linux/string.h> #include <asm/cacheflush.h> #include <asm/gdb_xml.h> #include <asm/insn.h> enum { NOT_KGDB_BREAK = 0, KGDB_SW_BREAK, KGDB_COMPILED_BREAK, KGDB_SW_SINGLE_STEP }; static unsigned long stepped_address; static unsigned int stepped_opcode; static int decode_register_index(unsigned long opcode, int offset) { return (opcode >> offset) & 0x1F; } static int decode_register_index_short(unsigned long opcode, int offset) { return ((opcode >> offset) & 0x7) + 8; } /* Calculate the new address for after a step */ static int get_step_address(struct pt_regs *regs, unsigned long *next_addr) { unsigned long pc = regs->epc; unsigned long *regs_ptr = (unsigned long *)regs; unsigned int rs1_num, rs2_num; int op_code; if (get_kernel_nofault(op_code, (void *)pc)) return -EINVAL; if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) { if (riscv_insn_is_c_jalr(op_code) || riscv_insn_is_c_jr(op_code)) { rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF); *next_addr = regs_ptr[rs1_num]; } else if (riscv_insn_is_c_j(op_code) || riscv_insn_is_c_jal(op_code)) { *next_addr = RVC_EXTRACT_JTYPE_IMM(op_code) + pc; } else if (riscv_insn_is_c_beqz(op_code)) { rs1_num = decode_register_index_short(op_code, RVC_C1_RS1_OPOFF); if (!rs1_num || regs_ptr[rs1_num] == 0) *next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc; else *next_addr = pc + 2; } else if (riscv_insn_is_c_bnez(op_code)) { rs1_num = decode_register_index_short(op_code, RVC_C1_RS1_OPOFF); if (rs1_num && regs_ptr[rs1_num] != 0) *next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc; else *next_addr = pc + 2; } else { *next_addr = pc + 2; } } else { if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) { bool result = false; long imm = RV_EXTRACT_BTYPE_IMM(op_code); unsigned long rs1_val = 0, rs2_val = 0; rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF); rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF); if (rs1_num) rs1_val = regs_ptr[rs1_num]; if (rs2_num) rs2_val = regs_ptr[rs2_num]; if (riscv_insn_is_beq(op_code)) result = (rs1_val == rs2_val) ? true : false; else if (riscv_insn_is_bne(op_code)) result = (rs1_val != rs2_val) ? true : false; else if (riscv_insn_is_blt(op_code)) result = ((long)rs1_val < (long)rs2_val) ? true : false; else if (riscv_insn_is_bge(op_code)) result = ((long)rs1_val >= (long)rs2_val) ? true : false; else if (riscv_insn_is_bltu(op_code)) result = (rs1_val < rs2_val) ? true : false; else if (riscv_insn_is_bgeu(op_code)) result = (rs1_val >= rs2_val) ? 
true : false; if (result) *next_addr = imm + pc; else *next_addr = pc + 4; } else if (riscv_insn_is_jal(op_code)) { *next_addr = RV_EXTRACT_JTYPE_IMM(op_code) + pc; } else if (riscv_insn_is_jalr(op_code)) { rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF); if (rs1_num) *next_addr = ((unsigned long *)regs)[rs1_num]; *next_addr += RV_EXTRACT_ITYPE_IMM(op_code); } else if (riscv_insn_is_sret(op_code)) { *next_addr = pc; } else { *next_addr = pc + 4; } } return 0; } static int do_single_step(struct pt_regs *regs) { /* Determine where the target instruction will send us to */ unsigned long addr = 0; int error = get_step_address(regs, &addr); if (error) return error; /* Store the op code in the stepped address */ error = get_kernel_nofault(stepped_opcode, (void *)addr); if (error) return error; stepped_address = addr; /* Replace the op code with the break instruction */ error = copy_to_kernel_nofault((void *)stepped_address, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); /* Flush and return */ if (!error) { flush_icache_range(addr, addr + BREAK_INSTR_SIZE); kgdb_single_step = 1; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } else { stepped_address = 0; stepped_opcode = 0; } return error; } /* Undo a single step */ static void undo_single_step(struct pt_regs *regs) { if (stepped_opcode != 0) { copy_to_kernel_nofault((void *)stepped_address, (void *)&stepped_opcode, BREAK_INSTR_SIZE); flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE); } stepped_address = 0; stepped_opcode = 0; kgdb_single_step = 0; atomic_set(&kgdb_cpu_doing_single_step, -1); } struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { {DBG_REG_ZERO, GDB_SIZEOF_REG, -1}, {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)}, {DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)}, {DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)}, {DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)}, {DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)}, {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)}, {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)}, {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)}, {DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)}, {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)}, {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)}, {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)}, {DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)}, {DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)}, {DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)}, {DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)}, {DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)}, {DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)}, {DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)}, {DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)}, {DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)}, {DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)}, {DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)}, {DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)}, {DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)}, {DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)}, {DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)}, {DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)}, {DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)}, {DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)}, {DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct 
pt_regs, t6)}, {DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)}, {DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)}, {DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)}, {DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)}, }; char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); else memset(mem, 0, dbg_reg_def[regno].size); return dbg_reg_def[regno].name; } int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if (regno >= DBG_MAX_REG_NUM || regno < 0) return -EINVAL; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { /* Initialize to zero */ memset((char *)gdb_regs, 0, NUMREGBYTES); gdb_regs[DBG_REG_SP_OFF] = task->thread.sp; gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0]; gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1]; gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2]; gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3]; gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4]; gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5]; gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6]; gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7]; gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8]; gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10]; gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11]; gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra; } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) { regs->epc = pc; } void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, char *remcom_out_buffer) { if (!strncmp(remcom_in_buffer, gdb_xfer_read_target, sizeof(gdb_xfer_read_target))) strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc); else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml, sizeof(gdb_xfer_read_cpuxml))) strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml); } static inline void kgdb_arch_update_addr(struct pt_regs *regs, char *remcom_in_buffer) { unsigned long addr; char *ptr; ptr = &remcom_in_buffer[1]; if (kgdb_hex2long(&ptr, &addr)) regs->epc = addr; } int kgdb_arch_handle_exception(int vector, int signo, int err_code, char *remcom_in_buffer, char *remcom_out_buffer, struct pt_regs *regs) { int err = 0; undo_single_step(regs); switch (remcom_in_buffer[0]) { case 'c': case 'D': case 'k': if (remcom_in_buffer[0] == 'c') kgdb_arch_update_addr(regs, remcom_in_buffer); break; case 's': kgdb_arch_update_addr(regs, remcom_in_buffer); err = do_single_step(regs); break; default: err = -1; } return err; } static int kgdb_riscv_kgdbbreak(unsigned long addr) { if (stepped_address == addr) return KGDB_SW_SINGLE_STEP; if (atomic_read(&kgdb_setting_breakpoint)) if (addr == (unsigned long)&kgdb_compiled_break) return KGDB_COMPILED_BREAK; return kgdb_has_hit_break(addr); } static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { struct die_args *args = (struct die_args *)ptr; struct pt_regs *regs = args->regs; unsigned long flags; int type; if (user_mode(regs)) return NOTIFY_DONE; type = kgdb_riscv_kgdbbreak(regs->epc); if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP) return NOTIFY_DONE; local_irq_save(flags); if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 
0 : 1, args->signr, cmd, regs)) return NOTIFY_DONE; if (type == KGDB_COMPILED_BREAK) regs->epc += 4; local_irq_restore(flags); return NOTIFY_STOP; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_riscv_notify, }; int kgdb_arch_init(void) { register_die_notifier(&kgdb_notifier); return 0; } void kgdb_arch_exit(void) { unregister_die_notifier(&kgdb_notifier); } /* * Global data */ #ifdef CONFIG_RISCV_ISA_C const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x02, 0x90}, /* c.ebreak */ }; #else const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00}, /* ebreak */ }; #endif
linux-master
arch/riscv/kernel/kgdb.c
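/*
 * Illustrative sketch (not part of kgdb.c): get_step_address() computes branch
 * targets by decoding instructions in software. The standalone helper below
 * reproduces the B-type immediate reassembly used for conditional branches so
 * the scrambled bit layout is easier to follow; the function name and the
 * sample values are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t btype_imm(uint32_t insn)
{
	uint32_t imm = (((insn >> 8) & 0xf) << 1) |	/* imm[4:1]  <- bits 11:8  */
		       (((insn >> 25) & 0x3f) << 5) |	/* imm[10:5] <- bits 30:25 */
		       (((insn >> 7) & 0x1) << 11) |	/* imm[11]   <- bit 7      */
		       (((insn >> 31) & 0x1) << 12);	/* imm[12]   <- bit 31     */

	/* Sign-extend the 13-bit immediate. */
	return (int32_t)(imm << 19) >> 19;
}

int main(void)
{
	/* "beq a0, a1, -8" encodes as 0xfeb50ce3. */
	uint32_t insn = 0xfeb50ce3;
	uint64_t pc = 0x80000010;

	printf("taken target = 0x%llx\n",
	       (unsigned long long)(pc + (int64_t)btype_imm(insn)));
	return 0;
}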
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (WARN_ON_ONCE((u32)idx >= PERF_REG_RISCV_MAX))
		return 0;

	return ((unsigned long *)regs)[idx];
}

#define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1))

int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
#if __riscv_xlen == 64
	return PERF_SAMPLE_REGS_ABI_64;
#else
	return PERF_SAMPLE_REGS_ABI_32;
#endif
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
linux-master
arch/riscv/kernel/perf_regs.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/pgtable.h>
#include <asm/soc.h>

/*
 * This is called extremely early, before parse_dtb(), to allow initializing
 * SoC hardware before memory or any device driver initialization.
 */
void __init soc_early_init(void)
{
	void (*early_fn)(const void *fdt);
	const struct of_device_id *s;
	const void *fdt = dtb_early_va;

	for (s = (void *)&__soc_early_init_table_start;
	     (void *)s < (void *)&__soc_early_init_table_end; s++) {
		if (!fdt_node_check_compatible(fdt, 0, s->compatible)) {
			early_fn = s->data;
			early_fn(fdt);
			return;
		}
	}
}
linux-master
arch/riscv/kernel/soc.c
// SPDX-License-Identifier: GPL-2.0-only /* * SMP initialisation and IPI support * Based on arch/arm64/kernel/smp.c * * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California * Copyright (C) 2017 SiFive */ #include <linux/cpu.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kexec.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/smp.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/irq_work.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/cpu_ops.h> enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_CPU_CRASH_STOP, IPI_IRQ_WORK, IPI_TIMER, IPI_MAX }; unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = { [0 ... NR_CPUS-1] = INVALID_HARTID }; void __init smp_setup_processor_id(void) { cpuid_to_hartid_map(0) = boot_cpu_hartid; } static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev); static int ipi_virq_base __ro_after_init; static int nr_ipi __ro_after_init = IPI_MAX; static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly; int riscv_hartid_to_cpuid(unsigned long hartid) { int i; for (i = 0; i < NR_CPUS; i++) if (cpuid_to_hartid_map(i) == hartid) return i; return -ENOENT; } static void ipi_stop(void) { set_cpu_online(smp_processor_id(), false); while (1) wait_for_interrupt(); } #ifdef CONFIG_KEXEC_CORE static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0); static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) { crash_save_cpu(regs, cpu); atomic_dec(&waiting_for_crash_ipi); local_irq_disable(); #ifdef CONFIG_HOTPLUG_CPU if (cpu_has_hotplug(cpu)) cpu_ops[cpu]->cpu_stop(); #endif for(;;) wait_for_interrupt(); } #else static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) { unreachable(); } #endif static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) { __ipi_send_mask(ipi_desc[op], mask); } static void send_ipi_single(int cpu, enum ipi_message_type op) { __ipi_send_mask(ipi_desc[op], cpumask_of(cpu)); } #ifdef CONFIG_IRQ_WORK void arch_irq_work_raise(void) { send_ipi_single(smp_processor_id(), IPI_IRQ_WORK); } #endif static irqreturn_t handle_IPI(int irq, void *data) { int ipi = irq - ipi_virq_base; switch (ipi) { case IPI_RESCHEDULE: scheduler_ipi(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CPU_STOP: ipi_stop(); break; case IPI_CPU_CRASH_STOP: ipi_cpu_crash_stop(smp_processor_id(), get_irq_regs()); break; case IPI_IRQ_WORK: irq_work_run(); break; #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: tick_receive_broadcast(); break; #endif default: pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi); break; } return IRQ_HANDLED; } void riscv_ipi_enable(void) { int i; if (WARN_ON_ONCE(!ipi_virq_base)) return; for (i = 0; i < nr_ipi; i++) enable_percpu_irq(ipi_virq_base + i, 0); } void riscv_ipi_disable(void) { int i; if (WARN_ON_ONCE(!ipi_virq_base)) return; for (i = 0; i < nr_ipi; i++) disable_percpu_irq(ipi_virq_base + i); } bool riscv_ipi_have_virq_range(void) { return (ipi_virq_base) ? 
true : false; } DEFINE_STATIC_KEY_FALSE(riscv_ipi_for_rfence); EXPORT_SYMBOL_GPL(riscv_ipi_for_rfence); void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence) { int i, err; if (WARN_ON(ipi_virq_base)) return; WARN_ON(nr < IPI_MAX); nr_ipi = min(nr, IPI_MAX); ipi_virq_base = virq; /* Request IPIs */ for (i = 0; i < nr_ipi; i++) { err = request_percpu_irq(ipi_virq_base + i, handle_IPI, "IPI", &ipi_dummy_dev); WARN_ON(err); ipi_desc[i] = irq_to_desc(ipi_virq_base + i); irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN); } /* Enabled IPIs for boot CPU immediately */ riscv_ipi_enable(); /* Update RFENCE static key */ if (use_for_rfence) static_branch_enable(&riscv_ipi_for_rfence); else static_branch_disable(&riscv_ipi_for_rfence); } static const char * const ipi_names[] = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNC] = "Function call interrupts", [IPI_CPU_STOP] = "CPU stop interrupts", [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", [IPI_IRQ_WORK] = "IRQ work interrupts", [IPI_TIMER] = "Timer broadcast interrupts", }; void show_ipi_stats(struct seq_file *p, int prec) { unsigned int cpu, i; for (i = 0; i < IPI_MAX; i++) { seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : ""); for_each_online_cpu(cpu) seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu)); seq_printf(p, " %s\n", ipi_names[i]); } } void arch_send_call_function_ipi_mask(struct cpumask *mask) { send_ipi_mask(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { send_ipi_single(cpu, IPI_CALL_FUNC); } #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST void tick_broadcast(const struct cpumask *mask) { send_ipi_mask(mask, IPI_TIMER); } #endif void smp_send_stop(void) { unsigned long timeout; if (num_online_cpus() > 1) { cpumask_t mask; cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); if (system_state <= SYSTEM_RUNNING) pr_crit("SMP: stopping secondary CPUs\n"); send_ipi_mask(&mask, IPI_CPU_STOP); } /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; while (num_online_cpus() > 1 && timeout--) udelay(1); if (num_online_cpus() > 1) pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(cpu_online_mask)); } #ifdef CONFIG_KEXEC_CORE /* * The number of CPUs online, not counting this CPU (which may not be * fully online and so not counted in num_online_cpus()). */ static inline unsigned int num_other_online_cpus(void) { unsigned int this_cpu_online = cpu_online(smp_processor_id()); return num_online_cpus() - this_cpu_online; } void crash_smp_send_stop(void) { static int cpus_stopped; cpumask_t mask; unsigned long timeout; /* * This function can be called twice in panic path, but obviously * we execute this only once. */ if (cpus_stopped) return; cpus_stopped = 1; /* * If this cpu is the only one alive at this point in time, online or * not, there are no stop messages to be sent around, so just back out. 
*/ if (num_other_online_cpus() == 0) return; cpumask_copy(&mask, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &mask); atomic_set(&waiting_for_crash_ipi, num_other_online_cpus()); pr_crit("SMP: stopping secondary CPUs\n"); send_ipi_mask(&mask, IPI_CPU_CRASH_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--) udelay(1); if (atomic_read(&waiting_for_crash_ipi) > 0) pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", cpumask_pr_args(&mask)); } bool smp_crash_stop_failed(void) { return (atomic_read(&waiting_for_crash_ipi) > 0); } #endif void arch_smp_send_reschedule(int cpu) { send_ipi_single(cpu, IPI_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
linux-master
arch/riscv/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2012 Regents of the University of California * Copyright (C) 2014 Darius Rad <[email protected]> * Copyright (C) 2017 SiFive */ #include <linux/syscalls.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> #include <asm/hwprobe.h> #include <asm/sbi.h> #include <asm/vector.h> #include <asm/switch_to.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include <asm-generic/mman-common.h> #include <vdso/vsyscall.h> static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t offset, unsigned long page_shift_offset) { if (unlikely(offset & (~PAGE_MASK >> page_shift_offset))) return -EINVAL; return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> (PAGE_SHIFT - page_shift_offset)); } #ifdef CONFIG_64BIT SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) { return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0); } #endif #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) { /* * Note that the shift for mmap2 is constant (12), * regardless of PAGE_SIZE */ return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12); } #endif /* * Allows the instruction cache to be flushed from userspace. Despite RISC-V * having a direct 'fence.i' instruction available to userspace (which we * can't trap!), that's not actually viable when running on Linux because the * kernel might schedule a process on another hart. There is no way for * userspace to handle this without invoking the kernel (as it doesn't know the * thread->hart mappings), so we've defined a RISC-V specific system call to * flush the instruction cache. * * sys_riscv_flush_icache() is defined to flush the instruction cache over an * address range, with the flush applying to either all threads or just the * caller. We don't currently do anything with the address range, that's just * in there for forwards compatibility. */ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, uintptr_t, flags) { /* Check the reserved flags. */ if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) return -EINVAL; flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); return 0; } /* * The hwprobe interface, for allowing userspace to probe to see which features * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more * details. */ static void hwprobe_arch_id(struct riscv_hwprobe *pair, const struct cpumask *cpus) { u64 id = -1ULL; bool first = true; int cpu; for_each_cpu(cpu, cpus) { u64 cpu_id; switch (pair->key) { case RISCV_HWPROBE_KEY_MVENDORID: cpu_id = riscv_cached_mvendorid(cpu); break; case RISCV_HWPROBE_KEY_MIMPID: cpu_id = riscv_cached_mimpid(cpu); break; case RISCV_HWPROBE_KEY_MARCHID: cpu_id = riscv_cached_marchid(cpu); break; } if (first) { id = cpu_id; first = false; } /* * If there's a mismatch for the given set, return -1 in the * value. 
*/ if (id != cpu_id) { id = -1ULL; break; } } pair->value = id; } static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, const struct cpumask *cpus) { int cpu; u64 missing = 0; pair->value = 0; if (has_fpu()) pair->value |= RISCV_HWPROBE_IMA_FD; if (riscv_isa_extension_available(NULL, c)) pair->value |= RISCV_HWPROBE_IMA_C; if (has_vector()) pair->value |= RISCV_HWPROBE_IMA_V; /* * Loop through and record extensions that 1) anyone has, and 2) anyone * doesn't have. */ for_each_cpu(cpu, cpus) { struct riscv_isainfo *isainfo = &hart_isa[cpu]; if (riscv_isa_extension_available(isainfo->isa, ZBA)) pair->value |= RISCV_HWPROBE_EXT_ZBA; else missing |= RISCV_HWPROBE_EXT_ZBA; if (riscv_isa_extension_available(isainfo->isa, ZBB)) pair->value |= RISCV_HWPROBE_EXT_ZBB; else missing |= RISCV_HWPROBE_EXT_ZBB; if (riscv_isa_extension_available(isainfo->isa, ZBS)) pair->value |= RISCV_HWPROBE_EXT_ZBS; else missing |= RISCV_HWPROBE_EXT_ZBS; } /* Now turn off reporting features if any CPU is missing it. */ pair->value &= ~missing; } static u64 hwprobe_misaligned(const struct cpumask *cpus) { int cpu; u64 perf = -1ULL; for_each_cpu(cpu, cpus) { int this_perf = per_cpu(misaligned_access_speed, cpu); if (perf == -1ULL) perf = this_perf; if (perf != this_perf) { perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; break; } } if (perf == -1ULL) return RISCV_HWPROBE_MISALIGNED_UNKNOWN; return perf; } static void hwprobe_one_pair(struct riscv_hwprobe *pair, const struct cpumask *cpus) { switch (pair->key) { case RISCV_HWPROBE_KEY_MVENDORID: case RISCV_HWPROBE_KEY_MARCHID: case RISCV_HWPROBE_KEY_MIMPID: hwprobe_arch_id(pair, cpus); break; /* * The kernel already assumes that the base single-letter ISA * extensions are supported on all harts, and only supports the * IMA base, so just cheat a bit here and tell that to * userspace. */ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; break; case RISCV_HWPROBE_KEY_IMA_EXT_0: hwprobe_isa_ext0(pair, cpus); break; case RISCV_HWPROBE_KEY_CPUPERF_0: pair->value = hwprobe_misaligned(cpus); break; /* * For forward compatibility, unknown keys don't fail the whole * call, but get their element key set to -1 and value set to 0 * indicating they're unrecognized. */ default: pair->key = -1; pair->value = 0; break; } } static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, size_t pair_count, size_t cpu_count, unsigned long __user *cpus_user, unsigned int flags) { size_t out; int ret; cpumask_t cpus; /* Check the reserved flags. */ if (flags != 0) return -EINVAL; /* * The interface supports taking in a CPU mask, and returns values that * are consistent across that mask. Allow userspace to specify NULL and * 0 as a shortcut to all online CPUs. */ cpumask_clear(&cpus); if (!cpu_count && !cpus_user) { cpumask_copy(&cpus, cpu_online_mask); } else { if (cpu_count > cpumask_size()) cpu_count = cpumask_size(); ret = copy_from_user(&cpus, cpus_user, cpu_count); if (ret) return -EFAULT; /* * Userspace must provide at least one online CPU, without that * there's no way to define what is supported. 
*/ cpumask_and(&cpus, &cpus, cpu_online_mask); if (cpumask_empty(&cpus)) return -EINVAL; } for (out = 0; out < pair_count; out++, pairs++) { struct riscv_hwprobe pair; if (get_user(pair.key, &pairs->key)) return -EFAULT; pair.value = 0; hwprobe_one_pair(&pair, &cpus); ret = put_user(pair.key, &pairs->key); if (ret == 0) ret = put_user(pair.value, &pairs->value); if (ret) return -EFAULT; } return 0; } #ifdef CONFIG_MMU static int __init init_hwprobe_vdso_data(void) { struct vdso_data *vd = __arch_get_k_vdso_data(); struct arch_vdso_data *avd = &vd->arch_data; u64 id_bitsmash = 0; struct riscv_hwprobe pair; int key; /* * Initialize vDSO data with the answers for the "all CPUs" case, to * save a syscall in the common case. */ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { pair.key = key; hwprobe_one_pair(&pair, cpu_online_mask); WARN_ON_ONCE(pair.key < 0); avd->all_cpu_hwprobe_values[key] = pair.value; /* * Smash together the vendor, arch, and impl IDs to see if * they're all 0 or any negative. */ if (key <= RISCV_HWPROBE_KEY_MIMPID) id_bitsmash |= pair.value; } /* * If the arch, vendor, and implementation ID are all the same across * all harts, then assume all CPUs are the same, and allow the vDSO to * answer queries for arbitrary masks. However if all values are 0 (not * populated) or any value returns -1 (varies across CPUs), then the * vDSO should defer to the kernel for exotic cpu masks. */ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; return 0; } arch_initcall_sync(init_hwprobe_vdso_data); #endif /* CONFIG_MMU */ SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, size_t, pair_count, size_t, cpu_count, unsigned long __user *, cpus, unsigned int, flags) { return do_riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); } /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) { return -ENOSYS; }
linux-master
arch/riscv/kernel/sys_riscv.c
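/*
 * Illustrative userspace sketch (not part of sys_riscv.c): the riscv_hwprobe()
 * syscall implemented above is meant to be queried from programs. This sketch
 * assumes recent RISC-V kernel uapi headers are installed so that
 * <asm/hwprobe.h> and __NR_riscv_hwprobe are available; on other setups the
 * structure and constants would have to be supplied manually.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_MVENDORID },
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
	};
	/* cpu_count == 0 and cpus == NULL means "all online CPUs". */
	long ret = syscall(__NR_riscv_hwprobe, pairs,
			   sizeof(pairs) / sizeof(pairs[0]), 0, NULL, 0);

	if (ret) {
		perror("riscv_hwprobe");
		return 1;
	}

	for (unsigned int i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++)
		printf("key %lld -> value 0x%llx\n",
		       (long long)pairs[i].key,
		       (unsigned long long)pairs[i].value);

	return 0;
}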
// SPDX-License-Identifier: GPL-2.0-only /* * RISC-V Specific Low-Level ACPI Boot Support * * Copyright (C) 2013-2014, Linaro Ltd. * Author: Al Stone <[email protected]> * Author: Graeme Gregory <[email protected]> * Author: Hanjun Guo <[email protected]> * Author: Tomasz Nowicki <[email protected]> * Author: Naresh Bhat <[email protected]> * * Copyright (C) 2021-2023, Ventana Micro Systems Inc. * Author: Sunil V L <[email protected]> */ #include <linux/acpi.h> #include <linux/io.h> #include <linux/pci.h> #include <linux/efi.h> int acpi_noirq = 1; /* skip ACPI IRQ initialization */ int acpi_disabled = 1; EXPORT_SYMBOL(acpi_disabled); int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ EXPORT_SYMBOL(acpi_pci_disabled); static bool param_acpi_off __initdata; static bool param_acpi_on __initdata; static bool param_acpi_force __initdata; static struct acpi_madt_rintc cpu_madt_rintc[NR_CPUS]; static int __init parse_acpi(char *arg) { if (!arg) return -EINVAL; /* "acpi=off" disables both ACPI table parsing and interpreter */ if (strcmp(arg, "off") == 0) param_acpi_off = true; else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */ param_acpi_on = true; else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */ param_acpi_force = true; else return -EINVAL; /* Core will print when we return error */ return 0; } early_param("acpi", parse_acpi); /* * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity * checks on it * * Return 0 on success, <0 on failure */ static int __init acpi_fadt_sanity_check(void) { struct acpi_table_header *table; struct acpi_table_fadt *fadt; acpi_status status; int ret = 0; /* * FADT is required on riscv; retrieve it to check its presence * and carry out revision and ACPI HW reduced compliancy tests */ status = acpi_get_table(ACPI_SIG_FADT, 0, &table); if (ACPI_FAILURE(status)) { const char *msg = acpi_format_exception(status); pr_err("Failed to get FADT table, %s\n", msg); return -ENODEV; } fadt = (struct acpi_table_fadt *)table; /* * The revision in the table header is the FADT's Major revision. The * FADT also has a minor revision, which is stored in the FADT itself. * * TODO: Currently, we check for 6.5 as the minimum version to check * for HW_REDUCED flag. However, once RISC-V updates are released in * the ACPI spec, we need to update this check for exact minor revision */ if (table->revision < 6 || (table->revision == 6 && fadt->minor_revision < 5)) pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 6.5+\n", table->revision, fadt->minor_revision); if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) { pr_err("FADT not ACPI hardware reduced compliant\n"); ret = -EINVAL; } /* * acpi_get_table() creates FADT table mapping that * should be released after parsing and before resuming boot */ acpi_put_table(table); return ret; } /* * acpi_boot_table_init() called from setup_arch(), always. * 1. find RSDP and get its address, and then find XSDT * 2. extract all tables and checksums them all * 3. check ACPI FADT HW reduced flag * * We can parse ACPI boot-time tables such as MADT after * this function is called. 
* * On return ACPI is enabled if either: * * - ACPI tables are initialized and sanity checks passed * - acpi=force was passed in the command line and ACPI was not disabled * explicitly through acpi=off command line parameter * * ACPI is disabled on function return otherwise */ void __init acpi_boot_table_init(void) { /* * Enable ACPI instead of device tree unless * - ACPI has been disabled explicitly (acpi=off), or * - firmware has not populated ACPI ptr in EFI system table * and ACPI has not been [force] enabled (acpi=on|force) */ if (param_acpi_off || (!param_acpi_on && !param_acpi_force && efi.acpi20 == EFI_INVALID_TABLE_ADDR)) return; /* * ACPI is disabled at this point. Enable it in order to parse * the ACPI tables and carry out sanity checks */ enable_acpi(); /* * If ACPI tables are initialized and FADT sanity checks passed, * leave ACPI enabled and carry on booting; otherwise disable ACPI * on initialization error. * If acpi=force was passed on the command line it forces ACPI * to be enabled even if its initialization failed. */ if (acpi_table_init() || acpi_fadt_sanity_check()) { pr_err("Failed to init ACPI tables\n"); if (!param_acpi_force) disable_acpi(); } } static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_rintc *rintc = (struct acpi_madt_rintc *)header; int cpuid; if (!(rintc->flags & ACPI_MADT_ENABLED)) return 0; cpuid = riscv_hartid_to_cpuid(rintc->hart_id); /* * When CONFIG_SMP is disabled, mapping won't be created for * all cpus. * CPUs more than num_possible_cpus, will be ignored. */ if (cpuid >= 0 && cpuid < num_possible_cpus()) cpu_madt_rintc[cpuid] = *rintc; return 0; } /* * Instead of parsing (and freeing) the ACPI table, cache * the RINTC structures since they are frequently used * like in cpuinfo. */ void __init acpi_init_rintc_map(void) { if (acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_madt_rintc, 0) <= 0) { pr_err("No valid RINTC entries exist\n"); BUG(); } } struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) { return &cpu_madt_rintc[cpu]; } u32 get_acpi_id_for_cpu(int cpu) { return acpi_cpu_get_madt_rintc(cpu)->uid; } /* * __acpi_map_table() will be called before paging_init(), so early_ioremap() * or early_memremap() should be called here to for ACPI table mapping. */ void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { if (!size) return NULL; return early_ioremap(phys, size); } void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { if (!map || !size) return; early_iounmap(map, size); } void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) { return (void __iomem *)memremap(phys, size, MEMREMAP_WB); } #ifdef CONFIG_PCI /* * These interfaces are defined just to enable building ACPI core. * TODO: Update it with actual implementation when external interrupt * controller support is added in RISC-V ACPI. */ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 *val) { return PCIBIOS_DEVICE_NOT_FOUND; } int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, int reg, int len, u32 val) { return PCIBIOS_DEVICE_NOT_FOUND; } int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) { return -1; } struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { return NULL; } #endif /* CONFIG_PCI */
linux-master
arch/riscv/kernel/acpi.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kprobes.h>

/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe *p;
	struct pt_regs *regs;
	struct kprobe_ctlblk *kcb;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	regs = ftrace_get_regs(fregs);
	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_ip = instruction_pointer(regs);

		instruction_pointer_set(regs, ip);

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			/*
			 * Emulate singlestep (and also recover regs->pc)
			 * as if there is a nop
			 */
			instruction_pointer_set(regs,
				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
			if (unlikely(p->post_handler)) {
				kcb->kprobe_status = KPROBE_HIT_SSDONE;
				p->post_handler(p, regs, 0);
			}
			instruction_pointer_set(regs, orig_ip);
		}

		/*
		 * If pre_handler returns !0, it changes regs->pc. We have to
		 * skip emulating post_handler.
		 */
		__this_cpu_write(current_kprobe, NULL);
	}
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.api.insn = NULL;
	return 0;
}
linux-master
arch/riscv/kernel/probes/ftrace.c
// SPDX-License-Identifier: GPL-2.0+ #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/kprobes.h> #include "decode-insn.h" #include "simulate-insn.h" static inline bool rv_insn_reg_get_val(struct pt_regs *regs, u32 index, unsigned long *ptr) { if (index == 0) *ptr = 0; else if (index <= 31) *ptr = *((unsigned long *)regs + index); else return false; return true; } static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index, unsigned long val) { if (index == 0) return false; else if (index <= 31) *((unsigned long *)regs + index) = val; else return false; return true; } bool __kprobes simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* * 31 30 21 20 19 12 11 7 6 0 * imm [20] | imm[10:1] | imm[11] | imm[19:12] | rd | opcode * 1 10 1 8 5 JAL/J */ bool ret; u32 imm; u32 index = (opcode >> 7) & 0x1f; ret = rv_insn_reg_set_val(regs, index, addr + 4); if (!ret) return ret; imm = ((opcode >> 21) & 0x3ff) << 1; imm |= ((opcode >> 20) & 0x1) << 11; imm |= ((opcode >> 12) & 0xff) << 12; imm |= ((opcode >> 31) & 0x1) << 20; instruction_pointer_set(regs, addr + sign_extend32((imm), 20)); return ret; } bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* * 31 20 19 15 14 12 11 7 6 0 * offset[11:0] | rs1 | 010 | rd | opcode * 12 5 3 5 JALR/JR */ bool ret; unsigned long base_addr; u32 imm = (opcode >> 20) & 0xfff; u32 rd_index = (opcode >> 7) & 0x1f; u32 rs1_index = (opcode >> 15) & 0x1f; ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); if (!ret) return ret; ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); if (!ret) return ret; instruction_pointer_set(regs, (base_addr + sign_extend32((imm), 11))&~1); return ret; } #define auipc_rd_idx(opcode) \ ((opcode >> 7) & 0x1f) #define auipc_imm(opcode) \ ((((opcode) >> 12) & 0xfffff) << 12) #if __riscv_xlen == 64 #define auipc_offset(opcode) sign_extend64(auipc_imm(opcode), 31) #elif __riscv_xlen == 32 #define auipc_offset(opcode) auipc_imm(opcode) #else #error "Unexpected __riscv_xlen" #endif bool __kprobes simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* * auipc instruction: * 31 12 11 7 6 0 * | imm[31:12] | rd | opcode | * 20 5 7 */ u32 rd_idx = auipc_rd_idx(opcode); unsigned long rd_val = addr + auipc_offset(opcode); if (!rv_insn_reg_set_val(regs, rd_idx, rd_val)) return false; instruction_pointer_set(regs, addr + 4); return true; } #define branch_rs1_idx(opcode) \ (((opcode) >> 15) & 0x1f) #define branch_rs2_idx(opcode) \ (((opcode) >> 20) & 0x1f) #define branch_funct3(opcode) \ (((opcode) >> 12) & 0x7) #define branch_imm(opcode) \ (((((opcode) >> 8) & 0xf ) << 1) | \ ((((opcode) >> 25) & 0x3f) << 5) | \ ((((opcode) >> 7) & 0x1 ) << 11) | \ ((((opcode) >> 31) & 0x1 ) << 12)) #define branch_offset(opcode) \ sign_extend32((branch_imm(opcode)), 12) bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* * branch instructions: * 31 30 25 24 20 19 15 14 12 11 8 7 6 0 * | imm[12] | imm[10:5] | rs2 | rs1 | funct3 | imm[4:1] | imm[11] | opcode | * 1 6 5 5 3 4 1 7 * imm[12|10:5] rs2 rs1 000 imm[4:1|11] 1100011 BEQ * imm[12|10:5] rs2 rs1 001 imm[4:1|11] 1100011 BNE * imm[12|10:5] rs2 rs1 100 imm[4:1|11] 1100011 BLT * imm[12|10:5] rs2 rs1 101 imm[4:1|11] 1100011 BGE * imm[12|10:5] rs2 rs1 110 imm[4:1|11] 1100011 BLTU * imm[12|10:5] rs2 rs1 111 imm[4:1|11] 1100011 BGEU */ s32 offset; s32 offset_tmp; unsigned long rs1_val; unsigned long rs2_val; if (!rv_insn_reg_get_val(regs, branch_rs1_idx(opcode), &rs1_val) || 
!rv_insn_reg_get_val(regs, branch_rs2_idx(opcode), &rs2_val)) return false; offset_tmp = branch_offset(opcode); switch (branch_funct3(opcode)) { case RVG_FUNCT3_BEQ: offset = (rs1_val == rs2_val) ? offset_tmp : 4; break; case RVG_FUNCT3_BNE: offset = (rs1_val != rs2_val) ? offset_tmp : 4; break; case RVG_FUNCT3_BLT: offset = ((long)rs1_val < (long)rs2_val) ? offset_tmp : 4; break; case RVG_FUNCT3_BGE: offset = ((long)rs1_val >= (long)rs2_val) ? offset_tmp : 4; break; case RVG_FUNCT3_BLTU: offset = (rs1_val < rs2_val) ? offset_tmp : 4; break; case RVG_FUNCT3_BGEU: offset = (rs1_val >= rs2_val) ? offset_tmp : 4; break; default: return false; } instruction_pointer_set(regs, addr + offset); return true; } bool __kprobes simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs) { /* * 15 13 12 2 1 0 * | funct3 | offset[11|4|9:8|10|6|7|3:1|5] | opcode | * 3 11 2 */ s32 offset; offset = ((opcode >> 3) & 0x7) << 1; offset |= ((opcode >> 11) & 0x1) << 4; offset |= ((opcode >> 2) & 0x1) << 5; offset |= ((opcode >> 7) & 0x1) << 6; offset |= ((opcode >> 6) & 0x1) << 7; offset |= ((opcode >> 9) & 0x3) << 8; offset |= ((opcode >> 8) & 0x1) << 10; offset |= ((opcode >> 12) & 0x1) << 11; instruction_pointer_set(regs, addr + sign_extend32(offset, 11)); return true; } static bool __kprobes simulate_c_jr_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs, bool is_jalr) { /* * 15 12 11 7 6 2 1 0 * | funct4 | rs1 | rs2 | op | * 4 5 5 2 */ unsigned long jump_addr; u32 rs1 = (opcode >> 7) & 0x1f; if (rs1 == 0) /* C.JR is only valid when rs1 != x0 */ return false; if (!rv_insn_reg_get_val(regs, rs1, &jump_addr)) return false; if (is_jalr && !rv_insn_reg_set_val(regs, 1, addr + 2)) return false; instruction_pointer_set(regs, jump_addr); return true; } bool __kprobes simulate_c_jr(u32 opcode, unsigned long addr, struct pt_regs *regs) { return simulate_c_jr_jalr(opcode, addr, regs, false); } bool __kprobes simulate_c_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs) { return simulate_c_jr_jalr(opcode, addr, regs, true); } static bool __kprobes simulate_c_bnez_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs, bool is_bnez) { /* * 15 13 12 10 9 7 6 2 1 0 * | funct3 | offset[8|4:3] | rs1' | offset[7:6|2:1|5] | op | * 3 3 3 5 2 */ s32 offset; u32 rs1; unsigned long rs1_val; rs1 = 0x8 | ((opcode >> 7) & 0x7); if (!rv_insn_reg_get_val(regs, rs1, &rs1_val)) return false; if ((rs1_val != 0 && is_bnez) || (rs1_val == 0 && !is_bnez)) { offset = ((opcode >> 3) & 0x3) << 1; offset |= ((opcode >> 10) & 0x3) << 3; offset |= ((opcode >> 2) & 0x1) << 5; offset |= ((opcode >> 5) & 0x3) << 6; offset |= ((opcode >> 12) & 0x1) << 8; offset = sign_extend32(offset, 8); } else { offset = 2; } instruction_pointer_set(regs, addr + offset); return true; } bool __kprobes simulate_c_bnez(u32 opcode, unsigned long addr, struct pt_regs *regs) { return simulate_c_bnez_beqz(opcode, addr, regs, true); } bool __kprobes simulate_c_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs) { return simulate_c_bnez_beqz(opcode, addr, regs, false); }
linux-master
arch/riscv/kernel/probes/simulate-insn.c
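/*
 * Illustrative sketch (not part of simulate-insn.c): simulate_jal() above
 * rebuilds the scrambled J-type immediate before redirecting the pc. The
 * standalone helper below performs the same bit reassembly for a JAL
 * instruction so the encoding is easier to follow; the function name and the
 * sample values are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t jtype_imm(uint32_t insn)
{
	uint32_t imm = (((insn >> 21) & 0x3ff) << 1) |	/* imm[10:1]  <- bits 30:21 */
		       (((insn >> 20) & 0x1) << 11) |	/* imm[11]    <- bit 20     */
		       (((insn >> 12) & 0xff) << 12) |	/* imm[19:12] <- bits 19:12 */
		       (((insn >> 31) & 0x1) << 20);	/* imm[20]    <- bit 31     */

	/* Sign-extend the 21-bit immediate. */
	return (int32_t)(imm << 11) >> 11;
}

int main(void)
{
	/* "jal ra, +16" encodes as 0x010000ef. */
	uint32_t insn = 0x010000ef;
	uint64_t pc = 0x80000000;

	printf("link value = 0x%llx, target = 0x%llx\n",
	       (unsigned long long)(pc + 4),
	       (unsigned long long)(pc + (int64_t)jtype_imm(insn)));
	return 0;
}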
// SPDX-License-Identifier: GPL-2.0+

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

/* Return:
 *	INSN_REJECTED     If instruction is one not allowed to kprobe,
 *	INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
 */
enum probe_insn __kprobes
riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
{
	probe_opcode_t insn = *addr;

	/*
	 * Reject instructions list:
	 */
	RISCV_INSN_REJECTED(system,		insn);
	RISCV_INSN_REJECTED(fence,		insn);

	/*
	 * Simulate instructions list:
	 * TODO: the REJECTED ones below need to be implemented
	 */
#ifdef CONFIG_RISCV_ISA_C
	RISCV_INSN_REJECTED(c_jal,		insn);
	RISCV_INSN_REJECTED(c_ebreak,		insn);

	RISCV_INSN_SET_SIMULATE(c_j,		insn);
	RISCV_INSN_SET_SIMULATE(c_jr,		insn);
	RISCV_INSN_SET_SIMULATE(c_jalr,		insn);
	RISCV_INSN_SET_SIMULATE(c_beqz,		insn);
	RISCV_INSN_SET_SIMULATE(c_bnez,		insn);
#endif

	RISCV_INSN_SET_SIMULATE(jal,		insn);
	RISCV_INSN_SET_SIMULATE(jalr,		insn);
	RISCV_INSN_SET_SIMULATE(auipc,		insn);
	RISCV_INSN_SET_SIMULATE(branch,		insn);

	return INSN_GOOD;
}
linux-master
arch/riscv/kernel/probes/decode-insn.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic return hook for riscv.
 */

#include <linux/kprobes.h>
#include <linux/rethook.h>
#include "rethook.h"

/* This is called from arch_rethook_trampoline() */
unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
{
	return rethook_trampoline_handler(regs, regs->s0);
}
NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);

void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount)
{
	rhn->ret_addr = regs->ra;
	rhn->frame = regs->s0;

	/* replace return addr with trampoline */
	regs->ra = (unsigned long)arch_rethook_trampoline;
}
NOKPROBE_SYMBOL(arch_rethook_prepare);
linux-master
arch/riscv/kernel/probes/rethook.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/highmem.h> #include <linux/ptrace.h> #include <linux/uprobes.h> #include "decode-insn.h" #define UPROBE_TRAP_NR UINT_MAX bool is_swbp_insn(uprobe_opcode_t *insn) { #ifdef CONFIG_RISCV_ISA_C return (*insn & 0xffff) == UPROBE_SWBP_INSN; #else return *insn == UPROBE_SWBP_INSN; #endif } unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) { return instruction_pointer(regs); } int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr) { probe_opcode_t opcode; opcode = *(probe_opcode_t *)(&auprobe->insn[0]); auprobe->insn_size = GET_INSN_LENGTH(opcode); switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) { case INSN_REJECTED: return -EINVAL; case INSN_GOOD_NO_SLOT: auprobe->simulate = true; break; case INSN_GOOD: auprobe->simulate = false; break; default: return -EINVAL; } return 0; } int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; utask->autask.saved_cause = current->thread.bad_cause; current->thread.bad_cause = UPROBE_TRAP_NR; instruction_pointer_set(regs, utask->xol_vaddr); return 0; } int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR); current->thread.bad_cause = utask->autask.saved_cause; instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size); return 0; } bool arch_uprobe_xol_was_trapped(struct task_struct *t) { if (t->thread.bad_cause != UPROBE_TRAP_NR) return true; return false; } bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) { probe_opcode_t insn; unsigned long addr; if (!auprobe->simulate) return false; insn = *(probe_opcode_t *)(&auprobe->insn[0]); addr = instruction_pointer(regs); if (auprobe->api.handler) auprobe->api.handler(insn, addr, regs); return true; } void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; current->thread.bad_cause = utask->autask.saved_cause; /* * Task has received a fatal signal, so reset back to probbed * address. */ instruction_pointer_set(regs, utask->vaddr); } bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs) { if (ctx == RP_CHECK_CHAIN_CALL) return regs->sp <= ret->stack; else return regs->sp < ret->stack; } unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) { unsigned long ra; ra = regs->ra; regs->ra = trampoline_vaddr; return ra; } int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data) { return NOTIFY_DONE; } bool uprobe_breakpoint_handler(struct pt_regs *regs) { if (uprobe_pre_sstep_notifier(regs)) return true; return false; } bool uprobe_single_step_handler(struct pt_regs *regs) { if (uprobe_post_sstep_notifier(regs)) return true; return false; } void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, void *src, unsigned long len) { /* Initialize the slot */ void *kaddr = kmap_atomic(page); void *dst = kaddr + (vaddr & ~PAGE_MASK); memcpy(dst, src, len); /* Add ebreak behind opcode to simulate singlestep */ if (vaddr) { dst += GET_INSN_LENGTH(*(probe_opcode_t *)src); *(uprobe_opcode_t *)dst = __BUG_INSN_32; } kunmap_atomic(kaddr); /* * We probably need flush_icache_user_page() but it needs vma. * This should work on most of architectures by default. 
If * architecture needs to do something different it can define * its own version of the function. */ flush_dcache_page(page); }
linux-master
arch/riscv/kernel/probes/uprobes.c
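The uprobes code above leans on two architectural facts: a RISC-V opcode whose low two bits are both set is a 32-bit instruction (anything else is a 16-bit compressed one), and the breakpoint encodings are 0x00100073 (ebreak) and 0x9002 (c.ebreak). A minimal standalone sketch of that decode follows; the helper names are invented for illustration and are not kernel APIs.

/*
 * Illustrative sketch (not kernel code): derive the 16/32-bit RISC-V
 * instruction length from the low two opcode bits and recognise the
 * software-breakpoint encodings used for probe slots.
 */
#include <stdint.h>
#include <stdbool.h>

static unsigned int insn_length(uint32_t insn)
{
	/* RVC rule: low two bits == 0b11 means a 32-bit instruction. */
	return (insn & 0x3) == 0x3 ? 4 : 2;
}

static bool is_breakpoint(uint32_t insn)
{
	if (insn_length(insn) == 2)
		return (insn & 0xffff) == 0x9002;	/* c.ebreak */
	return insn == 0x00100073;			/* ebreak  */
}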
// SPDX-License-Identifier: GPL-2.0+ #define pr_fmt(fmt) "kprobes: " fmt #include <linux/kprobes.h> #include <linux/extable.h> #include <linux/slab.h> #include <linux/stop_machine.h> #include <asm/ptrace.h> #include <linux/uaccess.h> #include <asm/sections.h> #include <asm/cacheflush.h> #include <asm/bug.h> #include <asm/patch.h> #include "decode-insn.h" DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { u32 insn = __BUG_INSN_32; unsigned long offset = GET_INSN_LENGTH(p->opcode); p->ainsn.api.restore = (unsigned long)p->addr + offset; patch_text(p->ainsn.api.insn, &p->opcode, 1); patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset), &insn, 1); } static void __kprobes arch_prepare_simulate(struct kprobe *p) { p->ainsn.api.restore = 0; } static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (p->ainsn.api.handler) p->ainsn.api.handler((u32)p->opcode, (unsigned long)p->addr, regs); post_kprobe_handler(p, kcb, regs); } static bool __kprobes arch_check_kprobe(struct kprobe *p) { unsigned long tmp = (unsigned long)p->addr - p->offset; unsigned long addr = (unsigned long)p->addr; while (tmp <= addr) { if (tmp == addr) return true; tmp += GET_INSN_LENGTH(*(u16 *)tmp); } return false; } int __kprobes arch_prepare_kprobe(struct kprobe *p) { u16 *insn = (u16 *)p->addr; if ((unsigned long)insn & 0x1) return -EILSEQ; if (!arch_check_kprobe(p)) return -EILSEQ; /* copy instruction */ p->opcode = (kprobe_opcode_t)(*insn++); if (GET_INSN_LENGTH(p->opcode) == 4) p->opcode |= (kprobe_opcode_t)(*insn) << 16; /* decode instruction */ switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { case INSN_REJECTED: /* insn not supported */ return -EINVAL; case INSN_GOOD_NO_SLOT: /* insn need simulation */ p->ainsn.api.insn = NULL; break; case INSN_GOOD: /* instruction uses slot */ p->ainsn.api.insn = get_insn_slot(); if (!p->ainsn.api.insn) return -ENOMEM; break; } /* prepare the instruction */ if (p->ainsn.api.insn) arch_prepare_ss_slot(p); else arch_prepare_simulate(p); return 0; } #ifdef CONFIG_MMU void *alloc_insn_page(void) { return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_READ_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __builtin_return_address(0)); } #endif /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ? __BUG_INSN_32 : __BUG_INSN_16; patch_text(p->addr, &insn, 1); } /* remove breakpoint from text */ void __kprobes arch_disarm_kprobe(struct kprobe *p) { patch_text(p->addr, &p->opcode, 1); } void __kprobes arch_remove_kprobe(struct kprobe *p) { } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; } static void __kprobes set_current_kprobe(struct kprobe *p) { __this_cpu_write(current_kprobe, p); } /* * Interrupts need to be disabled before single-step mode is set, and not * reenabled until after single-step mode ends. 
* Without disabling interrupt on local CPU, there is a chance of * interrupt occurrence in the period of exception return and start of * out-of-line single-step, that result in wrongly single stepping * into the interrupt handler. */ static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { kcb->saved_status = regs->status; regs->status &= ~SR_SPIE; } static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { regs->status = kcb->saved_status; } static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) { unsigned long slot; if (reenter) { save_previous_kprobe(kcb); set_current_kprobe(p); kcb->kprobe_status = KPROBE_REENTER; } else { kcb->kprobe_status = KPROBE_HIT_SS; } if (p->ainsn.api.insn) { /* prepare for single stepping */ slot = (unsigned long)p->ainsn.api.insn; /* IRQs and single stepping do not mix well. */ kprobes_save_local_irqflag(kcb, regs); instruction_pointer_set(regs, slot); } else { /* insn simulation */ arch_simulate_insn(p, regs); } } static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: case KPROBE_HIT_ACTIVE: kprobes_inc_nmissed_count(p); setup_singlestep(p, regs, kcb, 1); break; case KPROBE_HIT_SS: case KPROBE_REENTER: pr_warn("Failed to recover from reentered kprobes.\n"); dump_kprobe(p); BUG(); break; default: WARN_ON(1); return 0; } return 1; } static void __kprobes post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs) { /* return addr restore if non-branching insn */ if (cur->ainsn.api.restore != 0) regs->epc = cur->ainsn.api.restore; /* restore back original saved kprobe variables and continue */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); return; } /* call post handler */ kcb->kprobe_status = KPROBE_HIT_SSDONE; if (cur->post_handler) { /* post_handler can hit breakpoint and single step * again, so we enable D-flag for recursive exception. */ cur->post_handler(cur, regs, 0); } reset_current_kprobe(); } int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); switch (kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the ip points back to the probe address * and allow the page fault handler to continue as a * normal page fault. */ regs->epc = (unsigned long) cur->addr; BUG_ON(!instruction_pointer(regs)); if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else { kprobes_restore_local_irqflag(kcb, regs); reset_current_kprobe(); } break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * In case the user-specified fault handler returned * zero, try to fix up. 
*/ if (fixup_exception(regs)) return 1; } return 0; } bool __kprobes kprobe_breakpoint_handler(struct pt_regs *regs) { struct kprobe *p, *cur_kprobe; struct kprobe_ctlblk *kcb; unsigned long addr = instruction_pointer(regs); kcb = get_kprobe_ctlblk(); cur_kprobe = kprobe_running(); p = get_kprobe((kprobe_opcode_t *) addr); if (p) { if (cur_kprobe) { if (reenter_kprobe(p, regs, kcb)) return true; } else { /* Probe hit */ set_current_kprobe(p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; /* * If we have no pre-handler or it returned 0, we * continue with normal processing. If we have a * pre-handler and it returned non-zero, it will * modify the execution path and no need to single * stepping. Let's just reset current kprobe and exit. * * pre_handler can hit a breakpoint and can step thru * before return. */ if (!p->pre_handler || !p->pre_handler(p, regs)) setup_singlestep(p, regs, kcb, 0); else reset_current_kprobe(); } return true; } /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. * Return back to original instruction, and continue. */ return false; } bool __kprobes kprobe_single_step_handler(struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); unsigned long addr = instruction_pointer(regs); struct kprobe *cur = kprobe_running(); if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) { kprobes_restore_local_irqflag(kcb, regs); post_kprobe_handler(cur, kcb, regs); return true; } /* not ours, kprobes should ignore it */ return false; } /* * Provide a blacklist of symbols identifying ranges which cannot be kprobed. * This blacklist is exposed to userspace via debugfs (kprobes/blacklist). */ int __init arch_populate_kprobe_blacklist(void) { int ret; ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, (unsigned long)__irqentry_text_end); return ret; } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { return 0; } int __init arch_init_kprobes(void) { return 0; }
linux-master
arch/riscv/kernel/probes/kprobes.c
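arch_check_kprobe() above accepts a probe only if its address can be reached by stepping from the start of the symbol in 2- or 4-byte instruction increments. Below is a user-space sketch of the same boundary walk over a plain buffer, with invented names and no kernel dependencies.

/*
 * Minimal sketch of the instruction-boundary walk: advance by the decoded
 * length of each instruction and check whether the requested offset lands
 * exactly on a boundary.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

static size_t rv_insn_len(uint16_t lo16)
{
	return (lo16 & 0x3) == 0x3 ? 4 : 2;
}

static bool on_insn_boundary(const uint16_t *text, size_t nbytes,
			     size_t target_offset)
{
	size_t off = 0;

	while (off < target_offset && off < nbytes)
		off += rv_insn_len(text[off / 2]);

	return off == target_offset;
}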
// SPDX-License-Identifier: GPL-2.0 /* * Copied from arch/arm64/kernel/vdso/vgettimeofday.c * * Copyright (C) 2018 ARM Ltd. * Copyright (C) 2020 SiFive */ #include <linux/time.h> #include <linux/types.h> extern int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } extern int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } extern int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { return __cvdso_clock_getres(clock_id, res); }
linux-master
arch/riscv/kernel/vdso/vgettimeofday.c
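For context, the exports above are what an ordinary libc clock_gettime() call normally resolves to through the vDSO on RISC-V, so no syscall is needed on the fast path. A plain user-space example (nothing architecture-specific in the source):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Typically served from the vDSO rather than a real syscall. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}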
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2023 Rivos, Inc */ #include <linux/types.h> #include <vdso/datapage.h> #include <vdso/helpers.h> extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, size_t cpu_count, unsigned long *cpus, unsigned int flags); /* Add a prototype to avoid -Wmissing-prototypes warning. */ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, size_t cpu_count, unsigned long *cpus, unsigned int flags); int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, size_t cpu_count, unsigned long *cpus, unsigned int flags) { const struct vdso_data *vd = __arch_get_vdso_data(); const struct arch_vdso_data *avd = &vd->arch_data; bool all_cpus = !cpu_count && !cpus; struct riscv_hwprobe *p = pairs; struct riscv_hwprobe *end = pairs + pair_count; /* * Defer to the syscall for exotic requests. The vdso has answers * stashed away only for the "all cpus" case. If all CPUs are * homogeneous, then this function can handle requests for arbitrary * masks. */ if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); /* This is something we can handle, fill out the pairs. */ while (p < end) { if (p->key <= RISCV_HWPROBE_MAX_KEY) { p->value = avd->all_cpu_hwprobe_values[p->key]; } else { p->key = -1; p->value = 0; } p++; } return 0; }
linux-master
arch/riscv/kernel/vdso/hwprobe.c
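A hedged user-space sketch of the interface this vDSO function accelerates. It assumes a RISC-V toolchain whose kernel headers provide <asm/hwprobe.h> and __NR_riscv_hwprobe; the key name is taken from that header and may not exist on older kernels.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR };

	/*
	 * cpu_count == 0 with a NULL mask asks about all CPUs, which is the
	 * case the vDSO above can answer from its cached per-key values.
	 */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0)
		printf("base behavior: 0x%llx\n",
		       (unsigned long long)pair.value);
	return 0;
}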
// SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/init.h> #include <linux/libfdt.h> /* * Declare the functions that are exported (but prefixed) here so that LLVM * does not complain it lacks the 'static' keyword (which, if added, makes * LLVM complain because the function is actually unused in this file). */ u64 get_kaslr_seed(uintptr_t dtb_pa); u64 get_kaslr_seed(uintptr_t dtb_pa) { int node, len; fdt64_t *prop; u64 ret; node = fdt_path_offset((void *)dtb_pa, "/chosen"); if (node < 0) return 0; prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len); if (!prop || len != sizeof(u64)) return 0; ret = fdt64_to_cpu(*prop); *prop = 0; return ret; }
linux-master
arch/riscv/kernel/pi/fdt_early.c
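The same /chosen/kaslr-seed lookup can be reproduced with the standalone libfdt library. This sketch assumes libfdt is available and omits the write-back that get_kaslr_seed() uses to clear the seed after reading it.

#include <stdint.h>
#include <libfdt.h>

static uint64_t read_kaslr_seed(const void *fdt)
{
	const fdt64_t *prop;
	int node, len;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return 0;

	prop = fdt_getprop(fdt, node, "kaslr-seed", &len);
	if (!prop || len != sizeof(uint64_t))
		return 0;

	return fdt64_to_cpu(*prop);
}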
// SPDX-License-Identifier: GPL-2.0-only #include <linux/types.h> #include <linux/init.h> #include <linux/libfdt.h> #include <linux/string.h> #include <asm/pgtable.h> #include <asm/setup.h> static char early_cmdline[COMMAND_LINE_SIZE]; /* * Declare the functions that are exported (but prefixed) here so that LLVM * does not complain it lacks the 'static' keyword (which, if added, makes * LLVM complain because the function is actually unused in this file). */ u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa); bool set_nokaslr_from_cmdline(uintptr_t dtb_pa); static char *get_early_cmdline(uintptr_t dtb_pa) { const char *fdt_cmdline = NULL; unsigned int fdt_cmdline_size = 0; int chosen_node; if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) { chosen_node = fdt_path_offset((void *)dtb_pa, "/chosen"); if (chosen_node >= 0) { fdt_cmdline = fdt_getprop((void *)dtb_pa, chosen_node, "bootargs", NULL); if (fdt_cmdline) { fdt_cmdline_size = strlen(fdt_cmdline); strscpy(early_cmdline, fdt_cmdline, COMMAND_LINE_SIZE); } } } if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || IS_ENABLED(CONFIG_CMDLINE_FORCE) || fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) { strncat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE - fdt_cmdline_size); } return early_cmdline; } static u64 match_noXlvl(char *cmdline) { if (strstr(cmdline, "no4lvl")) return SATP_MODE_48; else if (strstr(cmdline, "no5lvl")) return SATP_MODE_57; return 0; } u64 set_satp_mode_from_cmdline(uintptr_t dtb_pa) { char *cmdline = get_early_cmdline(dtb_pa); return match_noXlvl(cmdline); } static bool match_nokaslr(char *cmdline) { return strstr(cmdline, "nokaslr"); } bool set_nokaslr_from_cmdline(uintptr_t dtb_pa) { char *cmdline = get_early_cmdline(dtb_pa); return match_nokaslr(cmdline); }
linux-master
arch/riscv/kernel/pi/cmdline_early.c
// SPDX-License-Identifier: GPL-2.0-only /* * purgatory: Runs between two kernels * * Copyright (C) 2022 Huawei Technologies Co, Ltd. * * Author: Li Zhengyu ([email protected]) * */ #include <linux/purgatory.h> #include <linux/kernel.h> #include <linux/string.h> #include <asm/string.h> u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory"); struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory"); static int verify_sha256_digest(void) { struct kexec_sha_region *ptr, *end; struct sha256_state ss; u8 digest[SHA256_DIGEST_SIZE]; sha256_init(&ss); end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); for (ptr = purgatory_sha_regions; ptr < end; ptr++) sha256_update(&ss, (uint8_t *)(ptr->start), ptr->len); sha256_final(&ss, digest); if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0) return 1; return 0; } /* workaround for a warning with -Wmissing-prototypes */ void purgatory(void); void purgatory(void) { if (verify_sha256_digest()) for (;;) /* loop forever */ ; }
linux-master
arch/riscv/purgatory/purgatory.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <asm/ptrace.h> #include <linux/uaccess.h> #include "sfp-util.h" #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #define OPC_PAL 0x00 #define OPC_INTA 0x10 #define OPC_INTL 0x11 #define OPC_INTS 0x12 #define OPC_INTM 0x13 #define OPC_FLTC 0x14 #define OPC_FLTV 0x15 #define OPC_FLTI 0x16 #define OPC_FLTL 0x17 #define OPC_MISC 0x18 #define OPC_JSR 0x1a #define FOP_SRC_S 0 #define FOP_SRC_T 2 #define FOP_SRC_Q 3 #define FOP_FNC_ADDx 0 #define FOP_FNC_CVTQL 0 #define FOP_FNC_SUBx 1 #define FOP_FNC_MULx 2 #define FOP_FNC_DIVx 3 #define FOP_FNC_CMPxUN 4 #define FOP_FNC_CMPxEQ 5 #define FOP_FNC_CMPxLT 6 #define FOP_FNC_CMPxLE 7 #define FOP_FNC_SQRTx 11 #define FOP_FNC_CVTxS 12 #define FOP_FNC_CVTxT 14 #define FOP_FNC_CVTxQ 15 #define MISC_TRAPB 0x0000 #define MISC_EXCB 0x0400 extern unsigned long alpha_read_fp_reg (unsigned long reg); extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); extern unsigned long alpha_read_fp_reg_s (unsigned long reg); extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val); #ifdef MODULE MODULE_DESCRIPTION("FP Software completion module"); MODULE_LICENSE("GPL v2"); extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long); extern long (*alpha_fp_emul) (unsigned long pc); static long (*save_emul_imprecise)(struct pt_regs *, unsigned long); static long (*save_emul) (unsigned long pc); long do_alpha_fp_emul_imprecise(struct pt_regs *, unsigned long); long do_alpha_fp_emul(unsigned long); static int alpha_fp_emul_init_module(void) { save_emul_imprecise = alpha_fp_emul_imprecise; save_emul = alpha_fp_emul; alpha_fp_emul_imprecise = do_alpha_fp_emul_imprecise; alpha_fp_emul = do_alpha_fp_emul; return 0; } module_init(alpha_fp_emul_init_module); static void alpha_fp_emul_cleanup_module(void) { alpha_fp_emul_imprecise = save_emul_imprecise; alpha_fp_emul = save_emul; } module_exit(alpha_fp_emul_cleanup_module); #undef alpha_fp_emul_imprecise #define alpha_fp_emul_imprecise do_alpha_fp_emul_imprecise #undef alpha_fp_emul #define alpha_fp_emul do_alpha_fp_emul #endif /* MODULE */ /* * Emulate the floating point instruction at address PC. Returns -1 if the * instruction to be emulated is illegal (such as with the opDEC trap), else * the SI_CODE for a SIGFPE signal, else 0 if everything's ok. * * Notice that the kernel does not and cannot use FP regs. This is good * because it means that instead of saving/restoring all fp regs, we simply * stick the result of the operation into the appropriate register. */ long alpha_fp_emul (unsigned long pc) { FP_DECL_EX; FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); unsigned long fa, fb, fc, func, mode, src; unsigned long res, va, vb, vc, swcr, fpcr; __u32 insn; long si_code; get_user(insn, (__u32 __user *)pc); fc = (insn >> 0) & 0x1f; /* destination register */ fb = (insn >> 16) & 0x1f; fa = (insn >> 21) & 0x1f; func = (insn >> 5) & 0xf; src = (insn >> 9) & 0x3; mode = (insn >> 11) & 0x3; fpcr = rdfpcr(); swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); if (mode == 3) { /* Dynamic -- get rounding mode from fpcr. 
*/ mode = (fpcr >> FPCR_DYN_SHIFT) & 3; } switch (src) { case FOP_SRC_S: va = alpha_read_fp_reg_s(fa); vb = alpha_read_fp_reg_s(fb); FP_UNPACK_SP(SA, &va); FP_UNPACK_SP(SB, &vb); switch (func) { case FOP_FNC_SUBx: FP_SUB_S(SR, SA, SB); goto pack_s; case FOP_FNC_ADDx: FP_ADD_S(SR, SA, SB); goto pack_s; case FOP_FNC_MULx: FP_MUL_S(SR, SA, SB); goto pack_s; case FOP_FNC_DIVx: FP_DIV_S(SR, SA, SB); goto pack_s; case FOP_FNC_SQRTx: FP_SQRT_S(SR, SB); goto pack_s; } goto bad_insn; case FOP_SRC_T: va = alpha_read_fp_reg(fa); vb = alpha_read_fp_reg(fb); if ((func & ~3) == FOP_FNC_CMPxUN) { FP_UNPACK_RAW_DP(DA, &va); FP_UNPACK_RAW_DP(DB, &vb); if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { FP_SET_EXCEPTION(FP_EX_DENORM); if (FP_DENORM_ZERO) _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); } if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { FP_SET_EXCEPTION(FP_EX_DENORM); if (FP_DENORM_ZERO) _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); } FP_CMP_D(res, DA, DB, 3); vc = 0x4000000000000000UL; /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ if (res == 3 && ((func & 3) >= 2 || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { FP_SET_EXCEPTION(FP_EX_INVALID); } switch (func) { case FOP_FNC_CMPxUN: if (res != 3) vc = 0; break; case FOP_FNC_CMPxEQ: if (res) vc = 0; break; case FOP_FNC_CMPxLT: if (res != -1) vc = 0; break; case FOP_FNC_CMPxLE: if ((long)res > 0) vc = 0; break; } goto done_d; } FP_UNPACK_DP(DA, &va); FP_UNPACK_DP(DB, &vb); switch (func) { case FOP_FNC_SUBx: FP_SUB_D(DR, DA, DB); goto pack_d; case FOP_FNC_ADDx: FP_ADD_D(DR, DA, DB); goto pack_d; case FOP_FNC_MULx: FP_MUL_D(DR, DA, DB); goto pack_d; case FOP_FNC_DIVx: FP_DIV_D(DR, DA, DB); goto pack_d; case FOP_FNC_SQRTx: FP_SQRT_D(DR, DB); goto pack_d; case FOP_FNC_CVTxS: /* It is irritating that DEC encoded CVTST with SRC == T_floating. It is also interesting that the bit used to tell the two apart is /U... */ if (insn & 0x2000) { FP_CONV(S,D,1,1,SR,DB); goto pack_s; } else { vb = alpha_read_fp_reg_s(fb); FP_UNPACK_SP(SB, &vb); DR_c = DB_c; DR_s = DB_s; DR_e = DB_e + (1024 - 128); DR_f = SB_f << (52 - 23); goto pack_d; } case FOP_FNC_CVTxQ: if (DB_c == FP_CLS_NAN && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) { /* AAHB Table B-2 says QNaN should not trigger INV */ vc = 0; } else FP_TO_INT_ROUND_D(vc, DB, 64, 2); goto done_d; } goto bad_insn; case FOP_SRC_Q: vb = alpha_read_fp_reg(fb); switch (func) { case FOP_FNC_CVTQL: /* Notice: We can get here only due to an integer overflow. Such overflows are reported as invalid ops. We return the result the hw would have computed. */ vc = ((vb & 0xc0000000) << 32 | /* sign and msb */ (vb & 0x3fffffff) << 29); /* rest of the int */ FP_SET_EXCEPTION (FP_EX_INVALID); goto done_d; case FOP_FNC_CVTxS: FP_FROM_INT_S(SR, ((long)vb), 64, long); goto pack_s; case FOP_FNC_CVTxT: FP_FROM_INT_D(DR, ((long)vb), 64, long); goto pack_d; } goto bad_insn; } goto bad_insn; pack_s: FP_PACK_SP(&vc, SR); if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) vc = 0; alpha_write_fp_reg_s(fc, vc); goto done; pack_d: FP_PACK_DP(&vc, DR); if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) vc = 0; done_d: alpha_write_fp_reg(fc, vc); goto done; /* * Take the appropriate action for each possible * floating-point result: * * - Set the appropriate bits in the FPCR * - If the specified exception is enabled in the FPCR, * return. The caller (entArith) will dispatch * the appropriate signal to the translated program. * * In addition, properly track the exception state in software * as described in the Alpha Architecture Handbook section 4.7.7.3. 
*/ done: if (_fex) { /* Record exceptions in software control word. */ swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); current_thread_info()->ieee_state |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); /* Update hardware control register. */ fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* Do we generate a signal? */ _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; si_code = 0; if (_fex) { if (_fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (_fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (_fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (_fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (_fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (_fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; } return si_code; } /* We used to write the destination register here, but DEC FORTRAN requires that the result *always* be written... so we do the write immediately after the operations above. */ return 0; bad_insn: printk(KERN_ERR "alpha_fp_emul: Invalid FP insn %#x at %#lx\n", insn, pc); return -1; } long alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask) { unsigned long trigger_pc = regs->pc - 4; unsigned long insn, opcode, rc, si_code = 0; /* * Turn off the bits corresponding to registers that are the * target of instructions that set bits in the exception * summary register. We have some slack doing this because a * register that is the target of a trapping instruction can * be written at most once in the trap shadow. * * Branches, jumps, TRAPBs, EXCBs and calls to PALcode all * bound the trap shadow, so we need not look any further than * up to the first occurrence of such an instruction. */ while (write_mask) { get_user(insn, (__u32 __user *)(trigger_pc)); opcode = insn >> 26; rc = insn & 0x1f; switch (opcode) { case OPC_PAL: case OPC_JSR: case 0x30 ... 0x3f: /* branches */ goto egress; case OPC_MISC: switch (insn & 0xffff) { case MISC_TRAPB: case MISC_EXCB: goto egress; default: break; } break; case OPC_INTA: case OPC_INTL: case OPC_INTS: case OPC_INTM: write_mask &= ~(1UL << rc); break; case OPC_FLTC: case OPC_FLTV: case OPC_FLTI: case OPC_FLTL: write_mask &= ~(1UL << (rc + 32)); break; } if (!write_mask) { /* Re-execute insns in the trap-shadow. */ regs->pc = trigger_pc + 4; si_code = alpha_fp_emul(trigger_pc); goto egress; } trigger_pc -= 4; } egress: return si_code; }
linux-master
arch/alpha/math-emu/math.c
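A standalone restatement of the operand-field extraction done at the top of alpha_fp_emul() above; the shifts and masks mirror the ones in the file, while the struct and function names are invented for the example.

#include <stdint.h>

struct alpha_fp_fields {
	unsigned fc, fb, fa, func, src, mode, opcode;
};

static struct alpha_fp_fields decode_fp(uint32_t insn)
{
	struct alpha_fp_fields f = {
		.fc     = (insn >>  0) & 0x1f,	/* destination register */
		.fb     = (insn >> 16) & 0x1f,
		.fa     = (insn >> 21) & 0x1f,
		.func   = (insn >>  5) & 0xf,
		.src    = (insn >>  9) & 0x3,
		.mode   = (insn >> 11) & 0x3,	/* rounding mode, 3 = dynamic */
		.opcode =  insn >> 26,
	};
	return f;
}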
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) Paul Mackerras 1997. */ #include <linux/string.h> #include <linux/stdarg.h> size_t strnlen(const char * s, size_t count) { const char *sc; for (sc = s; count-- && *sc != '\0'; ++sc) /* nothing */; return sc - s; } # define do_div(n, base) ({ \ unsigned int __base = (base); \ unsigned int __rem; \ __rem = ((unsigned long long)(n)) % __base; \ (n) = ((unsigned long long)(n)) / __base; \ __rem; \ }) static int skip_atoi(const char **s) { int i, c; for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s) i = i*10 + c - '0'; return i; } #define ZEROPAD 1 /* pad with zero */ #define SIGN 2 /* unsigned/signed long */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ #define SPECIAL 32 /* 0x */ #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) { char c, sign, tmp[66]; const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz"; int i; if (type & LARGE) digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; if (type & LEFT) type &= ~ZEROPAD; if (base < 2 || base > 36) return 0; c = (type & ZEROPAD) ? '0' : ' '; sign = 0; if (type & SIGN) { if ((signed long long)num < 0) { sign = '-'; num = - (signed long long)num; size--; } else if (type & PLUS) { sign = '+'; size--; } else if (type & SPACE) { sign = ' '; size--; } } if (type & SPECIAL) { if (base == 16) size -= 2; else if (base == 8) size--; } i = 0; if (num == 0) tmp[i++]='0'; else while (num != 0) { tmp[i++] = digits[do_div(num, base)]; } if (i > precision) precision = i; size -= precision; if (!(type&(ZEROPAD+LEFT))) while (size-- > 0) *str++ = ' '; if (sign) *str++ = sign; if (type & SPECIAL) { if (base==8) *str++ = '0'; else if (base == 16) { *str++ = '0'; *str++ = digits[33]; } } if (!(type & LEFT)) while (size-- > 0) *str++ = c; while (i < precision--) *str++ = '0'; while (i-- > 0) *str++ = tmp[i]; while (size-- > 0) *str++ = ' '; return str; } int vsprintf(char *buf, const char *fmt, va_list args) { int len; unsigned long long num; int i, base; char * str; const char *s; int flags; /* flags to number() */ int field_width; /* width of output field */ int precision; /* min. # of digits for integers; max number of chars for from string */ int qualifier; /* 'h', 'l', or 'L' for integer fields */ /* 'z' support added 23/7/1999 S.H. 
*/ /* 'z' changed to 'Z' --davidm 1/25/99 */ for (str = buf ; *fmt ; ++fmt) { if (*fmt != '%') { *str++ = *fmt; continue; } /* process flags */ flags = 0; repeat: ++fmt; /* this also skips first '%' */ switch (*fmt) { case '-': flags |= LEFT; goto repeat; case '+': flags |= PLUS; goto repeat; case ' ': flags |= SPACE; goto repeat; case '#': flags |= SPECIAL; goto repeat; case '0': flags |= ZEROPAD; goto repeat; } /* get field width */ field_width = -1; if ('0' <= *fmt && *fmt <= '9') field_width = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ field_width = va_arg(args, int); if (field_width < 0) { field_width = -field_width; flags |= LEFT; } } /* get the precision */ precision = -1; if (*fmt == '.') { ++fmt; if ('0' <= *fmt && *fmt <= '9') precision = skip_atoi(&fmt); else if (*fmt == '*') { ++fmt; /* it's the next argument */ precision = va_arg(args, int); } if (precision < 0) precision = 0; } /* get the conversion qualifier */ qualifier = -1; if (*fmt == 'l' && *(fmt + 1) == 'l') { qualifier = 'q'; fmt += 2; } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') { qualifier = *fmt; ++fmt; } /* default base */ base = 10; switch (*fmt) { case 'c': if (!(flags & LEFT)) while (--field_width > 0) *str++ = ' '; *str++ = (unsigned char) va_arg(args, int); while (--field_width > 0) *str++ = ' '; continue; case 's': s = va_arg(args, char *); if (!s) s = "<NULL>"; len = strnlen(s, precision); if (!(flags & LEFT)) while (len < field_width--) *str++ = ' '; for (i = 0; i < len; ++i) *str++ = *s++; while (len < field_width--) *str++ = ' '; continue; case 'p': if (field_width == -1) { field_width = 2*sizeof(void *); flags |= ZEROPAD; } str = number(str, (unsigned long) va_arg(args, void *), 16, field_width, precision, flags); continue; case 'n': if (qualifier == 'l') { long * ip = va_arg(args, long *); *ip = (str - buf); } else if (qualifier == 'Z') { size_t * ip = va_arg(args, size_t *); *ip = (str - buf); } else { int * ip = va_arg(args, int *); *ip = (str - buf); } continue; case '%': *str++ = '%'; continue; /* integer number formats - set up the flags and "break" */ case 'o': base = 8; break; case 'X': flags |= LARGE; case 'x': base = 16; break; case 'd': case 'i': flags |= SIGN; case 'u': break; default: *str++ = '%'; if (*fmt) *str++ = *fmt; else --fmt; continue; } if (qualifier == 'l') { num = va_arg(args, unsigned long); if (flags & SIGN) num = (signed long) num; } else if (qualifier == 'q') { num = va_arg(args, unsigned long long); if (flags & SIGN) num = (signed long long) num; } else if (qualifier == 'Z') { num = va_arg(args, size_t); } else if (qualifier == 'h') { num = (unsigned short) va_arg(args, int); if (flags & SIGN) num = (signed short) num; } else { num = va_arg(args, unsigned int); if (flags & SIGN) num = (signed int) num; } str = number(str, num, base, field_width, precision, flags); } *str = '\0'; return str-buf; } int sprintf(char * buf, const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vsprintf(buf, fmt, args); va_end(args); return i; }
linux-master
arch/alpha/boot/stdio.c
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/boot/main.c * * Copyright (C) 1994, 1995 Linus Torvalds * * This file is the bootloader for the Linux/AXP kernel */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <generated/utsrelease.h> #include <linux/mm.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <linux/stdarg.h> #include "ksize.h" extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); struct hwrpb_struct *hwrpb = INIT_HWRPB; static struct pcb_struct pcb_va[1]; /* * Find a physical address of a virtual object.. * * This is easy using the virtual page table address. */ static inline void * find_pa(unsigned long *vptb, void *ptr) { unsigned long address = (unsigned long) ptr; unsigned long result; result = vptb[address >> 13]; result >>= 32; result <<= 13; result |= address & 0x1fff; return (void *) result; } /* * This function moves into OSF/1 pal-code, and has a temporary * PCB for that. The kernel proper should replace this PCB with * the real one as soon as possible. * * The page table muckery in here depends on the fact that the boot * code has the L1 page table identity-map itself in the second PTE * in the L1 page table. Thus the L1-page is virtually addressable * itself (through three levels) at virtual address 0x200802000. */ #define VPTB ((unsigned long *) 0x200000000) #define L1 ((unsigned long *) 0x200802000) void pal_init(void) { unsigned long i, rev; struct percpu_struct * percpu; struct pcb_struct * pcb_pa; /* Create the dummy PCB. */ pcb_va->ksp = 0; pcb_va->usp = 0; pcb_va->ptbr = L1[1] >> 32; pcb_va->asn = 0; pcb_va->pcc = 0; pcb_va->unique = 0; pcb_va->flags = 1; pcb_va->res1 = 0; pcb_va->res2 = 0; pcb_pa = find_pa(VPTB, pcb_va); /* * a0 = 2 (OSF) * a1 = return address, but we give the asm the vaddr of the PCB * a2 = physical addr of PCB * a3 = new virtual page table pointer * a4 = KSP (but the asm sets it) */ srm_printk("Switching to OSF PAL-code .. "); i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB); if (i) { srm_printk("failed, code %ld\n", i); __halt(); } percpu = (struct percpu_struct *) (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB); rev = percpu->pal_revision = percpu->palcode_avail[2]; srm_printk("Ok (rev %lx)\n", rev); tbia(); /* do it directly in case we are SMP */ } static inline long openboot(void) { char bootdev[256]; long result; result = callback_getenv(ENV_BOOTED_DEV, bootdev, 255); if (result < 0) return result; return callback_open(bootdev, result & 255); } static inline long close(long dev) { return callback_close(dev); } static inline long load(long dev, unsigned long addr, unsigned long count) { char bootfile[256]; extern char _end; long result, boot_size = &_end - (char *) BOOT_ADDR; result = callback_getenv(ENV_BOOTED_FILE, bootfile, 255); if (result < 0) return result; result &= 255; bootfile[result] = '\0'; if (result) srm_printk("Boot file specification (%s) not implemented\n", bootfile); return callback_read(dev, count, (void *)addr, boot_size/512 + 1); } /* * Start the kernel. 
*/ static void runkernel(void) { __asm__ __volatile__( "bis %1,%1,$30\n\t" "bis %0,%0,$26\n\t" "ret ($26)" : /* no outputs: it doesn't even return */ : "r" (START_ADDR), "r" (PAGE_SIZE + INIT_STACK)); } void start_kernel(void) { long i; long dev; int nbytes; char envval[256]; srm_printk("Linux/AXP bootloader for Linux " UTS_RELEASE "\n"); if (INIT_HWRPB->pagesize != 8192) { srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10); return; } pal_init(); dev = openboot(); if (dev < 0) { srm_printk("Unable to open boot device: %016lx\n", dev); return; } dev &= 0xffffffff; srm_printk("Loading vmlinux ..."); i = load(dev, START_ADDR, KERNEL_SIZE); close(dev); if (i != KERNEL_SIZE) { srm_printk("Failed (%lx)\n", i); return; } nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval)); if (nbytes < 0) { nbytes = 0; } envval[nbytes] = '\0'; strcpy((char*)ZERO_PGE, envval); srm_printk(" Ok\nNow booting the kernel\n"); runkernel(); for (i = 0 ; i < 0x100000000 ; i++) /* nothing */; __halt(); }
linux-master
arch/alpha/boot/main.c
// SPDX-License-Identifier: GPL-2.0 /* * misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * * Modified for ARM Linux by Russell King * * Nicolas Pitre <[email protected]> 1999/04/14 : * For this code to run directly from Flash, all constant variables must * be marked with 'const' and all other variables initialized at run-time * only. This way all non constant variables will end up in the bss segment, * which should point to addresses in RAM and cleared to 0 on start. * This allows for a much quicker boot time. * * Modified for Alpha, from the ARM version, by Jay Estabrook 2003. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/uaccess.h> #define memzero(s,n) memset ((s),0,(n)) #define puts srm_printk extern long srm_printk(const char *, ...) __attribute__ ((format (printf, 1, 2))); /* * gzip declarations */ #define OF(args) args #define STATIC static typedef unsigned char uch; typedef unsigned short ush; typedef unsigned long ulg; #define WSIZE 0x8000 /* Window size must be at least 32k, */ /* and a power of two */ static uch *inbuf; /* input buffer */ static uch *window; /* Sliding window buffer */ static unsigned insize; /* valid bytes in inbuf */ static unsigned inptr; /* index of next byte to be processed in inbuf */ static unsigned outcnt; /* bytes in output buffer */ /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ #define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ #define ORIG_NAME 0x08 /* bit 3 set: original file name present */ #define COMMENT 0x10 /* bit 4 set: file comment present */ #define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ #define RESERVED 0xC0 /* bit 6,7: reserved */ #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) /* Diagnostic functions */ #ifdef DEBUG # define Assert(cond,msg) {if(!(cond)) error(msg);} # define Trace(x) fprintf x # define Tracev(x) {if (verbose) fprintf x ;} # define Tracevv(x) {if (verbose>1) fprintf x ;} # define Tracec(c,x) {if (verbose && (c)) fprintf x ;} # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif static int fill_inbuf(void); static void flush_window(void); static void error(char *m); static char *input_data; static int input_data_size; static uch *output_data; static ulg output_ptr; static ulg bytes_out; static void error(char *m); extern int end; static ulg free_mem_ptr; static ulg free_mem_end_ptr; #define HEAP_SIZE 0x3000 #include "../../../lib/inflate.c" /* =========================================================================== * Fill the input buffer. This is called only when the buffer is empty * and at least one byte is really needed. */ int fill_inbuf(void) { if (insize != 0) error("ran out of input data"); inbuf = input_data; insize = input_data_size; inptr = 1; return inbuf[0]; } /* =========================================================================== * Write the output window window[0..outcnt-1] and update crc and bytes_out. * (Used for the decompressed data only.) 
*/ void flush_window(void) { ulg c = crc; unsigned n; uch *in, *out, ch; in = window; out = &output_data[output_ptr]; for (n = 0; n < outcnt; n++) { ch = *out++ = *in++; c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8); } crc = c; bytes_out += (ulg)outcnt; output_ptr += (ulg)outcnt; outcnt = 0; /* puts("."); */ } static void error(char *x) { puts("\n\n"); puts(x); puts("\n\n -- System halted"); while(1); /* Halt */ } unsigned int decompress_kernel(void *output_start, void *input_start, size_t ksize, size_t kzsize) { output_data = (uch *)output_start; input_data = (uch *)input_start; input_data_size = kzsize; /* use compressed size */ /* FIXME FIXME FIXME */ free_mem_ptr = (ulg)output_start + ksize; free_mem_end_ptr = (ulg)output_start + ksize + 0x200000; /* FIXME FIXME FIXME */ /* put in temp area to reduce initial footprint */ window = malloc(WSIZE); makecrc(); /* puts("Uncompressing Linux..."); */ gunzip(); /* puts(" done, booting the kernel.\n"); */ return output_ptr; }
linux-master
arch/alpha/boot/misc.c
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/boot/bootp.c * * Copyright (C) 1997 Jay Estabrook * * This file is used for creating a bootp file for the Linux/AXP kernel * * based significantly on the arch/alpha/boot/main.c of Linus Torvalds */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <generated/utsrelease.h> #include <linux/mm.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/io.h> #include <linux/stdarg.h> #include "ksize.h" extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct *pcb_va, struct pcb_struct *pcb_pa, unsigned long *vptb); extern void move_stack(unsigned long new_stack); struct hwrpb_struct *hwrpb = INIT_HWRPB; static struct pcb_struct pcb_va[1]; /* * Find a physical address of a virtual object.. * * This is easy using the virtual page table address. */ static inline void * find_pa(unsigned long *vptb, void *ptr) { unsigned long address = (unsigned long) ptr; unsigned long result; result = vptb[address >> 13]; result >>= 32; result <<= 13; result |= address & 0x1fff; return (void *) result; } /* * This function moves into OSF/1 pal-code, and has a temporary * PCB for that. The kernel proper should replace this PCB with * the real one as soon as possible. * * The page table muckery in here depends on the fact that the boot * code has the L1 page table identity-map itself in the second PTE * in the L1 page table. Thus the L1-page is virtually addressable * itself (through three levels) at virtual address 0x200802000. */ #define VPTB ((unsigned long *) 0x200000000) #define L1 ((unsigned long *) 0x200802000) void pal_init(void) { unsigned long i, rev; struct percpu_struct * percpu; struct pcb_struct * pcb_pa; /* Create the dummy PCB. */ pcb_va->ksp = 0; pcb_va->usp = 0; pcb_va->ptbr = L1[1] >> 32; pcb_va->asn = 0; pcb_va->pcc = 0; pcb_va->unique = 0; pcb_va->flags = 1; pcb_va->res1 = 0; pcb_va->res2 = 0; pcb_pa = find_pa(VPTB, pcb_va); /* * a0 = 2 (OSF) * a1 = return address, but we give the asm the vaddr of the PCB * a2 = physical addr of PCB * a3 = new virtual page table pointer * a4 = KSP (but the asm sets it) */ srm_printk("Switching to OSF PAL-code .. "); i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB); if (i) { srm_printk("failed, code %ld\n", i); __halt(); } percpu = (struct percpu_struct *) (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB); rev = percpu->pal_revision = percpu->palcode_avail[2]; srm_printk("Ok (rev %lx)\n", rev); tbia(); /* do it directly in case we are SMP */ } static inline void load(unsigned long dst, unsigned long src, unsigned long count) { memcpy((void *)dst, (void *)src, count); } /* * Start the kernel. */ static inline void runkernel(void) { __asm__ __volatile__( "bis %0,%0,$27\n\t" "jmp ($27)" : /* no outputs: it doesn't even return */ : "r" (START_ADDR)); } extern char _end; #define KERNEL_ORIGIN \ ((((unsigned long)&_end) + 511) & ~511) void start_kernel(void) { /* * Note that this crufty stuff with static and envval * and envbuf is because: * * 1. Frequently, the stack is short, and we don't want to overrun; * 2. Frequently the stack is where we are going to copy the kernel to; * 3. A certain SRM console required the GET_ENV output to stack. * ??? A comment in the aboot sources indicates that the GET_ENV * destination must be quadword aligned. Might this explain the * behaviour, rather than requiring output to the stack, which * seems rather far-fetched. 
*/ static long nbytes; static char envval[256] __attribute__((aligned(8))); static unsigned long initrd_start; srm_printk("Linux/AXP bootp loader for Linux " UTS_RELEASE "\n"); if (INIT_HWRPB->pagesize != 8192) { srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10); return; } if (INIT_HWRPB->vptb != (unsigned long) VPTB) { srm_printk("Expected vptb at %p, got %p\n", VPTB, (void *)INIT_HWRPB->vptb); return; } pal_init(); /* The initrd must be page-aligned. See below for the cause of the magic number 5. */ initrd_start = ((START_ADDR + 5*KERNEL_SIZE + PAGE_SIZE) | (PAGE_SIZE-1)) + 1; #ifdef INITRD_IMAGE_SIZE srm_printk("Initrd positioned at %#lx\n", initrd_start); #endif /* * Move the stack to a safe place to ensure it won't be * overwritten by kernel image. */ move_stack(initrd_start - PAGE_SIZE); nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval)); if (nbytes < 0 || nbytes >= sizeof(envval)) { nbytes = 0; } envval[nbytes] = '\0'; srm_printk("Loading the kernel...'%s'\n", envval); /* NOTE: *no* callbacks or printouts from here on out!!! */ /* This is a hack, as some consoles seem to get virtual 20000000 (ie * where the SRM console puts the kernel bootp image) memory * overlapping physical memory where the kernel wants to be put, * which causes real problems when attempting to copy the former to * the latter... :-( * * So, we first move the kernel virtual-to-physical way above where * we physically want the kernel to end up, then copy it from there * to its final resting place... ;-} * * Sigh... */ #ifdef INITRD_IMAGE_SIZE load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_IMAGE_SIZE); #endif load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE); load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE); memset((char*)ZERO_PGE, 0, PAGE_SIZE); strcpy((char*)ZERO_PGE, envval); #ifdef INITRD_IMAGE_SIZE ((long *)(ZERO_PGE+256))[0] = initrd_start; ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE; #endif runkernel(); }
linux-master
arch/alpha/boot/bootp.c
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/boot/bootpz.c * * Copyright (C) 1997 Jay Estabrook * * This file is used for creating a compressed BOOTP file for the * Linux/AXP kernel * * based significantly on the arch/alpha/boot/main.c of Linus Torvalds * and the decompression code from MILO. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <generated/utsrelease.h> #include <linux/mm.h> #include <asm/console.h> #include <asm/hwrpb.h> #include <asm/io.h> #include <linux/stdarg.h> #include "kzsize.h" /* FIXME FIXME FIXME */ #define MALLOC_AREA_SIZE 0x200000 /* 2MB for now */ /* FIXME FIXME FIXME */ /* WARNING NOTE It is very possible that turning on additional messages may cause kernel image corruption due to stack usage to do the printing. */ #undef DEBUG_CHECK_RANGE #undef DEBUG_ADDRESSES #undef DEBUG_LAST_STEPS extern unsigned long switch_to_osf_pal(unsigned long nr, struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa, unsigned long *vptb); extern int decompress_kernel(void* destination, void *source, size_t ksize, size_t kzsize); extern void move_stack(unsigned long new_stack); struct hwrpb_struct *hwrpb = INIT_HWRPB; static struct pcb_struct pcb_va[1]; /* * Find a physical address of a virtual object.. * * This is easy using the virtual page table address. */ #define VPTB ((unsigned long *) 0x200000000) static inline unsigned long find_pa(unsigned long address) { unsigned long result; result = VPTB[address >> 13]; result >>= 32; result <<= 13; result |= address & 0x1fff; return result; } int check_range(unsigned long vstart, unsigned long vend, unsigned long kstart, unsigned long kend) { unsigned long vaddr, kaddr; #ifdef DEBUG_CHECK_RANGE srm_printk("check_range: V[0x%lx:0x%lx] K[0x%lx:0x%lx]\n", vstart, vend, kstart, kend); #endif /* do some range checking for detecting an overlap... */ for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE) { kaddr = (find_pa(vaddr) | PAGE_OFFSET); if (kaddr >= kstart && kaddr <= kend) { #ifdef DEBUG_CHECK_RANGE srm_printk("OVERLAP: vaddr 0x%lx kaddr 0x%lx" " [0x%lx:0x%lx]\n", vaddr, kaddr, kstart, kend); #endif return 1; } } return 0; } /* * This function moves into OSF/1 pal-code, and has a temporary * PCB for that. The kernel proper should replace this PCB with * the real one as soon as possible. * * The page table muckery in here depends on the fact that the boot * code has the L1 page table identity-map itself in the second PTE * in the L1 page table. Thus the L1-page is virtually addressable * itself (through three levels) at virtual address 0x200802000. */ #define L1 ((unsigned long *) 0x200802000) void pal_init(void) { unsigned long i, rev; struct percpu_struct * percpu; struct pcb_struct * pcb_pa; /* Create the dummy PCB. */ pcb_va->ksp = 0; pcb_va->usp = 0; pcb_va->ptbr = L1[1] >> 32; pcb_va->asn = 0; pcb_va->pcc = 0; pcb_va->unique = 0; pcb_va->flags = 1; pcb_va->res1 = 0; pcb_va->res2 = 0; pcb_pa = (struct pcb_struct *)find_pa((unsigned long)pcb_va); /* * a0 = 2 (OSF) * a1 = return address, but we give the asm the vaddr of the PCB * a2 = physical addr of PCB * a3 = new virtual page table pointer * a4 = KSP (but the asm sets it) */ srm_printk("Switching to OSF PAL-code... 
"); i = switch_to_osf_pal(2, pcb_va, pcb_pa, VPTB); if (i) { srm_printk("failed, code %ld\n", i); __halt(); } percpu = (struct percpu_struct *) (INIT_HWRPB->processor_offset + (unsigned long) INIT_HWRPB); rev = percpu->pal_revision = percpu->palcode_avail[2]; srm_printk("OK (rev %lx)\n", rev); tbia(); /* do it directly in case we are SMP */ } /* * Start the kernel. */ static inline void runkernel(void) { __asm__ __volatile__( "bis %0,%0,$27\n\t" "jmp ($27)" : /* no outputs: it doesn't even return */ : "r" (START_ADDR)); } /* Must record the SP (it is virtual) on entry, so we can make sure not to overwrite it during movement or decompression. */ unsigned long SP_on_entry; /* Calculate the kernel image address based on the end of the BOOTP bootstrapper (ie this program). */ extern char _end; #define KERNEL_ORIGIN \ ((((unsigned long)&_end) + 511) & ~511) /* Round address to next higher page boundary. */ #define NEXT_PAGE(a) (((a) | (PAGE_SIZE - 1)) + 1) #ifdef INITRD_IMAGE_SIZE # define REAL_INITRD_SIZE INITRD_IMAGE_SIZE #else # define REAL_INITRD_SIZE 0 #endif /* Defines from include/asm-alpha/system.h BOOT_ADDR Virtual address at which the consoles loads the BOOTP image. KERNEL_START KSEG address at which the kernel is built to run, which includes some initial data pages before the code. START_ADDR KSEG address of the entry point of kernel code. ZERO_PGE KSEG address of page full of zeroes, but upon entry to kernel, it can be expected to hold the parameter list and possible INTRD information. These are used in the local defines below. */ /* Virtual addresses for the BOOTP image. Note that this includes the bootstrapper code as well as the compressed kernel image, and possibly the INITRD image. Oh, and do NOT forget the STACK, which appears to be placed virtually beyond the end of the loaded image. */ #define V_BOOT_IMAGE_START BOOT_ADDR #define V_BOOT_IMAGE_END SP_on_entry /* Virtual addresses for just the bootstrapper part of the BOOTP image. */ #define V_BOOTSTRAPPER_START BOOT_ADDR #define V_BOOTSTRAPPER_END KERNEL_ORIGIN /* Virtual addresses for just the data part of the BOOTP image. This may also include the INITRD image, but always includes the STACK. */ #define V_DATA_START KERNEL_ORIGIN #define V_INITRD_START (KERNEL_ORIGIN + KERNEL_Z_SIZE) #define V_INTRD_END (V_INITRD_START + REAL_INITRD_SIZE) #define V_DATA_END V_BOOT_IMAGE_END /* KSEG addresses for the uncompressed kernel. Note that the end address includes workspace for the decompression. Note also that the DATA_START address is ZERO_PGE, to which we write just before jumping to the kernel image at START_ADDR. */ #define K_KERNEL_DATA_START ZERO_PGE #define K_KERNEL_IMAGE_START START_ADDR #define K_KERNEL_IMAGE_END (START_ADDR + KERNEL_SIZE) /* Define to where we may have to decompress the kernel image, before we move it to the final position, in case of overlap. This will be above the final position of the kernel. Regardless of overlap, we move the INITRD image to the end of this copy area, because there needs to be a buffer area after the kernel for "bootmem" anyway. */ #define K_COPY_IMAGE_START NEXT_PAGE(K_KERNEL_IMAGE_END) /* Reserve one page below INITRD for the new stack. 
*/ #define K_INITRD_START \ NEXT_PAGE(K_COPY_IMAGE_START + KERNEL_SIZE + PAGE_SIZE) #define K_COPY_IMAGE_END \ (K_INITRD_START + REAL_INITRD_SIZE + MALLOC_AREA_SIZE) #define K_COPY_IMAGE_SIZE \ NEXT_PAGE(K_COPY_IMAGE_END - K_COPY_IMAGE_START) void start_kernel(void) { int must_move = 0; /* Initialize these for the decompression-in-place situation, which is the smallest amount of work and most likely to occur when using the normal START_ADDR of the kernel (currently set to 16MB, to clear all console code. */ unsigned long uncompressed_image_start = K_KERNEL_IMAGE_START; unsigned long uncompressed_image_end = K_KERNEL_IMAGE_END; unsigned long initrd_image_start = K_INITRD_START; /* * Note that this crufty stuff with static and envval * and envbuf is because: * * 1. Frequently, the stack is short, and we don't want to overrun; * 2. Frequently the stack is where we are going to copy the kernel to; * 3. A certain SRM console required the GET_ENV output to stack. * ??? A comment in the aboot sources indicates that the GET_ENV * destination must be quadword aligned. Might this explain the * behaviour, rather than requiring output to the stack, which * seems rather far-fetched. */ static long nbytes; static char envval[256] __attribute__((aligned(8))); register unsigned long asm_sp asm("30"); SP_on_entry = asm_sp; srm_printk("Linux/Alpha BOOTPZ Loader for Linux " UTS_RELEASE "\n"); /* Validity check the HWRPB. */ if (INIT_HWRPB->pagesize != 8192) { srm_printk("Expected 8kB pages, got %ldkB\n", INIT_HWRPB->pagesize >> 10); return; } if (INIT_HWRPB->vptb != (unsigned long) VPTB) { srm_printk("Expected vptb at %p, got %p\n", VPTB, (void *)INIT_HWRPB->vptb); return; } /* PALcode (re)initialization. */ pal_init(); /* Get the parameter list from the console environment variable. */ nbytes = callback_getenv(ENV_BOOTED_OSFLAGS, envval, sizeof(envval)); if (nbytes < 0 || nbytes >= sizeof(envval)) { nbytes = 0; } envval[nbytes] = '\0'; #ifdef DEBUG_ADDRESSES srm_printk("START_ADDR 0x%lx\n", START_ADDR); srm_printk("KERNEL_ORIGIN 0x%lx\n", KERNEL_ORIGIN); srm_printk("KERNEL_SIZE 0x%x\n", KERNEL_SIZE); srm_printk("KERNEL_Z_SIZE 0x%x\n", KERNEL_Z_SIZE); #endif /* Since all the SRM consoles load the BOOTP image at virtual * 0x20000000, we have to ensure that the physical memory * pages occupied by that image do NOT overlap the physical * address range where the kernel wants to be run. This * causes real problems when attempting to cdecompress the * former into the latter... :-( * * So, we may have to decompress/move the kernel/INITRD image * virtual-to-physical someplace else first before moving * kernel /INITRD to their final resting places... ;-} * * Sigh... */ /* First, check to see if the range of addresses occupied by the bootstrapper part of the BOOTP image include any of the physical pages into which the kernel will be placed for execution. We only need check on the final kernel image range, since we will put the INITRD someplace that we can be sure is not in conflict. */ if (check_range(V_BOOTSTRAPPER_START, V_BOOTSTRAPPER_END, K_KERNEL_DATA_START, K_KERNEL_IMAGE_END)) { srm_printk("FATAL ERROR: overlap of bootstrapper code\n"); __halt(); } /* Next, check to see if the range of addresses occupied by the compressed kernel/INITRD/stack portion of the BOOTP image include any of the physical pages into which the decompressed kernel or the INITRD will be placed for execution. 
*/ if (check_range(V_DATA_START, V_DATA_END, K_KERNEL_IMAGE_START, K_COPY_IMAGE_END)) { #ifdef DEBUG_ADDRESSES srm_printk("OVERLAP: cannot decompress in place\n"); #endif uncompressed_image_start = K_COPY_IMAGE_START; uncompressed_image_end = K_COPY_IMAGE_END; must_move = 1; /* Finally, check to see if the range of addresses occupied by the compressed kernel/INITRD part of the BOOTP image include any of the physical pages into which that part is to be copied for decompression. */ while (check_range(V_DATA_START, V_DATA_END, uncompressed_image_start, uncompressed_image_end)) { #if 0 uncompressed_image_start += K_COPY_IMAGE_SIZE; uncompressed_image_end += K_COPY_IMAGE_SIZE; initrd_image_start += K_COPY_IMAGE_SIZE; #else /* Keep as close as possible to end of BOOTP image. */ uncompressed_image_start += PAGE_SIZE; uncompressed_image_end += PAGE_SIZE; initrd_image_start += PAGE_SIZE; #endif } } srm_printk("Starting to load the kernel with args '%s'\n", envval); #ifdef DEBUG_ADDRESSES srm_printk("Decompressing the kernel...\n" "...from 0x%lx to 0x%lx size 0x%x\n", V_DATA_START, uncompressed_image_start, KERNEL_SIZE); #endif decompress_kernel((void *)uncompressed_image_start, (void *)V_DATA_START, KERNEL_SIZE, KERNEL_Z_SIZE); /* * Now, move things to their final positions, if/as required. */ #ifdef INITRD_IMAGE_SIZE /* First, we always move the INITRD image, if present. */ #ifdef DEBUG_ADDRESSES srm_printk("Moving the INITRD image...\n" " from 0x%lx to 0x%lx size 0x%x\n", V_INITRD_START, initrd_image_start, INITRD_IMAGE_SIZE); #endif memcpy((void *)initrd_image_start, (void *)V_INITRD_START, INITRD_IMAGE_SIZE); #endif /* INITRD_IMAGE_SIZE */ /* Next, we may have to move the uncompressed kernel to the final destination. */ if (must_move) { #ifdef DEBUG_ADDRESSES srm_printk("Moving the uncompressed kernel...\n" "...from 0x%lx to 0x%lx size 0x%x\n", uncompressed_image_start, K_KERNEL_IMAGE_START, (unsigned)KERNEL_SIZE); #endif /* * Move the stack to a safe place to ensure it won't be * overwritten by kernel image. */ move_stack(initrd_image_start - PAGE_SIZE); memcpy((void *)K_KERNEL_IMAGE_START, (void *)uncompressed_image_start, KERNEL_SIZE); } /* Clear the zero page, then move the argument list in. */ #ifdef DEBUG_LAST_STEPS srm_printk("Preparing ZERO_PGE...\n"); #endif memset((char*)ZERO_PGE, 0, PAGE_SIZE); strcpy((char*)ZERO_PGE, envval); #ifdef INITRD_IMAGE_SIZE #ifdef DEBUG_LAST_STEPS srm_printk("Preparing INITRD info...\n"); #endif /* Finally, set the INITRD paramenters for the kernel. */ ((long *)(ZERO_PGE+256))[0] = initrd_image_start; ((long *)(ZERO_PGE+256))[1] = INITRD_IMAGE_SIZE; #endif /* INITRD_IMAGE_SIZE */ #ifdef DEBUG_LAST_STEPS srm_printk("Doing 'runkernel()'...\n"); #endif runkernel(); } /* dummy function, should never be called. */ void *__kmalloc(size_t size, gfp_t flags) { return (void *)NULL; }
linux-master
arch/alpha/boot/bootpz.c
// SPDX-License-Identifier: GPL-2.0 /* This utility makes a bootblock suitable for the SRM console/miniloader */ /* Usage: * mkbb <device> <lxboot> * * Where <device> is the name of the device to install the bootblock on, * and <lxboot> is the name of a bootblock to merge in. This bootblock * contains the offset and size of the bootloader. It must be exactly * 512 bytes long. */ #include <fcntl.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> /* Minimal definition of disklabel, so we don't have to include * asm/disklabel.h (confuses make) */ #ifndef MAXPARTITIONS #define MAXPARTITIONS 8 /* max. # of partitions */ #endif #ifndef u8 #define u8 unsigned char #endif #ifndef u16 #define u16 unsigned short #endif #ifndef u32 #define u32 unsigned int #endif struct disklabel { u32 d_magic; /* must be DISKLABELMAGIC */ u16 d_type, d_subtype; u8 d_typename[16]; u8 d_packname[16]; u32 d_secsize; u32 d_nsectors; u32 d_ntracks; u32 d_ncylinders; u32 d_secpercyl; u32 d_secprtunit; u16 d_sparespertrack; u16 d_sparespercyl; u32 d_acylinders; u16 d_rpm, d_interleave, d_trackskew, d_cylskew; u32 d_headswitch, d_trkseek, d_flags; u32 d_drivedata[5]; u32 d_spare[5]; u32 d_magic2; /* must be DISKLABELMAGIC */ u16 d_checksum; u16 d_npartitions; u32 d_bbsize, d_sbsize; struct d_partition { u32 p_size; u32 p_offset; u32 p_fsize; u8 p_fstype; u8 p_frag; u16 p_cpg; } d_partitions[MAXPARTITIONS]; }; typedef union __bootblock { struct { char __pad1[64]; struct disklabel __label; } __u1; struct { unsigned long __pad2[63]; unsigned long __checksum; } __u2; char bootblock_bytes[512]; unsigned long bootblock_quadwords[64]; } bootblock; #define bootblock_label __u1.__label #define bootblock_checksum __u2.__checksum int main(int argc, char ** argv) { bootblock bootblock_from_disk; bootblock bootloader_image; int dev, fd; int i; int nread; /* Make sure of the arg count */ if(argc != 3) { fprintf(stderr, "Usage: %s device lxboot\n", argv[0]); exit(0); } /* First, open the device and make sure it's accessible */ dev = open(argv[1], O_RDWR); if(dev < 0) { perror(argv[1]); exit(0); } /* Now open the lxboot and make sure it's reasonable */ fd = open(argv[2], O_RDONLY); if(fd < 0) { perror(argv[2]); close(dev); exit(0); } /* Read in the lxboot */ nread = read(fd, &bootloader_image, sizeof(bootblock)); if(nread != sizeof(bootblock)) { perror("lxboot read"); fprintf(stderr, "expected %zd, got %d\n", sizeof(bootblock), nread); exit(0); } /* Read in the bootblock from disk. */ nread = read(dev, &bootblock_from_disk, sizeof(bootblock)); if(nread != sizeof(bootblock)) { perror("bootblock read"); fprintf(stderr, "expected %zd, got %d\n", sizeof(bootblock), nread); exit(0); } /* Swap the bootblock's disklabel into the bootloader */ bootloader_image.bootblock_label = bootblock_from_disk.bootblock_label; /* Calculate the bootblock checksum */ bootloader_image.bootblock_checksum = 0; for(i = 0; i < 63; i++) { bootloader_image.bootblock_checksum += bootloader_image.bootblock_quadwords[i]; } /* Write the whole thing out! */ lseek(dev, 0L, SEEK_SET); if(write(dev, &bootloader_image, sizeof(bootblock)) != sizeof(bootblock)) { perror("bootblock write"); exit(0); } close(fd); close(dev); exit(0); }
linux-master
arch/alpha/boot/tools/mkbb.c
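Illustrative aside (not from the kernel tree): both mkbb above and objstrip -p below finish the 512-byte SRM bootblock by storing the wrapping sum of its first 63 quadwords into the 64th. A standalone sketch of building and verifying that checksum:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return nonzero if the 64th quadword equals the sum of the first 63. */
static int bootblock_checksum_ok(const unsigned char block[512])
{
	uint64_t q[64], sum = 0;
	int i;

	memcpy(q, block, sizeof(q));
	for (i = 0; i < 63; i++)
		sum += q[i];		/* wrapping 64-bit sum, as in mkbb */
	return sum == q[63];
}

int main(void)
{
	unsigned char block[512] = { 0 };
	uint64_t sum = 0, q;
	int i;

	strcpy((char *)block, "Linux SRM bootblock");
	for (i = 0; i < 63; i++) {
		memcpy(&q, block + 8 * i, 8);
		sum += q;
	}
	memcpy(block + 8 * 63, &sum, 8);	/* store the checksum quadword */

	printf("checksum %s\n", bootblock_checksum_ok(block) ? "ok" : "bad");
	return 0;
}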
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/boot/tools/objstrip.c * * Strip the object file headers/trailers from an executable (ELF or ECOFF). * * Copyright (C) 1996 David Mosberger-Tang. */ /* * Converts an ECOFF or ELF object file into a bootable file. The * object file must be a OMAGIC file (i.e., data and bss follow immediately * behind the text). See DEC "Assembly Language Programmer's Guide" * documentation for details. The SRM boot process is documented in * the Alpha AXP Architecture Reference Manual, Second Edition by * Richard L. Sites and Richard T. Witek. */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #include <sys/fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <linux/a.out.h> #include <linux/coff.h> #include <linux/param.h> #ifdef __ELF__ # include <linux/elf.h> # define elfhdr elf64_hdr # define elf_phdr elf64_phdr # define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) #endif /* bootfile size must be multiple of BLOCK_SIZE: */ #define BLOCK_SIZE 512 const char * prog_name; static void usage (void) { fprintf(stderr, "usage: %s [-v] -p file primary\n" " %s [-vb] file [secondary]\n", prog_name, prog_name); exit(1); } int main (int argc, char *argv[]) { size_t nwritten, tocopy, n, mem_size, fil_size, pad = 0; int fd, ofd, i, j, verbose = 0, primary = 0; char buf[8192], *inname; struct exec * aout; /* includes file & aout header */ long offset; #ifdef __ELF__ struct elfhdr *elf; struct elf_phdr *elf_phdr; /* program header */ unsigned long long e_entry; #endif prog_name = argv[0]; for (i = 1; i < argc && argv[i][0] == '-'; ++i) { for (j = 1; argv[i][j]; ++j) { switch (argv[i][j]) { case 'v': verbose = ~verbose; break; case 'b': pad = BLOCK_SIZE; break; case 'p': primary = 1; /* make primary bootblock */ break; } } } if (i >= argc) { usage(); } inname = argv[i++]; fd = open(inname, O_RDONLY); if (fd == -1) { perror("open"); exit(1); } ofd = 1; if (i < argc) { ofd = open(argv[i++], O_WRONLY | O_CREAT | O_TRUNC, 0666); if (ofd == -1) { perror("open"); exit(1); } } if (primary) { /* generate bootblock for primary loader */ unsigned long bb[64], sum = 0; struct stat st; off_t size; int i; if (ofd == 1) { usage(); } if (fstat(fd, &st) == -1) { perror("fstat"); exit(1); } size = (st.st_size + BLOCK_SIZE - 1) & ~(BLOCK_SIZE - 1); memset(bb, 0, sizeof(bb)); strcpy((char *) bb, "Linux SRM bootblock"); bb[60] = size / BLOCK_SIZE; /* count */ bb[61] = 1; /* starting sector # */ bb[62] = 0; /* flags---must be 0 */ for (i = 0; i < 63; ++i) { sum += bb[i]; } bb[63] = sum; if (write(ofd, bb, sizeof(bb)) != sizeof(bb)) { perror("boot-block write"); exit(1); } printf("%lu\n", size); return 0; } /* read and inspect exec header: */ if (read(fd, buf, sizeof(buf)) < 0) { perror("read"); exit(1); } #ifdef __ELF__ elf = (struct elfhdr *) buf; if (memcmp(&elf->e_ident[EI_MAG0], ELFMAG, SELFMAG) == 0) { if (elf->e_type != ET_EXEC) { fprintf(stderr, "%s: %s is not an ELF executable\n", prog_name, inname); exit(1); } if (!elf_check_arch(elf)) { fprintf(stderr, "%s: is not for this processor (e_machine=%d)\n", prog_name, elf->e_machine); exit(1); } if (elf->e_phnum != 1) { fprintf(stderr, "%s: %d program headers (forgot to link with -N?)\n", prog_name, elf->e_phnum); } e_entry = elf->e_entry; lseek(fd, elf->e_phoff, SEEK_SET); if (read(fd, buf, sizeof(*elf_phdr)) != sizeof(*elf_phdr)) { perror("read"); exit(1); } elf_phdr = (struct elf_phdr *) buf; offset = elf_phdr->p_offset; mem_size = elf_phdr->p_memsz; fil_size = elf_phdr->p_filesz; /* work 
around ELF bug: */ if (elf_phdr->p_vaddr < e_entry) { unsigned long delta = e_entry - elf_phdr->p_vaddr; offset += delta; mem_size -= delta; fil_size -= delta; elf_phdr->p_vaddr += delta; } if (verbose) { fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n", prog_name, (long) elf_phdr->p_vaddr, elf_phdr->p_vaddr + fil_size, offset); } } else #endif { aout = (struct exec *) buf; if (!(aout->fh.f_flags & COFF_F_EXEC)) { fprintf(stderr, "%s: %s is not in executable format\n", prog_name, inname); exit(1); } if (aout->fh.f_opthdr != sizeof(aout->ah)) { fprintf(stderr, "%s: %s has unexpected optional header size\n", prog_name, inname); exit(1); } if (N_MAGIC(*aout) != OMAGIC) { fprintf(stderr, "%s: %s is not an OMAGIC file\n", prog_name, inname); exit(1); } offset = N_TXTOFF(*aout); fil_size = aout->ah.tsize + aout->ah.dsize; mem_size = fil_size + aout->ah.bsize; if (verbose) { fprintf(stderr, "%s: extracting %#016lx-%#016lx (at %lx)\n", prog_name, aout->ah.text_start, aout->ah.text_start + fil_size, offset); } } if (lseek(fd, offset, SEEK_SET) != offset) { perror("lseek"); exit(1); } if (verbose) { fprintf(stderr, "%s: copying %lu byte from %s\n", prog_name, (unsigned long) fil_size, inname); } tocopy = fil_size; while (tocopy > 0) { n = tocopy; if (n > sizeof(buf)) { n = sizeof(buf); } tocopy -= n; if ((size_t) read(fd, buf, n) != n) { perror("read"); exit(1); } do { nwritten = write(ofd, buf, n); if ((ssize_t) nwritten == -1) { perror("write"); exit(1); } n -= nwritten; } while (n > 0); } if (pad) { mem_size = ((mem_size + pad - 1) / pad) * pad; } tocopy = mem_size - fil_size; if (tocopy > 0) { fprintf(stderr, "%s: zero-filling bss and aligning to %lu with %lu bytes\n", prog_name, pad, (unsigned long) tocopy); memset(buf, 0x00, sizeof(buf)); do { n = tocopy; if (n > sizeof(buf)) { n = sizeof(buf); } nwritten = write(ofd, buf, n); if ((ssize_t) nwritten == -1) { perror("write"); exit(1); } tocopy -= nwritten; } while (tocopy > 0); } return 0; }
linux-master
arch/alpha/boot/tools/objstrip.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/lib/memcpy.c * * Copyright (C) 1995 Linus Torvalds */ /* * This is a reasonably optimized memcpy() routine. */ /* * Note that the C code is written to be optimized into good assembly. However, * at this point gcc is unable to sanely compile "if (n >= 0)", resulting in a * explicit compare against 0 (instead of just using the proper "blt reg, xx" or * "bge reg, xx"). I hope alpha-gcc will be fixed to notice this eventually.. */ #include <linux/types.h> #include <linux/export.h> /* * This should be done in one go with ldq_u*2/mask/stq_u. Do it * with a macro so that we can fix it up later.. */ #define ALIGN_DEST_TO8_UP(d,s,n) \ while (d & 7) { \ if (n <= 0) return; \ n--; \ *(char *) d = *(char *) s; \ d++; s++; \ } #define ALIGN_DEST_TO8_DN(d,s,n) \ while (d & 7) { \ if (n <= 0) return; \ n--; \ d--; s--; \ *(char *) d = *(char *) s; \ } /* * This should similarly be done with ldq_u*2/mask/stq. The destination * is aligned, but we don't fill in a full quad-word */ #define DO_REST_UP(d,s,n) \ while (n > 0) { \ n--; \ *(char *) d = *(char *) s; \ d++; s++; \ } #define DO_REST_DN(d,s,n) \ while (n > 0) { \ n--; \ d--; s--; \ *(char *) d = *(char *) s; \ } /* * This should be done with ldq/mask/stq. The source and destination are * aligned, but we don't fill in a full quad-word */ #define DO_REST_ALIGNED_UP(d,s,n) DO_REST_UP(d,s,n) #define DO_REST_ALIGNED_DN(d,s,n) DO_REST_DN(d,s,n) /* * This does unaligned memory copies. We want to avoid storing to * an unaligned address, as that would do a read-modify-write cycle. * We also want to avoid double-reading the unaligned reads. * * Note the ordering to try to avoid load (and address generation) latencies. */ static inline void __memcpy_unaligned_up (unsigned long d, unsigned long s, long n) { ALIGN_DEST_TO8_UP(d,s,n); n -= 8; /* to avoid compare against 8 in the loop */ if (n >= 0) { unsigned long low_word, high_word; __asm__("ldq_u %0,%1":"=r" (low_word):"m" (*(unsigned long *) s)); do { unsigned long tmp; __asm__("ldq_u %0,%1":"=r" (high_word):"m" (*(unsigned long *)(s+8))); n -= 8; __asm__("extql %1,%2,%0" :"=r" (low_word) :"r" (low_word), "r" (s)); __asm__("extqh %1,%2,%0" :"=r" (tmp) :"r" (high_word), "r" (s)); s += 8; *(unsigned long *) d = low_word | tmp; d += 8; low_word = high_word; } while (n >= 0); } n += 8; DO_REST_UP(d,s,n); } static inline void __memcpy_unaligned_dn (unsigned long d, unsigned long s, long n) { /* I don't understand AXP assembler well enough for this. -Tim */ s += n; d += n; while (n--) * (char *) --d = * (char *) --s; } /* * Hmm.. Strange. The __asm__ here is there to make gcc use an integer register * for the load-store. I don't know why, but it would seem that using a floating * point register for the move seems to slow things down (very small difference, * though). * * Note the ordering to try to avoid load (and address generation) latencies. 
*/ static inline void __memcpy_aligned_up (unsigned long d, unsigned long s, long n) { ALIGN_DEST_TO8_UP(d,s,n); n -= 8; while (n >= 0) { unsigned long tmp; __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s)); n -= 8; s += 8; *(unsigned long *) d = tmp; d += 8; } n += 8; DO_REST_ALIGNED_UP(d,s,n); } static inline void __memcpy_aligned_dn (unsigned long d, unsigned long s, long n) { s += n; d += n; ALIGN_DEST_TO8_DN(d,s,n); n -= 8; while (n >= 0) { unsigned long tmp; s -= 8; __asm__("ldq %0,%1":"=r" (tmp):"m" (*(unsigned long *) s)); n -= 8; d -= 8; *(unsigned long *) d = tmp; } n += 8; DO_REST_ALIGNED_DN(d,s,n); } void * memcpy(void * dest, const void *src, size_t n) { if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) { __memcpy_aligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } __memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n); return dest; } EXPORT_SYMBOL(memcpy);
linux-master
arch/alpha/lib/memcpy.c
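Illustrative aside (not from the kernel tree): a portable sketch of the structure the Alpha memcpy above uses for the mutually aligned case (align the destination byte by byte, move 8-byte words in the main loop, then finish the tail), without the ldq/stq inline assembly:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void *copy_aligned_up(void *dest, const void *src, size_t n)
{
	unsigned char *d = dest;
	const unsigned char *s = src;
	uint64_t w;

	while (((uintptr_t)d & 7) && n) {	/* align destination to 8 bytes */
		*d++ = *s++;
		n--;
	}
	while (n >= 8) {			/* main quadword loop */
		memcpy(&w, s, 8);
		memcpy(d, &w, 8);
		s += 8;
		d += 8;
		n -= 8;
	}
	while (n--)				/* byte tail */
		*d++ = *s++;
	return dest;
}

int main(void)
{
	char src[32] = "quadword-at-a-time copy demo";
	char dst[32] = { 0 };

	copy_aligned_up(dst, src, sizeof(src));
	puts(dst);
	return 0;
}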
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/lib/fpreg.c * * (C) Copyright 1998 Linus Torvalds */ #include <linux/compiler.h> #include <linux/export.h> #include <linux/preempt.h> #include <asm/thread_info.h> #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); #else #define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); #endif unsigned long alpha_read_fp_reg (unsigned long reg) { unsigned long val; if (unlikely(reg >= 32)) return 0; preempt_disable(); if (current_thread_info()->status & TS_SAVED_FP) val = current_thread_info()->fp[reg]; else switch (reg) { case 0: STT( 0, val); break; case 1: STT( 1, val); break; case 2: STT( 2, val); break; case 3: STT( 3, val); break; case 4: STT( 4, val); break; case 5: STT( 5, val); break; case 6: STT( 6, val); break; case 7: STT( 7, val); break; case 8: STT( 8, val); break; case 9: STT( 9, val); break; case 10: STT(10, val); break; case 11: STT(11, val); break; case 12: STT(12, val); break; case 13: STT(13, val); break; case 14: STT(14, val); break; case 15: STT(15, val); break; case 16: STT(16, val); break; case 17: STT(17, val); break; case 18: STT(18, val); break; case 19: STT(19, val); break; case 20: STT(20, val); break; case 21: STT(21, val); break; case 22: STT(22, val); break; case 23: STT(23, val); break; case 24: STT(24, val); break; case 25: STT(25, val); break; case 26: STT(26, val); break; case 27: STT(27, val); break; case 28: STT(28, val); break; case 29: STT(29, val); break; case 30: STT(30, val); break; case 31: STT(31, val); break; } preempt_enable(); return val; } EXPORT_SYMBOL(alpha_read_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); #else #define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); #endif void alpha_write_fp_reg (unsigned long reg, unsigned long val) { if (unlikely(reg >= 32)) return; preempt_disable(); if (current_thread_info()->status & TS_SAVED_FP) { current_thread_info()->status |= TS_RESTORE_FP; current_thread_info()->fp[reg] = val; } else switch (reg) { case 0: LDT( 0, val); break; case 1: LDT( 1, val); break; case 2: LDT( 2, val); break; case 3: LDT( 3, val); break; case 4: LDT( 4, val); break; case 5: LDT( 5, val); break; case 6: LDT( 6, val); break; case 7: LDT( 7, val); break; case 8: LDT( 8, val); break; case 9: LDT( 9, val); break; case 10: LDT(10, val); break; case 11: LDT(11, val); break; case 12: LDT(12, val); break; case 13: LDT(13, val); break; case 14: LDT(14, val); break; case 15: LDT(15, val); break; case 16: LDT(16, val); break; case 17: LDT(17, val); break; case 18: LDT(18, val); break; case 19: LDT(19, val); break; case 20: LDT(20, val); break; case 21: LDT(21, val); break; case 22: LDT(22, val); break; case 23: LDT(23, val); break; case 24: LDT(24, val); break; case 25: LDT(25, val); break; case 26: LDT(26, val); break; case 27: LDT(27, val); break; case 28: LDT(28, val); break; case 29: LDT(29, val); break; case 30: LDT(30, val); break; case 31: LDT(31, val); break; } preempt_enable(); } EXPORT_SYMBOL(alpha_write_fp_reg); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); #else #define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); #endif unsigned long alpha_read_fp_reg_s (unsigned long reg) { unsigned long val; if (unlikely(reg >= 32)) return 0; preempt_disable(); if (current_thread_info()->status & TS_SAVED_FP) 
{ LDT(0, current_thread_info()->fp[reg]); STS(0, val); } else switch (reg) { case 0: STS( 0, val); break; case 1: STS( 1, val); break; case 2: STS( 2, val); break; case 3: STS( 3, val); break; case 4: STS( 4, val); break; case 5: STS( 5, val); break; case 6: STS( 6, val); break; case 7: STS( 7, val); break; case 8: STS( 8, val); break; case 9: STS( 9, val); break; case 10: STS(10, val); break; case 11: STS(11, val); break; case 12: STS(12, val); break; case 13: STS(13, val); break; case 14: STS(14, val); break; case 15: STS(15, val); break; case 16: STS(16, val); break; case 17: STS(17, val); break; case 18: STS(18, val); break; case 19: STS(19, val); break; case 20: STS(20, val); break; case 21: STS(21, val); break; case 22: STS(22, val); break; case 23: STS(23, val); break; case 24: STS(24, val); break; case 25: STS(25, val); break; case 26: STS(26, val); break; case 27: STS(27, val); break; case 28: STS(28, val); break; case 29: STS(29, val); break; case 30: STS(30, val); break; case 31: STS(31, val); break; } preempt_enable(); return val; } EXPORT_SYMBOL(alpha_read_fp_reg_s); #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); #else #define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); #endif void alpha_write_fp_reg_s (unsigned long reg, unsigned long val) { if (unlikely(reg >= 32)) return; preempt_disable(); if (current_thread_info()->status & TS_SAVED_FP) { current_thread_info()->status |= TS_RESTORE_FP; LDS(0, val); STT(0, current_thread_info()->fp[reg]); } else switch (reg) { case 0: LDS( 0, val); break; case 1: LDS( 1, val); break; case 2: LDS( 2, val); break; case 3: LDS( 3, val); break; case 4: LDS( 4, val); break; case 5: LDS( 5, val); break; case 6: LDS( 6, val); break; case 7: LDS( 7, val); break; case 8: LDS( 8, val); break; case 9: LDS( 9, val); break; case 10: LDS(10, val); break; case 11: LDS(11, val); break; case 12: LDS(12, val); break; case 13: LDS(13, val); break; case 14: LDS(14, val); break; case 15: LDS(15, val); break; case 16: LDS(16, val); break; case 17: LDS(17, val); break; case 18: LDS(18, val); break; case 19: LDS(19, val); break; case 20: LDS(20, val); break; case 21: LDS(21, val); break; case 22: LDS(22, val); break; case 23: LDS(23, val); break; case 24: LDS(24, val); break; case 25: LDS(25, val); break; case 26: LDS(26, val); break; case 27: LDS(27, val); break; case 28: LDS(28, val); break; case 29: LDS(29, val); break; case 30: LDS(30, val); break; case 31: LDS(31, val); break; } preempt_enable(); } EXPORT_SYMBOL(alpha_write_fp_reg_s);
linux-master
arch/alpha/lib/fpreg.c
// SPDX-License-Identifier: GPL-2.0 /* * csum_partial_copy - do IP checksumming and copy * * (C) Copyright 1996 Linus Torvalds * accelerated versions (and 21264 assembly versions ) contributed by * Rick Gorton <[email protected]> * * Don't look at this too closely - you'll go mad. The things * we do for performance.. */ #include <linux/types.h> #include <linux/string.h> #include <linux/uaccess.h> #include <net/checksum.h> #define ldq_u(x,y) \ __asm__ __volatile__("ldq_u %0,%1":"=r" (x):"m" (*(const unsigned long *)(y))) #define stq_u(x,y) \ __asm__ __volatile__("stq_u %1,%0":"=m" (*(unsigned long *)(y)):"r" (x)) #define extql(x,y,z) \ __asm__ __volatile__("extql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define extqh(x,y,z) \ __asm__ __volatile__("extqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define mskql(x,y,z) \ __asm__ __volatile__("mskql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define mskqh(x,y,z) \ __asm__ __volatile__("mskqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define insql(x,y,z) \ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define insqh(x,y,z) \ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) #define __get_word(insn,x,ptr) \ ({ \ long __guu_err; \ __asm__ __volatile__( \ "1: "#insn" %0,%2\n" \ "2:\n" \ EXC(1b,2b,%0,%1) \ : "=r"(x), "=r"(__guu_err) \ : "m"(__m(ptr)), "1"(0)); \ __guu_err; \ }) static inline unsigned short from64to16(unsigned long x) { /* Using extract instructions is a bit more efficient than the original shift/bitmask version. */ union { unsigned long ul; unsigned int ui[2]; unsigned short us[4]; } in_v, tmp_v, out_v; in_v.ul = x; tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; /* Since the bits of tmp_v.sh[3] are going to always be zero, we don't have to bother to add that in. */ out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] + (unsigned long) tmp_v.us[2]; /* Similarly, out_v.us[2] is always zero for the final add. */ return out_v.us[0] + out_v.us[1]; } /* * Ok. This isn't fun, but this is the EASY case. */ static inline unsigned long csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, long len) { unsigned long checksum = ~0U; unsigned long carry = 0; while (len >= 0) { unsigned long word; if (__get_word(ldq, word, src)) return 0; checksum += carry; src++; checksum += word; len -= 8; carry = checksum < word; *dst = word; dst++; } len += 8; checksum += carry; if (len) { unsigned long word, tmp; if (__get_word(ldq, word, src)) return 0; tmp = *dst; mskql(word, len, word); checksum += word; mskqh(tmp, len, tmp); carry = checksum < word; *dst = word | tmp; checksum += carry; } return checksum; } /* * This is even less fun, but this is still reasonably * easy. 
*/ static inline unsigned long csum_partial_cfu_dest_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long soff, long len) { unsigned long first; unsigned long word, carry; unsigned long lastsrc = 7+len+(unsigned long)src; unsigned long checksum = ~0U; if (__get_word(ldq_u, first,src)) return 0; carry = 0; while (len >= 0) { unsigned long second; if (__get_word(ldq_u, second, src+1)) return 0; extql(first, soff, word); len -= 8; src++; extqh(second, soff, first); checksum += carry; word |= first; first = second; checksum += word; *dst = word; dst++; carry = checksum < word; } len += 8; checksum += carry; if (len) { unsigned long tmp; unsigned long second; if (__get_word(ldq_u, second, lastsrc)) return 0; tmp = *dst; extql(first, soff, word); extqh(second, soff, first); word |= first; mskql(word, len, word); checksum += word; mskqh(tmp, len, tmp); carry = checksum < word; *dst = word | tmp; checksum += carry; } return checksum; } /* * This is slightly less fun than the above.. */ static inline unsigned long csum_partial_cfu_src_aligned(const unsigned long __user *src, unsigned long *dst, unsigned long doff, long len, unsigned long partial_dest) { unsigned long carry = 0; unsigned long word; unsigned long second_dest; unsigned long checksum = ~0U; mskql(partial_dest, doff, partial_dest); while (len >= 0) { if (__get_word(ldq, word, src)) return 0; len -= 8; insql(word, doff, second_dest); checksum += carry; stq_u(partial_dest | second_dest, dst); src++; checksum += word; insqh(word, doff, partial_dest); carry = checksum < word; dst++; } len += 8; if (len) { checksum += carry; if (__get_word(ldq, word, src)) return 0; mskql(word, len, word); len -= 8; checksum += word; insql(word, doff, second_dest); len += doff; carry = checksum < word; partial_dest |= second_dest; if (len >= 0) { stq_u(partial_dest, dst); if (!len) goto out; dst++; insqh(word, doff, partial_dest); } doff = len; } ldq_u(second_dest, dst); mskqh(second_dest, doff, second_dest); stq_u(partial_dest | second_dest, dst); out: checksum += carry; return checksum; } /* * This is so totally un-fun that it's frightening. Don't * look at this too closely, you'll go blind. 
*/ static inline unsigned long csum_partial_cfu_unaligned(const unsigned long __user * src, unsigned long * dst, unsigned long soff, unsigned long doff, long len, unsigned long partial_dest) { unsigned long carry = 0; unsigned long first; unsigned long lastsrc; unsigned long checksum = ~0U; if (__get_word(ldq_u, first, src)) return 0; lastsrc = 7+len+(unsigned long)src; mskql(partial_dest, doff, partial_dest); while (len >= 0) { unsigned long second, word; unsigned long second_dest; if (__get_word(ldq_u, second, src+1)) return 0; extql(first, soff, word); checksum += carry; len -= 8; extqh(second, soff, first); src++; word |= first; first = second; insql(word, doff, second_dest); checksum += word; stq_u(partial_dest | second_dest, dst); carry = checksum < word; insqh(word, doff, partial_dest); dst++; } len += doff; checksum += carry; if (len >= 0) { unsigned long second, word; unsigned long second_dest; if (__get_word(ldq_u, second, lastsrc)) return 0; extql(first, soff, word); extqh(second, soff, first); word |= first; first = second; mskql(word, len-doff, word); checksum += word; insql(word, doff, second_dest); carry = checksum < word; stq_u(partial_dest | second_dest, dst); if (len) { ldq_u(second_dest, dst+1); insqh(word, doff, partial_dest); mskqh(second_dest, len, second_dest); stq_u(partial_dest | second_dest, dst+1); } checksum += carry; } else { unsigned long second, word; unsigned long second_dest; if (__get_word(ldq_u, second, lastsrc)) return 0; extql(first, soff, word); extqh(second, soff, first); word |= first; ldq_u(second_dest, dst); mskql(word, len-doff, word); checksum += word; mskqh(second_dest, len, second_dest); carry = checksum < word; insql(word, doff, word); stq_u(partial_dest | word | second_dest, dst); checksum += carry; } return checksum; } static __wsum __csum_and_copy(const void __user *src, void *dst, int len) { unsigned long soff = 7 & (unsigned long) src; unsigned long doff = 7 & (unsigned long) dst; unsigned long checksum; if (!doff) { if (!soff) checksum = csum_partial_cfu_aligned( (const unsigned long __user *) src, (unsigned long *) dst, len-8); else checksum = csum_partial_cfu_dest_aligned( (const unsigned long __user *) src, (unsigned long *) dst, soff, len-8); } else { unsigned long partial_dest; ldq_u(partial_dest, dst); if (!soff) checksum = csum_partial_cfu_src_aligned( (const unsigned long __user *) src, (unsigned long *) dst, doff, len-8, partial_dest); else checksum = csum_partial_cfu_unaligned( (const unsigned long __user *) src, (unsigned long *) dst, soff, doff, len-8, partial_dest); } return (__force __wsum)from64to16 (checksum); } __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len) { if (!access_ok(src, len)) return 0; return __csum_and_copy(src, dst, len); } __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { return __csum_and_copy((__force const void __user *)src, dst, len); } EXPORT_SYMBOL(csum_partial_copy_nocheck);
linux-master
arch/alpha/lib/csum_partial_copy.c
// SPDX-License-Identifier: GPL-2.0
/*
 *	arch/alpha/lib/srm_puts.c
 */

#include <linux/string.h>
#include <asm/console.h>

long
srm_puts(const char *str, long len)
{
	long remaining, written;

	if (!callback_init_done)
		return len;

	for (remaining = len; remaining > 0; remaining -= written) {
		written = callback_puts(0, str, remaining);
		written &= 0xffffffff;
		str += written;
	}
	return len;
}
linux-master
arch/alpha/lib/srm_puts.c
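Illustrative aside (not from the kernel tree): srm_puts() loops because the console callback may consume only part of the buffer per call. The same pattern over write(2), as a standalone sketch:

#include <string.h>
#include <unistd.h>

/* Keep writing until everything is consumed; returns bytes actually written. */
static long write_all(int fd, const char *str, long len)
{
	long remaining, written;

	for (remaining = len; remaining > 0; remaining -= written) {
		written = write(fd, str, remaining);
		if (written <= 0)	/* unlike the SRM callback, write() can fail */
			break;
		str += written;
	}
	return len - remaining;
}

int main(void)
{
	const char *msg = "hello from the partial-write loop\n";

	write_all(1, msg, (long)strlen(msg));
	return 0;
}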
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/lib/checksum.c * * This file contains network checksum routines that are better done * in an architecture-specific manner due to speed.. * Comments in other versions indicate that the algorithms are from RFC1071 * * accelerated versions (and 21264 assembly versions ) contributed by * Rick Gorton <[email protected]> */ #include <linux/module.h> #include <linux/string.h> #include <asm/byteorder.h> static inline unsigned short from64to16(unsigned long x) { /* Using extract instructions is a bit more efficient than the original shift/bitmask version. */ union { unsigned long ul; unsigned int ui[2]; unsigned short us[4]; } in_v, tmp_v, out_v; in_v.ul = x; tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; /* Since the bits of tmp_v.sh[3] are going to always be zero, we don't have to bother to add that in. */ out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] + (unsigned long) tmp_v.us[2]; /* Similarly, out_v.us[2] is always zero for the final add. */ return out_v.us[0] + out_v.us[1]; } /* * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented. */ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { return (__force __sum16)~from64to16( (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8)); } EXPORT_SYMBOL(csum_tcpudp_magic); __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { unsigned long result; result = (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8); /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */ /* 64 to 33 */ result = (result & 0xffffffff) + (result >> 32); /* 33 to 32 */ result = (result & 0xffffffff) + (result >> 32); return (__force __wsum)result; } EXPORT_SYMBOL(csum_tcpudp_nofold); /* * Do a 64-bit checksum on an arbitrary memory area.. * * This isn't a great routine, but it's not _horrible_ either. The * inner loop could be unrolled a bit further, and there are better * ways to do the carry, but this is reasonable. */ static inline unsigned long do_csum(const unsigned char * buff, int len) { int odd, count; unsigned long result = 0; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = *buff << 8; len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. */ if (count) { if (4 & (unsigned long) buff) { result += *(unsigned int *) buff; count--; len -= 4; buff += 4; } count >>= 1; /* nr of 64-bit words.. */ if (count) { unsigned long carry = 0; do { unsigned long w = *(unsigned long *) buff; count--; buff += 8; result += carry; result += w; carry = (w > result); } while (count); result += carry; result = (result & 0xffffffff) + (result >> 32); } if (len & 4) { result += *(unsigned int *) buff; buff += 4; } } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += *buff; result = from64to16(result); if (odd) result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); out: return result; } /* * This is a version of ip_compute_csum() optimized for IP headers, * which always checksum on 4 octet boundaries. 
*/ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { return (__force __sum16)~do_csum(iph,ihl*4); } EXPORT_SYMBOL(ip_fast_csum); /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ __wsum csum_partial(const void *buff, int len, __wsum sum) { unsigned long result = do_csum(buff, len); /* add in old sum, and carry.. */ result += (__force u32)sum; /* 32+c bits -> 32 bits */ result = (result & 0xffffffff) + (result >> 32); return (__force __wsum)result; } EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ __sum16 ip_compute_csum(const void *buff, int len) { return (__force __sum16)~from64to16(do_csum(buff,len)); } EXPORT_SYMBOL(ip_compute_csum);
linux-master
arch/alpha/lib/checksum.c
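Illustrative aside (not from the kernel tree): a self-contained, portable sketch of the accumulate-then-fold idea behind do_csum() and from64to16() above; it sums 16-bit words in host byte order and skips the odd-address handling the kernel version does.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 64-bit ones'-complement accumulator to 16 bits (same job as
 * from64to16(), via repeated halving rather than the union trick). */
static uint16_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffffu) + (sum >> 32);	/* 64 -> 33 bits */
	sum = (sum & 0xffffffffu) + (sum >> 32);	/* 33 -> 32 bits */
	sum = (sum & 0xffffu) + (sum >> 16);		/* 32 -> 17 bits */
	sum = (sum & 0xffffu) + (sum >> 16);		/* 17 -> 16 bits */
	return (uint16_t)sum;
}

static uint16_t csum_sketch(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint64_t sum = 0;
	uint16_t w;

	while (len > 1) {
		memcpy(&w, p, 2);	/* 16-bit word, host byte order */
		sum += w;
		p += 2;
		len -= 2;
	}
	if (len)			/* trailing odd byte */
		sum += *p;
	return (uint16_t)~fold64(sum);	/* complemented, as ip_compute_csum() returns */
}

int main(void)
{
	uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46,
			   0x40, 0x00, 0x40, 0x06, 0xac, 0x10 };

	printf("checksum = 0x%04x\n", csum_sketch(data, sizeof(data)));
	return 0;
}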
// SPDX-License-Identifier: GPL-2.0
/*
 *	arch/alpha/lib/fls.c
 */

#include <linux/module.h>
#include <linux/bitops.h>

/* This is fls(x)-1, except zero is held to zero.  This allows most
   efficient input into extbl, plus it allows easy handling of fls(0)=0.  */

const unsigned char __flsm1_tab[256] =
{
	0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,

	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,

	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,

	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
};

EXPORT_SYMBOL(__flsm1_tab);
linux-master
arch/alpha/lib/fls.c
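Illustrative aside (not from the kernel tree): __flsm1_tab holds fls(x)-1 per byte value, and the file's own comment says it is meant to be indexed with extbl output. The portable sketch below just scans bytes from the top, and rebuilds the table at startup instead of copying it.

#include <stdio.h>

static unsigned char flsm1_tab[256];

static void init_tab(void)
{
	int i, b;

	flsm1_tab[0] = 0;			/* fls(0) is held to 0 */
	for (i = 1; i < 256; i++)
		for (b = 7; b >= 0; b--)
			if (i & (1 << b)) {
				flsm1_tab[i] = b;
				break;
			}
}

static int fls64_sketch(unsigned long long x)
{
	int byte;

	for (byte = 7; byte >= 0; byte--) {
		unsigned int v = (x >> (byte * 8)) & 0xff;

		if (v)
			return byte * 8 + flsm1_tab[v] + 1;
	}
	return 0;				/* fls64(0) == 0 */
}

int main(void)
{
	init_tab();
	printf("%d %d %d\n", fls64_sketch(0), fls64_sketch(1),
	       fls64_sketch(0x8000));		/* 0 1 16 */
	return 0;
}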
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1993, 2000 Linus Torvalds
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#include <linux/module.h>
#include <linux/sched.h> /* for udelay's use of smp_processor_id */
#include <asm/param.h>
#include <asm/smp.h>
#include <linux/delay.h>

/*
 * Use only for very small delays (< 1 msec).
 *
 * The active part of our cycle counter is only 32-bits wide, and
 * we're treating the difference between two marks as signed.  On
 * a 1GHz box, that's about 2 seconds.
 */

void
__delay(int loops)
{
	int tmp;
	__asm__ __volatile__(
		"	rpcc %0\n"
		"	addl %1,%0,%1\n"
		"1:	rpcc %0\n"
		"	subl %1,%0,%0\n"
		"	bgt %0,1b"
		: "=&r" (tmp), "=r" (loops) : "1"(loops));
}
EXPORT_SYMBOL(__delay);

#ifdef CONFIG_SMP
#define LPJ	 cpu_data[smp_processor_id()].loops_per_jiffy
#else
#define LPJ	 loops_per_jiffy
#endif

void
udelay(unsigned long usecs)
{
	usecs *= (((unsigned long)HZ << 32) / 1000000) * LPJ;
	__delay((long)usecs >> 32);
}
EXPORT_SYMBOL(udelay);

void
ndelay(unsigned long nsecs)
{
	nsecs *= (((unsigned long)HZ << 32) / 1000000000) * LPJ;
	__delay((long)nsecs >> 32);
}
EXPORT_SYMBOL(ndelay);
linux-master
arch/alpha/lib/udelay.c
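Illustrative aside (not from the kernel tree): udelay() converts microseconds to delay loops with 32.32 fixed-point arithmetic, multiplying by (HZ << 32)/10^6 and by loops_per_jiffy, then taking the top 32 bits. A standalone sketch with invented numbers (and assuming a 64-bit unsigned long, as on Alpha):

#include <stdio.h>

int main(void)
{
	unsigned long hz = 1024;		/* illustrative tick rate */
	unsigned long loops_per_jiffy = 488281;	/* invented calibration value */
	unsigned long usecs = 100;

	/* Same expression as udelay(): the multiplier is "loops per
	 * microsecond" as a 32.32 fixed-point number. */
	unsigned long scaled = usecs * ((hz << 32) / 1000000) * loops_per_jiffy;
	unsigned long loops = scaled >> 32;

	printf("%lu us -> %lu delay loops\n", usecs, loops);
	return 0;
}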
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> typedef unsigned int instr; #define MAJOR_OP 0xfc000000 #define LDA_OP 0x20000000 #define STQ_OP 0xb4000000 #define BR_OP 0xc0000000 #define STK_ALLOC_1 0x23de8000 /* lda $30,-X($30) */ #define STK_ALLOC_1M 0xffff8000 #define STK_ALLOC_2 0x43c0153e /* subq $30,X,$30 */ #define STK_ALLOC_2M 0xffe01fff #define MEM_REG 0x03e00000 #define MEM_BASE 0x001f0000 #define MEM_OFF 0x0000ffff #define MEM_OFF_SIGN 0x00008000 #define BASE_SP 0x001e0000 #define STK_ALLOC_MATCH(INSTR) \ (((INSTR) & STK_ALLOC_1M) == STK_ALLOC_1 \ || ((INSTR) & STK_ALLOC_2M) == STK_ALLOC_2) #define STK_PUSH_MATCH(INSTR) \ (((INSTR) & (MAJOR_OP | MEM_BASE | MEM_OFF_SIGN)) == (STQ_OP | BASE_SP)) #define MEM_OP_OFFSET(INSTR) \ (((long)((INSTR) & MEM_OFF) << 48) >> 48) #define MEM_OP_REG(INSTR) \ (((INSTR) & MEM_REG) >> 22) /* Branches, jumps, PAL calls, and illegal opcodes end a basic block. */ #define BB_END(INSTR) \ (((instr)(INSTR) >= BR_OP) | ((instr)(INSTR) < LDA_OP) | \ ((((instr)(INSTR) ^ 0x60000000) < 0x20000000) & \ (((instr)(INSTR) & 0x0c000000) != 0))) #define IS_KERNEL_TEXT(PC) ((unsigned long)(PC) > START_ADDR) static char reg_name[][4] = { "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ", "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "s6 ", "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ", "t10", "t11", "ra ", "pv ", "at ", "gp ", "sp ", "0" }; static instr * display_stored_regs(instr * pro_pc, unsigned char * sp) { instr * ret_pc = 0; int reg; unsigned long value; printk("Prologue [<%p>], Frame %p:\n", pro_pc, sp); while (!BB_END(*pro_pc)) if (STK_PUSH_MATCH(*pro_pc)) { reg = (*pro_pc & MEM_REG) >> 21; value = *(unsigned long *)(sp + (*pro_pc & MEM_OFF)); if (reg == 26) ret_pc = (instr *)value; printk("\t\t%s / 0x%016lx\n", reg_name[reg], value); } return ret_pc; } static instr * seek_prologue(instr * pc) { while (!STK_ALLOC_MATCH(*pc)) --pc; while (!BB_END(*(pc - 1))) --pc; return pc; } static long stack_increment(instr * prologue_pc) { while (!STK_ALLOC_MATCH(*prologue_pc)) ++prologue_pc; /* Count the bytes allocated. */ if ((*prologue_pc & STK_ALLOC_1M) == STK_ALLOC_1M) return -(((long)(*prologue_pc) << 48) >> 48); else return (*prologue_pc >> 13) & 0xff; } void stacktrace(void) { instr * ret_pc; instr * prologue = (instr *)stacktrace; unsigned char *sp = (unsigned char *)current_stack_pointer; printk("\tstack trace:\n"); do { ret_pc = display_stored_regs(prologue, sp); sp += stack_increment(prologue); prologue = seek_prologue(ret_pc); } while (IS_KERNEL_TEXT(ret_pc)); }
linux-master
arch/alpha/lib/stacktrace.c
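Illustrative aside (not from the kernel tree): the prologue matching above depends on decoding Alpha memory-format instructions: opcode in bits 31..26, ra in 25..21, rb in 20..16, and a signed 16-bit displacement that the MEM_OP_OFFSET() macro sign-extends with a shift pair. A standalone decode of the "lda $30,-32768($30)" pattern (assuming a 64-bit long, as the macro does):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t insn = 0x23de8000;	/* the STK_ALLOC_1 pattern above */
	unsigned opcode = insn >> 26;			/* 0x08 = lda */
	unsigned ra = (insn >> 21) & 0x1f;		/* 30 = $sp */
	unsigned rb = (insn >> 16) & 0x1f;		/* 30 = $sp */
	long disp = ((long)(insn & 0xffff) << 48) >> 48;	/* sign-extend 16 bits */

	printf("opcode=%#x ra=$%u rb=$%u disp=%ld\n", opcode, ra, rb, disp);
	return 0;
}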
// SPDX-License-Identifier: GPL-2.0
/*
 *	arch/alpha/lib/srm_printk.c
 */

#include <linux/kernel.h>
#include <asm/console.h>

long
srm_printk(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long len, num_lf;
	char *src, *dst;

	va_start(args, fmt);
	len = vsprintf(buf, fmt, args);
	va_end(args);

	/* count number of linefeeds in string: */
	num_lf = 0;
	for (src = buf; *src; ++src) {
		if (*src == '\n') {
			++num_lf;
		}
	}

	if (num_lf) {
		/* expand each linefeed into carriage-return/linefeed: */
		for (dst = src + num_lf; src >= buf; ) {
			if (*src == '\n') {
				*dst-- = '\r';
			}
			*dst-- = *src--;
		}
	}

	srm_puts(buf, num_lf+len);
	return len;
}
linux-master
arch/alpha/lib/srm_printk.c
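Illustrative aside (not from the kernel tree): srm_printk() expands linefeeds in place by counting them first and then copying backwards from the end, so nothing is overwritten before it has been read. A standalone sketch of the same backward expansion (here producing "\r\n", with the buffer assumed large enough):

#include <stdio.h>
#include <string.h>

static size_t expand_newlines(char *buf, size_t cap)
{
	size_t len = strlen(buf), num_lf = 0, i;
	char *src, *dst;

	for (i = 0; i < len; i++)
		if (buf[i] == '\n')
			num_lf++;
	if (len + num_lf + 1 > cap)	/* not enough room: leave untouched */
		return len;

	src = buf + len;		/* start at the terminating NUL */
	dst = buf + len + num_lf;
	while (src >= buf) {
		*dst-- = *src;
		if (*src == '\n')
			*dst-- = '\r';	/* CR ends up in front of the LF */
		src--;
	}
	return len + num_lf;
}

int main(void)
{
	char buf[64] = "line one\nline two\n";

	expand_newlines(buf, sizeof(buf));
	fwrite(buf, 1, strlen(buf), stdout);
	return 0;
}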
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/mm/init.c * * Copyright (C) 1995 Linus Torvalds */ /* 2.3.x zone allocator, 1999 Andrea Arcangeli <[email protected]> */ #include <linux/pagemap.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/init.h> #include <linux/memblock.h> /* max_low_pfn */ #include <linux/vmalloc.h> #include <linux/gfp.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> #include <asm/hwrpb.h> #include <asm/dma.h> #include <asm/mmu_context.h> #include <asm/console.h> #include <asm/tlb.h> #include <asm/setup.h> #include <asm/sections.h> extern void die_if_kernel(char *,struct pt_regs *,long); static struct pcb_struct original_pcb; pgd_t * pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); init = pgd_offset(&init_mm, 0UL); if (ret) { #ifdef CONFIG_ALPHA_LARGE_VMALLOC memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t)); #else pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]); #endif /* The last PGD entry is the VPTB self-map. */ pgd_val(ret[PTRS_PER_PGD-1]) = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL)); } return ret; } /* * BAD_PAGE is the page that is used for page faults when linux * is out-of-memory. Older versions of linux just did a * do_exit(), but using this instead means there is less risk * for a process dying in kernel mode, possibly leaving an inode * unused etc.. * * BAD_PAGETABLE is the accompanying page-table: it is initialized * to point to BAD_PAGE entries. * * ZERO_PAGE is a special page that is used for zero-initialized * data and COW. */ pmd_t * __bad_pagetable(void) { memset(absolute_pointer(EMPTY_PGT), 0, PAGE_SIZE); return (pmd_t *) EMPTY_PGT; } pte_t __bad_page(void) { memset(absolute_pointer(EMPTY_PGE), 0, PAGE_SIZE); return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED)); } static inline unsigned long load_PCB(struct pcb_struct *pcb) { register unsigned long sp __asm__("$30"); pcb->ksp = sp; return __reload_thread(pcb); } /* Set up initial PCB, VPTB, and other such nicities. */ static inline void switch_to_system_map(void) { unsigned long newptbr; unsigned long original_pcb_ptr; /* Initialize the kernel's page tables. Linux puts the vptb in the last slot of the L1 page table. */ memset(swapper_pg_dir, 0, PAGE_SIZE); newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT; pgd_val(swapper_pg_dir[1023]) = (newptbr << 32) | pgprot_val(PAGE_KERNEL); /* Set the vptb. This is often done by the bootloader, but shouldn't be required. */ if (hwrpb->vptb != 0xfffffffe00000000UL) { wrvptptr(0xfffffffe00000000UL); hwrpb->vptb = 0xfffffffe00000000UL; hwrpb_update_checksum(hwrpb); } /* Also set up the real kernel PCB while we're at it. */ init_thread_info.pcb.ptbr = newptbr; init_thread_info.pcb.flags = 1; /* set FEN, clear everything else */ original_pcb_ptr = load_PCB(&init_thread_info.pcb); tbia(); /* Save off the contents of the original PCB so that we can restore the original console's page tables for a clean reboot. Note that the PCB is supposed to be a physical address, but since KSEG values also happen to work, folks get confused. Check this here. 
*/ if (original_pcb_ptr < PAGE_OFFSET) { original_pcb_ptr = (unsigned long) phys_to_virt(original_pcb_ptr); } original_pcb = *(struct pcb_struct *) original_pcb_ptr; } int callback_init_done; void * __init callback_init(void * kernel_end) { struct crb_struct * crb; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; void *two_pages; /* Starting at the HWRPB, locate the CRB. */ crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset); if (alpha_using_srm) { /* Tell the console whither it is to be remapped. */ if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb)) __halt(); /* "We're boned." --Bender */ /* Edit the procedure descriptors for DISPATCH and FIXUP. */ crb->dispatch_va = (struct procdesc_struct *) (VMALLOC_START + (unsigned long)crb->dispatch_va - crb->map[0].va); crb->fixup_va = (struct procdesc_struct *) (VMALLOC_START + (unsigned long)crb->fixup_va - crb->map[0].va); } switch_to_system_map(); /* Allocate one PGD and one PMD. In the case of SRM, we'll need these to actually remap the console. There is an assumption here that only one of each is needed, and this allows for 8MB. On systems with larger consoles, additional pages will be allocated as needed during the mapping process. In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC, we need to allocate the PGD we use for vmalloc before we start forking other tasks. */ two_pages = (void *) (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK); kernel_end = two_pages + 2*PAGE_SIZE; memset(two_pages, 0, 2*PAGE_SIZE); pgd = pgd_offset_k(VMALLOC_START); p4d = p4d_offset(pgd, VMALLOC_START); pud = pud_offset(p4d, VMALLOC_START); pud_set(pud, (pmd_t *)two_pages); pmd = pmd_offset(pud, VMALLOC_START); pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE)); if (alpha_using_srm) { static struct vm_struct console_remap_vm; unsigned long nr_pages = 0; unsigned long vaddr; unsigned long i, j; /* calculate needed size */ for (i = 0; i < crb->map_entries; ++i) nr_pages += crb->map[i].count; /* register the vm area */ console_remap_vm.flags = VM_ALLOC; console_remap_vm.size = nr_pages << PAGE_SHIFT; vm_area_register_early(&console_remap_vm, PAGE_SIZE); vaddr = (unsigned long)console_remap_vm.addr; /* Set up the third level PTEs and update the virtual addresses of the CRB entries. */ for (i = 0; i < crb->map_entries; ++i) { unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT; crb->map[i].va = vaddr; for (j = 0; j < crb->map[i].count; ++j) { /* Newer consoles (especially on larger systems) may require more pages of PTEs. Grab additional pages as needed. */ if (pmd != pmd_offset(pud, vaddr)) { memset(kernel_end, 0, PAGE_SIZE); pmd = pmd_offset(pud, vaddr); pmd_set(pmd, (pte_t *)kernel_end); kernel_end += PAGE_SIZE; } set_pte(pte_offset_kernel(pmd, vaddr), pfn_pte(pfn, PAGE_KERNEL)); pfn++; vaddr += PAGE_SIZE; } } } callback_init_done = 1; return kernel_end; } /* * paging_init() sets up the memory map. */ void __init paging_init(void) { unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, }; unsigned long dma_pfn; dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; max_pfn = max_low_pfn; max_zone_pfn[ZONE_DMA] = dma_pfn; max_zone_pfn[ZONE_NORMAL] = max_pfn; /* Initialize mem_map[]. */ free_area_init(max_zone_pfn); /* Initialize the kernel's ZERO_PGE. */ memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE); } #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM) void srm_paging_stop (void) { /* Move the vptb back to where the SRM console expects it. 
*/ swapper_pg_dir[1] = swapper_pg_dir[1023]; tbia(); wrvptptr(0x200000000UL); hwrpb->vptb = 0x200000000UL; hwrpb_update_checksum(hwrpb); /* Reload the page tables that the console had in use. */ load_PCB(&original_pcb); tbia(); } #endif void __init mem_init(void) { set_max_mapnr(max_low_pfn); high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); } static const pgprot_t protection_map[16] = { [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR), [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), [VM_WRITE] = _PAGE_P(_PAGE_FOE), [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), [VM_EXEC | VM_WRITE] = _PAGE_P(0), [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR), [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) }; DECLARE_VM_GET_PAGE_PROT
linux-master
arch/alpha/mm/init.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/mm/fault.c * * Copyright (C) 1995 Linus Torvalds */ #include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/io.h> #define __EXTERN_INLINE inline #include <asm/mmu_context.h> #include <asm/tlbflush.h> #undef __EXTERN_INLINE #include <linux/signal.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/extable.h> #include <linux/uaccess.h> #include <linux/perf_event.h> extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *); /* * Force a new ASN for a task. */ #ifndef CONFIG_SMP unsigned long last_asn = ASN_FIRST_VERSION; #endif void __load_new_mm_context(struct mm_struct *next_mm) { unsigned long mmc; struct pcb_struct *pcb; mmc = __get_new_mm_context(next_mm, smp_processor_id()); next_mm->context[smp_processor_id()] = mmc; pcb = &current_thread_info()->pcb; pcb->asn = mmc & HARDWARE_ASN_MASK; pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; __reload_thread(pcb); } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to handle_mm_fault(). * * mmcsr: * 0 = translation not valid * 1 = access violation * 2 = fault-on-read * 3 = fault-on-execute * 4 = fault-on-write * * cause: * -1 = instruction fetch * 0 = load * 1 = store * * Registers $9 through $15 are saved in a block just prior to `regs' and * are saved and restored around the call to allow exception code to * modify them. */ /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ (r) <= 18 ? (r)+10 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; const struct exception_table_entry *fixup; int si_code = SEGV_MAPERR; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs by ignoring such an instruction. */ if (cause == 0) { unsigned int insn; __get_user(insn, (unsigned int __user *)regs->pc); if ((insn >> 21 & 0x1f) == 0x1f && /* ldq ldl ldt lds ldg ldf ldwu ldbu */ (1ul << (insn >> 26) & 0x30f00001400ul)) { regs->pc += 4; return; } } /* If we're in an interrupt context, or have no user context, we must not take the fault. */ if (!mm || faulthandler_disabled()) goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC if (address >= TASK_SIZE) goto vmalloc_fault; #endif if (user_mode(regs)) flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area; } else { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. 
*/ fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); return; /* Something tried to access memory that isn't in our memory map. Fix it, but check if it's kernel or user first. */ bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: if (user_mode(regs)) goto do_sigsegv; no_context: /* Are we prepared to handle this fault as an exception? */ if ((fixup = search_exception_tables(regs->pc)) != 0) { unsigned long newpc; newpc = fixup_exception(dpf_reg, fixup, regs->pc); regs->pc = newpc; return; } /* Oops. The kernel tried to access some bad page. We'll have to terminate things with extreme prejudice. */ printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16); make_task_dead(SIGKILL); /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: mmap_read_unlock(mm); /* Send a sigbus, regardless of whether we were in kernel or user mode. */ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); if (!user_mode(regs)) goto no_context; return; do_sigsegv: force_sig_fault(SIGSEGV, si_code, (void __user *) address); return; #ifdef CONFIG_ALPHA_LARGE_VMALLOC vmalloc_fault: if (user_mode(regs)) goto do_sigsegv; else { /* Synchronize this task's top level page-table with the "reference" page table from init. */ long index = pgd_index(address); pgd_t *pgd, *pgd_k; pgd = current->active_mm->pgd + index; pgd_k = swapper_pg_dir + index; if (!pgd_present(*pgd) && pgd_present(*pgd_k)) { pgd_val(*pgd) = pgd_val(*pgd_k); return; } goto no_context; } #endif }
linux-master
arch/alpha/mm/fault.c
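Illustrative aside (not from the kernel tree): the prefetch check in do_page_fault() above tests "ra is $31 and the major opcode is one of the listed loads" with a single 64-bit bitmask (0x30f00001400ul has one bit set per opcode). A standalone decode of one such instruction:

#include <stdio.h>

int main(void)
{
	/* Opcodes: ldbu=0x0a, ldwu=0x0c, ldf=0x20, ldg=0x21, lds=0x22,
	 * ldt=0x23, ldl=0x28, ldq=0x29; these are the set bits of the mask. */
	unsigned long mask = 0x30f00001400ul;
	unsigned int insn = 0xa7fe0000;		/* ldq $31,0($30) */
	unsigned int opcode = insn >> 26;
	unsigned int ra = (insn >> 21) & 0x1f;

	if (ra == 0x1f && ((1ul << opcode) & mask))
		printf("opcode %#x with ra=$31: treated as a prefetch\n", opcode);
	return 0;
}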
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_cia.c * * Written by David A Rusling ([email protected]). * December 1995. * * Copyright (C) 1995 David A Rusling * Copyright (C) 1997, 1998 Jay Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code common to all CIA core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_cia.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/mce.h> #include "proto.h" #include "pci_impl.h" /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the i/o controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBGC(args) printk args #else # define DBGC(args) #endif #define vip volatile int * /* * Given a bus, device, and function number, compute resulting * configuration space address. It is therefore not safe to have * concurrent invocations to configuration space access routines, but * there really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. * 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { u8 bus = bus_dev->number; *type1 = (bus != 0); *pci_addr = (bus << 16) | (device_fn << 8) | where; DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " returning address 0x%p\n" bus, device_fn, where, *pci_addr)); return 0; } static unsigned int conf_read(unsigned long addr, unsigned char type1) { unsigned long flags; int stat0, value; int cia_cfg = 0; DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1)); local_irq_save(flags); /* Reset status register to avoid losing errors. */ stat0 = *(vip)CIA_IOC_CIA_ERR; *(vip)CIA_IOC_CIA_ERR = stat0; mb(); *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */ /* If Type1 access, must set CIA CFG. */ if (type1) { cia_cfg = *(vip)CIA_IOC_CFG; *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1; mb(); *(vip)CIA_IOC_CFG; } mb(); draina(); mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); /* Access configuration space. 
*/ value = *(vip)addr; mb(); mb(); /* magic */ if (mcheck_taken(0)) { mcheck_taken(0) = 0; value = 0xffffffff; mb(); } mcheck_expected(0) = 0; mb(); /* If Type1 access, must reset IOC CFG so normal IO space ops work. */ if (type1) { *(vip)CIA_IOC_CFG = cia_cfg; mb(); *(vip)CIA_IOC_CFG; } local_irq_restore(flags); DBGC(("done\n")); return value; } static void conf_write(unsigned long addr, unsigned int value, unsigned char type1) { unsigned long flags; int stat0, cia_cfg = 0; DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1)); local_irq_save(flags); /* Reset status register to avoid losing errors. */ stat0 = *(vip)CIA_IOC_CIA_ERR; *(vip)CIA_IOC_CIA_ERR = stat0; mb(); *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */ /* If Type1 access, must set CIA CFG. */ if (type1) { cia_cfg = *(vip)CIA_IOC_CFG; *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1; mb(); *(vip)CIA_IOC_CFG; } mb(); draina(); mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); /* Access configuration space. */ *(vip)addr = value; mb(); *(vip)addr; /* read back to force the write */ mcheck_expected(0) = 0; mb(); /* If Type1 access, must reset IOC CFG so normal IO space ops work. */ if (type1) { *(vip)CIA_IOC_CFG = cia_cfg; mb(); *(vip)CIA_IOC_CFG; } local_irq_restore(flags); DBGC(("done\n")); } static int cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr, pci_addr; long mask; unsigned char type1; int shift; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; shift = (where & 3) * 8; addr = (pci_addr << 5) + mask + CIA_CONF; *value = conf_read(addr, type1) >> (shift); return PCIBIOS_SUCCESSFUL; } static int cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr, pci_addr; long mask; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + CIA_CONF; conf_write(addr, value << ((where & 3) * 8), type1); return PCIBIOS_SUCCESSFUL; } struct pci_ops cia_pci_ops = { .read = cia_read_config, .write = cia_write_config, }; /* * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb. * It cannot be invalidated. Rather than hard code the pass numbers, * actually try the tbia to see if it works. */ void cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */ mb(); *(vip)CIA_IOC_PCI_TBIA; } /* * On PYXIS, even if the tbia works, we cannot use it. It effectively locks * the chip (as well as direct write to the tag registers) if there is a * SG DMA operation in progress. This is true at least for PYXIS rev. 1, * so always use the method below. */ /* * This is the method NT and NetBSD use. * * Allocate mappings, and put the chip into DMA loopback mode to read a * garbage page. This works by causing TLB misses, causing old entries to * be purged to make room for the new entries coming in for the garbage page. */ #define CIA_BROKEN_TBIA_BASE 0x30000000 #define CIA_BROKEN_TBIA_SIZE 1024 /* Always called with interrupts disabled */ void cia_pci_tbi_try2(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { void __iomem *bus_addr; int ctrl; /* Put the chip into PCI loopback mode. 
*/ mb(); ctrl = *(vip)CIA_IOC_CIA_CTRL; *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN; mb(); *(vip)CIA_IOC_CIA_CTRL; mb(); /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on each read. This forces SG TLB misses. NetBSD claims that the TLB entries are not quite LRU, meaning that we need to read more times than there are actual tags. The 2117x docs claim strict round-robin. Oh well, we've come this far... */ /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can be filled by the TLB misses *only once* after being invalidated (by tbia or direct write). Next misses won't update them even though the lock bits are cleared. Tags 4-7 are "quite LRU" though, so use them and read at window 3 base exactly 4 times. Reading more sometimes makes the chip crazy. -ink */ bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4); cia_readl(bus_addr + 0x00000); cia_readl(bus_addr + 0x08000); cia_readl(bus_addr + 0x10000); cia_readl(bus_addr + 0x18000); cia_iounmap(bus_addr); /* Restore normal PCI operation. */ mb(); *(vip)CIA_IOC_CIA_CTRL = ctrl; mb(); *(vip)CIA_IOC_CIA_CTRL; mb(); } static inline void cia_prepare_tbia_workaround(int window) { unsigned long *ppte, pte; long i; /* Use minimal 1K map. */ ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768); if (!ppte) panic("%s: Failed to allocate %u bytes align=0x%x\n", __func__, CIA_BROKEN_TBIA_SIZE, 32768); pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1; for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i) ppte[i] = pte; *(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3; *(vip)CIA_IOC_PCI_Wn_MASK(window) = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000; *(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2; } static void __init verify_tb_operation(void) { static int page[PAGE_SIZE/4] __attribute__((aligned(PAGE_SIZE))) __initdata = { 0 }; struct pci_iommu_arena *arena = pci_isa_hose->sg_isa; int ctrl, addr0, tag0, pte0, data0; int temp, use_tbia_try2 = 0; void __iomem *bus_addr; /* pyxis -- tbia is broken */ if (pci_isa_hose->dense_io_base) use_tbia_try2 = 1; /* Put the chip into PCI loopback mode. */ mb(); ctrl = *(vip)CIA_IOC_CIA_CTRL; *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN; mb(); *(vip)CIA_IOC_CIA_CTRL; mb(); /* Write a valid entry directly into the TLB registers. */ addr0 = arena->dma_base; tag0 = addr0 | 1; pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1; *(vip)CIA_IOC_TB_TAGn(0) = tag0; *(vip)CIA_IOC_TB_TAGn(1) = 0; *(vip)CIA_IOC_TB_TAGn(2) = 0; *(vip)CIA_IOC_TB_TAGn(3) = 0; *(vip)CIA_IOC_TB_TAGn(4) = 0; *(vip)CIA_IOC_TB_TAGn(5) = 0; *(vip)CIA_IOC_TB_TAGn(6) = 0; *(vip)CIA_IOC_TB_TAGn(7) = 0; *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0; *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0; *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0; *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0; mb(); /* Get a usable bus address */ bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE); /* First, verify we can read back what we've written. If this fails, we can't be sure of any of the other testing we're going to do, so bail. */ /* ??? Actually, we could do the work with machine checks. By passing this register update test, we pretty much guarantee that cia_pci_tbi_try1 works. If this test fails, cia_pci_tbi_try2 might still work. 
*/ temp = *(vip)CIA_IOC_TB_TAGn(0); if (temp != tag0) { printk("pci: failed tb register update test " "(tag0 %#x != %#x)\n", temp, tag0); goto failed; } temp = *(vip)CIA_IOC_TB_TAGn(1); if (temp != 0) { printk("pci: failed tb register update test " "(tag1 %#x != 0)\n", temp); goto failed; } temp = *(vip)CIA_IOC_TBn_PAGEm(0,0); if (temp != pte0) { printk("pci: failed tb register update test " "(pte0 %#x != %#x)\n", temp, pte0); goto failed; } printk("pci: passed tb register update test\n"); /* Second, verify we can actually do I/O through this entry. */ data0 = 0xdeadbeef; page[0] = data0; mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); temp = cia_readl(bus_addr); mb(); mcheck_expected(0) = 0; mb(); if (mcheck_taken(0)) { printk("pci: failed sg loopback i/o read test (mcheck)\n"); goto failed; } if (temp != data0) { printk("pci: failed sg loopback i/o read test " "(%#x != %#x)\n", temp, data0); goto failed; } printk("pci: passed sg loopback i/o read test\n"); /* Third, try to invalidate the TLB. */ if (! use_tbia_try2) { cia_pci_tbi(arena->hose, 0, -1); temp = *(vip)CIA_IOC_TB_TAGn(0); if (temp & 1) { use_tbia_try2 = 1; printk("pci: failed tbia test; workaround available\n"); } else { printk("pci: passed tbia test\n"); } } /* Fourth, verify the TLB snoops the EV5's caches when doing a tlb fill. */ data0 = 0x5adda15e; page[0] = data0; arena->ptes[4] = pte0; mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); temp = cia_readl(bus_addr + 4*PAGE_SIZE); mb(); mcheck_expected(0) = 0; mb(); if (mcheck_taken(0)) { printk("pci: failed pte write cache snoop test (mcheck)\n"); goto failed; } if (temp != data0) { printk("pci: failed pte write cache snoop test " "(%#x != %#x)\n", temp, data0); goto failed; } printk("pci: passed pte write cache snoop test\n"); /* Fifth, verify that a previously invalid PTE entry gets filled from the page table. */ data0 = 0xabcdef12; page[0] = data0; arena->ptes[5] = pte0; mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); temp = cia_readl(bus_addr + 5*PAGE_SIZE); mb(); mcheck_expected(0) = 0; mb(); if (mcheck_taken(0)) { printk("pci: failed valid tag invalid pte reload test " "(mcheck; workaround available)\n"); /* Work around this bug by aligning new allocations on 4 page boundaries. */ arena->align_entry = 4; } else if (temp != data0) { printk("pci: failed valid tag invalid pte reload test " "(%#x != %#x)\n", temp, data0); goto failed; } else { printk("pci: passed valid tag invalid pte reload test\n"); } /* Sixth, verify machine checks are working. Test invalid pte under the same valid tag as we used above. */ mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); temp = cia_readl(bus_addr + 6*PAGE_SIZE); mb(); mcheck_expected(0) = 0; mb(); printk("pci: %s pci machine check test\n", mcheck_taken(0) ? "passed" : "failed"); /* Clean up after the tests. */ arena->ptes[4] = 0; arena->ptes[5] = 0; if (use_tbia_try2) { alpha_mv.mv_pci_tbi = cia_pci_tbi_try2; /* Tags 0-3 must be disabled if we use this workaround. */ wmb(); *(vip)CIA_IOC_TB_TAGn(0) = 2; *(vip)CIA_IOC_TB_TAGn(1) = 2; *(vip)CIA_IOC_TB_TAGn(2) = 2; *(vip)CIA_IOC_TB_TAGn(3) = 2; printk("pci: tbia workaround enabled\n"); } alpha_mv.mv_pci_tbi(arena->hose, 0, -1); exit: /* unmap the bus addr */ cia_iounmap(bus_addr); /* Restore normal PCI operation. 
*/ mb(); *(vip)CIA_IOC_CIA_CTRL = ctrl; mb(); *(vip)CIA_IOC_CIA_CTRL; mb(); return; failed: printk("pci: disabling sg translation window\n"); *(vip)CIA_IOC_PCI_W0_BASE = 0; *(vip)CIA_IOC_PCI_W1_BASE = 0; pci_isa_hose->sg_isa = NULL; alpha_mv.mv_pci_tbi = NULL; goto exit; } #if defined(ALPHA_RESTORE_SRM_SETUP) /* Save CIA configuration data as the console had it set up. */ struct { unsigned int hae_mem; unsigned int hae_io; unsigned int pci_dac_offset; unsigned int err_mask; unsigned int cia_ctrl; unsigned int cia_cnfg; struct { unsigned int w_base; unsigned int w_mask; unsigned int t_base; } window[4]; } saved_config __attribute((common)); void cia_save_srm_settings(int is_pyxis) { int i; /* Save some important registers. */ saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK; saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL; saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM; saved_config.hae_io = *(vip)CIA_IOC_HAE_IO; saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC; if (is_pyxis) saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG; else saved_config.cia_cnfg = 0; /* Save DMA windows configuration. */ for (i = 0; i < 4; i++) { saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i); saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i); saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i); } mb(); } void cia_restore_srm_settings(void) { int i; for (i = 0; i < 4; i++) { *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base; *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask; *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base; } *(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem; *(vip)CIA_IOC_HAE_IO = saved_config.hae_io; *(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset; *(vip)CIA_IOC_ERR_MASK = saved_config.err_mask; *(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl; if (saved_config.cia_cnfg) /* Must be pyxis. */ *(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg; mb(); } #else /* ALPHA_RESTORE_SRM_SETUP */ #define cia_save_srm_settings(p) do {} while (0) #define cia_restore_srm_settings() do {} while (0) #endif /* ALPHA_RESTORE_SRM_SETUP */ static void __init do_init_arch(int is_pyxis) { struct pci_controller *hose; int temp, cia_rev, tbia_window; cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK; printk("pci: cia revision %d%s\n", cia_rev, is_pyxis ? " (pyxis)" : ""); if (alpha_using_srm) cia_save_srm_settings(is_pyxis); /* Set up error reporting. */ temp = *(vip)CIA_IOC_ERR_MASK; temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT); *(vip)CIA_IOC_ERR_MASK = temp; /* Clear all currently pending errors. */ temp = *(vip)CIA_IOC_CIA_ERR; *(vip)CIA_IOC_CIA_ERR = temp; /* Turn on mchecks. */ temp = *(vip)CIA_IOC_CIA_CTRL; temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN; *(vip)CIA_IOC_CIA_CTRL = temp; /* Clear the CFG register, which gets used for PCI config space accesses. That is the way we want to use it, and we do not want to depend on what ARC or SRM might have left behind. */ *(vip)CIA_IOC_CFG = 0; /* Zero the HAEs. */ *(vip)CIA_IOC_HAE_MEM = 0; *(vip)CIA_IOC_HAE_IO = 0; /* For PYXIS, we always use BWX bus and i/o accesses. To that end, make sure they're enabled on the controller. At the same time, enable the monster window. */ if (is_pyxis) { temp = *(vip)CIA_IOC_CIA_CNFG; temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN; *(vip)CIA_IOC_CIA_CNFG = temp; } /* Synchronize with all previous changes. */ mb(); *(vip)CIA_IOC_CIA_REV; /* * Create our single hose. 
*/ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; if (! is_pyxis) { struct resource *hae_mem = alloc_resource(); hose->mem_space = hae_mem; hae_mem->start = 0; hae_mem->end = CIA_MEM_R1_MASK; hae_mem->name = pci_hae0_name; hae_mem->flags = IORESOURCE_MEM; if (request_resource(&iomem_resource, hae_mem) < 0) printk(KERN_ERR "Failed to request HAE_MEM\n"); hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR; hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR; hose->sparse_io_base = CIA_IO - IDENT_ADDR; hose->dense_io_base = 0; } else { hose->sparse_mem_base = 0; hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = CIA_BW_IO - IDENT_ADDR; } /* * Set up the PCI to main memory translation windows. * * Window 0 is S/G 8MB at 8MB (for isa) * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1) * Window 2 is direct access 2GB at 2GB * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1) * * ??? NetBSD hints that page tables must be aligned to 32K, * possibly due to a hardware bug. This is over-aligned * from the 8K alignment one would expect for an 8MB window. * No description of what revisions affected. */ hose->sg_pci = NULL; hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768); __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3; *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000; *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2; *(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1; *(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000; *(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2; /* On PYXIS we have the monster window, selected by bit 40, so there is no need for window3 to be enabled. On CIA, we don't have true arbitrary addressing -- bits <39:32> are compared against W_DAC. We can, however, directly map 4GB, which is better than before. However, due to assumptions made elsewhere, we should not claim that we support DAC unless that 4GB covers all of physical memory. On CIA rev 1, apparently W1 and W2 can't be used for SG. At least, there are reports that it doesn't work for Alcor. In that case, we have no choice but to use W3 for the TBIA workaround, which means we can't use DAC at all. */ tbia_window = 1; if (is_pyxis) { *(vip)CIA_IOC_PCI_W3_BASE = 0; } else if (cia_rev == 1) { *(vip)CIA_IOC_PCI_W1_BASE = 0; tbia_window = 3; } else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) { *(vip)CIA_IOC_PCI_W3_BASE = 0; } else { *(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8; *(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000; *(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2; alpha_mv.pci_dac_offset = 0x200000000UL; *(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32; } /* Prepare workaround for apparently broken tbia. */ cia_prepare_tbia_workaround(tbia_window); } void __init cia_init_arch(void) { do_init_arch(0); } void __init pyxis_init_arch(void) { /* On pyxis machines we can precisely calculate the CPU clock frequency using pyxis real time counter. It's especially useful for SX164 with broken RTC. Both CPU and chipset are driven by the single 16.666M or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is 66.66 MHz. 
-ink */ unsigned int cc0, cc1; unsigned long pyxis_cc; __asm__ __volatile__ ("rpcc %0" : "=r"(cc0)); pyxis_cc = *(vulp)PYXIS_RT_COUNT; do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096); __asm__ __volatile__ ("rpcc %0" : "=r"(cc1)); cc1 -= cc0; hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3; hwrpb_update_checksum(hwrpb); do_init_arch(1); } void cia_kill_arch(int mode) { if (alpha_using_srm) cia_restore_srm_settings(); } void __init cia_init_pci(void) { /* Must delay this from init_arch, as we need machine checks. */ verify_tb_operation(); common_init_pci(); } static inline void cia_pci_clr_err(void) { int jd; jd = *(vip)CIA_IOC_CIA_ERR; *(vip)CIA_IOC_CIA_ERR = jd; mb(); *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */ } #ifdef CONFIG_VERBOSE_MCHECK static void cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) { static const char * const pci_cmd_desc[16] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read", "Memory Write", "Reserved 0x8", "Reserved 0x9", "Configuration Read", "Configuration Write", "Memory Read Multiple", "Dual Address Cycle", "Memory Read Line", "Memory Write and Invalidate" }; if (cia->cia_err & (CIA_ERR_COR_ERR | CIA_ERR_UN_COR_ERR | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV)) { static const char * const window_desc[6] = { "No window active", "Window 0 hit", "Window 1 hit", "Window 2 hit", "Window 3 hit", "Monster window hit" }; const char *window; const char *cmd; unsigned long addr, tmp; int lock, dac; cmd = pci_cmd_desc[cia->pci_err0 & 0x7]; lock = (cia->pci_err0 >> 4) & 1; dac = (cia->pci_err0 >> 5) & 1; tmp = (cia->pci_err0 >> 8) & 0x1F; tmp = ffs(tmp); window = window_desc[tmp]; addr = cia->pci_err1; if (dac) { tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL; addr |= tmp << 32; } printk(KERN_CRIT "CIA machine check: %s\n", msg); printk(KERN_CRIT " DMA command: %s\n", cmd); printk(KERN_CRIT " PCI address: %#010lx\n", addr); printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n", window, lock, dac); } else if (cia->cia_err & (CIA_ERR_PERR | CIA_ERR_PCI_ADDR_PE | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT | CIA_ERR_IOA_TIMEOUT)) { static const char * const master_st_desc[16] = { "Idle", "Drive bus", "Address step cycle", "Address cycle", "Data cycle", "Last read data cycle", "Last write data cycle", "Read stop cycle", "Write stop cycle", "Read turnaround cycle", "Write turnaround cycle", "Reserved 0xB", "Reserved 0xC", "Reserved 0xD", "Reserved 0xE", "Unknown state" }; static const char * const target_st_desc[16] = { "Idle", "Busy", "Read data cycle", "Write data cycle", "Read stop cycle", "Write stop cycle", "Read turnaround cycle", "Write turnaround cycle", "Read wait cycle", "Write wait cycle", "Reserved 0xA", "Reserved 0xB", "Reserved 0xC", "Reserved 0xD", "Reserved 0xE", "Unknown state" }; const char *cmd; const char *master, *target; unsigned long addr, tmp; int dac; master = master_st_desc[(cia->pci_err0 >> 16) & 0xF]; target = target_st_desc[(cia->pci_err0 >> 20) & 0xF]; cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF]; dac = (cia->pci_err0 >> 28) & 1; addr = cia->pci_err2; if (dac) { tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL; addr |= tmp << 32; } printk(KERN_CRIT "CIA machine check: %s\n", msg); printk(KERN_CRIT " PCI command: %s\n", cmd); printk(KERN_CRIT " Master state: %s, Target state: %s\n", master, target); printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n", addr, dac); } else { printk(KERN_CRIT "CIA machine check: %s\n", msg); printk(KERN_CRIT " Unknown PCI error\n"); 
printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0); printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1); printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2); } } static void cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) { unsigned long mem_port_addr; unsigned long mem_port_mask; const char *mem_port_cmd; const char *seq_state; const char *set_select; unsigned long tmp; /* If this is a DMA command, also decode the PCI bits. */ if ((cia->mem_err1 >> 20) & 1) cia_decode_pci_error(cia, msg); else printk(KERN_CRIT "CIA machine check: %s\n", msg); mem_port_addr = cia->mem_err0 & 0xfffffff0; mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32; mem_port_mask = (cia->mem_err1 >> 12) & 0xF; tmp = (cia->mem_err1 >> 8) & 0xF; tmp |= ((cia->mem_err1 >> 20) & 1) << 4; if ((tmp & 0x1E) == 0x06) mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK"; else if ((tmp & 0x1C) == 0x08) mem_port_cmd = "READ MISS or READ MISS MODIFY"; else if (tmp == 0x1C) mem_port_cmd = "BC VICTIM"; else if ((tmp & 0x1E) == 0x0E) mem_port_cmd = "READ MISS MODIFY"; else if ((tmp & 0x1C) == 0x18) mem_port_cmd = "DMA READ or DMA READ MODIFY"; else if ((tmp & 0x1E) == 0x12) mem_port_cmd = "DMA WRITE"; else mem_port_cmd = "Unknown"; tmp = (cia->mem_err1 >> 16) & 0xF; switch (tmp) { case 0x0: seq_state = "Idle"; break; case 0x1: seq_state = "DMA READ or DMA WRITE"; break; case 0x2: case 0x3: seq_state = "READ MISS (or READ MISS MODIFY) with victim"; break; case 0x4: case 0x5: case 0x6: seq_state = "READ MISS (or READ MISS MODIFY) with no victim"; break; case 0x8: case 0x9: case 0xB: seq_state = "Refresh"; break; case 0xC: seq_state = "Idle, waiting for DMA pending read"; break; case 0xE: case 0xF: seq_state = "Idle, ras precharge"; break; default: seq_state = "Unknown"; break; } tmp = (cia->mem_err1 >> 24) & 0x1F; switch (tmp) { case 0x00: set_select = "Set 0 selected"; break; case 0x01: set_select = "Set 1 selected"; break; case 0x02: set_select = "Set 2 selected"; break; case 0x03: set_select = "Set 3 selected"; break; case 0x04: set_select = "Set 4 selected"; break; case 0x05: set_select = "Set 5 selected"; break; case 0x06: set_select = "Set 6 selected"; break; case 0x07: set_select = "Set 7 selected"; break; case 0x08: set_select = "Set 8 selected"; break; case 0x09: set_select = "Set 9 selected"; break; case 0x0A: set_select = "Set A selected"; break; case 0x0B: set_select = "Set B selected"; break; case 0x0C: set_select = "Set C selected"; break; case 0x0D: set_select = "Set D selected"; break; case 0x0E: set_select = "Set E selected"; break; case 0x0F: set_select = "Set F selected"; break; case 0x10: set_select = "No set selected"; break; case 0x1F: set_select = "Refresh cycle"; break; default: set_select = "Unknown"; break; } printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd); printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n", mem_port_addr, mem_port_mask); printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state); printk(KERN_CRIT " Memory set: %s\n", set_select); } static void cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg) { long syn; long i; const char *fmt; cia_decode_mem_error(cia, msg); syn = cia->cia_syn & 0xff; if (syn == (syn & -syn)) { fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n"; i = ffs(syn) - 1; } else { static unsigned char const data_bit[64] = { 0xCE, 0xCB, 0xD3, 0xD5, 0xD6, 0xD9, 0xDA, 0xDC, 0x23, 0x25, 0x26, 0x29, 0x2A, 0x2C, 0x31, 0x34, 0x0E, 0x0B, 0x13, 0x15, 0x16, 0x19, 0x1A, 0x1C, 0xE3, 0xE5, 0xE6, 0xE9, 0xEA, 0xEC, 
0xF1, 0xF4, 0x4F, 0x4A, 0x52, 0x54, 0x57, 0x58, 0x5B, 0x5D, 0xA2, 0xA4, 0xA7, 0xA8, 0xAB, 0xAD, 0xB0, 0xB5, 0x8F, 0x8A, 0x92, 0x94, 0x97, 0x98, 0x9B, 0x9D, 0x62, 0x64, 0x67, 0x68, 0x6B, 0x6D, 0x70, 0x75 }; for (i = 0; i < 64; ++i) if (data_bit[i] == syn) break; if (i < 64) fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n"; else fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n"; } printk (fmt, syn, i); } static void cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia) { static const char * const cmd_desc[16] = { "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER", "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK", "READ MISS0", "READ MISS1", "READ MISS MOD0", "READ MISS MOD1", "BCACHE VICTIM", "Spare", "READ MISS MOD STC0", "READ MISS MOD STC1" }; unsigned long addr; unsigned long mask; const char *cmd; int par; addr = cia->cpu_err0 & 0xfffffff0; addr |= (cia->cpu_err1 & 0x83UL) << 32; cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF]; mask = (cia->cpu_err1 >> 12) & 0xF; par = (cia->cpu_err1 >> 21) & 1; printk(KERN_CRIT "CIA machine check: System bus parity error\n"); printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par); printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask); } #endif /* CONFIG_VERBOSE_MCHECK */ static int cia_decode_mchk(unsigned long la_ptr) { struct el_common *com; struct el_CIA_sysdata_mcheck *cia; com = (void *)la_ptr; cia = (void *)(la_ptr + com->sys_offset); if ((cia->cia_err & CIA_ERR_VALID) == 0) return 0; #ifdef CONFIG_VERBOSE_MCHECK if (!alpha_verbose_mcheck) return 1; switch (ffs(cia->cia_err & 0xfff) - 1) { case 0: /* CIA_ERR_COR_ERR */ cia_decode_ecc_error(cia, "Corrected ECC error"); break; case 1: /* CIA_ERR_UN_COR_ERR */ cia_decode_ecc_error(cia, "Uncorrected ECC error"); break; case 2: /* CIA_ERR_CPU_PE */ cia_decode_parity_error(cia); break; case 3: /* CIA_ERR_MEM_NEM */ cia_decode_mem_error(cia, "Access to nonexistent memory"); break; case 4: /* CIA_ERR_PCI_SERR */ cia_decode_pci_error(cia, "PCI bus system error"); break; case 5: /* CIA_ERR_PERR */ cia_decode_pci_error(cia, "PCI data parity error"); break; case 6: /* CIA_ERR_PCI_ADDR_PE */ cia_decode_pci_error(cia, "PCI address parity error"); break; case 7: /* CIA_ERR_RCVD_MAS_ABT */ cia_decode_pci_error(cia, "PCI master abort"); break; case 8: /* CIA_ERR_RCVD_TAR_ABT */ cia_decode_pci_error(cia, "PCI target abort"); break; case 9: /* CIA_ERR_PA_PTE_INV */ cia_decode_pci_error(cia, "PCI invalid PTE"); break; case 10: /* CIA_ERR_FROM_WRT_ERR */ cia_decode_mem_error(cia, "Write to flash ROM attempted"); break; case 11: /* CIA_ERR_IOA_TIMEOUT */ cia_decode_pci_error(cia, "I/O timeout"); break; } if (cia->cia_err & CIA_ERR_LOST_CORR_ERR) printk(KERN_CRIT "CIA lost machine check: " "Correctable ECC error\n"); if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR) printk(KERN_CRIT "CIA lost machine check: " "Uncorrectable ECC error\n"); if (cia->cia_err & CIA_ERR_LOST_CPU_PE) printk(KERN_CRIT "CIA lost machine check: " "System bus parity error\n"); if (cia->cia_err & CIA_ERR_LOST_MEM_NEM) printk(KERN_CRIT "CIA lost machine check: " "Access to nonexistent memory\n"); if (cia->cia_err & CIA_ERR_LOST_PERR) printk(KERN_CRIT "CIA lost machine check: " "PCI data parity error\n"); if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE) printk(KERN_CRIT "CIA lost machine check: " "PCI address parity error\n"); if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT) printk(KERN_CRIT "CIA lost machine check: " "PCI master abort\n"); if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT) printk(KERN_CRIT "CIA lost machine check: " "PCI 
	       "target abort\n");
	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI invalid PTE\n");
	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Write to flash ROM attempted\n");
	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "I/O timeout\n");
#endif /* CONFIG_VERBOSE_MCHECK */

	return 1;
}

void
cia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int expected;

	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag. */
	mb();

	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, "CIA", expected);
}
linux-master
arch/alpha/kernel/core_cia.c
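/*
 * A minimal user-space sketch of the sparse configuration-space address
 * math used by cia_read_config()/cia_write_config() in core_cia.c above.
 * CIA_CONF_SKETCH is a placeholder for the real CIA_CONF constant, and the
 * pci_addr value is assumed to come from mk_conf_addr(); both are
 * illustrative assumptions, not the kernel's actual values.
 */
#include <stdio.h>

#define CIA_CONF_SKETCH 0x8700000000UL	/* placeholder, not the real CIA_CONF */

static unsigned long cia_conf_sparse_addr(unsigned long pci_addr,
					  int where, int size)
{
	long mask = (size - 1) * 8;	/* byte-enable field of the sparse address */

	return (pci_addr << 5) + mask + CIA_CONF_SKETCH;
}

int main(void)
{
	unsigned long pci_addr = 0x5800;	/* hypothetical mk_conf_addr() output */
	int where = 0x02, size = 2;		/* 16-bit read at config offset 2 */
	int shift = (where & 3) * 8;		/* same shift as cia_read_config() */
	unsigned int raw = 0x12349ce5;		/* pretend longword from conf_read() */

	printf("sparse addr %#lx, extracted value %#x\n",
	       cia_conf_sparse_addr(pci_addr, where, size),
	       (raw >> shift) & 0xffff);
	return 0;
}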
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_titan.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996, 1999 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * Copyright (C) 1999, 2000 Jeff Wiedemeier * * Code supporting TITAN systems (EV6+TITAN), currently: * Privateer * Falcon * Granite */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_titan.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #include "err_impl.h" /* * Titan generic */ /* * Titan supports up to 4 CPUs */ static unsigned long titan_cpu_irq_affinity[4] = { ~0UL, ~0UL, ~0UL, ~0UL }; /* * Mask is set (1) if enabled */ static unsigned long titan_cached_irq_mask; /* * Need SMP-safe access to interrupt CSRs */ DEFINE_SPINLOCK(titan_irq_lock); static void titan_update_irq_hw(unsigned long mask) { register titan_cchip *cchip = TITAN_cchip; unsigned long isa_enable = 1UL << 55; register int bcpu = boot_cpuid; #ifdef CONFIG_SMP cpumask_t cpm; volatile unsigned long *dim0, *dim1, *dim2, *dim3; unsigned long mask0, mask1, mask2, mask3, dummy; cpumask_copy(&cpm, cpu_present_mask); mask &= ~isa_enable; mask0 = mask & titan_cpu_irq_affinity[0]; mask1 = mask & titan_cpu_irq_affinity[1]; mask2 = mask & titan_cpu_irq_affinity[2]; mask3 = mask & titan_cpu_irq_affinity[3]; if (bcpu == 0) mask0 |= isa_enable; else if (bcpu == 1) mask1 |= isa_enable; else if (bcpu == 2) mask2 |= isa_enable; else mask3 |= isa_enable; dim0 = &cchip->dim0.csr; dim1 = &cchip->dim1.csr; dim2 = &cchip->dim2.csr; dim3 = &cchip->dim3.csr; if (!cpumask_test_cpu(0, &cpm)) dim0 = &dummy; if (!cpumask_test_cpu(1, &cpm)) dim1 = &dummy; if (!cpumask_test_cpu(2, &cpm)) dim2 = &dummy; if (!cpumask_test_cpu(3, &cpm)) dim3 = &dummy; *dim0 = mask0; *dim1 = mask1; *dim2 = mask2; *dim3 = mask3; mb(); *dim0; *dim1; *dim2; *dim3; #else volatile unsigned long *dimB; dimB = &cchip->dim0.csr; if (bcpu == 1) dimB = &cchip->dim1.csr; else if (bcpu == 2) dimB = &cchip->dim2.csr; else if (bcpu == 3) dimB = &cchip->dim3.csr; *dimB = mask | isa_enable; mb(); *dimB; #endif } static inline void titan_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cached_irq_mask |= 1UL << (irq - 16); titan_update_irq_hw(titan_cached_irq_mask); spin_unlock(&titan_irq_lock); } static inline void titan_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cached_irq_mask &= ~(1UL << (irq - 16)); titan_update_irq_hw(titan_cached_irq_mask); spin_unlock(&titan_irq_lock); } static void titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) { int cpu; for (cpu = 0; cpu < 4; cpu++) { if (cpumask_test_cpu(cpu, &affinity)) titan_cpu_irq_affinity[cpu] |= 1UL << irq; else titan_cpu_irq_affinity[cpu] &= ~(1UL << irq); } } static int titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int irq = d->irq; spin_lock(&titan_irq_lock); titan_cpu_set_irq_affinity(irq - 16, *affinity); titan_update_irq_hw(titan_cached_irq_mask); spin_unlock(&titan_irq_lock); return 0; } static void titan_device_interrupt(unsigned long vector) { printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n"); } static void 
titan_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init init_titan_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { irq_set_chip_and_handler(i, ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } static struct irq_chip titan_irq_type = { .name = "TITAN", .irq_unmask = titan_enable_irq, .irq_mask = titan_disable_irq, .irq_mask_ack = titan_disable_irq, .irq_set_affinity = titan_set_irq_affinity, }; static irqreturn_t titan_intr_nop(int irq, void *dev_id) { /* * This is a NOP interrupt handler for the purposes of * event counting -- just return. */ return IRQ_HANDLED; } static void __init titan_init_irq(void) { if (alpha_using_srm && !alpha_mv.device_interrupt) alpha_mv.device_interrupt = titan_srm_device_interrupt; if (!alpha_mv.device_interrupt) alpha_mv.device_interrupt = titan_device_interrupt; titan_update_irq_hw(0); init_titan_irqs(&titan_irq_type, 16, 63 + 16); } static void __init titan_legacy_init_irq(void) { /* init the legacy dma controller */ outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); /* init the legacy irq controller */ init_i8259a_irqs(); /* init the titan irqs */ titan_init_irq(); } void titan_dispatch_irqs(u64 mask) { unsigned long vector; /* * Mask down to those interrupts which are enable on this processor */ mask &= titan_cpu_irq_affinity[smp_processor_id()]; /* * Dispatch all requested interrupts */ while (mask) { /* convert to SRM vector... priority is <63> -> <0> */ vector = 63 - __kernel_ctlz(mask); mask &= ~(1UL << vector); /* clear it out */ vector = 0x900 + (vector << 4); /* convert to SRM vector */ /* dispatch it */ alpha_mv.device_interrupt(vector); } } /* * Titan Family */ static void __init titan_request_irq(unsigned int irq, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int err; err = request_irq(irq, handler, irqflags, devname, dev_id); if (err) { printk("titan_request_irq for IRQ %d returned %d; ignoring\n", irq, err); } } static void __init titan_late_init(void) { /* * Enable the system error interrupts. These interrupts are * all reported to the kernel as machine checks, so the handler * is a nop so it can be called to count the individual events. */ titan_request_irq(63+16, titan_intr_nop, 0, "CChip Error", NULL); titan_request_irq(62+16, titan_intr_nop, 0, "PChip 0 H_Error", NULL); titan_request_irq(61+16, titan_intr_nop, 0, "PChip 1 H_Error", NULL); titan_request_irq(60+16, titan_intr_nop, 0, "PChip 0 C_Error", NULL); titan_request_irq(59+16, titan_intr_nop, 0, "PChip 1 C_Error", NULL); /* * Register our error handlers. */ titan_register_error_handlers(); /* * Check if the console left us any error logs. */ cdl_check_console_data_log(); } static int titan_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u8 intline; int irq; /* Get the current intline. */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; /* Is it explicitly routed through ISA? */ if ((irq & 0xF0) == 0xE0) return irq; /* Offset by 16 to make room for ISA interrupts 0 - 15. */ return irq + 16; } static void __init titan_init_pci(void) { /* * This isn't really the right place, but there's some init * that needs to be done after everything is basically up. 
*/ titan_late_init(); /* Indicate that we trust the console to configure things properly */ pci_set_flags(PCI_PROBE_ONLY); common_init_pci(); SMC669_Init(0); locate_and_init_vga(NULL); } /* * Privateer */ static void __init privateer_init_pci(void) { /* * Hook a couple of extra err interrupts that the * common titan code won't. */ titan_request_irq(53+16, titan_intr_nop, 0, "NMI", NULL); titan_request_irq(50+16, titan_intr_nop, 0, "Temperature Warning", NULL); /* * Finish with the common version. */ return titan_init_pci(); } /* * The System Vectors. */ struct alpha_machine_vector titan_mv __initmv = { .vector_name = "TITAN", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TITAN_IO, .machine_check = titan_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TITAN_DAC_OFFSET, .nr_irqs = 80, /* 64 + 16 */ /* device_interrupt will be filled in by titan_init_irq */ .agp_info = titan_agp_info, .init_arch = titan_init_arch, .init_irq = titan_legacy_init_irq, .init_rtc = common_init_rtc, .init_pci = titan_init_pci, .kill_arch = titan_kill_arch, .pci_map_irq = titan_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(titan) struct alpha_machine_vector privateer_mv __initmv = { .vector_name = "PRIVATEER", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TITAN_IO, .machine_check = privateer_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TITAN_DAC_OFFSET, .nr_irqs = 80, /* 64 + 16 */ /* device_interrupt will be filled in by titan_init_irq */ .agp_info = titan_agp_info, .init_arch = titan_init_arch, .init_irq = titan_legacy_init_irq, .init_rtc = common_init_rtc, .init_pci = privateer_init_pci, .kill_arch = titan_kill_arch, .pci_map_irq = titan_map_irq, .pci_swizzle = common_swizzle, }; /* No alpha_mv alias for privateer since we compile it in unconditionally with titan; setup_arch knows how to cope. */
linux-master
arch/alpha/kernel/sys_titan.c
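/*
 * Minimal user-space sketch of the SRM vector math used by
 * titan_dispatch_irqs() and titan_srm_device_interrupt() in sys_titan.c
 * above.  __builtin_clzl stands in for the kernel's __kernel_ctlz; the
 * example pending bit is hypothetical.
 */
#include <stdio.h>

static unsigned long srm_vector_from_bit(unsigned long mask)
{
	unsigned long bit = 63 - __builtin_clzl(mask);	/* highest pending bit */

	return 0x900 + (bit << 4);			/* convert to SRM vector */
}

static int linux_irq_from_vector(unsigned long vector)
{
	return (vector - 0x800) >> 4;	/* as in titan_srm_device_interrupt() */
}

int main(void)
{
	unsigned long mask = 1UL << 20;			/* pretend IRQ bit 20 is pending */
	unsigned long vector = srm_vector_from_bit(mask);

	/* Bit 20 -> vector 0xa40 -> Linux irq 36, i.e. 16 + 20. */
	printf("vector %#lx -> irq %d\n", vector, linux_irq_from_vector(vector));
	return 0;
}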
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_t2.c * * Written by Jay A Estabrook ([email protected]). * December 1996. * * based on CIA code by David A Rusling ([email protected]) * * Code common to all T2 core logic chips. */ #define __EXTERN_INLINE #include <asm/io.h> #include <asm/core_t2.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/delay.h> #include <asm/mce.h> #include "proto.h" #include "pci_impl.h" /* For dumping initial DMA window settings. */ #define DEBUG_PRINT_INITIAL_SETTINGS 0 /* For dumping final DMA window settings. */ #define DEBUG_PRINT_FINAL_SETTINGS 0 /* * By default, we direct-map starting at 2GB, in order to allow the * maximum size direct-map window (2GB) to match the maximum amount of * memory (2GB) that can be present on SABLEs. But that limits the * floppy to DMA only via the scatter/gather window set up for 8MB * ISA DMA, since the maximum ISA DMA address is 2GB-1. * * For now, this seems a reasonable trade-off: even though most SABLEs * have less than 1GB of memory, floppy usage/performance will not * really be affected by forcing it to go via scatter/gather... */ #define T2_DIRECTMAP_2G 1 #if T2_DIRECTMAP_2G # define T2_DIRECTMAP_START 0x80000000UL # define T2_DIRECTMAP_LENGTH 0x80000000UL #else # define T2_DIRECTMAP_START 0x40000000UL # define T2_DIRECTMAP_LENGTH 0x40000000UL #endif /* The ISA scatter/gather window settings. */ #define T2_ISA_SG_START 0x00800000UL #define T2_ISA_SG_LENGTH 0x00800000UL /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the i/o controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG(args) printk args #else # define DBG(args) #endif static volatile unsigned int t2_mcheck_any_expected; static volatile unsigned int t2_mcheck_last_taken; /* Place to save the DMA Window registers as set up by SRM for restoration during shutdown. */ static struct { struct { unsigned long wbase; unsigned long wmask; unsigned long tbase; } window[2]; unsigned long hae_1; unsigned long hae_2; unsigned long hae_3; unsigned long hae_4; unsigned long hbase; } t2_saved_config __attribute((common)); /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the T2_HAXR2 register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. 
* 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { unsigned long addr; u8 bus = pbus->number; DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x," " addr=0x%lx, type1=0x%x)\n", bus, device_fn, where, pci_addr, type1)); if (bus == 0) { int device = device_fn >> 3; /* Type 0 configuration cycle. */ if (device > 8) { DBG(("mk_conf_addr: device (%d)>20, returning -1\n", device)); return -1; } *type1 = 0; addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where); } else { /* Type 1 configuration cycle. */ *type1 = 1; addr = (bus << 16) | (device_fn << 8) | (where); } *pci_addr = addr; DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } /* * NOTE: both conf_read() and conf_write() may set HAE_3 when needing * to do type1 access. This is protected by the use of spinlock IRQ * primitives in the wrapper functions pci_{read,write}_config_*() * defined in drivers/pci/pci.c. */ static unsigned int conf_read(unsigned long addr, unsigned char type1) { unsigned int value, cpu, taken; unsigned long t2_cfg = 0; cpu = smp_processor_id(); DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1)); /* If Type1 access, must set T2 CFG. */ if (type1) { t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; *(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg; mb(); } mb(); draina(); mcheck_expected(cpu) = 1; mcheck_taken(cpu) = 0; t2_mcheck_any_expected |= (1 << cpu); mb(); /* Access configuration space. */ value = *(vuip)addr; mb(); mb(); /* magic */ /* Wait for possible mcheck. Also, this lets other CPUs clear their mchecks as well, as they can reliably tell when another CPU is in the midst of handling a real mcheck via the "taken" function. */ udelay(100); if ((taken = mcheck_taken(cpu))) { mcheck_taken(cpu) = 0; t2_mcheck_last_taken |= (1 << cpu); value = 0xffffffffU; mb(); } mcheck_expected(cpu) = 0; t2_mcheck_any_expected = 0; mb(); /* If Type1 access, must reset T2 CFG so normal IO space ops work. */ if (type1) { *(vulp)T2_HAE_3 = t2_cfg; mb(); } return value; } static void conf_write(unsigned long addr, unsigned int value, unsigned char type1) { unsigned int cpu, taken; unsigned long t2_cfg = 0; cpu = smp_processor_id(); /* If Type1 access, must set T2 CFG. */ if (type1) { t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL; *(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL; mb(); } mb(); draina(); mcheck_expected(cpu) = 1; mcheck_taken(cpu) = 0; t2_mcheck_any_expected |= (1 << cpu); mb(); /* Access configuration space. */ *(vuip)addr = value; mb(); mb(); /* magic */ /* Wait for possible mcheck. Also, this lets other CPUs clear their mchecks as well, as they can reliably tell when this CPU is in the midst of handling a real mcheck via the "taken" function. 
*/ udelay(100); if ((taken = mcheck_taken(cpu))) { mcheck_taken(cpu) = 0; t2_mcheck_last_taken |= (1 << cpu); mb(); } mcheck_expected(cpu) = 0; t2_mcheck_any_expected = 0; mb(); /* If Type1 access, must reset T2 CFG so normal IO space ops work. */ if (type1) { *(vulp)T2_HAE_3 = t2_cfg; mb(); } } static int t2_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr, pci_addr; unsigned char type1; int shift; long mask; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; shift = (where & 3) * 8; addr = (pci_addr << 5) + mask + T2_CONF; *value = conf_read(addr, type1) >> (shift); return PCIBIOS_SUCCESSFUL; } static int t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr, pci_addr; unsigned char type1; long mask; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + T2_CONF; conf_write(addr, value << ((where & 3) * 8), type1); return PCIBIOS_SUCCESSFUL; } struct pci_ops t2_pci_ops = { .read = t2_read_config, .write = t2_write_config, }; static void __init t2_direct_map_window1(unsigned long base, unsigned long length) { unsigned long temp; __direct_map_base = base; __direct_map_size = length; temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20); *(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */ temp = (length - 1) & 0xfff00000UL; *(vulp)T2_WMASK1 = temp; *(vulp)T2_TBASE1 = 0; #if DEBUG_PRINT_FINAL_SETTINGS printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1); #endif } static void __init t2_sg_map_window2(struct pci_controller *hose, unsigned long base, unsigned long length) { unsigned long temp; /* Note we can only do 1 SG window, as the other is for direct, so do an ISA SG area, especially for the floppy. */ hose->sg_isa = iommu_arena_new(hose, base, length, SMP_CACHE_BYTES); hose->sg_pci = NULL; temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20); *(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */ temp = (length - 1) & 0xfff00000UL; *(vulp)T2_WMASK2 = temp; *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1; mb(); t2_pci_tbi(hose, 0, -1); /* flush TLB all */ #if DEBUG_PRINT_FINAL_SETTINGS printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2); #endif } static void __init t2_save_configuration(void) { #if DEBUG_PRINT_INITIAL_SETTINGS printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */ printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2); printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3); printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4); printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE); printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1); printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2); #endif /* * Save the DMA Window registers. 
*/ t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1; t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1; t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1; t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2; t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2; t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2; t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */ t2_saved_config.hae_2 = *(vulp)T2_HAE_2; t2_saved_config.hae_3 = *(vulp)T2_HAE_3; t2_saved_config.hae_4 = *(vulp)T2_HAE_4; t2_saved_config.hbase = *(vulp)T2_HBASE; } void __init t2_init_arch(void) { struct pci_controller *hose; struct resource *hae_mem; unsigned long temp; unsigned int i; for (i = 0; i < NR_CPUS; i++) { mcheck_expected(i) = 0; mcheck_taken(i) = 0; } t2_mcheck_any_expected = 0; t2_mcheck_last_taken = 0; /* Enable scatter/gather TLB use. */ temp = *(vulp)T2_IOCSR; if (!(temp & (0x1UL << 26))) { printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n", temp); *(vulp)T2_IOCSR = temp | (0x1UL << 26); mb(); *(vulp)T2_IOCSR; /* read it back to make sure */ } t2_save_configuration(); /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hae_mem = alloc_resource(); hae_mem->start = 0; hae_mem->end = T2_MEM_R1_MASK; hae_mem->name = pci_hae0_name; if (request_resource(&iomem_resource, hae_mem) < 0) printk(KERN_ERR "Failed to request HAE_MEM\n"); hose->mem_space = hae_mem; hose->index = 0; hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR; hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR; hose->sparse_io_base = T2_IO - IDENT_ADDR; hose->dense_io_base = 0; /* * Set up the PCI->physical memory translation windows. * * Window 1 is direct mapped. * Window 2 is scatter/gather (for ISA). */ t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH); /* Always make an ISA DMA window. */ t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH); *(vulp)T2_HBASE = 0x0; /* Disable HOLES. */ /* Zero HAE. */ *(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */ *(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */ *(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */ /* * We also now zero out HAE_4, the dense memory HAE, so that * we need not account for its "offset" when accessing dense * memory resources which we allocated in our normal way. This * HAE would need to stay untouched were we to keep the SRM * resource settings. * * Thus we can now run standard X servers on SABLE/LYNX. :-) */ *(vulp)T2_HAE_4 = 0; mb(); } void t2_kill_arch(int mode) { /* * Restore the DMA Window registers. */ *(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase; *(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask; *(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase; *(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase; *(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask; *(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase; mb(); *(vulp)T2_HAE_1 = srm_hae; *(vulp)T2_HAE_2 = t2_saved_config.hae_2; *(vulp)T2_HAE_3 = t2_saved_config.hae_3; *(vulp)T2_HAE_4 = t2_saved_config.hae_4; *(vulp)T2_HBASE = t2_saved_config.hbase; mb(); *(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. 
*/ } void t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { unsigned long t2_iocsr; t2_iocsr = *(vulp)T2_IOCSR; /* set the TLB Clear bit */ *(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28); mb(); *(vulp)T2_IOCSR; /* read it back to make sure */ /* clear the TLB Clear bit */ *(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28); mb(); *(vulp)T2_IOCSR; /* read it back to make sure */ } #define SIC_SEIC (1UL << 33) /* System Event Clear */ static void t2_clear_errors(int cpu) { struct sable_cpu_csr *cpu_regs; cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu); cpu_regs->sic &= ~SIC_SEIC; /* Clear CPU errors. */ cpu_regs->bcce |= cpu_regs->bcce; cpu_regs->cbe |= cpu_regs->cbe; cpu_regs->bcue |= cpu_regs->bcue; cpu_regs->dter |= cpu_regs->dter; *(vulp)T2_CERR1 |= *(vulp)T2_CERR1; *(vulp)T2_PERR1 |= *(vulp)T2_PERR1; mb(); mb(); /* magic */ } /* * SABLE seems to have a "broadcast" style machine check, in that all * CPUs receive it. And, the issuing CPU, in the case of PCI Config * space read/write faults, will also receive a second mcheck, upon * lowering IPL during completion processing in pci_read_config_byte() * et al. * * Hence all the taken/expected/any_expected/last_taken stuff... */ void t2_machine_check(unsigned long vector, unsigned long la_ptr) { int cpu = smp_processor_id(); #ifdef CONFIG_VERBOSE_MCHECK struct el_common *mchk_header = (struct el_common *)la_ptr; #endif /* Clear the error before any reporting. */ mb(); mb(); /* magic */ draina(); t2_clear_errors(cpu); /* This should not actually be done until the logout frame is examined, but, since we don't do that, go on and do this... */ wrmces(0x7); mb(); /* Now, do testing for the anomalous conditions. */ if (!mcheck_expected(cpu) && t2_mcheck_any_expected) { /* * FUNKY: Received mcheck on a CPU and not * expecting it, but another CPU is expecting one. * * Just dismiss it for now on this CPU... */ #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { printk("t2_machine_check(cpu%d): any_expected 0x%x -" " (assumed) spurious -" " code 0x%x\n", cpu, t2_mcheck_any_expected, (unsigned int)mchk_header->code); } #endif return; } if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) { if (t2_mcheck_last_taken & (1 << cpu)) { #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { printk("t2_machine_check(cpu%d): last_taken 0x%x - " "unexpected mcheck - code 0x%x\n", cpu, t2_mcheck_last_taken, (unsigned int)mchk_header->code); } #endif t2_mcheck_last_taken = 0; mb(); return; } else { t2_mcheck_last_taken = 0; mb(); } } #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { printk("%s t2_mcheck(cpu%d): last_taken 0x%x - " "any_expected 0x%x - code 0x%x\n", (mcheck_expected(cpu) ? "EX" : "UN"), cpu, t2_mcheck_last_taken, t2_mcheck_any_expected, (unsigned int)mchk_header->code); } #endif process_mcheck_info(vector, la_ptr, "T2", mcheck_expected(cpu)); }
linux-master
arch/alpha/kernel/core_t2.c
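/*
 * Stand-alone sketch of the Type 0 / Type 1 configuration address encoding
 * described in the mk_conf_addr() comment in core_t2.c above, plus the
 * sparse-space address formed by t2_read_config().  T2_CONF_SKETCH is a
 * placeholder for the real T2_CONF constant.
 */
#include <stdio.h>

#define T2_CONF_SKETCH 0x390000000UL	/* placeholder for T2_CONF */

static int mk_conf_addr_sketch(unsigned int bus, unsigned int device_fn,
			       int where, unsigned long *pci_addr)
{
	if (bus == 0) {
		unsigned int device = device_fn >> 3;

		if (device > 8)		/* Type 0: one IDSEL line per device */
			return -1;
		*pci_addr = (0x0800UL << device) | ((device_fn & 7) << 8) | where;
	} else {
		/* Type 1: bus number in <23:16>, device/function in <15:8>. */
		*pci_addr = (bus << 16) | (device_fn << 8) | where;
	}
	return 0;
}

int main(void)
{
	unsigned long pci_addr, addr;
	int size = 4, where = 0x10;	/* hypothetical: 32-bit read, bus 0, device 6 */

	if (mk_conf_addr_sketch(0, 6 << 3, where, &pci_addr))
		return 1;

	/* Sparse-space access address, as computed in t2_read_config(). */
	addr = (pci_addr << 5) + (size - 1) * 8 + T2_CONF_SKETCH;
	printf("pci_addr %#lx -> sparse addr %#lx\n", pci_addr, addr);
	return 0;
}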
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/kernel/pci-sysfs.c * * Copyright (C) 2009 Ivan Kokshaysky * * Alpha PCI resource files. * * Loosely based on generic HAVE_PCI_MMAP implementation in * drivers/pci/pci-sysfs.c */ #include <linux/sched.h> #include <linux/stat.h> #include <linux/slab.h> #include <linux/pci.h> static int hose_mmap_page_range(struct pci_controller *hose, struct vm_area_struct *vma, enum pci_mmap_state mmap_type, int sparse) { unsigned long base; if (mmap_type == pci_mmap_mem) base = sparse ? hose->sparse_mem_base : hose->dense_mem_base; else base = sparse ? hose->sparse_io_base : hose->dense_io_base; vma->vm_pgoff += base >> PAGE_SHIFT; return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } static int __pci_mmap_fits(struct pci_dev *pdev, int num, struct vm_area_struct *vma, int sparse) { unsigned long nr, start, size; int shift = sparse ? 5 : 0; nr = vma_pages(vma); start = vma->vm_pgoff; size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; if (start < size && size - start >= nr) return 1; WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d " "(size 0x%08lx)\n", current->comm, sparse ? " sparse" : "", start, start + nr, pci_name(pdev), num, size); return 0; } /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping * @attr: struct bin_attribute for the file being mapped * @vma: struct vm_area_struct passed into the mmap * @sparse: address space type * * Use the bus mapping routines to map a PCI resource into userspace. * * Return: %0 on success, negative error code otherwise */ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int sparse) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); struct resource *res = attr->private; enum pci_mmap_state mmap_type; struct pci_bus_region bar; int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) if (res == &pdev->resource[i]) break; if (i >= PCI_STD_NUM_BARS) return -ENODEV; if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) return -EINVAL; if (!__pci_mmap_fits(pdev, i, vma, sparse)) return -EINVAL; pcibios_resource_to_bus(pdev->bus, &bar, res); vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); } static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { return pci_mmap_resource(kobj, attr, vma, 1); } static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { return pci_mmap_resource(kobj, attr, vma, 0); } /** * pci_remove_resource_files - cleanup resource files * @pdev: pci_dev to cleanup * * If we created resource files for @dev, remove them from sysfs and * free their resources. 
*/ void pci_remove_resource_files(struct pci_dev *pdev) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct bin_attribute *res_attr; res_attr = pdev->res_attr[i]; if (res_attr) { sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); kfree(res_attr); } res_attr = pdev->res_attr_wc[i]; if (res_attr) { sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); kfree(res_attr); } } } static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) { struct pci_bus_region bar; struct pci_controller *hose = pdev->sysdata; long dense_offset; unsigned long sparse_size; pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); /* All core logic chips have 4G sparse address space, except CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM definitions in asm/core_xxx.h files). This corresponds to 128M or 512M of the bus space. */ dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000; return bar.end < sparse_size; } static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, char *suffix, struct bin_attribute *res_attr, unsigned long sparse) { size_t size = pci_resource_len(pdev, num); sprintf(name, "resource%d%s", num, suffix); res_attr->mmap = sparse ? pci_mmap_resource_sparse : pci_mmap_resource_dense; res_attr->attr.name = name; res_attr->attr.mode = S_IRUSR | S_IWUSR; res_attr->size = sparse ? size << 5 : size; res_attr->private = &pdev->resource[num]; return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); } static int pci_create_attr(struct pci_dev *pdev, int num) { /* allocate attribute structure, piggyback attribute name */ int retval, nlen1, nlen2 = 0, res_count = 1; unsigned long sparse_base, dense_base; struct bin_attribute *attr; struct pci_controller *hose = pdev->sysdata; char *suffix, *attr_name; suffix = ""; /* Assume bwx machine, normal resourceN files. */ nlen1 = 10; if (pdev->resource[num].flags & IORESOURCE_MEM) { sparse_base = hose->sparse_mem_base; dense_base = hose->dense_mem_base; if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { sparse_base = 0; suffix = "_dense"; nlen1 = 16; /* resourceN_dense */ } } else { sparse_base = hose->sparse_io_base; dense_base = hose->dense_io_base; } if (sparse_base) { suffix = "_sparse"; nlen1 = 17; if (dense_base) { nlen2 = 16; /* resourceN_dense */ res_count = 2; } } attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); if (!attr) return -ENOMEM; /* Create bwx, sparse or single dense file */ attr_name = (char *)(attr + res_count); pdev->res_attr[num] = attr; retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, sparse_base); if (retval || res_count == 1) return retval; /* Create dense file */ attr_name += nlen1; attr++; pdev->res_attr_wc[num] = attr; return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); } /** * pci_create_resource_files - create resource files in sysfs for @pdev * @pdev: pci_dev in question * * Walk the resources in @dev creating files for each resource available. * * Return: %0 on success, or negative error code */ int pci_create_resource_files(struct pci_dev *pdev) { int i; int retval; /* Expose the PCI resources from this device as files */ for (i = 0; i < PCI_STD_NUM_BARS; i++) { /* skip empty resources */ if (!pci_resource_len(pdev, i)) continue; retval = pci_create_attr(pdev, i); if (retval) { pci_remove_resource_files(pdev); return retval; } } return 0; } /* Legacy I/O bus mapping stuff. 
*/ static int __legacy_mmap_fits(struct pci_controller *hose, struct vm_area_struct *vma, unsigned long res_size, int sparse) { unsigned long nr, start, size; nr = vma_pages(vma); start = vma->vm_pgoff; size = ((res_size - 1) >> PAGE_SHIFT) + 1; if (start < size && size - start >= nr) return 1; WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d " "(size 0x%08lx)\n", current->comm, sparse ? " sparse" : "", start, start + nr, hose->index, size); return 0; } static inline int has_sparse(struct pci_controller *hose, enum pci_mmap_state mmap_type) { unsigned long base; base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : hose->sparse_io_base; return base != 0; } int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_type) { struct pci_controller *hose = bus->sysdata; int sparse = has_sparse(hose, mmap_type); unsigned long res_size; res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : bus->legacy_io->size; if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) return -EINVAL; return hose_mmap_page_range(hose, vma, mmap_type, sparse); } /** * pci_adjust_legacy_attr - adjustment of legacy file attributes * @bus: bus to create files under * @mmap_type: I/O port or memory * * Adjust file name and size for sparse mappings. */ void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type) { struct pci_controller *hose = bus->sysdata; if (!has_sparse(hose, mmap_type)) return; if (mmap_type == pci_mmap_mem) { bus->legacy_mem->attr.name = "legacy_mem_sparse"; bus->legacy_mem->size <<= 5; } else { bus->legacy_io->attr.name = "legacy_io_sparse"; bus->legacy_io->size <<= 5; } return; } /* Legacy I/O bus read/write functions */ int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) { struct pci_controller *hose = bus->sysdata; port += hose->io_space->start; switch(size) { case 1: *((u8 *)val) = inb(port); return 1; case 2: if (port & 1) return -EINVAL; *((u16 *)val) = inw(port); return 2; case 4: if (port & 3) return -EINVAL; *((u32 *)val) = inl(port); return 4; } return -EINVAL; } int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) { struct pci_controller *hose = bus->sysdata; port += hose->io_space->start; switch(size) { case 1: outb(port, val); return 1; case 2: if (port & 1) return -EINVAL; outw(port, val); return 2; case 4: if (port & 3) return -EINVAL; outl(port, val); return 4; } return -EINVAL; }
linux-master
arch/alpha/kernel/pci-sysfs.c
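/*
 * User-space sketch of the "does this mmap fit the BAR?" check done by
 * __pci_mmap_fits() in pci-sysfs.c above.  A sparse mapping consumes 32
 * bytes of bus address space per byte of the BAR, hence the extra shift
 * of 5.  PAGE_SHIFT_SKETCH (8 KB pages, as on Alpha) is an assumption for
 * illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT_SKETCH 13

static int mmap_fits(unsigned long bar_len, unsigned long vma_pgoff,
		     unsigned long vma_pages, int sparse)
{
	int shift = sparse ? 5 : 0;
	unsigned long size = ((bar_len - 1) >> (PAGE_SHIFT_SKETCH - shift)) + 1;

	return vma_pgoff < size && size - vma_pgoff >= vma_pages;
}

int main(void)
{
	/* Hypothetical 1 MB BAR: 128 dense pages, 4096 sparse pages. */
	printf("dense, whole BAR:  %d\n", mmap_fits(1UL << 20, 0, 128, 0));
	printf("sparse, whole BAR: %d\n", mmap_fits(1UL << 20, 0, 4096, 1));
	printf("dense, too big:    %d\n", mmap_fits(1UL << 20, 0, 129, 0));
	return 0;
}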
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_rawhide.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the RAWHIDE. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_mcpcia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* * HACK ALERT! only the boot cpu is used for interrupts. */ /* Note mask bit is true for ENABLED irqs. */ static unsigned int hose_irq_masks[4] = { 0xff0000, 0xfe0000, 0xff0000, 0xff0000 }; static unsigned int cached_irq_masks[4]; DEFINE_SPINLOCK(rawhide_irq_lock); static inline void rawhide_update_irq_hw(int hose, int mask) { *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)) = mask; mb(); *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(hose)); } #define hose_exists(h) \ (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) static inline void rawhide_enable_irq(struct irq_data *d) { unsigned int mask, hose; unsigned int irq = d->irq; irq -= 16; hose = irq / 24; if (!hose_exists(hose)) /* if hose non-existent, exit */ return; irq -= hose * 24; mask = 1 << irq; spin_lock(&rawhide_irq_lock); mask |= cached_irq_masks[hose]; cached_irq_masks[hose] = mask; rawhide_update_irq_hw(hose, mask); spin_unlock(&rawhide_irq_lock); } static void rawhide_disable_irq(struct irq_data *d) { unsigned int mask, hose; unsigned int irq = d->irq; irq -= 16; hose = irq / 24; if (!hose_exists(hose)) /* if hose non-existent, exit */ return; irq -= hose * 24; mask = ~(1 << irq) | hose_irq_masks[hose]; spin_lock(&rawhide_irq_lock); mask &= cached_irq_masks[hose]; cached_irq_masks[hose] = mask; rawhide_update_irq_hw(hose, mask); spin_unlock(&rawhide_irq_lock); } static void rawhide_mask_and_ack_irq(struct irq_data *d) { unsigned int mask, mask1, hose; unsigned int irq = d->irq; irq -= 16; hose = irq / 24; if (!hose_exists(hose)) /* if hose non-existent, exit */ return; irq -= hose * 24; mask1 = 1 << irq; mask = ~mask1 | hose_irq_masks[hose]; spin_lock(&rawhide_irq_lock); mask &= cached_irq_masks[hose]; cached_irq_masks[hose] = mask; rawhide_update_irq_hw(hose, mask); /* Clear the interrupt. */ *(vuip)MCPCIA_INT_REQ(MCPCIA_HOSE2MID(hose)) = mask1; spin_unlock(&rawhide_irq_lock); } static struct irq_chip rawhide_irq_type = { .name = "RAWHIDE", .irq_unmask = rawhide_enable_irq, .irq_mask = rawhide_disable_irq, .irq_mask_ack = rawhide_mask_and_ack_irq, }; static void rawhide_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * The RAWHIDE SRM console reports PCI interrupts with a vector * 0x80 *higher* than one might expect, as PCI IRQ 0 (ie bit 0) * shows up as IRQ 24, etc, etc. We adjust it down by 8 to have * it line up with the actual bit numbers from the REQ registers, * which is how we manage the interrupts/mask. Sigh... * * Also, PCI #1 interrupts are offset some more... :-( */ if (irq == 52) { /* SCSI on PCI1 is special. */ irq = 72; } /* Adjust by which hose it is from. */ irq -= ((irq + 16) >> 2) & 0x38; handle_irq(irq); } static void __init rawhide_init_irq(void) { struct pci_controller *hose; long i; mcpcia_init_hoses(); /* Clear them all; only hoses that exist will be non-zero. 
*/ for (i = 0; i < MCPCIA_MAX_HOSES; i++) cached_irq_masks[i] = 0; for (hose = hose_head; hose; hose = hose->next) { unsigned int h = hose->index; unsigned int mask = hose_irq_masks[h]; cached_irq_masks[h] = mask; *(vuip)MCPCIA_INT_MASK0(MCPCIA_HOSE2MID(h)) = mask; *(vuip)MCPCIA_INT_MASK1(MCPCIA_HOSE2MID(h)) = 0; } for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); common_init_isa_dma(); } /* * PCI Fixup configuration. * * Summary @ MCPCIA_PCI0_INT_REQ: * Bit Meaning * 0 Interrupt Line A from slot 2 PCI0 * 1 Interrupt Line B from slot 2 PCI0 * 2 Interrupt Line C from slot 2 PCI0 * 3 Interrupt Line D from slot 2 PCI0 * 4 Interrupt Line A from slot 3 PCI0 * 5 Interrupt Line B from slot 3 PCI0 * 6 Interrupt Line C from slot 3 PCI0 * 7 Interrupt Line D from slot 3 PCI0 * 8 Interrupt Line A from slot 4 PCI0 * 9 Interrupt Line B from slot 4 PCI0 * 10 Interrupt Line C from slot 4 PCI0 * 11 Interrupt Line D from slot 4 PCI0 * 12 Interrupt Line A from slot 5 PCI0 * 13 Interrupt Line B from slot 5 PCI0 * 14 Interrupt Line C from slot 5 PCI0 * 15 Interrupt Line D from slot 5 PCI0 * 16 EISA interrupt (PCI 0) or SCSI interrupt (PCI 1) * 17-23 NA * * IdSel * 1 EISA bridge (PCI bus 0 only) * 2 PCI option slot 2 * 3 PCI option slot 3 * 4 PCI option slot 4 * 5 PCI option slot 5 * */ static int rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] = { /*INT INTA INTB INTC INTD */ { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ { 16+ 4, 16+ 4, 16+ 5, 16+ 6, 16+ 7}, /* IdSel 3 slot 3 */ { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 4 slot 4 */ { 16+12, 16+12, 16+13, 16+14, 16+15} /* IdSel 5 slot 5 */ }; const long min_idsel = 1, max_idsel = 5, irqs_per_slot = 5; struct pci_controller *hose = dev->sysdata; int irq = COMMON_TABLE_LOOKUP; if (irq >= 0) irq += 24 * hose->index; return irq; } /* * The System Vector */ struct alpha_machine_vector rawhide_mv __initmv = { .vector_name = "Rawhide", DO_EV5_MMU, DO_DEFAULT_RTC, DO_MCPCIA_IO, .machine_check = mcpcia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = MCPCIA_DEFAULT_MEM_BASE, .pci_dac_offset = MCPCIA_DAC_OFFSET, .nr_irqs = 128, .device_interrupt = rawhide_srm_device_interrupt, .init_arch = mcpcia_init_arch, .init_irq = rawhide_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = NULL, .pci_map_irq = rawhide_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(rawhide)
linux-master
arch/alpha/kernel/sys_rawhide.c
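A small sketch of how rawhide_enable_irq and friends in the record above turn a Linux IRQ number into a hose index and a per-hose mask bit; the helper name and the printout are assumptions, only the arithmetic comes from the source.

#include <stdio.h>

/* IRQs 0-15 are the legacy i8259 range; each MCPCIA hose then owns 24 lines */
static void decode_rawhide_irq(unsigned int irq, unsigned int *hose,
			       unsigned int *bit)
{
	irq -= 16;
	*hose = irq / 24;		/* which hose's REQ/MASK registers */
	*bit  = irq % 24;		/* bit within that hose's mask */
}

int main(void)
{
	unsigned int hose, bit;

	decode_rawhide_irq(16 + 24 + 5, &hose, &bit);
	printf("hose %u, bit %u\n", hose, bit);		/* prints "hose 1, bit 5" */
	return 0;
}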
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_wildfire.c * * Wildfire support. * * Copyright (C) 2000 Andrea Arcangeli <[email protected]> SuSE */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_wildfire.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/smp.h> #include "proto.h" #include "pci_impl.h" #define DEBUG_CONFIG 0 #define DEBUG_DUMP_REGS 0 #define DEBUG_DUMP_CONFIG 1 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif #if DEBUG_DUMP_REGS static void wildfire_dump_pci_regs(int qbbno, int hoseno); static void wildfire_dump_pca_regs(int qbbno, int pcano); static void wildfire_dump_qsa_regs(int qbbno); static void wildfire_dump_qsd_regs(int qbbno); static void wildfire_dump_iop_regs(int qbbno); static void wildfire_dump_gp_regs(int qbbno); #endif #if DEBUG_DUMP_CONFIG static void wildfire_dump_hardware_config(void); #endif unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; #define QBB_MAP_EMPTY 0xff unsigned long wildfire_hard_qbb_mask; unsigned long wildfire_soft_qbb_mask; unsigned long wildfire_gp_mask; unsigned long wildfire_hs_mask; unsigned long wildfire_iop_mask; unsigned long wildfire_ior_mask; unsigned long wildfire_pca_mask; unsigned long wildfire_cpu_mask; unsigned long wildfire_mem_mask; void __init wildfire_init_hose(int qbbno, int hoseno) { struct pci_controller *hose; wildfire_pci *pci; hose = alloc_pci_controller(); hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* This is for userland consumption. */ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno); hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno); hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno); hose->index = (qbbno << 3) + hoseno; hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS; hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1; hose->io_space->name = pci_io_names[hoseno]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[hoseno]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n", qbbno, hoseno); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n", qbbno, hoseno); #if DEBUG_DUMP_REGS wildfire_dump_pci_regs(qbbno, hoseno); #endif /* * Set up the PCI to main memory translation windows. * * Note: Window 3 is scatter-gather only * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is direct access 1GB at 1GB * Window 2 is direct access 1GB at 2GB * Window 3 is scatter-gather 128MB at 3GB * ??? We ought to scale window 3 memory. 
* */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, SMP_CACHE_BYTES); pci = WILDFIRE_pci(qbbno, hoseno); pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3; pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000; pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes); pci->pci_window[1].wbase.csr = 0x40000000 | 1; pci->pci_window[1].wmask.csr = (0x40000000 -1) & 0xfff00000; pci->pci_window[1].tbase.csr = 0; pci->pci_window[2].wbase.csr = 0x80000000 | 1; pci->pci_window[2].wmask.csr = (0x40000000 -1) & 0xfff00000; pci->pci_window[2].tbase.csr = 0x40000000; pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3; pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000; pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes); wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */ } void __init wildfire_init_pca(int qbbno, int pcano) { /* Test for PCA existence first. */ if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) return; #if DEBUG_DUMP_REGS wildfire_dump_pca_regs(qbbno, pcano); #endif /* Do both hoses of the PCA. */ wildfire_init_hose(qbbno, (pcano << 1) + 0); wildfire_init_hose(qbbno, (pcano << 1) + 1); } void __init wildfire_init_qbb(int qbbno) { int pcano; /* Test for QBB existence first. */ if (!WILDFIRE_QBB_EXISTS(qbbno)) return; #if DEBUG_DUMP_REGS wildfire_dump_qsa_regs(qbbno); wildfire_dump_qsd_regs(qbbno); wildfire_dump_iop_regs(qbbno); wildfire_dump_gp_regs(qbbno); #endif /* Init all PCAs here. */ for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { wildfire_init_pca(qbbno, pcano); } } void __init wildfire_hardware_probe(void) { unsigned long temp; unsigned int hard_qbb, soft_qbb; wildfire_fast_qsd *fast = WILDFIRE_fast_qsd(); wildfire_qsd *qsd; wildfire_qsa *qsa; wildfire_iop *iop; wildfire_gp *gp; wildfire_ne *ne; wildfire_fe *fe; int i; temp = fast->qsd_whami.csr; #if 0 printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp); #endif hard_qbb = (temp >> 8) & 7; soft_qbb = (temp >> 4) & 7; /* Init the HW configuration variables. */ wildfire_hard_qbb_mask = (1 << hard_qbb); wildfire_soft_qbb_mask = (1 << soft_qbb); wildfire_gp_mask = 0; wildfire_hs_mask = 0; wildfire_iop_mask = 0; wildfire_ior_mask = 0; wildfire_pca_mask = 0; wildfire_cpu_mask = 0; wildfire_mem_mask = 0; memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB); /* First, determine which QBBs are present. */ qsa = WILDFIRE_qsa(soft_qbb); temp = qsa->qsa_qbb_id.csr; #if 0 printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp); #endif if (temp & 0x40) /* Is there an HS? */ wildfire_hs_mask = 1; if (temp & 0x20) { /* Is there a GP? */ gp = WILDFIRE_gp(soft_qbb); temp = 0; for (i = 0; i < 4; i++) { temp |= gp->gpa_qbb_map[i].csr << (i * 8); #if 0 printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n", i, gp, temp); #endif } for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) { if (temp & 8) { /* Is there a QBB? */ soft_qbb = temp & 7; wildfire_hard_qbb_mask |= (1 << hard_qbb); wildfire_soft_qbb_mask |= (1 << soft_qbb); } temp >>= 4; } wildfire_gp_mask = wildfire_soft_qbb_mask; } /* Next determine each QBBs resources. 
*/ for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) { if (WILDFIRE_QBB_EXISTS(soft_qbb)) { qsd = WILDFIRE_qsd(soft_qbb); temp = qsd->qsd_whami.csr; #if 0 printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp); #endif hard_qbb = (temp >> 8) & 7; wildfire_hard_qbb_map[hard_qbb] = soft_qbb; wildfire_soft_qbb_map[soft_qbb] = hard_qbb; qsa = WILDFIRE_qsa(soft_qbb); temp = qsa->qsa_qbb_pop[0].csr; #if 0 printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp); #endif wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2); wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); temp = qsa->qsa_qbb_pop[1].csr; #if 0 printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp); #endif wildfire_iop_mask |= (1 << soft_qbb); wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2); temp = qsa->qsa_qbb_id.csr; #if 0 printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp); #endif if (temp & 0x20) wildfire_gp_mask |= (1 << soft_qbb); /* Probe for PCA existence here. */ for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) { iop = WILDFIRE_iop(soft_qbb); ne = WILDFIRE_ne(soft_qbb, i); fe = WILDFIRE_fe(soft_qbb, i); if ((iop->iop_hose[i].init.csr & 1) == 1 && ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) && ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL)) { wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i); } } } } #if DEBUG_DUMP_CONFIG wildfire_dump_hardware_config(); #endif } void __init wildfire_init_arch(void) { int qbbno; /* With multiple PCI buses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* Probe the hardware for info about configuration. */ wildfire_hardware_probe(); /* Now init all the found QBBs. */ for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) { wildfire_init_qbb(qbbno); } /* Normal direct PCI DMA mapping. */ __direct_map_base = 0x40000000UL; __direct_map_size = 0x80000000UL; } void wildfire_machine_check(unsigned long vector, unsigned long la_ptr) { mb(); mb(); /* magic */ draina(); /* FIXME: clear pci errors */ wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "WILDFIRE", mcheck_expected(smp_processor_id())); } void wildfire_kill_arch(int mode) { } void wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { int qbbno = hose->index >> 3; int hoseno = hose->index & 7; wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno); mb(); pci->pci_flush_tlb.csr; /* reading does the trick */ } static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops wildfire_pci_ops = { .read = wildfire_read_config, .write = wildfire_write_config, }; #if DEBUG_DUMP_REGS static void __init wildfire_dump_pci_regs(int qbbno, int hoseno) { wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno); int i; printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n", qbbno, hoseno, pci); printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n", pci->pci_io_addr_ext.csr); printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr); printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr); printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr); printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr); printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr); printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr); printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n", qbbno, hoseno, pci); for (i = 0; i < 4; i++) { printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i, pci->pci_window[i].wbase.csr, pci->pci_window[i].wmask.csr, pci->pci_window[i].tbase.csr); } printk(KERN_ERR "\n"); } static void __init wildfire_dump_pca_regs(int qbbno, int pcano) { wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano); int i; printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n", qbbno, pcano, pca); printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr); printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr); printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr); printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr); printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n", pca->pca_stdio_edge_level.csr); printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n", qbbno, pcano, pca); for (i = 0; i < 4; i++) { printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i, pca->pca_int[i].target.csr, pca->pca_int[i].enable.csr); } printk(KERN_ERR "\n"); } static void __init wildfire_dump_qsa_regs(int qbbno) { wildfire_qsa *qsa = WILDFIRE_qsa(qbbno); int i; printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa); printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr); printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr); printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr); for (i = 0; i < 5; i++) printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n", i, qsa->qsa_config[i].csr); for (i = 0; i < 2; i++) printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n", i, 
qsa->qsa_qbb_pop[0].csr); printk(KERN_ERR "\n"); } static void __init wildfire_dump_qsd_regs(int qbbno) { wildfire_qsd *qsd = WILDFIRE_qsd(qbbno); printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd); printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr); printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr); printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n", qsd->qsd_port_present.csr); printk(KERN_ERR " QSD_PORT_ACTIVE: 0x%16lx\n", qsd->qsd_port_active.csr); printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n", qsd->qsd_fault_ena.csr); printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n", qsd->qsd_cpu_int_ena.csr); printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n", qsd->qsd_mem_config.csr); printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n", qsd->qsd_err_sum.csr); printk(KERN_ERR "\n"); } static void __init wildfire_dump_iop_regs(int qbbno) { wildfire_iop *iop = WILDFIRE_iop(qbbno); int i; printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop); printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr); printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr); printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n", iop->iop_switch_credits.csr); printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n", iop->iop_hose_credits.csr); for (i = 0; i < 4; i++) printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n", i, iop->iop_hose[i].init.csr); for (i = 0; i < 4; i++) printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n", i, iop->iop_dev_int[i].target.csr); printk(KERN_ERR "\n"); } static void __init wildfire_dump_gp_regs(int qbbno) { wildfire_gp *gp = WILDFIRE_gp(qbbno); int i; printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp); for (i = 0; i < 4; i++) printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n", i, gp->gpa_qbb_map[i].csr); printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n", gp->gpa_mem_pop_map.csr); printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr); printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr); printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr); printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr); printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr); printk(KERN_ERR "\n"); } #endif /* DUMP_REGS */ #if DEBUG_DUMP_CONFIG static void __init wildfire_dump_hardware_config(void) { int i; printk(KERN_ERR "Probed Hardware Configuration\n"); printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask); printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask); printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask); printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask); printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask); printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask); printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask); printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask); printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask); printk(" hard_qbb_map: "); for (i = 0; i < WILDFIRE_MAX_QBB; i++) if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY) printk("--- "); else printk("%3d ", wildfire_hard_qbb_map[i]); printk("\n"); printk(" soft_qbb_map: "); for (i = 0; i < WILDFIRE_MAX_QBB; i++) if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY) printk("--- "); else printk("%3d ", wildfire_soft_qbb_map[i]); printk("\n"); } #endif /* DUMP_CONFIG */
linux-master
arch/alpha/kernel/core_wildfire.c
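A sketch of the QBB/hose packing used for hose->index in the record above: wildfire_init_hose packs with (qbbno << 3) + hoseno and wildfire_pci_tbi unpacks with index >> 3 and index & 7. The function names here are illustrative stand-ins.

#include <assert.h>
#include <stdio.h>

static int pack_index(int qbbno, int hoseno) { return (qbbno << 3) + hoseno; }
static int index_qbb(int index)              { return index >> 3; }
static int index_hose(int index)             { return index & 7; }

int main(void)
{
	int idx = pack_index(2, 5);		/* QBB 2, hose 5 */

	assert(index_qbb(idx) == 2 && index_hose(idx) == 5);
	printf("index %d -> qbb %d, hose %d\n", idx, index_qbb(idx), index_hose(idx));
	return 0;
}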
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_noritake.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the NORITAKE (AlphaServer 1000A), * CORELLE (AlphaServer 800), and ALCOR Primo (AlphaStation 600A). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/mce.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_apecs.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static int cached_irq_mask; static inline void noritake_update_irq_hw(int irq, int mask) { int port = 0x54a; if (irq >= 32) { mask >>= 16; port = 0x54c; } outw(mask, port); } static void noritake_enable_irq(struct irq_data *d) { noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16)); } static void noritake_disable_irq(struct irq_data *d) { noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16))); } static struct irq_chip noritake_irq_type = { .name = "NORITAKE", .irq_unmask = noritake_enable_irq, .irq_mask = noritake_disable_irq, .irq_mask_ack = noritake_disable_irq, }; static void noritake_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary registers of NORITAKE */ pld = (((unsigned long) inw(0x54c) << 32) | ((unsigned long) inw(0x54a) << 16) | ((unsigned long) inb(0xa0) << 8) | inb(0x20)); /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i < 16) { isa_device_interrupt(vector); } else { handle_irq(i); } } } static void noritake_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * I really hate to do this, too, but the NORITAKE SRM console also * reports PCI vectors *lower* than I expected from the bit numbers * in the documentation. * But I really don't want to change the fixup code for allocation * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which * look nice and clean now. * So, here's this additional grotty hack... :-( */ if (irq >= 16) irq = irq + 1; handle_irq(irq); } static void __init noritake_init_irq(void) { long i; if (alpha_using_srm) alpha_mv.device_interrupt = noritake_srm_device_interrupt; outw(0, 0x54a); outw(0, 0x54c); for (i = 16; i < 48; ++i) { irq_set_chip_and_handler(i, &noritake_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); common_init_isa_dma(); } /* * PCI Fixup configuration. 
* * Summary @ 0x542, summary register #1: * Bit Meaning * 0 All valid ints from summary regs 2 & 3 * 1 QLOGIC ISP1020A SCSI * 2 Interrupt Line A from slot 0 * 3 Interrupt Line B from slot 0 * 4 Interrupt Line A from slot 1 * 5 Interrupt line B from slot 1 * 6 Interrupt Line A from slot 2 * 7 Interrupt Line B from slot 2 * 8 Interrupt Line A from slot 3 * 9 Interrupt Line B from slot 3 *10 Interrupt Line A from slot 4 *11 Interrupt Line B from slot 4 *12 Interrupt Line A from slot 5 *13 Interrupt Line B from slot 5 *14 Interrupt Line A from slot 6 *15 Interrupt Line B from slot 6 * * Summary @ 0x544, summary register #2: * Bit Meaning * 0 OR of all unmasked ints in SR #2 * 1 OR of secondary bus ints * 2 Interrupt Line C from slot 0 * 3 Interrupt Line D from slot 0 * 4 Interrupt Line C from slot 1 * 5 Interrupt line D from slot 1 * 6 Interrupt Line C from slot 2 * 7 Interrupt Line D from slot 2 * 8 Interrupt Line C from slot 3 * 9 Interrupt Line D from slot 3 *10 Interrupt Line C from slot 4 *11 Interrupt Line D from slot 4 *12 Interrupt Line C from slot 5 *13 Interrupt Line D from slot 5 *14 Interrupt Line C from slot 6 *15 Interrupt Line D from slot 6 * * The device to slot mapping looks like: * * Slot Device * 7 Intel PCI-EISA bridge chip * 8 DEC PCI-PCI bridge chip * 11 PCI on board slot 0 * 12 PCI on board slot 1 * 13 PCI on board slot 2 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ static int noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[15][5] = { /*INT INTA INTB INTC INTD */ /* note: IDSELs 16, 17, and 25 are CORELLE only */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ { -1, -1, -1, -1, -1}, /* IdSel 17, S3 Trio64 */ { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ { -1, -1, -1, -1, -1}, /* IdSel 19, PPB */ { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ { -1, -1, -1, -1, -1}, /* IdSel 21, ???? */ { 16+2, 16+2, 16+3, 32+2, 32+3}, /* IdSel 22, slot 0 */ { 16+4, 16+4, 16+5, 32+4, 32+5}, /* IdSel 23, slot 1 */ { 16+6, 16+6, 16+7, 32+6, 32+7}, /* IdSel 24, slot 2 */ { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 25, slot 3 */ /* The following 5 are actually on PCI bus 1, which is across the built-in bridge of the NORITAKE only. */ { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ { 16+8, 16+8, 16+9, 32+8, 32+9}, /* IdSel 17, slot 3 */ {16+10, 16+10, 16+11, 32+10, 32+11}, /* IdSel 18, slot 4 */ {16+12, 16+12, 16+13, 32+12, 32+13}, /* IdSel 19, slot 5 */ {16+14, 16+14, 16+15, 32+14, 32+15}, /* IdSel 20, slot 6 */ }; const long min_idsel = 5, max_idsel = 19, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static u8 noritake_swizzle(struct pci_dev *dev, u8 *pinp) { int slot, pin = *pinp; if (dev->bus->number == 0) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge */ else if (PCI_SLOT(dev->bus->self->devfn) == 8) { slot = PCI_SLOT(dev->devfn) + 15; /* WAG! */ } else { /* Must be a card-based bridge. */ do { if (PCI_SLOT(dev->bus->self->devfn) == 8) { slot = PCI_SLOT(dev->devfn) + 15; break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. 
*/ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) static void noritake_apecs_machine_check(unsigned long vector, unsigned long la_ptr) { #define MCHK_NO_DEVSEL 0x205U #define MCHK_NO_TABT 0x204U struct el_common *mchk_header; unsigned int code; mchk_header = (struct el_common *)la_ptr; /* Clear the error before any reporting. */ mb(); mb(); /* magic */ draina(); apecs_pci_clr_err(); wrmces(0x7); mb(); code = mchk_header->code; process_mcheck_info(vector, la_ptr, "NORITAKE APECS", (mcheck_expected(0) && (code == MCHK_NO_DEVSEL || code == MCHK_NO_TABT))); } #endif /* * The System Vectors */ #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) struct alpha_machine_vector noritake_mv __initmv = { .vector_name = "Noritake", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = noritake_apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = noritake_device_interrupt, .init_arch = apecs_init_arch, .init_irq = noritake_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .pci_map_irq = noritake_map_irq, .pci_swizzle = noritake_swizzle, }; ALIAS_MV(noritake) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO) struct alpha_machine_vector noritake_primo_mv __initmv = { .vector_name = "Noritake-Primo", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = noritake_device_interrupt, .init_arch = cia_init_arch, .init_irq = noritake_init_irq, .init_rtc = common_init_rtc, .init_pci = cia_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = noritake_map_irq, .pci_swizzle = noritake_swizzle, }; ALIAS_MV(noritake_primo) #endif
linux-master
arch/alpha/kernel/sys_noritake.c
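A user-space analogue (GCC/Clang only, because of __builtin_ctzl) of the scan loop noritake_device_interrupt runs over its interrupt summary word above: ffz(~pld) picks the lowest set bit and pld &= pld - 1 clears it. The builtin and the printf stand in for the kernel's ffz() and handle_irq().

#include <stdio.h>

int main(void)
{
	unsigned long pld = (1UL << 2) | (1UL << 17) | (1UL << 33);	/* example summary */

	while (pld) {
		unsigned int i = __builtin_ctzl(pld);	/* lowest set bit, like ffz(~pld) */

		pld &= pld - 1;				/* clear least bit set */
		printf("dispatch bit %u\n", i);		/* kernel would call handle_irq() here */
	}
	return 0;
}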
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/process.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles the architecture-dependent parts of process handling. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/time.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/vt.h> #include <linux/mman.h> #include <linux/elfcore.h> #include <linux/reboot.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <asm/reg.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/hwrpb.h> #include <asm/fpu.h> #include "proto.h" #include "pci_impl.h" /* * Power off function, if any */ void (*pm_power_off)(void) = machine_power_off; EXPORT_SYMBOL(pm_power_off); #ifdef CONFIG_ALPHA_WTINT /* * Sleep the CPU. * EV6, LCA45 and QEMU know how to power down, skipping N timer interrupts. */ void arch_cpu_idle(void) { wtint(0); } void __noreturn arch_cpu_idle_dead(void) { wtint(INT_MAX); BUG(); } #endif /* ALPHA_WTINT */ struct halt_info { int mode; char *restart_cmd; }; static void common_shutdown_1(void *generic_ptr) { struct halt_info *how = generic_ptr; struct percpu_struct *cpup; unsigned long *pflags, flags; int cpuid = smp_processor_id(); /* No point in taking interrupts anymore. */ local_irq_disable(); cpup = (struct percpu_struct *) ((unsigned long)hwrpb + hwrpb->processor_offset + hwrpb->processor_size * cpuid); pflags = &cpup->flags; flags = *pflags; /* Clear reason to "default"; clear "bootstrap in progress". */ flags &= ~0x00ff0001UL; #ifdef CONFIG_SMP /* Secondaries halt here. */ if (cpuid != boot_cpuid) { flags |= 0x00040000UL; /* "remain halted" */ *pflags = flags; set_cpu_present(cpuid, false); set_cpu_possible(cpuid, false); halt(); } #endif if (how->mode == LINUX_REBOOT_CMD_RESTART) { if (!how->restart_cmd) { flags |= 0x00020000UL; /* "cold bootstrap" */ } else { /* For SRM, we could probably set environment variables to get this to work. We'd have to delay this until after srm_paging_stop unless we ever got srm_fixup working. At the moment, SRM will use the last boot device, but the file and flags will be the defaults, when doing a "warm" bootstrap. */ flags |= 0x00030000UL; /* "warm bootstrap" */ } } else { flags |= 0x00040000UL; /* "remain halted" */ } *pflags = flags; #ifdef CONFIG_SMP /* Wait for the secondaries to halt. */ set_cpu_present(boot_cpuid, false); set_cpu_possible(boot_cpuid, false); while (!cpumask_empty(cpu_present_mask)) barrier(); #endif /* If booted from SRM, reset some of the original environment. */ if (alpha_using_srm) { #ifdef CONFIG_DUMMY_CONSOLE /* If we've gotten here after SysRq-b, leave interrupt context before taking over the console. */ if (in_hardirq()) irq_exit(); /* This has the effect of resetting the VGA video origin. */ console_lock(); do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); console_unlock(); #endif pci_restore_srm_config(); set_hae(srm_hae); } if (alpha_mv.kill_arch) alpha_mv.kill_arch(how->mode); if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) { /* Unfortunately, since MILO doesn't currently understand the hwrpb bits above, we can't reliably halt the processor and keep it halted. 
So just loop. */ return; } if (alpha_using_srm) srm_paging_stop(); halt(); } static void common_shutdown(int mode, char *restart_cmd) { struct halt_info args; args.mode = mode; args.restart_cmd = restart_cmd; on_each_cpu(common_shutdown_1, &args, 0); } void machine_restart(char *restart_cmd) { common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd); } void machine_halt(void) { common_shutdown(LINUX_REBOOT_CMD_HALT, NULL); } void machine_power_off(void) { common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL); } /* Used by sysrq-p, among others. I don't believe r9-r15 are ever saved in the context it's used. */ void show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_DEFAULT); dik_show_regs(regs, NULL); } /* * Re-start a thread when doing execve() */ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) { regs->pc = pc; regs->ps = 8; wrusp(sp); } EXPORT_SYMBOL(start_thread); void flush_thread(void) { /* Arrange for each exec'ed process to start off with a clean slate with respect to the FPU. This is all exceptions disabled. */ current_thread_info()->ieee_state = 0; wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0)); /* Clean slate for TLS. */ current_thread_info()->pcb.unique = 0; } /* * Copy architecture-specific thread state */ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long usp = args->stack; unsigned long tls = args->tls; extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); struct thread_info *childti = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); struct pt_regs *regs = current_pt_regs(); struct switch_stack *childstack, *stack; childstack = ((struct switch_stack *) childregs) - 1; childti->pcb.ksp = (unsigned long) childstack; childti->pcb.flags = 1; /* set FEN, clear everything else */ childti->status |= TS_SAVED_FP | TS_RESTORE_FP; if (unlikely(args->fn)) { /* kernel thread */ memset(childstack, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs)); childstack->r26 = (unsigned long) ret_from_kernel_thread; childstack->r9 = (unsigned long) args->fn; childstack->r10 = (unsigned long) args->fn_arg; childregs->hae = alpha_mv.hae_cache; memset(childti->fp, '\0', sizeof(childti->fp)); childti->pcb.usp = 0; return 0; } /* Note: if CLONE_SETTLS is not set, then we must inherit the value from the parent, which will have been set by the block copy in dup_task_struct. This is non-intuitive, but is required for proper operation in the case of a threaded application calling fork. */ if (clone_flags & CLONE_SETTLS) childti->pcb.unique = tls; else regs->r20 = 0; /* OSF/1 has some strange fork() semantics. */ childti->pcb.usp = usp ?: rdusp(); *childregs = *regs; childregs->r0 = 0; childregs->r19 = 0; childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ stack = ((struct switch_stack *) regs) - 1; *childstack = *stack; childstack->r26 = (unsigned long) ret_from_fork; return 0; } /* * Fill in the user structure for a ELF core dump. 
*/ void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) { /* switch stack follows right below pt_regs: */ struct switch_stack * sw = ((struct switch_stack *) pt) - 1; dest[ 0] = pt->r0; dest[ 1] = pt->r1; dest[ 2] = pt->r2; dest[ 3] = pt->r3; dest[ 4] = pt->r4; dest[ 5] = pt->r5; dest[ 6] = pt->r6; dest[ 7] = pt->r7; dest[ 8] = pt->r8; dest[ 9] = sw->r9; dest[10] = sw->r10; dest[11] = sw->r11; dest[12] = sw->r12; dest[13] = sw->r13; dest[14] = sw->r14; dest[15] = sw->r15; dest[16] = pt->r16; dest[17] = pt->r17; dest[18] = pt->r18; dest[19] = pt->r19; dest[20] = pt->r20; dest[21] = pt->r21; dest[22] = pt->r22; dest[23] = pt->r23; dest[24] = pt->r24; dest[25] = pt->r25; dest[26] = pt->r26; dest[27] = pt->r27; dest[28] = pt->r28; dest[29] = pt->gp; dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; dest[31] = pt->pc; /* Once upon a time this was the PS value. Which is stupid since that is always 8 for usermode. Usurped for the more useful value of the thread's UNIQUE field. */ dest[32] = ti->pcb.unique; } EXPORT_SYMBOL(dump_elf_thread); int dump_elf_task(elf_greg_t *dest, struct task_struct *task) { dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task)); return 1; } EXPORT_SYMBOL(dump_elf_task); int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) { memcpy(fpu, task_thread_info(t)->fp, 32 * 8); return 1; } /* * Return saved PC of a blocked thread. This assumes the frame * pointer is the 6th saved long on the kernel stack and that the * saved return address is the first long in the frame. This all * holds provided the thread blocked through a call to schedule() ($15 * is the frame pointer in schedule() and $15 is saved at offset 48 by * entry.S:do_switch_stack). * * Under heavy swap load I've seen this lose in an ugly way. So do * some extra sanity checking on the ranges we expect these pointers * to be in so that we can fail gracefully. This is just for ps after * all. -- r~ */ static unsigned long thread_saved_pc(struct task_struct *t) { unsigned long base = (unsigned long)task_stack_page(t); unsigned long fp, sp = task_thread_info(t)->pcb.ksp; if (sp > base && sp+6*8 < base + 16*1024) { fp = ((unsigned long*)sp)[6]; if (fp > sp && fp < base + 16*1024) return *(unsigned long *)fp; } return 0; } unsigned long __get_wchan(struct task_struct *p) { unsigned long schedule_frame; unsigned long pc; /* * This one depends on the frame size of schedule(). Do a * "disass schedule" in gdb to find the frame size. Also, the * code assumes that sleep_on() follows immediately after * interruptible_sleep_on() and that add_timer() follows * immediately after interruptible_sleep(). Ugly, isn't it? * Maybe adding a wchan field to task_struct would be better, * after all... */ pc = thread_saved_pc(p); if (in_sched_functions(pc)) { schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6]; return ((unsigned long *)schedule_frame)[12]; } return pc; }
linux-master
arch/alpha/kernel/process.c
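A sketch of the pointer trick copy_thread and dump_elf_thread use above, where "((struct switch_stack *)p) - 1" places a switch_stack immediately below pt_regs at the top of the kernel stack. The struct names and field layouts below are stand-ins; only the pointer arithmetic mirrors the record.

#include <stdio.h>

struct fake_switch_stack { unsigned long r9, r10, r26; };
struct fake_pt_regs      { unsigned long r0, pc; };

int main(void)
{
	unsigned long stack[32];				/* stand-in kernel stack area */
	struct fake_pt_regs *regs = (struct fake_pt_regs *)(stack + 32) - 1;
	struct fake_switch_stack *sw = (struct fake_switch_stack *)regs - 1;

	printf("switch_stack at %p, pt_regs at %p, gap %zu bytes\n",
	       (void *)sw, (void *)regs, sizeof(*sw));
	return 0;
}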
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_alcor.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the ALCOR and XLT (XL-300/366/433). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/mmu_context.h> #include <asm/irq.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static unsigned long cached_irq_mask; static inline void alcor_update_irq_hw(unsigned long mask) { *(vuip)GRU_INT_MASK = mask; mb(); } static inline void alcor_enable_irq(struct irq_data *d) { alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void alcor_disable_irq(struct irq_data *d) { alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static void alcor_mask_and_ack_irq(struct irq_data *d) { alcor_disable_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb(); *(vuip)GRU_INT_CLEAR = 0; mb(); } static void alcor_isa_mask_and_ack_irq(struct irq_data *d) { i8259a_mask_and_ack_irq(d); /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); *(vuip)GRU_INT_CLEAR = 0; mb(); } static struct irq_chip alcor_irq_type = { .name = "ALCOR", .irq_unmask = alcor_enable_irq, .irq_mask = alcor_disable_irq, .irq_mask_ack = alcor_mask_and_ack_irq, }; static void alcor_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary register of the GRU */ pld = (*(vuip)GRU_INT_REQ) & GRU_INT_REQ_BITS; /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 31) { isa_device_interrupt(vector); } else { handle_irq(16 + i); } } } static void __init alcor_init_irq(void) { long i; if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; *(vuip)GRU_INT_MASK = 0; mb(); /* all disabled */ *(vuip)GRU_INT_EDGE = 0; mb(); /* all are level */ *(vuip)GRU_INT_HILO = 0x80000000U; mb(); /* ISA only HI */ *(vuip)GRU_INT_CLEAR = 0; mb(); /* all clear */ for (i = 16; i < 48; ++i) { /* On Alcor, at least, lines 20..30 are not connected and can generate spurious interrupts if we turn them on while IRQ probing. */ if (i >= 16+20 && i <= 16+30) continue; irq_set_chip_and_handler(i, &alcor_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq; init_i8259a_irqs(); common_init_isa_dma(); if (request_irq(16 + 31, no_action, 0, "isa-cascade", NULL)) pr_err("Failed to register isa-cascade interrupt\n"); } /* * PCI Fixup configuration. 
* * Summary @ GRU_INT_REQ: * Bit Meaning * 0 Interrupt Line A from slot 2 * 1 Interrupt Line B from slot 2 * 2 Interrupt Line C from slot 2 * 3 Interrupt Line D from slot 2 * 4 Interrupt Line A from slot 1 * 5 Interrupt line B from slot 1 * 6 Interrupt Line C from slot 1 * 7 Interrupt Line D from slot 1 * 8 Interrupt Line A from slot 0 * 9 Interrupt Line B from slot 0 *10 Interrupt Line C from slot 0 *11 Interrupt Line D from slot 0 *12 Interrupt Line A from slot 4 *13 Interrupt Line B from slot 4 *14 Interrupt Line C from slot 4 *15 Interrupt Line D from slot 4 *16 Interrupt Line D from slot 3 *17 Interrupt Line D from slot 3 *18 Interrupt Line D from slot 3 *19 Interrupt Line D from slot 3 *20-30 Reserved *31 EISA interrupt * * The device to slot mapping looks like: * * Slot Device * 6 built-in TULIP (XLT only) * 7 PCI on board slot 0 * 8 PCI on board slot 3 * 9 PCI on board slot 4 * 10 PCEB (PCI-EISA bridge) * 11 PCI on board slot 2 * 12 PCI on board slot 1 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ static int alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[7][5] = { /*INT INTA INTB INTC INTD */ /* note: IDSEL 17 is XLT only */ {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 18, slot 0 */ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 19, slot 3 */ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 20, slot 4 */ { -1, -1, -1, -1, -1}, /* IdSel 21, PCEB */ { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 2 */ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ }; const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static void alcor_kill_arch(int mode) { cia_kill_arch(mode); #ifndef ALPHA_RESTORE_SRM_SETUP switch(mode) { case LINUX_REBOOT_CMD_RESTART: /* Who said DEC engineer's have no sense of humor? ;-) */ if (alpha_using_srm) { *(vuip) GRU_RESET = 0x0000dead; mb(); } break; case LINUX_REBOOT_CMD_HALT: break; case LINUX_REBOOT_CMD_POWER_OFF: break; } halt(); #endif } static void __init alcor_init_pci(void) { struct pci_dev *dev; cia_init_pci(); /* * Now we can look to see if we are really running on an XLT-type * motherboard, by looking for a 21040 TULIP in slot 6, which is * built into XLT and BRET/MAVERICK, but not available on ALCOR. 
*/ dev = pci_get_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, NULL); if (dev && dev->devfn == PCI_DEVFN(6,0)) { alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS; printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n", __func__); } pci_dev_put(dev); } /* * The System Vectors */ struct alpha_machine_vector alcor_mv __initmv = { .vector_name = "Alcor", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = alcor_device_interrupt, .init_arch = cia_init_arch, .init_irq = alcor_init_irq, .init_rtc = common_init_rtc, .init_pci = alcor_init_pci, .kill_arch = alcor_kill_arch, .pci_map_irq = alcor_map_irq, .pci_swizzle = common_swizzle, .sys = { .cia = { .gru_int_req_bits = ALCOR_GRU_INT_REQ_BITS }} }; ALIAS_MV(alcor) struct alpha_machine_vector xlt_mv __initmv = { .vector_name = "XLT", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 48, .device_interrupt = alcor_device_interrupt, .init_arch = cia_init_arch, .init_irq = alcor_init_irq, .init_rtc = common_init_rtc, .init_pci = alcor_init_pci, .kill_arch = alcor_kill_arch, .pci_map_irq = alcor_map_irq, .pci_swizzle = common_swizzle, .sys = { .cia = { .gru_int_req_bits = XLT_GRU_INT_REQ_BITS }} }; /* No alpha_mv alias for XLT, since we compile it in unconditionally with ALCOR; setup_arch knows how to cope. */
linux-master
arch/alpha/kernel/sys_alcor.c
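A sketch of the cached-mask pattern alcor_enable_irq/alcor_disable_irq use in the record above: the software copy is updated and the whole word is written back to hardware in one step. write_hw() is an assumed stand-in for the *(vuip)GRU_INT_MASK store; the bit arithmetic matches the source.

#include <stdio.h>

static unsigned long cached_irq_mask;

static void write_hw(unsigned long mask) { printf("GRU mask <- %#lx\n", mask); }

static void enable_irq(unsigned int irq)  { write_hw(cached_irq_mask |=  1UL << (irq - 16)); }
static void disable_irq(unsigned int irq) { write_hw(cached_irq_mask &= ~(1UL << (irq - 16))); }

int main(void)
{
	enable_irq(20);		/* sets bit 4 */
	enable_irq(35);		/* sets bit 19 */
	disable_irq(20);	/* clears bit 4; bit 19 stays set */
	return 0;
}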
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/err_marvel.c * * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation) * */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/console.h> #include <asm/core_marvel.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev7.h> #include "err_impl.h" #include "proto.h" static void marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets) { #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_environmental_subpacket *env; struct { int type; char *name; } ev_packets[] = { { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE, "Ambient Temperature" }, { EL_TYPE__PAL__ENV__AIRMOVER_FAN, "AirMover / Fan" }, { EL_TYPE__PAL__ENV__VOLTAGE, "Voltage" }, { EL_TYPE__PAL__ENV__INTRUSION, "Intrusion" }, { EL_TYPE__PAL__ENV__POWER_SUPPLY, "Power Supply" }, { EL_TYPE__PAL__ENV__LAN, "LAN" }, { EL_TYPE__PAL__ENV__HOT_PLUG, "Hot Plug" }, { 0, NULL } }; int i; for (i = 0; ev_packets[i].type != 0; i++) { env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)]; if (!env) continue; printk("%s**%s event (cabinet %d, drawer %d)\n", err_print_prefix, ev_packets[i].name, env->cabinet, env->drawer); printk("%s Module Type: 0x%x - Unit ID 0x%x - " "Condition 0x%x\n", err_print_prefix, env->module_type, env->unit_id, env->condition); } #endif /* CONFIG_VERBOSE_MCHECK */ } static int marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; int i; for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE); i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG); i++) { if (lf_subpackets->env[i]) status = MCHK_DISPOSITION_REPORT; } if (print) marvel_print_680_frame(lf_subpackets); return status; } #ifdef CONFIG_VERBOSE_MCHECK static void marvel_print_err_cyc(u64 err_cyc) { static char *packet_desc[] = { "No Error", "UNKNOWN", "1 cycle (1 or 2 flit packet)", "2 cycles (3 flit packet)", "9 cycles (18 flit packet)", "10 cycles (19 flit packet)", "UNKNOWN", "UNKNOWN", "UNKNOWN" }; #define IO7__ERR_CYC__ODD_FLT (1UL << 0) #define IO7__ERR_CYC__EVN_FLT (1UL << 1) #define IO7__ERR_CYC__PACKET__S (6) #define IO7__ERR_CYC__PACKET__M (0x7) #define IO7__ERR_CYC__LOC (1UL << 5) #define IO7__ERR_CYC__CYCLE__S (2) #define IO7__ERR_CYC__CYCLE__M (0x7) printk("%s Packet In Error: %s\n" "%s Error in %s, cycle %lld%s%s\n", err_print_prefix, packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], err_print_prefix, (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER", EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE), (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "", (err_cyc & IO7__ERR_CYC__EVN_FLT) ? 
" [Even Flit]": ""); } static void marvel_print_po7_crrct_sym(u64 crrct_sym) { #define IO7__PO7_CRRCT_SYM__SYN__S (0) #define IO7__PO7_CRRCT_SYM__SYN__M (0x7f) #define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */ #define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff) printk("%s Correctable Error Symptoms:\n" "%s Syndrome: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); } static void marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask) { static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" }; static char *clk_decode[] = { "No Error", "One extra rising edge", "Two extra rising edges", "Lost one clock" }; static char *port_names[] = { "Port 0", "Port 1", "Port 2", "Port 3", "Unknown Port", "Unknown Port", "Unknown Port", "Port 7" }; int scratch, i; #define IO7__PO7_UNCRR_SYM__SYN__S (0) #define IO7__PO7_UNCRR_SYM__SYN__M (0x7f) #define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */ #define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... + EVN_FLT */ #define IO7__PO7_UNCRR_SYM__CLK__S (16) #define IO7__PO7_UNCRR_SYM__CLK__M (0xff) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27) #define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28) #define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29) #define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30) #define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32) #define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40) #define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff) #define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48) #define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff) #define IO7__STRV_VTR__LSI__INTX__S (0) #define IO7__STRV_VTR__LSI__INTX__M (0x3) #define IO7__STRV_VTR__LSI__SLOT__S (2) #define IO7__STRV_VTR__LSI__SLOT__M (0x7) #define IO7__STRV_VTR__LSI__BUS__S (5) #define IO7__STRV_VTR__LSI__BUS__M (0x3) #define IO7__STRV_VTR__MSI__INTNUM__S (0) #define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff) #define IO7__STRV_VTR__IS_MSI (1UL << 9) printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix); uncrr_sym &= valid_mask; if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) printk("%s Syndrome: 0x%llx\n", err_print_prefix, EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC)) marvel_print_err_cyc(EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__ERR_CYC)); scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK); for (i = 0; i < 4; i++, scratch >>= 2) { if (scratch & 0x3) printk("%s Clock %s: %s\n", err_print_prefix, clk_names[i], clk_decode[scratch & 0x3]); } if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ) printk("%s REQ Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO) printk("%s RIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO) printk("%s WIO Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK) printk("%s BLK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK) printk("%s NBK Credit Timeout or Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO) printk("%s Read I/O Buffer Overflow\n", err_print_prefix); 
if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO) printk("%s Write I/O Buffer Overflow\n", err_print_prefix); if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD) printk("%s FWD Buffer Overflow\n", err_print_prefix); if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) { int lost = scratch & (1UL << 4); scratch &= ~lost; for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Response sent to %s", err_print_prefix, port_names[i]); } if (lost) printk("%s Lost Error sent somewhere else\n", err_print_prefix); } if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) { for (i = 0; i < 8; i++, scratch >>= 1) { if (!(scratch & 1)) continue; printk("%s Error Reported by %s", err_print_prefix, port_names[i]); } } if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) { char starvation_message[80]; scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR); if (scratch & IO7__STRV_VTR__IS_MSI) sprintf(starvation_message, "MSI Interrupt 0x%x", EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM)); else sprintf(starvation_message, "LSI INT%c for Bus:Slot (%d:%d)\n", 'A' + EXTRACT(scratch, IO7__STRV_VTR__LSI__INTX), EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS), EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT)); printk("%s Starvation Int Trigger By: %s\n", err_print_prefix, starvation_message); } } static void marvel_print_po7_ugbge_sym(u64 ugbge_sym) { char opcode_str[10]; #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6) #define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful) #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40) #define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48) #define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52) #define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff) #define IO7__PO7_UGBGE_SYM__VALID (1UL << 63) if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID)) return; switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) { case 0x51: sprintf(opcode_str, "Wr32"); break; case 0x50: sprintf(opcode_str, "WrQW"); break; case 0x54: sprintf(opcode_str, "WrIPR"); break; case 0xD8: sprintf(opcode_str, "Victim"); break; case 0xC5: sprintf(opcode_str, "BlkIO"); break; default: sprintf(opcode_str, "0x%llx\n", EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); break; } printk("%s Up Hose Garbage Symptom:\n" "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", err_print_prefix, err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID), opcode_str); if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) printk("%s Packet Offset 0x%08llx\n", err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); } static void marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io) { u64 uncrr_sym_valid = 0; #define IO7__PO7_ERRSUM__CR_SBE (1UL << 32) #define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33) #define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34) #define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35) #define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36) #define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37) #define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38) #define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39) #define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40) #define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41) #define IO7__PO7_ERRSUM__MAF_TO (1UL << 42) #define IO7__PO7_ERRSUM__UGBGE (1UL << 43) #define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44) #define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45) #define IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 
46) #define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47) #define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51) #define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52) #define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53) #define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54) #define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55) #define IO7__PO7_ERRSUM__HLT_INT (1UL << 56) #define IO7__PO7_ERRSUM__HP_INT (1UL << 57) #define IO7__PO7_ERRSUM__CRD_INT (1UL << 58) #define IO7__PO7_ERRSUM__STV_INT (1UL << 59) #define IO7__PO7_ERRSUM__HRD_INT (1UL << 60) #define IO7__PO7_ERRSUM__BH_SUM (1UL << 61) #define IO7__PO7_ERRSUM__ERR_LST (1UL << 62) #define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63) #define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \ IO7__PO7_ERRSUM__CR_SBE) /* * Single bit errors aren't covered by ERR_VALID. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) { printk("%s %sSingle Bit Error(s) detected/corrected\n", err_print_prefix, (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2) ? "Multiple " : ""); marvel_print_po7_crrct_sym(io->po7_crrct_sym); } /* * Neither are the interrupt status bits */ if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT) printk("%s Halt Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) { printk("%s Hot Plug Event Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT) printk("%s Correctable Error Interrupt posted", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) { printk("%s Starvation Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR); } if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) { printk("%s Hard Error Interrupt posted", err_print_prefix); uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP); } /* * Everything else is valid only with ERR_VALID, so skip to the end * (uncrr_sym check) unless ERR_VALID is set. */ if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID)) goto check_uncrr_sym; /* * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid. 
* For bits [29:0] to also be valid, the following bits must * not be set: * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM * CR_ERR_RESP MAF_TO */ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP); if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE | IO7__PO7_ERRSUM__CR_CSR_NXM | IO7__PO7_ERRSUM__CR_RSP_NXM | IO7__PO7_ERRSUM__CR_ERR_RESP | IO7__PO7_ERRSUM__MAF_TO))) uncrr_sym_valid |= 0x3ffffffful; if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE) printk("%s Write byte into IO7 CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM) printk("%s PIO to non-existent CSR\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV) printk("%s Bus Requester PID (Access Violation)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM) printk("%s Received NXM response from EV7\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP) printk("%s Received ERROR RESPONSE\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR) printk("%s Clock error on data flit\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE) printk("%s Double Bit Error Data Error Detected\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG) printk("%s Garbage Encoding Detected on the data\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) { printk("%s Garbage Encoding sent up hose\n", err_print_prefix); marvel_print_po7_ugbge_sym(io->po7_ugbge_sym); } if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST) printk("%s Orphan response (unexpected response)\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF) printk("%s Down hose packet overflow\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF) printk("%s Down hose credit overflow\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC) printk("%s Unexpected or bad dealloc field\n", err_print_prefix); /* * The black hole events. */ if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO) printk("%s BLACK HOLE: Timeout for all responses\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO) printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR) printk("%s BLACK HOLE: Clock check on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR) printk("%s BLACK HOLE: Uncorrectable Error on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR) printk("%s BLACK HOLE: Garbage on header\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD) printk("%s BLACK HOLE: Bad EV7 command\n", err_print_prefix); if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST) printk("%s Lost Error\n", err_print_prefix); printk("%s Failing Packet:\n" "%s Cycle 1: %016llx\n" "%s Cycle 2: %016llx\n", err_print_prefix, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); /* * If there are any valid bits in UNCRR sym for this err, * print UNCRR_SYM as well. 
*/ check_uncrr_sym: if (uncrr_sym_valid) marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid); } static void marvel_print_pox_tlb_err(u64 tlb_err) { static char *tlb_errors[] = { "No Error", "North Port Signaled Error fetching TLB entry", "PTE invalid or UCC or GBG error on this entry", "Address did not hit any DMA window" }; #define IO7__POX_TLBERR__ERR_VALID (1UL << 63) #define IO7__POX_TLBERR__ERRCODE__S (0) #define IO7__POX_TLBERR__ERRCODE__M (0x3) #define IO7__POX_TLBERR__ERR_TLB_PTR__S (3) #define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7) #define IO7__POX_TLBERR__FADDR__S (6) #define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful) if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) return; printk("%s TLB Error on index 0x%llx:\n" "%s - %s\n" "%s - Addr: 0x%016llx\n", err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), err_print_prefix, tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)], err_print_prefix, EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6); } static void marvel_print_pox_spl_cmplt(u64 spl_cmplt) { char message[80]; #define IO7__POX_SPLCMPLT__MESSAGE__S (0) #define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful) #define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40) #define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful) #define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35) #define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32) #define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul) #define IO7__POX_SPLCMPLT__MSG_CLASS__S (28) #define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf) #define IO7__POX_SPLCMPLT__MSG_INDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20) #define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12) #define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) printk("%s Split Completion Error:\n" "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n", err_print_prefix, err_print_prefix, EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV), EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC)); switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) { case 0x000: sprintf(message, "Normal completion"); break; case 0x100: sprintf(message, "Bridge - Master Abort"); break; case 0x101: sprintf(message, "Bridge - Target Abort"); break; case 0x102: sprintf(message, "Bridge - Uncorrectable Write Data Error"); break; case 0x200: sprintf(message, "Byte Count Out of Range"); break; case 0x201: sprintf(message, "Uncorrectable Split Write Data Error"); break; default: sprintf(message, "%08llx\n", EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); break; } printk("%s Message: %s\n", err_print_prefix, message); } static void marvel_print_pox_trans_sum(u64 trans_sum) { static const char * const pcix_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved / Device ID Message", "Memory Read", "Memory Write", "Reserved / Alias to Memory Read Block", "Reserved / Alias to Memory Write Block", "Configuration Read", "Configuration Write", "Memory Read Multiple / Split Completion", "Dual Address Cycle", "Memory Read Line / Memory Read Block", "Memory Write and Invalidate / Memory Write Block" }; #define IO7__POX_TRANSUM__PCI_ADDR__S (0) #define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful) #define IO7__POX_TRANSUM__DAC (1UL << 50) #define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52) 
#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf) #define IO7__POX_TRANSUM__PCIX_CMD__S (56) #define IO7__POX_TRANSUM__PCIX_CMD__M (0xf) #define IO7__POX_TRANSUM__ERR_VALID (1UL << 63) if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID)) return; printk("%s Transaction Summary:\n" "%s Command: 0x%llx - %s\n" "%s Address: 0x%016llx%s\n" "%s PCI-X Master Slot: 0x%llx\n", err_print_prefix, err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD), pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)], err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR), (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "", err_print_prefix, EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT)); } static void marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port) { #define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4) #define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5) #define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6) #define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7) #define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8) #define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9) #define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10) #define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11) #define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12) #define IO7__POX_ERRSUM__DMA_TO (1UL << 13) #define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14) #define IO7__POX_ERRSUM__MABORT (1UL << 15) #define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\ IO7__POX_ERRSUM__MABORT) #define IO7__POX_ERRSUM__PT_TABORT (1UL << 16) #define IO7__POX_ERRSUM__PM_TABORT (1UL << 17) #define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \ IO7__POX_ERRSUM__PM_TABORT) #define IO7__POX_ERRSUM__SERR (1UL << 18) #define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19) #define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20) #define IO7__POX_ERRSUM__PERR (1UL << 21) #define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22) #define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23) #define IO7__POX_ERRSUM__PM_PERR (1UL << 24) #define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26) #define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28) #define IO7__POX_ERRSUM__UPE_ERROR__S (51) #define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL) #define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR) #define IO7__POX_ERRSUM__TLB_ERR (1UL << 59) #define IO7__POX_ERRSUM__ERR_VALID (1UL << 63) #define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \ IO7__POX_ERRSUM__PCIX_UX_SPL | \ IO7__POX_ERRSUM__PCIX_SPLIT_TO | \ IO7__POX_ERRSUM__DMA_TO | \ IO7__POX_ERRSUM__MABORT_MASK | \ IO7__POX_ERRSUM__TABORT_MASK | \ IO7__POX_ERRSUM__SERR | \ IO7__POX_ERRSUM__ADDRERR_STB | \ IO7__POX_ERRSUM__PERR | \ IO7__POX_ERRSUM__DATAERR_STB_NIOW |\ IO7__POX_ERRSUM__DETECTED_PERR | \ IO7__POX_ERRSUM__PM_PERR | \ IO7__POX_ERRSUM__PT_SCERROR | \ IO7__POX_ERRSUM__UPE_ERROR) if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID)) return; /* * First the transaction summary errors */ if (err_sum & IO7__POX_ERRSUM__MRETRY_TO) printk("%s IO7 Master Retry Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL) printk("%s Unexpected Split Completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO) printk("%s IO7 Split Completion Timeout expired\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_TO) printk("%s Hung bus during DMA transaction\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__MABORT_MASK) printk("%s Master Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_TABORT) printk("%s IO7 Asserted Target Abort\n", err_print_prefix); if (err_sum & 
IO7__POX_ERRSUM__PM_TABORT) printk("%s IO7 Received Target Abort\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) { printk("%s Address or PCI-X Attribute Parity Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__SERR) printk("%s IO7 Asserted SERR\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__PERR) { if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW) printk("%s IO7 Detected Data Parity Error\n", err_print_prefix); else printk("%s Split Completion Response with " "Parity Error\n", err_print_prefix); } if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR) printk("%s PERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PM_PERR) printk("%s PERR while IO7 is master\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) { printk("%s IO7 Received Split Completion Error message\n", err_print_prefix); marvel_print_pox_spl_cmplt(port->pox_spl_cmplt); } if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) { unsigned int upe_error = EXTRACT(err_sum, IO7__POX_ERRSUM__UPE_ERROR); int i; static char *upe_errors[] = { "Parity Error on MSI write data", "MSI read (MSI window is write only", "TLB - Invalid WR transaction", "TLB - Invalid RD transaction", "DMA - WR error (see north port)", "DMA - RD error (see north port)", "PPR - WR error (see north port)", "PPR - RD error (see north port)" }; printk("%s UPE Error:\n", err_print_prefix); for (i = 0; i < 8; i++) { if (upe_error & (1 << i)) printk("%s %s\n", err_print_prefix, upe_errors[i]); } } /* * POx_TRANS_SUM, if appropriate. */ if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK) marvel_print_pox_trans_sum(port->pox_trans_sum); /* * Then TLB_ERR. */ if (err_sum & IO7__POX_ERRSUM__TLB_ERR) { printk("%s TLB ERROR\n", err_print_prefix); marvel_print_pox_tlb_err(port->pox_tlb_err); } /* * And the single bit status errors. */ if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL) printk("%s AGP Request Queue Overflow\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR) printk("%s AGP Sync Error\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL) printk("%s Discarded split completion\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO) printk("%s DMA Read Timeout\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD) printk("%s CSR NXM READ\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR) printk("%s CSR NXM WRITE\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR) printk("%s SERR detected\n", err_print_prefix); if (err_sum & IO7__POX_ERRSUM__HUNG_BUS) printk("%s HUNG BUS detected\n", err_print_prefix); } #endif /* CONFIG_VERBOSE_MCHECK */ static struct ev7_pal_io_subpacket * marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets) { struct ev7_pal_io_subpacket *io = lf_subpackets->io; struct io7 *io7; int i; /* * Caller must provide the packet to fill */ if (!io) return NULL; /* * Fill the subpacket with the console's standard fill pattern */ memset(io, 0x55, sizeof(*io)); for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) { unsigned long err_sum = 0; err_sum |= io7->csrs->PO7_ERROR_SUM.csr; for (i = 0; i < IO7_NUM_PORTS; i++) { if (!io7->ports[i].enabled) continue; err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr; } /* * Is there at least one error? */ if (err_sum & (1UL << 63)) break; } /* * Did we find an IO7 with an error? */ if (!io7) return NULL; /* * We have an IO7 with an error. * * Fill in the IO subpacket. 
*/ io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr; io->io_sys_rev = io7->csrs->IO_SYS_REV.csr; io->io7_uph = io7->csrs->IO7_UPH.csr; io->hpi_ctl = io7->csrs->HPI_CTL.csr; io->crd_ctl = io7->csrs->CRD_CTL.csr; io->hei_ctl = io7->csrs->HEI_CTL.csr; io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr; io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr; io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr; io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr; io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr; io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr; for (i = 0; i < IO7_NUM_PORTS; i++) { io7_ioport_csrs *csrs = io7->ports[i].csrs; if (!io7->ports[i].enabled) continue; io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr; io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr; io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr; io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr; io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr; io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr; io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr; io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr; io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr; io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr; /* * Ack this port's errors, if any. POx_ERR_SUM must be last. * * Most of the error registers get cleared and unlocked when * the associated bits in POx_ERR_SUM are cleared (by writing * 1). POx_TLB_ERR is an exception and must be explicitly * cleared. */ csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err; csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum; mb(); csrs->POx_ERR_SUM.csr; } /* * Ack any port 7 error(s). */ io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum; mb(); io7->csrs->PO7_ERROR_SUM.csr; /* * Correct the io7_pid. */ lf_subpackets->io_pid = io7->pe; return io; } static int marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct ev7_pal_io_subpacket *io = lf_subpackets->io; int i; #endif /* CONFIG_VERBOSE_MCHECK */ #define MARVEL_IO_ERR_VALID(x) ((x) & (1UL << 63)) if (!lf_subpackets->logout || !lf_subpackets->io) return status; /* * The PALcode only builds an IO subpacket if there is a * locally connected IO7. In the cases of * 1) a uniprocessor kernel * 2) an mp kernel before the local secondary has called in * error interrupts are all directed to the primary processor. * In that case, we may not have an IO subpacket at all and, event * if we do, it may not be the right now. * * If the RBOX indicates an I/O error interrupt, make sure we have * the correct IO7 information. If we don't have an IO subpacket * or it's the wrong one, try to find the right one. * * RBOX I/O error interrupts are indicated by RBOX_INT<29> and * RBOX_INT<10>. */ if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) || ((lf_subpackets->io->po7_error_sum | lf_subpackets->io->ports[0].pox_err_sum | lf_subpackets->io->ports[1].pox_err_sum | lf_subpackets->io->ports[2].pox_err_sum | lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) { /* * Either we have no IO subpacket or no error is * indicated in the one we do have. Try find the * one with the error. 
*/ if (!marvel_find_io7_with_error(lf_subpackets)) return status; } /* * We have an IO7 indicating an error - we're going to report it */ status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s*Error occurred on IO7 at PID %u\n", err_print_prefix, lf_subpackets->io_pid); /* * Check port 7 first */ if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) { marvel_print_po7_err_sum(io); #if 0 printk("%s PORT 7 ERROR:\n" "%s PO7_ERROR_SUM: %016llx\n" "%s PO7_UNCRR_SYM: %016llx\n" "%s PO7_CRRCT_SYM: %016llx\n" "%s PO7_UGBGE_SYM: %016llx\n" "%s PO7_ERR_PKT0: %016llx\n" "%s PO7_ERR_PKT1: %016llx\n", err_print_prefix, err_print_prefix, io->po7_error_sum, err_print_prefix, io->po7_uncrr_sym, err_print_prefix, io->po7_crrct_sym, err_print_prefix, io->po7_ugbge_sym, err_print_prefix, io->po7_err_pkt0, err_print_prefix, io->po7_err_pkt1); #endif } /* * Then loop through the ports */ for (i = 0; i < IO7_NUM_PORTS; i++) { if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) continue; printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n", err_print_prefix, lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); printk("%s [ POx_FIRST_ERR: %016llx ]\n", err_print_prefix, io->ports[i].pox_first_err); marvel_print_pox_err(io->ports[i].pox_first_err, &io->ports[i]); } #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * I/O error? */ #define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul if (lf_subpackets->logout && (lf_subpackets->logout->rbox_int & 0x20000400ul)) status = marvel_process_io_error(lf_subpackets, print); /* * Probing behind PCI-X bridges can cause machine checks on * Marvel when the probe is handled by the bridge as a split * completion transaction. The symptom is an ERROR_RESPONSE * to a CONFIG address. Since these errors will happen in * normal operation, dismiss them. * * Dismiss if: * C_STAT = 0x14 (Error Response) * C_STS<3> = 0 (C_ADDR valid) * C_ADDR<42> = 1 (I/O) * C_ADDR<31:22> = 111110xxb (PCI Config space) */ if (lf_subpackets->ev7 && (lf_subpackets->ev7->c_stat == 0x14) && !(lf_subpackets->ev7->c_sts & 0x8) && ((lf_subpackets->ev7->c_addr & 0x400ff000000ul) == 0x400fe000000ul)) status = MCHK_DISPOSITION_DISMISS; return status; } void marvel_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr; int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL; struct ev7_lf_subpackets subpacket_collection = { NULL, }; struct ev7_pal_io_subpacket scratch_io_packet = { 0, }; struct ev7_lf_subpackets *lf_subpackets = NULL; int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR; char *saved_err_prefix = err_print_prefix; char *error_type = NULL; /* * Sync the processor */ mb(); draina(); switch(vector) { case SCB_Q_SYSEVENT: process_frame = marvel_process_680_frame; error_type = "System Event"; break; case SCB_Q_SYSMCHK: process_frame = marvel_process_logout_frame; error_type = "System Uncorrectable Error"; break; case SCB_Q_SYSERR: process_frame = marvel_process_logout_frame; error_type = "System Correctable Error"; break; default: /* Don't know it - pass it up. */ ev7_machine_check(vector, la_ptr); return; } /* * A system event or error has occurred, handle it here. * * Any errors in the logout frame have already been cleared by the * PALcode, so just parse it. 
*/ err_print_prefix = KERN_CRIT; /* * Parse the logout frame without printing first. If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ lf_subpackets = ev7_collect_logout_frame_subpackets(el_ptr, &subpacket_collection); if (process_frame && lf_subpackets && lf_subpackets->logout) { /* * We might not have the correct (or any) I/O subpacket. * [ See marvel_process_io_error() for explanation. ] * If we don't have one, point the io subpacket in * lf_subpackets at scratch_io_packet so that * marvel_find_io7_with_error() will have someplace to * store the info. */ if (!lf_subpackets->io) lf_subpackets->io = &scratch_io_packet; /* * Default io_pid to the processor reporting the error * [this will get changed in marvel_find_io7_with_error() * if a different one is needed] */ lf_subpackets->io_pid = lf_subpackets->logout->whami; /* * Evaluate the frames. */ disposition = process_frame(lf_subpackets, 0); } switch(disposition) { case MCHK_DISPOSITION_DISMISS: /* Nothing to do. */ break; case MCHK_DISPOSITION_REPORT: /* Recognized error, report it. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_print_timestamp(&lf_subpackets->logout->timestamp); process_frame(lf_subpackets, 1); break; default: /* Unknown - dump the annotated subpackets. */ printk("%s*%s (Vector 0x%x) reported on CPU %d\n", err_print_prefix, error_type, (unsigned int)vector, (int)smp_processor_id()); el_process_subpacket(el_ptr); break; } err_print_prefix = saved_err_prefix; /* Release the logout frame. */ wrmces(0x7); mb(); } void __init marvel_register_error_handlers(void) { ev7_register_error_handlers(); }
linux-master
arch/alpha/kernel/err_marvel.c
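The Marvel decoders above lean on a FIELD__S / FIELD__M shift-and-mask convention through EXTRACT() and GEN_MASK(), which are defined elsewhere in the Alpha error-handling headers. Below is a minimal stand-alone sketch of that convention; the macro bodies and the sample register value are illustrative stand-ins, not the kernel's exact definitions.

#include <stdio.h>
#include <stdint.h>

/* Same convention as the kernel code above: FIELD__S is the shift,
 * FIELD__M the mask applied after shifting. These local macros are
 * stand-ins for the kernel's versions. */
#define EXTRACT(u, f)  (((u) >> f##__S) & f##__M)
#define GEN_MASK(f)    ((uint64_t)f##__M << f##__S)

/* Field layout copied from marvel_print_pox_tlb_err() above. */
#define IO7__POX_TLBERR__ERRCODE__S      (0)
#define IO7__POX_TLBERR__ERRCODE__M      (0x3)
#define IO7__POX_TLBERR__ERR_TLB_PTR__S  (3)
#define IO7__POX_TLBERR__ERR_TLB_PTR__M  (0x7)
#define IO7__POX_TLBERR__FADDR__S        (6)
#define IO7__POX_TLBERR__FADDR__M        (0x3fffffffffful)

int main(void)
{
	/* Hypothetical POx_TLB_ERR value: error code 2, TLB index 5,
	 * faulting byte address 0x1234000 stored shifted down by 6. */
	uint64_t tlb_err = (1ULL << 63)                          /* ERR_VALID */
			 | (2ULL << IO7__POX_TLBERR__ERRCODE__S)
			 | (5ULL << IO7__POX_TLBERR__ERR_TLB_PTR__S)
			 | ((0x1234000ULL >> 6) << IO7__POX_TLBERR__FADDR__S);

	printf("errcode    = %llu\n",
	       (unsigned long long)EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE));
	printf("tlb index  = %llu\n",
	       (unsigned long long)EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR));
	/* The decoder shifts FADDR back up by 6 to recover the byte address. */
	printf("faddr      = 0x%llx\n",
	       (unsigned long long)(EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6));
	printf("faddr mask = 0x%llx\n",
	       (unsigned long long)GEN_MASK(IO7__POX_TLBERR__FADDR));
	return 0;
}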
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_apecs.c * * Rewritten for Apecs from the lca.c from: * * Written by David Mosberger ([email protected]) with some code * taken from Dave Rusling's ([email protected]) 32-bit * bios code. * * Code common to all APECS core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_apecs.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/mce.h> #include "proto.h" #include "pci_impl.h" /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the i/o controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBGC(args) printk args #else # define DBGC(args) #endif #define vuip volatile unsigned int * /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the APECS_HAXR2 register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. * 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { unsigned long addr; u8 bus = pbus->number; DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x," " pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (bus == 0) { int device = device_fn >> 3; /* type 0 configuration cycle: */ if (device > 20) { DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n", device)); return -1; } *type1 = 0; addr = (device_fn << 8) | (where); } else { /* type 1 configuration cycle: */ *type1 = 1; addr = (bus << 16) | (device_fn << 8) | (where); } *pci_addr = addr; DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static unsigned int conf_read(unsigned long addr, unsigned char type1) { unsigned long flags; unsigned int stat0, value; unsigned int haxr2 = 0; local_irq_save(flags); /* avoid getting hit by machine check */ DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1)); /* Reset status register to avoid losing errors. 
*/ stat0 = *(vuip)APECS_IOC_DCSR; *(vuip)APECS_IOC_DCSR = stat0; mb(); DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0)); /* If Type1 access, must set HAE #2. */ if (type1) { haxr2 = *(vuip)APECS_IOC_HAXR2; mb(); *(vuip)APECS_IOC_HAXR2 = haxr2 | 1; DBGC(("conf_read: TYPE1 access\n")); } draina(); mcheck_expected(0) = 1; mcheck_taken(0) = 0; mb(); /* Access configuration space. */ /* Some SRMs step on these registers during a machine check. */ asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr) : "$9", "$10", "$11", "$12", "$13", "$14", "memory"); if (mcheck_taken(0)) { mcheck_taken(0) = 0; value = 0xffffffffU; mb(); } mcheck_expected(0) = 0; mb(); #if 1 /* * [email protected]. This code is needed for the * EB64+ as it does not generate a machine check (why I don't * know). When we build kernels for one particular platform * then we can make this conditional on the type. */ draina(); /* Now look for any errors. */ stat0 = *(vuip)APECS_IOC_DCSR; DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0)); /* Is any error bit set? */ if (stat0 & 0xffe0U) { /* If not NDEV, print status. */ if (!(stat0 & 0x0800)) { printk("apecs.c:conf_read: got stat0=%x\n", stat0); } /* Reset error status. */ *(vuip)APECS_IOC_DCSR = stat0; mb(); wrmces(0x7); /* reset machine check */ value = 0xffffffff; } #endif /* If Type1 access, must reset HAE #2 so normal IO space ops work. */ if (type1) { *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1; mb(); } local_irq_restore(flags); return value; } static void conf_write(unsigned long addr, unsigned int value, unsigned char type1) { unsigned long flags; unsigned int stat0; unsigned int haxr2 = 0; local_irq_save(flags); /* avoid getting hit by machine check */ /* Reset status register to avoid losing errors. */ stat0 = *(vuip)APECS_IOC_DCSR; *(vuip)APECS_IOC_DCSR = stat0; mb(); /* If Type1 access, must set HAE #2. */ if (type1) { haxr2 = *(vuip)APECS_IOC_HAXR2; mb(); *(vuip)APECS_IOC_HAXR2 = haxr2 | 1; } draina(); mcheck_expected(0) = 1; mb(); /* Access configuration space. */ *(vuip)addr = value; mb(); mb(); /* magic */ mcheck_expected(0) = 0; mb(); #if 1 /* * [email protected]. This code is needed for the * EB64+ as it does not generate a machine check (why I don't * know). When we build kernels for one particular platform * then we can make this conditional on the type. */ draina(); /* Now look for any errors. */ stat0 = *(vuip)APECS_IOC_DCSR; /* Is any error bit set? */ if (stat0 & 0xffe0U) { /* If not NDEV, print status. */ if (!(stat0 & 0x0800)) { printk("apecs.c:conf_write: got stat0=%x\n", stat0); } /* Reset error status. */ *(vuip)APECS_IOC_DCSR = stat0; mb(); wrmces(0x7); /* reset machine check */ } #endif /* If Type1 access, must reset HAE #2 so normal IO space ops work. 
*/ if (type1) { *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1; mb(); } local_irq_restore(flags); } static int apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr, pci_addr; unsigned char type1; long mask; int shift; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; shift = (where & 3) * 8; addr = (pci_addr << 5) + mask + APECS_CONF; *value = conf_read(addr, type1) >> (shift); return PCIBIOS_SUCCESSFUL; } static int apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr, pci_addr; unsigned char type1; long mask; if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + APECS_CONF; conf_write(addr, value << ((where & 3) * 8), type1); return PCIBIOS_SUCCESSFUL; } struct pci_ops apecs_pci_ops = { .read = apecs_read_config, .write = apecs_write_config, }; void apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vip)APECS_IOC_TBIA = 0; mb(); } void __init apecs_init_arch(void) { struct pci_controller *hose; /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR; hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR; hose->sparse_io_base = APECS_IO - IDENT_ADDR; hose->dense_io_base = 0; /* * Set up the PCI to main memory translation windows. * * Window 1 is direct access 1GB at 1GB * Window 2 is scatter-gather 8MB at 8MB (for isa) */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); hose->sg_pci = NULL; __direct_map_base = 0x40000000; __direct_map_size = 0x40000000; *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000; *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U; *(vuip)APECS_IOC_TB1R = 0; *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000; *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000; *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1; apecs_pci_tbi(hose, 0, -1); /* * Finally, clear the HAXR2 register, which gets used * for PCI Config Space accesses. That is the way * we want to use it, and we do not want to depend on * what ARC or SRM might have left behind... */ *(vuip)APECS_IOC_HAXR2 = 0; mb(); } void apecs_pci_clr_err(void) { unsigned int jd; jd = *(vuip)APECS_IOC_DCSR; if (jd & 0xffe0L) { *(vuip)APECS_IOC_SEAR; *(vuip)APECS_IOC_DCSR = jd | 0xffe1L; mb(); *(vuip)APECS_IOC_DCSR; } *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA; mb(); *(vuip)APECS_IOC_TBIA; } void apecs_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header; struct el_apecs_procdata *mchk_procdata; struct el_apecs_sysdata_mcheck *mchk_sysdata; mchk_header = (struct el_common *)la_ptr; mchk_procdata = (struct el_apecs_procdata *) (la_ptr + mchk_header->proc_offset - sizeof(mchk_procdata->paltemp)); mchk_sysdata = (struct el_apecs_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); /* Clear the error before any reporting. */ mb(); mb(); /* magic */ draina(); apecs_pci_clr_err(); wrmces(0x7); /* reset machine check pending flag */ mb(); process_mcheck_info(vector, la_ptr, "APECS", (mcheck_expected(0) && (mchk_sysdata->epic_dcsr & 0x0c00UL))); }
linux-master
arch/alpha/kernel/core_apecs.c
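As a rough illustration of the config-space address math in mk_conf_addr() and apecs_read_config() above, here is a user-space sketch. FAKE_APECS_CONF stands in for the real APECS_CONF base from asm/core_apecs.h, and the low-bit size term follows the (size - 1) * 8 expression in the code.

#include <stdio.h>

/* Placeholder for the real APECS_CONF base; the value here is made up
 * purely so the example is self-contained. */
#define FAKE_APECS_CONF 0x0UL

/* Mirrors the address math of mk_conf_addr() + apecs_read_config():
 * bus 0 means a type 0 cycle (and only device slots 0..20 are decoded),
 * anything else is type 1; the final sparse-space address carries the
 * access size in its low bits as (size - 1) * 8. */
static long apecs_cfg_addr(unsigned int bus, unsigned int devfn,
			   int where, int size, unsigned long *addr)
{
	unsigned long pci_addr;

	if (bus == 0) {
		if ((devfn >> 3) > 20)
			return -1;	/* no such device slot on APECS */
		pci_addr = (devfn << 8) | where;
	} else {
		pci_addr = ((unsigned long)bus << 16) | (devfn << 8) | where;
	}
	*addr = (pci_addr << 5) + (size - 1) * 8 + FAKE_APECS_CONF;
	return 0;
}

int main(void)
{
	unsigned long addr;

	/* bus 0, device 7, function 0, 32-bit read of config offset 0x10 */
	if (apecs_cfg_addr(0, (7 << 3) | 0, 0x10, 4, &addr) == 0)
		printf("sparse config address offset: 0x%lx\n", addr);
	/* The byte lane within the dword comes from (where & 3) * 8,
	 * applied as a shift after the read in apecs_read_config(). */
	return 0;
}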
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/irq_i8259.c * * This is the 'legacy' 8259A Programmable Interrupt Controller, * present in the majority of PC/AT boxes. * * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c. */ #include <linux/init.h> #include <linux/cache.h> #include <linux/sched.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <asm/io.h> #include "proto.h" #include "irq_impl.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned int cached_irq_mask = 0xffff; static DEFINE_SPINLOCK(i8259_irq_lock); static inline void i8259_update_irq_hw(unsigned int irq, unsigned long mask) { int port = 0x21; if (irq & 8) mask >>= 8; if (irq & 8) port = 0xA1; outb(mask, port); } inline void i8259a_enable_irq(struct irq_data *d) { spin_lock(&i8259_irq_lock); i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); spin_unlock(&i8259_irq_lock); } static inline void __i8259a_disable_irq(unsigned int irq) { i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq); } void i8259a_disable_irq(struct irq_data *d) { spin_lock(&i8259_irq_lock); __i8259a_disable_irq(d->irq); spin_unlock(&i8259_irq_lock); } void i8259a_mask_and_ack_irq(struct irq_data *d) { unsigned int irq = d->irq; spin_lock(&i8259_irq_lock); __i8259a_disable_irq(irq); /* Ack the interrupt making it the lowest priority. */ if (irq >= 8) { outb(0xE0 | (irq - 8), 0xa0); /* ack the slave */ irq = 2; } outb(0xE0 | irq, 0x20); /* ack the master */ spin_unlock(&i8259_irq_lock); } struct irq_chip i8259a_irq_type = { .name = "XT-PIC", .irq_unmask = i8259a_enable_irq, .irq_mask = i8259a_disable_irq, .irq_mask_ack = i8259a_mask_and_ack_irq, }; void __init init_i8259a_irqs(void) { long i; outb(0xff, 0x21); /* mask all of 8259A-1 */ outb(0xff, 0xA1); /* mask all of 8259A-2 */ for (i = 0; i < 16; i++) { irq_set_chip_and_handler(i, &i8259a_irq_type, handle_level_irq); } if (request_irq(2, no_action, 0, "cascade", NULL)) pr_err("Failed to request irq 2 (cascade)\n"); } #if defined(CONFIG_ALPHA_GENERIC) # define IACK_SC alpha_mv.iack_sc #elif defined(CONFIG_ALPHA_APECS) # define IACK_SC APECS_IACK_SC #elif defined(CONFIG_ALPHA_LCA) # define IACK_SC LCA_IACK_SC #elif defined(CONFIG_ALPHA_CIA) # define IACK_SC CIA_IACK_SC #elif defined(CONFIG_ALPHA_PYXIS) # define IACK_SC PYXIS_IACK_SC #elif defined(CONFIG_ALPHA_TITAN) # define IACK_SC TITAN_IACK_SC #elif defined(CONFIG_ALPHA_TSUNAMI) # define IACK_SC TSUNAMI_IACK_SC #elif defined(CONFIG_ALPHA_IRONGATE) # define IACK_SC IRONGATE_IACK_SC #endif /* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */ #if defined(IACK_SC) void isa_device_interrupt(unsigned long vector) { /* * Generate a PCI interrupt acknowledge cycle. The PIC will * respond with the interrupt vector of the highest priority * interrupt that is pending. The PALcode sets up the * interrupts vectors such that irq level L generates vector L. */ int j = *(vuip) IACK_SC; j &= 0xff; handle_irq(j); } #endif #if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC) void isa_no_iack_sc_device_interrupt(unsigned long vector) { unsigned long pic; /* * It seems to me that the probability of two or more *device* * interrupts occurring at almost exactly the same time is * pretty low. So why pay the price of checking for * additional interrupts here if the common case can be * handled so much easier? */ /* * The first read of gives you *all* interrupting lines. 
	 * Therefore, read the mask register and AND out those lines
	 * not enabled.  Note that some documentation lists ports 0x21 and
	 * 0xA1 as write only.  This is not true.
	 */
	pic = inb(0x20) | (inb(0xA0) << 8);	/* read isr */
	pic &= 0xFFFB;				/* mask out cascade & hibits */
	while (pic) {
		int j = ffz(~pic);
		pic &= pic - 1;
		handle_irq(j);
	}
}
#endif
linux-master
arch/alpha/kernel/irq_i8259.c
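A small user-space model of the cached_irq_mask bookkeeping above may help: a set bit means the line is disabled, and only the byte belonging to the master or slave PIC is written out. fake_outb() is of course a stand-in for the real port write, and the spinlock is omitted.

#include <stdio.h>

/* Mask bit is true for DISABLED irqs, as in irq_i8259.c above. */
static unsigned int cached_irq_mask = 0xffff;

static void fake_outb(unsigned char value, int port)
{
	printf("outb(0x%02x, 0x%02x)\n", value, port);
}

static void update_irq_hw(unsigned int irq, unsigned long mask)
{
	int port = (irq & 8) ? 0xA1 : 0x21;	/* slave vs. master PIC */

	if (irq & 8)
		mask >>= 8;			/* slave gets the high byte */
	fake_outb(mask, port);
}

static void enable_irq(unsigned int irq)
{
	update_irq_hw(irq, cached_irq_mask &= ~(1U << irq));
}

static void disable_irq(unsigned int irq)
{
	update_irq_hw(irq, cached_irq_mask |= 1U << irq);
}

int main(void)
{
	enable_irq(4);	/* a line on the master PIC */
	enable_irq(14);	/* a line on the slave PIC */
	disable_irq(4);
	printf("cached mask now 0x%04x\n", cached_irq_mask);
	return 0;
}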
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/irq.c * * Copyright (C) 1995 Linus Torvalds * * This file contains the code used by various IRQ handling routines: * asking for different IRQ's should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/irq.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/profile.h> #include <linux/bitops.h> #include <asm/io.h> #include <linux/uaccess.h> volatile unsigned long irq_err_count; DEFINE_PER_CPU(unsigned long, irq_pmi_count); void ack_bad_irq(unsigned int irq) { irq_err_count++; printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq); } #ifdef CONFIG_SMP static char irq_user_affinity[NR_IRQS]; int irq_select_affinity(unsigned int irq) { struct irq_data *data = irq_get_irq_data(irq); struct irq_chip *chip; static int last_cpu; int cpu = last_cpu + 1; if (!data) return 1; chip = irq_data_get_irq_chip(data); if (!chip->irq_set_affinity || irq_user_affinity[irq]) return 1; while (!cpu_possible(cpu) || !cpumask_test_cpu(cpu, irq_default_affinity)) cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); last_cpu = cpu; irq_data_update_affinity(data, cpumask_of(cpu)); chip->irq_set_affinity(data, cpumask_of(cpu), false); return 0; } #endif /* CONFIG_SMP */ int arch_show_interrupts(struct seq_file *p, int prec) { int j; #ifdef CONFIG_SMP seq_puts(p, "IPI: "); for_each_online_cpu(j) seq_printf(p, "%10lu ", cpu_data[j].ipi_count); seq_putc(p, '\n'); #endif seq_puts(p, "PMI: "); for_each_online_cpu(j) seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); seq_puts(p, " Performance Monitoring\n"); seq_printf(p, "ERR: %10lu\n", irq_err_count); return 0; } /* * handle_irq handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ #define MAX_ILLEGAL_IRQS 16 void handle_irq(int irq) { /* * We ack quickly, we don't want the irq controller * thinking we're snobs just because some other CPU has * disabled global interrupts (we have already done the * INT_ACK cycles, it's too late to try to pretend to the * controller that we aren't taking the interrupt). * * 0 return value means that this irq is already being * handled by some other CPU. (or is disabled) */ static unsigned int illegal_count=0; struct irq_desc *desc = irq_to_desc(irq); if (!desc || ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS)) { irq_err_count++; illegal_count++; printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n", irq); return; } irq_enter(); generic_handle_irq_desc(desc); irq_exit(); }
linux-master
arch/alpha/kernel/irq.c
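The round-robin CPU walk in irq_select_affinity() above is easy to model in isolation. In this toy sketch, usable[] stands in for the kernel's cpu_possible()/irq_default_affinity test, and at least one entry must be usable or the loop spins forever (as it would in the kernel).

#include <stdio.h>

#define NR_CPUS 8

static int usable[NR_CPUS] = { 1, 0, 1, 1, 0, 0, 1, 0 };
static int last_cpu;

/* Start just past the last CPU used and walk forward, wrapping,
 * until a usable CPU is found. */
static int pick_next_cpu(void)
{
	int cpu = last_cpu + 1;

	while (cpu >= NR_CPUS || !usable[cpu])
		cpu = (cpu < NR_CPUS - 1) ? cpu + 1 : 0;
	last_cpu = cpu;
	return cpu;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("irq %d -> cpu %d\n", i, pick_next_cpu());
	return 0;
}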
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/err_ev6.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting Alpha systems */ #include <linux/sched.h> #include <asm/io.h> #include <asm/irq_regs.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev6.h> #include "err_impl.h" #include "proto.h" static int ev6_parse_ibox(u64 i_stat, int print) { int status = MCHK_DISPOSITION_REPORT; #define EV6__I_STAT__PAR (1UL << 29) #define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR) if (!(i_stat & EV6__I_STAT__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; if (!print) return status; if (i_stat & EV6__I_STAT__PAR) printk("%s Icache parity error\n", err_print_prefix); return status; } static int ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print) { int status = MCHK_DISPOSITION_REPORT; #define EV6__MM_STAT__DC_TAG_PERR (1UL << 10) #define EV6__MM_STAT__ERRMASK (EV6__MM_STAT__DC_TAG_PERR) #define EV6__D_STAT__TPERR_P0 (1UL << 0) #define EV6__D_STAT__TPERR_P1 (1UL << 1) #define EV6__D_STAT__ECC_ERR_ST (1UL << 2) #define EV6__D_STAT__ECC_ERR_LD (1UL << 3) #define EV6__D_STAT__SEO (1UL << 4) #define EV6__D_STAT__ERRMASK (EV6__D_STAT__TPERR_P0 | \ EV6__D_STAT__TPERR_P1 | \ EV6__D_STAT__ECC_ERR_ST | \ EV6__D_STAT__ECC_ERR_LD | \ EV6__D_STAT__SEO) if (!(d_stat & EV6__D_STAT__ERRMASK) && !(mm_stat & EV6__MM_STAT__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; if (!print) return status; if (mm_stat & EV6__MM_STAT__DC_TAG_PERR) printk("%s Dcache tag parity error on probe\n", err_print_prefix); if (d_stat & EV6__D_STAT__TPERR_P0) printk("%s Dcache tag parity error - pipe 0\n", err_print_prefix); if (d_stat & EV6__D_STAT__TPERR_P1) printk("%s Dcache tag parity error - pipe 1\n", err_print_prefix); if (d_stat & EV6__D_STAT__ECC_ERR_ST) printk("%s ECC error occurred on a store\n", err_print_prefix); if (d_stat & EV6__D_STAT__ECC_ERR_LD) printk("%s ECC error occurred on a %s load\n", err_print_prefix, c_stat ? "" : "speculative "); if (d_stat & EV6__D_STAT__SEO) printk("%s Dcache second error\n", err_print_prefix); return status; } static int ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn, u64 c_stat, u64 c_sts, int print) { static const char * const sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN", "MEMORY", "BCACHE", "DCACHE", "BCACHE PROBE", "BCACHE PROBE" }; static const char * const streamname[] = { "D", "I" }; static const char * const bitsname[] = { "SINGLE", "DOUBLE" }; int status = MCHK_DISPOSITION_REPORT; int source = -1, stream = -1, bits = -1; #define EV6__C_STAT__BC_PERR (0x01) #define EV6__C_STAT__DC_PERR (0x02) #define EV6__C_STAT__DSTREAM_MEM_ERR (0x03) #define EV6__C_STAT__DSTREAM_BC_ERR (0x04) #define EV6__C_STAT__DSTREAM_DC_ERR (0x05) #define EV6__C_STAT__PROBE_BC_ERR0 (0x06) /* both 6 and 7 indicate... */ #define EV6__C_STAT__PROBE_BC_ERR1 (0x07) /* ...probe bc error. 
*/ #define EV6__C_STAT__ISTREAM_MEM_ERR (0x0B) #define EV6__C_STAT__ISTREAM_BC_ERR (0x0C) #define EV6__C_STAT__DSTREAM_MEM_DBL (0x13) #define EV6__C_STAT__DSTREAM_BC_DBL (0x14) #define EV6__C_STAT__ISTREAM_MEM_DBL (0x1B) #define EV6__C_STAT__ISTREAM_BC_DBL (0x1C) #define EV6__C_STAT__SOURCE_MEMORY (0x03) #define EV6__C_STAT__SOURCE_BCACHE (0x04) #define EV6__C_STAT__SOURCE__S (0) #define EV6__C_STAT__SOURCE__M (0x07) #define EV6__C_STAT__ISTREAM__S (3) #define EV6__C_STAT__ISTREAM__M (0x01) #define EV6__C_STAT__DOUBLE__S (4) #define EV6__C_STAT__DOUBLE__M (0x01) #define EV6__C_STAT__ERRMASK (0x1F) #define EV6__C_STS__SHARED (1 << 0) #define EV6__C_STS__DIRTY (1 << 1) #define EV6__C_STS__VALID (1 << 2) #define EV6__C_STS__PARITY (1 << 3) if (!(c_stat & EV6__C_STAT__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; if (!print) return status; source = EXTRACT(c_stat, EV6__C_STAT__SOURCE); stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM); bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE); if (c_stat & EV6__C_STAT__BC_PERR) { printk("%s Bcache tag parity error\n", err_print_prefix); source = -1; } if (c_stat & EV6__C_STAT__DC_PERR) { printk("%s Dcache tag parity error\n", err_print_prefix); source = -1; } if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 || c_stat == EV6__C_STAT__PROBE_BC_ERR1) { printk("%s Bcache single-bit error on a probe hit\n", err_print_prefix); source = -1; } if (source != -1) printk("%s %s-STREAM %s-BIT ECC error from %s\n", err_print_prefix, streamname[stream], bitsname[bits], sourcename[source]); printk("%s Address: 0x%016llx\n" " Syndrome[upper.lower]: %02llx.%02llx\n", err_print_prefix, c_addr, c2_syn, c1_syn); if (source == EV6__C_STAT__SOURCE_MEMORY || source == EV6__C_STAT__SOURCE_BCACHE) printk("%s Block status: %s%s%s%s\n", err_print_prefix, (c_sts & EV6__C_STS__SHARED) ? "SHARED " : "", (c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "", (c_sts & EV6__C_STS__VALID) ? "VALID " : "", (c_sts & EV6__C_STS__PARITY) ? "PARITY " : ""); return status; } void ev6_register_error_handlers(void) { /* None right now. */ } int ev6_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= ev6_parse_ibox(ev6mchk->I_STAT, print); status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT, ev6mchk->C_STAT, print); status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME, ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT, ev6mchk->C_STS, print); if (!print) return status; if (status != MCHK_DISPOSITION_DISMISS) { char *saved_err_prefix = err_print_prefix; /* * Dump some additional information from the frame */ printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx" " ISUM: 0x%016lx\n" " PAL_BASE: 0x%016lx I_CTL: 0x%016lx" " PCTX: 0x%016lx\n", err_print_prefix, ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM, ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX); if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) { printk("%s UNKNOWN error, frame follows:\n", err_print_prefix); } else { /* had decode -- downgrade print level for frame */ err_print_prefix = KERN_NOTICE; } mchk_dump_logout_frame(mchk_header); err_print_prefix = saved_err_prefix; } return status; } void ev6_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; /* * Sync the processor */ mb(); draina(); /* * Parse the logout frame without printing first. 
 If the only error(s)
	 * found have a disposition of "dismiss", then just dismiss them
	 * and don't print any message
	 */
	if (ev6_process_logout_frame(mchk_header, 0) !=
	    MCHK_DISPOSITION_DISMISS) {
		char *saved_err_prefix = err_print_prefix;
		err_print_prefix = KERN_CRIT;

		/*
		 * Either a nondismissable error was detected or no
		 * recognized error was detected in the logout frame
		 * -- report the error in either case
		 */
		printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
		       err_print_prefix,
		       (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
		       (unsigned int)vector, (int)smp_processor_id());

		ev6_process_logout_frame(mchk_header, 1);
		dik_show_regs(get_irq_regs(), NULL);

		err_print_prefix = saved_err_prefix;
	}

	/*
	 * Release the logout frame
	 */
	wrmces(0x7);
	mb();
}
linux-master
arch/alpha/kernel/err_ev6.c
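For the common ECC cases, ev6_parse_cbox() above reduces C_STAT to three small fields: bits <2:0> select the source, bit 3 the I/D stream, and bit 4 single vs. double bit. Here is a stand-alone decode of just that part; the tag-parity and probe special cases handled earlier in the kernel function are deliberately left out.

#include <stdio.h>

/* Tables mirror the ones in ev6_parse_cbox() above. */
static const char * const sourcename[] = {
	"UNKNOWN", "UNKNOWN", "UNKNOWN", "MEMORY",
	"BCACHE", "DCACHE", "BCACHE PROBE", "BCACHE PROBE"
};
static const char * const streamname[] = { "D", "I" };
static const char * const bitsname[]   = { "SINGLE", "DOUBLE" };

static void decode_c_stat(unsigned int c_stat)
{
	unsigned int source = c_stat & 0x07;		/* C_STAT<2:0> */
	unsigned int stream = (c_stat >> 3) & 0x01;	/* C_STAT<3>   */
	unsigned int bits   = (c_stat >> 4) & 0x01;	/* C_STAT<4>   */

	printf("C_STAT 0x%02x: %s-STREAM %s-BIT ECC error from %s\n",
	       c_stat, streamname[stream], bitsname[bits], sourcename[source]);
}

int main(void)
{
	decode_c_stat(0x03);	/* DSTREAM_MEM_ERR */
	decode_c_stat(0x0C);	/* ISTREAM_BC_ERR  */
	decode_c_stat(0x13);	/* DSTREAM_MEM_DBL */
	return 0;
}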
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_tsunami.c * * Based on code written by David A. Rusling ([email protected]). * * Code common to all TSUNAMI core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_tsunami.h> #undef __EXTERN_INLINE #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/vga.h> #include "proto.h" #include "pci_impl.h" /* Save Tsunami configuration data as the console had it set up. */ struct { unsigned long wsba[4]; unsigned long wsm[4]; unsigned long tba[4]; } saved_config[2] __attribute__((common)); /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the I/O controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Note that all config space accesses use Type 1 address format. * * Note also that type 1 is determined by non-zero bus number. * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops tsunami_pci_ops = { .read = tsunami_read_config, .write = tsunami_write_config, }; void tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0; volatile unsigned long *csr; unsigned long value; /* We can invalidate up to 8 tlb entries in a go. The flush matches against <31:16> in the pci address. */ csr = &pchip->tlbia.csr; if (((start ^ end) & 0xffff0000) == 0) csr = &pchip->tlbiv.csr; /* For TBIA, it doesn't matter what value we write. For TBI, it's the shifted tag bits. */ value = (start & 0xffff0000) >> 12; *csr = value; mb(); *csr; } #ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI static long __init tsunami_probe_read(volatile unsigned long *vaddr) { long dont_care, probe_result; int cpu = smp_processor_id(); int s = swpipl(IPL_MCHECK - 1); mcheck_taken(cpu) = 0; mcheck_expected(cpu) = 1; mb(); dont_care = *vaddr; draina(); mcheck_expected(cpu) = 0; probe_result = !mcheck_taken(cpu); mcheck_taken(cpu) = 0; setipl(s); printk("dont_care == 0x%lx\n", dont_care); return probe_result; } static long __init tsunami_probe_write(volatile unsigned long *vaddr) { long true_contents, probe_result = 1; TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */ true_contents = *vaddr; *vaddr = 0; draina(); if (TSUNAMI_cchip->misc.csr & (1L << 28)) { int source = (TSUNAMI_cchip->misc.csr >> 29) & 7; TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */ probe_result = 0; printk("tsunami_probe_write: unit %d at 0x%016lx\n", source, (unsigned long)vaddr); } if (probe_result) *vaddr = true_contents; return probe_result; } #else #define tsunami_probe_read(ADDR) 1 #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */ static void __init tsunami_init_one_pchip(tsunami_pchip *pchip, int index) { struct pci_controller *hose; if (tsunami_probe_read(&pchip->pctl.csr) == 0) return; hose = alloc_pci_controller(); if (index == 0) pci_isa_hose = hose; hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* This is for userland consumption. For some reason, the 40-bit PIO bias that we use in the kernel through KSEG didn't work for the page table based user mappings. So make sure we get the 43-bit PIO bias. 
*/ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L; hose->dense_io_base = (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L; hose->config_space_base = TSUNAMI_CONF(index); hose->index = index; hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS; hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1; hose->io_space->name = pci_io_names[index]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[index]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", index); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", index); /* * Save the existing PCI window translations. SRM will * need them when we go to reboot. */ saved_config[index].wsba[0] = pchip->wsba[0].csr; saved_config[index].wsm[0] = pchip->wsm[0].csr; saved_config[index].tba[0] = pchip->tba[0].csr; saved_config[index].wsba[1] = pchip->wsba[1].csr; saved_config[index].wsm[1] = pchip->wsm[1].csr; saved_config[index].tba[1] = pchip->tba[1].csr; saved_config[index].wsba[2] = pchip->wsba[2].csr; saved_config[index].wsm[2] = pchip->wsm[2].csr; saved_config[index].tba[2] = pchip->tba[2].csr; saved_config[index].wsba[3] = pchip->wsba[3].csr; saved_config[index].wsm[3] = pchip->wsm[3].csr; saved_config[index].tba[3] = pchip->tba[3].csr; /* * Set up the PCI to main memory translation windows. * * Note: Window 3 is scatter-gather only * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is scatter-gather (up to) 1GB at 1GB * Window 2 is direct access 2GB at 2GB * * NOTE: we need the align_entry settings for Acer devices on ES40, * specifically floppy and IDE when memory is larger than 2GB. */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */ hose->sg_isa->align_entry = 4; hose->sg_pci = iommu_arena_new(hose, 0x40000000, size_for_memory(0x40000000), SMP_CACHE_BYTES); hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; pchip->wsba[0].csr = hose->sg_isa->dma_base | 3; pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); pchip->wsba[1].csr = hose->sg_pci->dma_base | 3; pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000; pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes); pchip->wsba[2].csr = 0x80000000 | 1; pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000; pchip->tba[2].csr = 0; pchip->wsba[3].csr = 0; /* Enable the Monster Window to make DAC pci64 possible. */ pchip->pctl.csr |= pctl_m_mwin; tsunami_pci_tbi(hose, 0, -1); } void __iomem * tsunami_ioportmap(unsigned long addr) { FIXUP_IOADDR_VGA(addr); return (void __iomem *)(addr + TSUNAMI_IO_BIAS); } void __iomem * tsunami_ioremap(unsigned long addr, unsigned long size) { FIXUP_MEMADDR_VGA(addr); return (void __iomem *)(addr + TSUNAMI_MEM_BIAS); } #ifndef CONFIG_ALPHA_GENERIC EXPORT_SYMBOL(tsunami_ioportmap); EXPORT_SYMBOL(tsunami_ioremap); #endif void __init tsunami_init_arch(void) { #ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI unsigned long tmp; /* Ho hum.. 
init_arch is called before init_IRQ, but we need to be able to handle machine checks. So install the handler now. */ wrent(entInt, 0); /* NXMs just don't matter to Tsunami--unless they make it choke completely. */ tmp = (unsigned long)(TSUNAMI_cchip - 1); printk("%s: probing bogus address: 0x%016lx\n", __func__, bogus_addr); printk("\tprobe %s\n", tsunami_probe_write((unsigned long *)bogus_addr) ? "succeeded" : "failed"); #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */ #if 0 printk("%s: CChip registers:\n", __func__); printk("%s: CSR_CSC 0x%lx\n", __func__, TSUNAMI_cchip->csc.csr); printk("%s: CSR_MTR 0x%lx\n", __func__, TSUNAMI_cchip.mtr.csr); printk("%s: CSR_MISC 0x%lx\n", __func__, TSUNAMI_cchip->misc.csr); printk("%s: CSR_DIM0 0x%lx\n", __func__, TSUNAMI_cchip->dim0.csr); printk("%s: CSR_DIM1 0x%lx\n", __func__, TSUNAMI_cchip->dim1.csr); printk("%s: CSR_DIR0 0x%lx\n", __func__, TSUNAMI_cchip->dir0.csr); printk("%s: CSR_DIR1 0x%lx\n", __func__, TSUNAMI_cchip->dir1.csr); printk("%s: CSR_DRIR 0x%lx\n", __func__, TSUNAMI_cchip->drir.csr); printk("%s: DChip registers:\n"); printk("%s: CSR_DSC 0x%lx\n", __func__, TSUNAMI_dchip->dsc.csr); printk("%s: CSR_STR 0x%lx\n", __func__, TSUNAMI_dchip->str.csr); printk("%s: CSR_DREV 0x%lx\n", __func__, TSUNAMI_dchip->drev.csr); #endif /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* Find how many hoses we have, and initialize them. TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10). */ tsunami_init_one_pchip(TSUNAMI_pchip0, 0); if (TSUNAMI_cchip->csc.csr & 1L<<14) tsunami_init_one_pchip(TSUNAMI_pchip1, 1); /* Check for graphic console location (if any). */ find_console_vga_hose(); } static void tsunami_kill_one_pchip(tsunami_pchip *pchip, int index) { pchip->wsba[0].csr = saved_config[index].wsba[0]; pchip->wsm[0].csr = saved_config[index].wsm[0]; pchip->tba[0].csr = saved_config[index].tba[0]; pchip->wsba[1].csr = saved_config[index].wsba[1]; pchip->wsm[1].csr = saved_config[index].wsm[1]; pchip->tba[1].csr = saved_config[index].tba[1]; pchip->wsba[2].csr = saved_config[index].wsba[2]; pchip->wsm[2].csr = saved_config[index].wsm[2]; pchip->tba[2].csr = saved_config[index].tba[2]; pchip->wsba[3].csr = saved_config[index].wsba[3]; pchip->wsm[3].csr = saved_config[index].wsm[3]; pchip->tba[3].csr = saved_config[index].tba[3]; } void tsunami_kill_arch(int mode) { tsunami_kill_one_pchip(TSUNAMI_pchip0, 0); if (TSUNAMI_cchip->csc.csr & 1L<<14) tsunami_kill_one_pchip(TSUNAMI_pchip1, 1); } static inline void tsunami_pci_clr_err_1(tsunami_pchip *pchip) { pchip->perror.csr; pchip->perror.csr = 0x040; mb(); pchip->perror.csr; } static inline void tsunami_pci_clr_err(void) { tsunami_pci_clr_err_1(TSUNAMI_pchip0); /* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */ if (TSUNAMI_cchip->csc.csr & 1L<<14) tsunami_pci_clr_err_1(TSUNAMI_pchip1); } void tsunami_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear error before any reporting. */ mb(); mb(); /* magic */ draina(); tsunami_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "TSUNAMI", mcheck_expected(smp_processor_id())); }
linux-master
arch/alpha/kernel/core_tsunami.c
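tsunami_pci_tbi() above picks between a targeted invalidate (TLBIV, which matches PCI address bits <31:16>) and a full flush (TLBIA) depending on whether the whole range shares those bits. A quick stand-alone sketch of that decision:

#include <stdio.h>

/* Model of the TLBIV-vs-TLBIA choice in tsunami_pci_tbi() above. */
static void pick_tbi(unsigned long start, unsigned long end)
{
	if (((start ^ end) & 0xffff0000UL) == 0) {
		/* Whole range shares <31:16>: write the shifted tag to TLBIV. */
		unsigned long value = (start & 0xffff0000UL) >> 12;

		printf("range 0x%08lx-0x%08lx -> TLBIV, value 0x%lx\n",
		       start, end, value);
	} else {
		printf("range 0x%08lx-0x%08lx -> TLBIA (flush everything)\n",
		       start, end);
	}
}

int main(void)
{
	pick_tbi(0x00801000, 0x00801fff);	/* same <31:16> tag     */
	pick_tbi(0x00800000, 0x00ffffff);	/* crosses tag boundary */
	return 0;
}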
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/rtc.c * * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds * * This file contains date handling. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mc146818rtc.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include "proto.h" /* * Support for the RTC device. * * We don't want to use the rtc-cmos driver, because we don't want to support * alarms, as that would be indistinguishable from timer interrupts. * * Further, generic code is really, really tied to a 1900 epoch. This is * true in __get_rtc_time as well as the users of struct rtc_time e.g. * rtc_tm_to_time. Thankfully all of the other epochs in use are later * than 1900, and so it's easy to adjust. */ static unsigned long rtc_epoch; static int __init specifiy_epoch(char *str) { unsigned long epoch = simple_strtoul(str, NULL, 0); if (epoch < 1900) printk("Ignoring invalid user specified epoch %lu\n", epoch); else rtc_epoch = epoch; return 1; } __setup("epoch=", specifiy_epoch); static void __init init_rtc_epoch(void) { int epoch, year, ctrl; if (rtc_epoch != 0) { /* The epoch was specified on the command-line. */ return; } /* Detect the epoch in use on this computer. */ ctrl = CMOS_READ(RTC_CONTROL); year = CMOS_READ(RTC_YEAR); if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) year = bcd2bin(year); /* PC-like is standard; used for year >= 70 */ epoch = 1900; if (year < 20) { epoch = 2000; } else if (year >= 20 && year < 48) { /* NT epoch */ epoch = 1980; } else if (year >= 48 && year < 70) { /* Digital UNIX epoch */ epoch = 1952; } rtc_epoch = epoch; printk(KERN_INFO "Using epoch %d for rtc year %d\n", epoch, year); } static int alpha_rtc_read_time(struct device *dev, struct rtc_time *tm) { int ret = mc146818_get_time(tm); if (ret < 0) { dev_err_ratelimited(dev, "unable to read current time\n"); return ret; } /* Adjust for non-default epochs. It's easier to depend on the generic __get_rtc_time and adjust the epoch here than create a copy of __get_rtc_time with the edits we need. */ if (rtc_epoch != 1900) { int year = tm->tm_year; /* Undo the century adjustment made in __get_rtc_time. */ if (year >= 100) year -= 100; year += rtc_epoch - 1900; /* Redo the century adjustment with the epoch in place. */ if (year <= 69) year += 100; tm->tm_year = year; } return 0; } static int alpha_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct rtc_time xtm; if (rtc_epoch != 1900) { xtm = *tm; xtm.tm_year -= rtc_epoch - 1900; tm = &xtm; } return mc146818_set_time(tm); } static int alpha_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { switch (cmd) { case RTC_EPOCH_READ: return put_user(rtc_epoch, (unsigned long __user *)arg); case RTC_EPOCH_SET: if (arg < 1900) return -EINVAL; rtc_epoch = arg; return 0; default: return -ENOIOCTLCMD; } } static const struct rtc_class_ops alpha_rtc_ops = { .read_time = alpha_rtc_read_time, .set_time = alpha_rtc_set_time, .ioctl = alpha_rtc_ioctl, }; /* * Similarly, except do the actual CMOS access on the boot cpu only. * This requires marshalling the data across an interprocessor call. 
*/ #if defined(CONFIG_SMP) && \ (defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_MARVEL)) # define HAVE_REMOTE_RTC 1 union remote_data { struct rtc_time *tm; long retval; }; static void do_remote_read(void *data) { union remote_data *x = data; x->retval = alpha_rtc_read_time(NULL, x->tm); } static int remote_read_time(struct device *dev, struct rtc_time *tm) { union remote_data x; if (smp_processor_id() != boot_cpuid) { x.tm = tm; smp_call_function_single(boot_cpuid, do_remote_read, &x, 1); return x.retval; } return alpha_rtc_read_time(NULL, tm); } static void do_remote_set(void *data) { union remote_data *x = data; x->retval = alpha_rtc_set_time(NULL, x->tm); } static int remote_set_time(struct device *dev, struct rtc_time *tm) { union remote_data x; if (smp_processor_id() != boot_cpuid) { x.tm = tm; smp_call_function_single(boot_cpuid, do_remote_set, &x, 1); return x.retval; } return alpha_rtc_set_time(NULL, tm); } static const struct rtc_class_ops remote_rtc_ops = { .read_time = remote_read_time, .set_time = remote_set_time, .ioctl = alpha_rtc_ioctl, }; #endif static int __init alpha_rtc_init(void) { struct platform_device *pdev; struct rtc_device *rtc; init_rtc_epoch(); pdev = platform_device_register_simple("rtc-alpha", -1, NULL, 0); rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); platform_set_drvdata(pdev, rtc); rtc->ops = &alpha_rtc_ops; #ifdef HAVE_REMOTE_RTC if (alpha_mv.rtc_boot_cpu_only) rtc->ops = &remote_rtc_ops; #endif return devm_rtc_register_device(rtc); } device_initcall(alpha_rtc_init);
linux-master
arch/alpha/kernel/rtc.c
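alpha_rtc_read_time() above first undoes the century adjustment that the generic 1900-epoch conversion applied and then redoes it relative to the epoch the firmware really uses. The snippet below restates that arithmetic as a standalone program; adjust_year() is only an illustrative name, and the 1952 Digital UNIX epoch is used purely as the worked example.

#include <stdio.h>

static int adjust_year(int tm_year, unsigned long rtc_epoch)
{
	int year = tm_year;

	if (rtc_epoch == 1900)
		return year;		/* default epoch: nothing to adjust */

	if (year >= 100)		/* undo the generic century bump */
		year -= 100;
	year += rtc_epoch - 1900;	/* rebase onto the real epoch */
	if (year <= 69)			/* redo the century adjustment */
		year += 100;
	return year;
}

int main(void)
{
	/* CMOS year 48: the generic conversion yields tm_year 148 (2048). */
	int fixed = adjust_year(148, 1952);

	printf("epoch 1952: tm_year 148 -> %d, calendar year %d\n",
	       fixed, 1900 + fixed);
	return 0;
}

With a 1952 epoch, CMOS year 48 comes out as tm_year 100, i.e. calendar year 2000, which is what the firmware intended; the set_time path simply subtracts the same offset again.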
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_marvel.c * * Marvel / IO7 support */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_marvel.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "err_impl.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #if NR_IRQS < MARVEL_NR_IRQS # error NR_IRQS < MARVEL_NR_IRQS !!! #endif /* * Interrupt handling. */ static void io7_device_interrupt(unsigned long vector) { unsigned int pid; unsigned int irq; /* * Vector is 0x800 + (interrupt) * * where (interrupt) is: * * ...16|15 14|13 4|3 0 * -----+-----+--------+--- * PE | 0 | irq | 0 * * where (irq) is * * 0x0800 - 0x0ff0 - 0x0800 + (LSI id << 4) * 0x1000 - 0x2ff0 - 0x1000 + (MSI_DAT<8:0> << 4) */ pid = vector >> 16; irq = ((vector & 0xffff) - 0x800) >> 4; irq += 16; /* offset for legacy */ irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* not too many bits */ irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ handle_irq(irq); } static volatile unsigned long * io7_get_irq_ctl(unsigned int irq, struct io7 **pio7) { volatile unsigned long *ctl; unsigned int pid; struct io7 *io7; pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT; if (!(io7 = marvel_find_io7(pid))) { printk(KERN_ERR "%s for nonexistent io7 -- vec %x, pid %d\n", __func__, irq, pid); return NULL; } irq &= MARVEL_IRQ_VEC_IRQ_MASK; /* isolate the vector */ irq -= 16; /* subtract legacy bias */ if (irq >= 0x180) { printk(KERN_ERR "%s for invalid irq -- pid %d adjusted irq %x\n", __func__, pid, irq); return NULL; } ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */ if (irq >= 0x80) /* MSI */ ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr; if (pio7) *pio7 = io7; return ctl; } static void io7_enable_irq(struct irq_data *d) { volatile unsigned long *ctl; unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); if (!ctl || !io7) { printk(KERN_ERR "%s: get_ctl failed for irq %x\n", __func__, irq); return; } raw_spin_lock(&io7->irq_lock); *ctl |= 1UL << 24; mb(); *ctl; raw_spin_unlock(&io7->irq_lock); } static void io7_disable_irq(struct irq_data *d) { volatile unsigned long *ctl; unsigned int irq = d->irq; struct io7 *io7; ctl = io7_get_irq_ctl(irq, &io7); if (!ctl || !io7) { printk(KERN_ERR "%s: get_ctl failed for irq %x\n", __func__, irq); return; } raw_spin_lock(&io7->irq_lock); *ctl &= ~(1UL << 24); mb(); *ctl; raw_spin_unlock(&io7->irq_lock); } static void marvel_irq_noop(struct irq_data *d) { return; } static struct irq_chip marvel_legacy_irq_type = { .name = "LEGACY", .irq_mask = marvel_irq_noop, .irq_unmask = marvel_irq_noop, }; static struct irq_chip io7_lsi_irq_type = { .name = "LSI", .irq_unmask = io7_enable_irq, .irq_mask = io7_disable_irq, .irq_mask_ack = io7_disable_irq, }; static struct irq_chip io7_msi_irq_type = { .name = "MSI", .irq_unmask = io7_enable_irq, .irq_mask = io7_disable_irq, .irq_ack = marvel_irq_noop, }; static void io7_redirect_irq(struct io7 *io7, volatile unsigned long *csr, unsigned int where) { unsigned long val; val = *csr; val &= ~(0x1ffUL << 24); /* clear the target pid */ val |= ((unsigned long)where << 24); /* set the new target pid */ *csr = val; mb(); *csr; } static void io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned 
int where) { unsigned long val; /* * LSI_CTL has target PID @ 14 */ val = io7->csrs->PO7_LSI_CTL[which].csr; val &= ~(0x1ffUL << 14); /* clear the target pid */ val |= ((unsigned long)where << 14); /* set the new target pid */ io7->csrs->PO7_LSI_CTL[which].csr = val; mb(); io7->csrs->PO7_LSI_CTL[which].csr; } static void io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where) { unsigned long val; /* * MSI_CTL has target PID @ 14 */ val = io7->csrs->PO7_MSI_CTL[which].csr; val &= ~(0x1ffUL << 14); /* clear the target pid */ val |= ((unsigned long)where << 14); /* set the new target pid */ io7->csrs->PO7_MSI_CTL[which].csr = val; mb(); io7->csrs->PO7_MSI_CTL[which].csr; } static void __init init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where) { /* * LSI_CTL has target PID @ 14 */ io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14); mb(); io7->csrs->PO7_LSI_CTL[which].csr; } static void __init init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where) { /* * MSI_CTL has target PID @ 14 */ io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14); mb(); io7->csrs->PO7_MSI_CTL[which].csr; } static void __init init_io7_irqs(struct io7 *io7, struct irq_chip *lsi_ops, struct irq_chip *msi_ops) { long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16; long i; printk("Initializing interrupts for IO7 at PE %u - base %lx\n", io7->pe, base); /* * Where should interrupts from this IO7 go? * * They really should be sent to the local CPU to avoid having to * traverse the mesh, but if it's not an SMP kernel, they have to * go to the boot CPU. Send them all to the boot CPU for now, * as each secondary starts, it can redirect it's local device * interrupts. */ printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid); raw_spin_lock(&io7->irq_lock); /* set up the error irqs */ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid); io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid); /* Set up the lsi irqs. */ for (i = 0; i < 128; ++i) { irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } /* Disable the implemented irqs in hardware. */ for (i = 0; i < 0x60; ++i) init_one_io7_lsi(io7, i, boot_cpuid); init_one_io7_lsi(io7, 0x74, boot_cpuid); init_one_io7_lsi(io7, 0x75, boot_cpuid); /* Set up the msi irqs. */ for (i = 128; i < (128 + 512); ++i) { irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } for (i = 0; i < 16; ++i) init_one_io7_msi(io7, i, boot_cpuid); raw_spin_unlock(&io7->irq_lock); } static void __init marvel_init_irq(void) { int i; struct io7 *io7 = NULL; /* Reserve the legacy irqs. */ for (i = 0; i < 16; ++i) { irq_set_chip_and_handler(i, &marvel_legacy_irq_type, handle_level_irq); } /* Init the io7 irqs. 
*/ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type); } static int marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin) { struct pci_dev *dev = (struct pci_dev *)cdev; struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; struct io7 *io7 = io7_port->io7; int msi_loc, msi_data_off; u16 msg_ctl; u16 msg_dat; u8 intline; int irq; pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; msi_loc = dev->msi_cap; msg_ctl = 0; if (msi_loc) pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl); if (msg_ctl & PCI_MSI_FLAGS_ENABLE) { msi_data_off = PCI_MSI_DATA_32; if (msg_ctl & PCI_MSI_FLAGS_64BIT) msi_data_off = PCI_MSI_DATA_64; pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat); irq = msg_dat & 0x1ff; /* we use msg_data<8:0> */ irq += 0x80; /* offset for lsi */ #if 1 printk("PCI:%d:%d:%d (hose %d) is using MSI\n", dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), hose->index); printk(" %d message(s) from 0x%04x\n", 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), msg_dat); printk(" reporting on %d IRQ(s) from %d (0x%x)\n", 1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4), (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT), (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT)); #endif #if 0 pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS, msg_ctl & ~PCI_MSI_FLAGS_ENABLE); pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline); irq = intline; printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq); #endif } irq += 16; /* offset for legacy */ irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT; /* merge the pid */ return irq; } static void __init marvel_init_pci(void) { struct io7 *io7; marvel_register_error_handlers(); /* Indicate that we trust the console to configure things properly */ pci_set_flags(PCI_PROBE_ONLY); common_init_pci(); locate_and_init_vga(NULL); /* Clear any io7 errors. */ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) io7_clear_errors(io7); } static void __init marvel_init_rtc(void) { init_rtc_irq(NULL); } static void marvel_smp_callin(void) { int cpuid = hard_smp_processor_id(); struct io7 *io7 = marvel_find_io7(cpuid); unsigned int i; if (!io7) return; /* * There is a local IO7 - redirect all of its interrupts here. */ printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid); /* Redirect the error IRQS here. */ io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid); io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid); /* Redirect the implemented LSIs here. */ for (i = 0; i < 0x60; ++i) io7_redirect_one_lsi(io7, i, cpuid); io7_redirect_one_lsi(io7, 0x74, cpuid); io7_redirect_one_lsi(io7, 0x75, cpuid); /* Redirect the MSIs here. 
*/ for (i = 0; i < 16; ++i) io7_redirect_one_msi(io7, i, cpuid); } /* * System Vectors */ struct alpha_machine_vector marvel_ev7_mv __initmv = { .vector_name = "MARVEL/EV7", DO_EV7_MMU, .rtc_port = 0x70, .rtc_boot_cpu_only = 1, DO_MARVEL_IO, .machine_check = marvel_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = IO7_DAC_OFFSET, .nr_irqs = MARVEL_NR_IRQS, .device_interrupt = io7_device_interrupt, .agp_info = marvel_agp_info, .smp_callin = marvel_smp_callin, .init_arch = marvel_init_arch, .init_irq = marvel_init_irq, .init_rtc = marvel_init_rtc, .init_pci = marvel_init_pci, .kill_arch = marvel_kill_arch, .pci_map_irq = marvel_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(marvel_ev7)
linux-master
arch/alpha/kernel/sys_marvel.c
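io7_device_interrupt() above splits the vector into a PE number and a per-IO7 interrupt id, then folds the two back into one flat Linux irq number. The sketch below walks through that arithmetic outside the kernel; decode_vector() is only an illustrative name, and the 10-bit shift and mask are assumptions standing in for MARVEL_IRQ_VEC_PE_SHIFT and MARVEL_IRQ_VEC_IRQ_MASK from core_marvel.h.

#include <stdio.h>

#define PE_SHIFT	10			/* assumed value */
#define IRQ_MASK	((1u << PE_SHIFT) - 1)	/* assumed value */

static unsigned int decode_vector(unsigned long vector)
{
	unsigned int pid = vector >> 16;			/* which PE */
	unsigned int irq = ((vector & 0xffff) - 0x800) >> 4;	/* LSI/MSI id */

	irq += 16;			/* skip the 16 legacy ISA irqs */
	irq &= IRQ_MASK;		/* keep only the per-IO7 bits */
	irq |= pid << PE_SHIFT;		/* merge the PE back in */
	return irq;
}

int main(void)
{
	/* LSI id 5 on the IO7 attached to PE 1: 0x800 + (5 << 4), PE in <31:16>. */
	unsigned long vector = (1ul << 16) | (0x800 + (5 << 4));

	printf("vector 0x%lx -> linux irq %u\n", vector, decode_vector(vector));
	return 0;
}

marvel_map_irq() performs the same merge at its tail (add 16, OR in the PE), so LSI and MSI sources from every IO7 land in one contiguous irq space.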
// SPDX-License-Identifier: GPL-2.0 /* * Alpha IO and memory functions. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #include <asm/io.h> /* Out-of-line versions of the i/o routines that redirect into the platform-specific version. Note that "platform-specific" may mean "generic", which bumps through the machine vector. */ unsigned int ioread8(const void __iomem *addr) { unsigned int ret; mb(); ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); mb(); return ret; } unsigned int ioread16(const void __iomem *addr) { unsigned int ret; mb(); ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); mb(); return ret; } unsigned int ioread32(const void __iomem *addr) { unsigned int ret; mb(); ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); mb(); return ret; } u64 ioread64(const void __iomem *addr) { unsigned int ret; mb(); ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr); mb(); return ret; } void iowrite8(u8 b, void __iomem *addr) { mb(); IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); } void iowrite16(u16 b, void __iomem *addr) { mb(); IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); } void iowrite32(u32 b, void __iomem *addr) { mb(); IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); } void iowrite64(u64 b, void __iomem *addr) { mb(); IO_CONCAT(__IO_PREFIX,iowrite64)(b, addr); } EXPORT_SYMBOL(ioread8); EXPORT_SYMBOL(ioread16); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread64); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite64); u8 inb(unsigned long port) { return ioread8(ioport_map(port, 1)); } u16 inw(unsigned long port) { return ioread16(ioport_map(port, 2)); } u32 inl(unsigned long port) { return ioread32(ioport_map(port, 4)); } void outb(u8 b, unsigned long port) { iowrite8(b, ioport_map(port, 1)); } void outw(u16 b, unsigned long port) { iowrite16(b, ioport_map(port, 2)); } void outl(u32 b, unsigned long port) { iowrite32(b, ioport_map(port, 4)); } EXPORT_SYMBOL(inb); EXPORT_SYMBOL(inw); EXPORT_SYMBOL(inl); EXPORT_SYMBOL(outb); EXPORT_SYMBOL(outw); EXPORT_SYMBOL(outl); u8 __raw_readb(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readb)(addr); } u16 __raw_readw(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readw)(addr); } u32 __raw_readl(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readl)(addr); } u64 __raw_readq(const volatile void __iomem *addr) { return IO_CONCAT(__IO_PREFIX,readq)(addr); } void __raw_writeb(u8 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writeb)(b, addr); } void __raw_writew(u16 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writew)(b, addr); } void __raw_writel(u32 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writel)(b, addr); } void __raw_writeq(u64 b, volatile void __iomem *addr) { IO_CONCAT(__IO_PREFIX,writeq)(b, addr); } EXPORT_SYMBOL(__raw_readb); EXPORT_SYMBOL(__raw_readw); EXPORT_SYMBOL(__raw_readl); EXPORT_SYMBOL(__raw_readq); EXPORT_SYMBOL(__raw_writeb); EXPORT_SYMBOL(__raw_writew); EXPORT_SYMBOL(__raw_writel); EXPORT_SYMBOL(__raw_writeq); u8 readb(const volatile void __iomem *addr) { u8 ret; mb(); ret = __raw_readb(addr); mb(); return ret; } u16 readw(const volatile void __iomem *addr) { u16 ret; mb(); ret = __raw_readw(addr); mb(); return ret; } u32 readl(const volatile void __iomem *addr) { u32 ret; mb(); ret = __raw_readl(addr); mb(); return ret; } u64 readq(const volatile void __iomem *addr) { u64 ret; mb(); ret = __raw_readq(addr); mb(); return ret; } void writeb(u8 b, volatile void __iomem *addr) { 
mb(); __raw_writeb(b, addr); } void writew(u16 b, volatile void __iomem *addr) { mb(); __raw_writew(b, addr); } void writel(u32 b, volatile void __iomem *addr) { mb(); __raw_writel(b, addr); } void writeq(u64 b, volatile void __iomem *addr) { mb(); __raw_writeq(b, addr); } EXPORT_SYMBOL(readb); EXPORT_SYMBOL(readw); EXPORT_SYMBOL(readl); EXPORT_SYMBOL(readq); EXPORT_SYMBOL(writeb); EXPORT_SYMBOL(writew); EXPORT_SYMBOL(writel); EXPORT_SYMBOL(writeq); /* * The _relaxed functions must be ordered w.r.t. each other, but they don't * have to be ordered w.r.t. other memory accesses. */ u8 readb_relaxed(const volatile void __iomem *addr) { mb(); return __raw_readb(addr); } u16 readw_relaxed(const volatile void __iomem *addr) { mb(); return __raw_readw(addr); } u32 readl_relaxed(const volatile void __iomem *addr) { mb(); return __raw_readl(addr); } u64 readq_relaxed(const volatile void __iomem *addr) { mb(); return __raw_readq(addr); } EXPORT_SYMBOL(readb_relaxed); EXPORT_SYMBOL(readw_relaxed); EXPORT_SYMBOL(readl_relaxed); EXPORT_SYMBOL(readq_relaxed); /* * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. */ void ioread8_rep(const void __iomem *port, void *dst, unsigned long count) { while ((unsigned long)dst & 0x3) { if (!count) return; count--; *(unsigned char *)dst = ioread8(port); dst += 1; } while (count >= 4) { unsigned int w; count -= 4; w = ioread8(port); w |= ioread8(port) << 8; w |= ioread8(port) << 16; w |= ioread8(port) << 24; *(unsigned int *)dst = w; dst += 4; } while (count) { --count; *(unsigned char *)dst = ioread8(port); dst += 1; } } void insb(unsigned long port, void *dst, unsigned long count) { ioread8_rep(ioport_map(port, 1), dst, count); } EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(insb); /* * Read COUNT 16-bit words from port PORT into memory starting at * SRC. SRC must be at least short aligned. This is used by the * IDE driver to read disk sectors. Performance is important, but * the interfaces seems to be slow: just using the inlined version * of the inw() breaks things. */ void ioread16_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { if (!count) return; BUG_ON((unsigned long)dst & 0x1); count--; *(unsigned short *)dst = ioread16(port); dst += 2; } while (count >= 2) { unsigned int w; count -= 2; w = ioread16(port); w |= ioread16(port) << 16; *(unsigned int *)dst = w; dst += 4; } if (count) { *(unsigned short*)dst = ioread16(port); } } void insw(unsigned long port, void *dst, unsigned long count) { ioread16_rep(ioport_map(port, 2), dst, count); } EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(insw); /* * Read COUNT 32-bit words from port PORT into memory starting at * SRC. Now works with any alignment in SRC. Performance is important, * but the interfaces seems to be slow: just using the inlined version * of the inl() breaks things. */ void ioread32_rep(const void __iomem *port, void *dst, unsigned long count) { if (unlikely((unsigned long)dst & 0x3)) { while (count--) { struct S { int x __attribute__((packed)); }; ((struct S *)dst)->x = ioread32(port); dst += 4; } } else { /* Buffer 32-bit aligned. */ while (count--) { *(unsigned int *)dst = ioread32(port); dst += 4; } } } void insl(unsigned long port, void *dst, unsigned long count) { ioread32_rep(ioport_map(port, 4), dst, count); } EXPORT_SYMBOL(ioread32_rep); EXPORT_SYMBOL(insl); /* * Like insb but in the opposite direction. 
* Don't worry as much about doing aligned memory transfers: * doing byte reads the "slow" way isn't nearly as slow as * doing byte writes the slow way (no r-m-w cycle). */ void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) { const unsigned char *src = xsrc; while (count--) iowrite8(*src++, port); } void outsb(unsigned long port, const void *src, unsigned long count) { iowrite8_rep(ioport_map(port, 1), src, count); } EXPORT_SYMBOL(iowrite8_rep); EXPORT_SYMBOL(outsb); /* * Like insw but in the opposite direction. This is used by the IDE * driver to write disk sectors. Performance is important, but the * interfaces seems to be slow: just using the inlined version of the * outw() breaks things. */ void iowrite16_rep(void __iomem *port, const void *src, unsigned long count) { if (unlikely((unsigned long)src & 0x3)) { if (!count) return; BUG_ON((unsigned long)src & 0x1); iowrite16(*(unsigned short *)src, port); src += 2; --count; } while (count >= 2) { unsigned int w; count -= 2; w = *(unsigned int *)src; src += 4; iowrite16(w >> 0, port); iowrite16(w >> 16, port); } if (count) { iowrite16(*(unsigned short *)src, port); } } void outsw(unsigned long port, const void *src, unsigned long count) { iowrite16_rep(ioport_map(port, 2), src, count); } EXPORT_SYMBOL(iowrite16_rep); EXPORT_SYMBOL(outsw); /* * Like insl but in the opposite direction. This is used by the IDE * driver to write disk sectors. Works with any alignment in SRC. * Performance is important, but the interfaces seems to be slow: * just using the inlined version of the outl() breaks things. */ void iowrite32_rep(void __iomem *port, const void *src, unsigned long count) { if (unlikely((unsigned long)src & 0x3)) { while (count--) { struct S { int x __attribute__((packed)); }; iowrite32(((struct S *)src)->x, port); src += 4; } } else { /* Buffer 32-bit aligned. */ while (count--) { iowrite32(*(unsigned int *)src, port); src += 4; } } } void outsl(unsigned long port, const void *src, unsigned long count) { iowrite32_rep(ioport_map(port, 4), src, count); } EXPORT_SYMBOL(iowrite32_rep); EXPORT_SYMBOL(outsl); /* * Copy data from IO memory space to "real" memory space. * This needs to be optimized. */ void memcpy_fromio(void *to, const volatile void __iomem *from, long count) { /* Optimize co-aligned transfers. Everything else gets handled a byte at a time. */ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { count -= 8; do { *(u64 *)to = __raw_readq(from); count -= 8; to += 8; from += 8; } while (count >= 0); count += 8; } if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { count -= 4; do { *(u32 *)to = __raw_readl(from); count -= 4; to += 4; from += 4; } while (count >= 0); count += 4; } if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { count -= 2; do { *(u16 *)to = __raw_readw(from); count -= 2; to += 2; from += 2; } while (count >= 0); count += 2; } while (count > 0) { *(u8 *) to = __raw_readb(from); count--; to++; from++; } mb(); } EXPORT_SYMBOL(memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. * This needs to be optimized. */ void memcpy_toio(volatile void __iomem *to, const void *from, long count) { /* Optimize co-aligned transfers. Everything else gets handled a byte at a time. */ /* FIXME -- align FROM. 
*/ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { count -= 8; do { __raw_writeq(*(const u64 *)from, to); count -= 8; to += 8; from += 8; } while (count >= 0); count += 8; } if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { count -= 4; do { __raw_writel(*(const u32 *)from, to); count -= 4; to += 4; from += 4; } while (count >= 0); count += 4; } if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { count -= 2; do { __raw_writew(*(const u16 *)from, to); count -= 2; to += 2; from += 2; } while (count >= 0); count += 2; } while (count > 0) { __raw_writeb(*(const u8 *) from, to); count--; to++; from++; } mb(); } EXPORT_SYMBOL(memcpy_toio); /* * "memset" on IO memory space. */ void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) { /* Handle any initial odd byte */ if (count > 0 && ((u64)to & 1)) { __raw_writeb(c, to); to++; count--; } /* Handle any initial odd halfword */ if (count >= 2 && ((u64)to & 2)) { __raw_writew(c, to); to += 2; count -= 2; } /* Handle any initial odd word */ if (count >= 4 && ((u64)to & 4)) { __raw_writel(c, to); to += 4; count -= 4; } /* Handle all full-sized quadwords: we're aligned (or have a small count) */ count -= 8; if (count >= 0) { do { __raw_writeq(c, to); to += 8; count -= 8; } while (count >= 0); } count += 8; /* The tail is word-aligned if we still have count >= 4 */ if (count >= 4) { __raw_writel(c, to); to += 4; count -= 4; } /* The tail is half-word aligned if we have count >= 2 */ if (count >= 2) { __raw_writew(c, to); to += 2; count -= 2; } /* And finally, one last byte.. */ if (count) { __raw_writeb(c, to); } mb(); } EXPORT_SYMBOL(_memset_c_io); /* A version of memcpy used by the vga console routines to move data around arbitrarily between screen and main memory. */ void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) { const u16 __iomem *ios = (const u16 __iomem *) s; u16 __iomem *iod = (u16 __iomem *) d; int s_isio = __is_ioaddr(s); int d_isio = __is_ioaddr(d); if (s_isio) { if (d_isio) { /* FIXME: Should handle unaligned ops and operation widening. */ count /= 2; while (count--) { u16 tmp = __raw_readw(ios++); __raw_writew(tmp, iod++); } } else memcpy_fromio(d, ios, count); } else { if (d_isio) memcpy_toio(iod, s, count); else memcpy(d, s, count); } } EXPORT_SYMBOL(scr_memcpyw); void __iomem *ioport_map(unsigned long port, unsigned int size) { return IO_CONCAT(__IO_PREFIX,ioportmap) (port); } void ioport_unmap(void __iomem *addr) { } EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap);
linux-master
arch/alpha/kernel/io.c
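ioread8_rep() above avoids doing a byte-sized store into the destination for every port read: it aligns the buffer first, then packs four successive reads into one 32-bit store. The loop below is a host-side sketch of that buffering; read_port8() and read8_rep_sketch() are illustrative stand-ins, with an array taking the place of the I/O port.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t sample[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
static size_t cursor;

static uint8_t read_port8(void)			/* stand-in for ioread8(port) */
{
	return sample[cursor++];
}

static void read8_rep_sketch(void *dst, unsigned long count)
{
	uint8_t *d = dst;

	while (((uintptr_t)d & 0x3) && count) {	/* align the destination */
		*d++ = read_port8();
		count--;
	}
	while (count >= 4) {			/* four reads, one word store */
		uint32_t w = read_port8();
		w |= (uint32_t)read_port8() << 8;
		w |= (uint32_t)read_port8() << 16;
		w |= (uint32_t)read_port8() << 24;
		memcpy(d, &w, 4);
		d += 4;
		count -= 4;
	}
	while (count--)				/* bytewise tail */
		*d++ = read_port8();
}

int main(void)
{
	uint8_t buf[10];

	read8_rep_sketch(buf, sizeof(buf));
	printf("first %d ... last %d\n", buf[0], buf[9]);
	return 0;
}

The byte order of the packed word only matches the kernel routine on a little-endian host, which is fine for the original since it runs exclusively on little-endian Alpha.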
// SPDX-License-Identifier: GPL-2.0 /* ptrace.c */ /* By Ross Biro 1/23/92 */ /* edited by Linus Torvalds */ /* mangled further by Bob Manson ([email protected]) */ /* more mutilation by David Mosberger ([email protected]) */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/audit.h> #include <linux/uaccess.h> #include <asm/fpu.h> #include "proto.h" #define DEBUG DBG_MEM #undef DEBUG #ifdef DEBUG enum { DBG_MEM = (1<<0), DBG_BPT = (1<<1), DBG_MEM_ALL = (1<<2) }; #define DBG(fac,args) {if ((fac) & DEBUG) printk args;} #else #define DBG(fac,args) #endif #define BREAKINST 0x00000080 /* call_pal bpt */ /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Processes always block with the following stack-layout: * * +================================+ <---- task + 2*PAGE_SIZE * | PALcode saved frame (ps, pc, | ^ * | gp, a0, a1, a2) | | * +================================+ | struct pt_regs * | | | * | frame generated by SAVE_ALL | | * | | v * +================================+ * | | ^ * | frame saved by do_switch_stack | | struct switch_stack * | | v * +================================+ */ /* * The following table maps a register index into the stack offset at * which the register is saved. Register indices are 0-31 for integer * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and * zero have no stack-slot and need to be treated specially (see * get_reg/put_reg below). */ enum { REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64 }; #define PT_REG(reg) \ (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg)) #define SW_REG(reg) \ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \ + offsetof(struct switch_stack, reg)) #define FP_REG(reg) (offsetof(struct thread_info, reg)) static int regoff[] = { PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3), PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7), PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11), SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15), PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19), PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23), PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27), PT_REG( r28), PT_REG( gp), -1, -1, FP_REG(fp[ 0]), FP_REG(fp[ 1]), FP_REG(fp[ 2]), FP_REG(fp[ 3]), FP_REG(fp[ 4]), FP_REG(fp[ 5]), FP_REG(fp[ 6]), FP_REG(fp[ 7]), FP_REG(fp[ 8]), FP_REG(fp[ 9]), FP_REG(fp[10]), FP_REG(fp[11]), FP_REG(fp[12]), FP_REG(fp[13]), FP_REG(fp[14]), FP_REG(fp[15]), FP_REG(fp[16]), FP_REG(fp[17]), FP_REG(fp[18]), FP_REG(fp[19]), FP_REG(fp[20]), FP_REG(fp[21]), FP_REG(fp[22]), FP_REG(fp[23]), FP_REG(fp[24]), FP_REG(fp[25]), FP_REG(fp[26]), FP_REG(fp[27]), FP_REG(fp[28]), FP_REG(fp[29]), FP_REG(fp[30]), FP_REG(fp[31]), PT_REG( pc) }; static unsigned long zero; /* * Get address of register REGNO in task TASK. */ static unsigned long * get_reg_addr(struct task_struct * task, unsigned long regno) { unsigned long *addr; if (regno == 30) { addr = &task_thread_info(task)->pcb.usp; } else if (regno == 65) { addr = &task_thread_info(task)->pcb.unique; } else if (regno == 31 || regno > 65) { zero = 0; addr = &zero; } else { addr = task_stack_page(task) + regoff[regno]; } return addr; } /* * Get contents of register REGNO in task TASK. 
*/ static unsigned long get_reg(struct task_struct * task, unsigned long regno) { /* Special hack for fpcr -- combine hardware and software bits. */ if (regno == 63) { unsigned long fpcr = *get_reg_addr(task, regno); unsigned long swcr = task_thread_info(task)->ieee_state & IEEE_SW_MASK; swcr = swcr_update_status(swcr, fpcr); return fpcr | swcr; } return *get_reg_addr(task, regno); } /* * Write contents of register REGNO in task TASK. */ static int put_reg(struct task_struct *task, unsigned long regno, unsigned long data) { if (regno == 63) { task_thread_info(task)->ieee_state = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK) | (data & IEEE_SW_MASK)); data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data); } *get_reg_addr(task, regno) = data; return 0; } static inline int read_int(struct task_struct *task, unsigned long addr, int * data) { int copied = access_process_vm(task, addr, data, sizeof(int), FOLL_FORCE); return (copied == sizeof(int)) ? 0 : -EIO; } static inline int write_int(struct task_struct *task, unsigned long addr, int data) { int copied = access_process_vm(task, addr, &data, sizeof(int), FOLL_FORCE | FOLL_WRITE); return (copied == sizeof(int)) ? 0 : -EIO; } /* * Set breakpoint. */ int ptrace_set_bpt(struct task_struct * child) { int displ, i, res, reg_b, nsaved = 0; unsigned int insn, op_code; unsigned long pc; pc = get_reg(child, REG_PC); res = read_int(child, pc, (int *) &insn); if (res < 0) return res; op_code = insn >> 26; if (op_code >= 0x30) { /* * It's a branch: instead of trying to figure out * whether the branch will be taken or not, we'll put * a breakpoint at either location. This is simpler, * more reliable, and probably not a whole lot slower * than the alternative approach of emulating the * branch (emulation can be tricky for fp branches). */ displ = ((s32)(insn << 11)) >> 9; task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; if (displ) /* guard against unoptimized code */ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4 + displ; DBG(DBG_BPT, ("execing branch\n")); } else if (op_code == 0x1a) { reg_b = (insn >> 16) & 0x1f; task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); DBG(DBG_BPT, ("execing jump\n")); } else { task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; DBG(DBG_BPT, ("execing normal insn\n")); } /* install breakpoints: */ for (i = 0; i < nsaved; ++i) { res = read_int(child, task_thread_info(child)->bpt_addr[i], (int *) &insn); if (res < 0) return res; task_thread_info(child)->bpt_insn[i] = insn; DBG(DBG_BPT, (" -> next_pc=%lx\n", task_thread_info(child)->bpt_addr[i])); res = write_int(child, task_thread_info(child)->bpt_addr[i], BREAKINST); if (res < 0) return res; } task_thread_info(child)->bpt_nsaved = nsaved; return 0; } /* * Ensure no single-step breakpoint is pending. Returns non-zero * value if child was being single-stepped. */ int ptrace_cancel_bpt(struct task_struct * child) { int i, nsaved = task_thread_info(child)->bpt_nsaved; task_thread_info(child)->bpt_nsaved = 0; if (nsaved > 2) { printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved); nsaved = 2; } for (i = 0; i < nsaved; ++i) { write_int(child, task_thread_info(child)->bpt_addr[i], task_thread_info(child)->bpt_insn[i]); } return (nsaved != 0); } void user_enable_single_step(struct task_struct *child) { /* Mark single stepping. */ task_thread_info(child)->bpt_nsaved = -1; } void user_disable_single_step(struct task_struct *child) { ptrace_cancel_bpt(child); } /* * Called by kernel/ptrace.c when detaching.. 
* * Make sure the single step bit is not set. */ void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); } long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { unsigned long tmp; size_t copied; long ret; switch (request) { /* When I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: copied = ptrace_access_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE); ret = -EIO; if (copied != sizeof(tmp)) break; force_successful_syscall_return(); ret = tmp; break; /* Read register number ADDR. */ case PTRACE_PEEKUSR: force_successful_syscall_return(); ret = get_reg(child, addr); DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret)); break; /* When I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. */ case PTRACE_POKEDATA: ret = generic_ptrace_pokedata(child, addr, data); break; case PTRACE_POKEUSR: /* write the specified register */ DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data)); ret = put_reg(child, addr, data); break; default: ret = ptrace_request(child, request, addr, data); break; } return ret; } asmlinkage unsigned long syscall_trace_enter(void) { unsigned long ret = 0; struct pt_regs *regs = current_pt_regs(); if (test_thread_flag(TIF_SYSCALL_TRACE) && ptrace_report_syscall_entry(current_pt_regs())) ret = -1UL; audit_syscall_entry(regs->r0, regs->r16, regs->r17, regs->r18, regs->r19); return ret ?: current_pt_regs()->r0; } asmlinkage void syscall_trace_leave(void) { audit_syscall_exit(current_pt_regs()); if (test_thread_flag(TIF_SYSCALL_TRACE)) ptrace_report_syscall_exit(current_pt_regs(), 0); }
linux-master
arch/alpha/kernel/ptrace.c
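ptrace_set_bpt() above never evaluates a branch condition; it just computes where the branch could go and plants BREAKINST at both candidates. The displacement math is the subtle part: Alpha branch instructions carry a signed 21-bit longword displacement in bits 20:0, and the shift-left-11 / arithmetic-shift-right-9 pair both sign-extends it and multiplies it by 4. The program below only checks that arithmetic; the instruction word is a made-up example, not a particular real branch.

#include <stdint.h>
#include <stdio.h>

static long branch_displ(uint32_t insn)
{
	return ((int32_t)(insn << 11)) >> 9;	/* sign-extend disp<20:0>, times 4 */
}

int main(void)
{
	/* Opcode in the 0x30-0x3f branch range, displacement -2 longwords. */
	uint32_t insn = (0x39u << 26) | 0x1ffffe;
	unsigned long pc = 0x10000;

	printf("displ = %ld, fall-through = %#lx, target = %#lx\n",
	       branch_displ(insn), pc + 4, pc + 4 + branch_displ(insn));
	return 0;
}

For this encoding the displacement comes out as -8 bytes, so the two breakpoints would go at pc + 4 and pc - 4, exactly the bpt_addr pair the kernel code saves and later restores in ptrace_cancel_bpt().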
// SPDX-License-Identifier: GPL-2.0 /* * SMC 37C93X initialization code */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/hwrpb.h> #include <asm/io.h> #define SMC_DEBUG 0 #if SMC_DEBUG # define DBG_DEVS(args) printk args #else # define DBG_DEVS(args) #endif #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) /* device "activate" register contents */ #define DEVICE_ON 1 #define DEVICE_OFF 0 /* configuration on/off keys */ #define CONFIG_ON_KEY 0x55 #define CONFIG_OFF_KEY 0xaa /* configuration space device definitions */ #define FDC 0 #define IDE1 1 #define IDE2 2 #define PARP 3 #define SER1 4 #define SER2 5 #define RTCL 6 #define KYBD 7 #define AUXIO 8 /* Chip register offsets from base */ #define CONFIG_CONTROL 0x02 #define INDEX_ADDRESS 0x03 #define LOGICAL_DEVICE_NUMBER 0x07 #define DEVICE_ID 0x20 #define DEVICE_REV 0x21 #define POWER_CONTROL 0x22 #define POWER_MGMT 0x23 #define OSC 0x24 #define ACTIVATE 0x30 #define ADDR_HI 0x60 #define ADDR_LO 0x61 #define INTERRUPT_SEL 0x70 #define INTERRUPT_SEL_2 0x72 /* KYBD/MOUS only */ #define DMA_CHANNEL_SEL 0x74 /* FDC/PARP only */ #define FDD_MODE_REGISTER 0x90 #define FDD_OPTION_REGISTER 0x91 /* values that we read back that are expected ... */ #define VALID_DEVICE_ID 2 /* default device addresses */ #define KYBD_INTERRUPT 1 #define MOUS_INTERRUPT 12 #define COM2_BASE 0x2f8 #define COM2_INTERRUPT 3 #define COM1_BASE 0x3f8 #define COM1_INTERRUPT 4 #define PARP_BASE 0x3bc #define PARP_INTERRUPT 7 static unsigned long __init SMCConfigState(unsigned long baseAddr) { unsigned char devId; unsigned long configPort; unsigned long indexPort; unsigned long dataPort; int i; configPort = indexPort = baseAddr; dataPort = configPort + 1; #define NUM_RETRIES 5 for (i = 0; i < NUM_RETRIES; i++) { outb(CONFIG_ON_KEY, configPort); outb(CONFIG_ON_KEY, configPort); outb(DEVICE_ID, indexPort); devId = inb(dataPort); if (devId == VALID_DEVICE_ID) { outb(DEVICE_REV, indexPort); /* unsigned char devRev = */ inb(dataPort); break; } else udelay(100); } return (i != NUM_RETRIES) ? 
baseAddr : 0L; } static void __init SMCRunState(unsigned long baseAddr) { outb(CONFIG_OFF_KEY, baseAddr); } static unsigned long __init SMCDetectUltraIO(void) { unsigned long baseAddr; baseAddr = 0x3F0; if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x3F0 ) { return( baseAddr ); } baseAddr = 0x370; if ( ( baseAddr = SMCConfigState( baseAddr ) ) == 0x370 ) { return( baseAddr ); } return( ( unsigned long )0 ); } static void __init SMCEnableDevice(unsigned long baseAddr, unsigned long device, unsigned long portaddr, unsigned long interrupt) { unsigned long indexPort; unsigned long dataPort; indexPort = baseAddr; dataPort = baseAddr + 1; outb(LOGICAL_DEVICE_NUMBER, indexPort); outb(device, dataPort); outb(ADDR_LO, indexPort); outb(( portaddr & 0xFF ), dataPort); outb(ADDR_HI, indexPort); outb((portaddr >> 8) & 0xFF, dataPort); outb(INTERRUPT_SEL, indexPort); outb(interrupt, dataPort); outb(ACTIVATE, indexPort); outb(DEVICE_ON, dataPort); } static void __init SMCEnableKYBD(unsigned long baseAddr) { unsigned long indexPort; unsigned long dataPort; indexPort = baseAddr; dataPort = baseAddr + 1; outb(LOGICAL_DEVICE_NUMBER, indexPort); outb(KYBD, dataPort); outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */ outb(KYBD_INTERRUPT, dataPort); outb(INTERRUPT_SEL_2, indexPort); /* Secondary interrupt select */ outb(MOUS_INTERRUPT, dataPort); outb(ACTIVATE, indexPort); outb(DEVICE_ON, dataPort); } static void __init SMCEnableFDC(unsigned long baseAddr) { unsigned long indexPort; unsigned long dataPort; unsigned char oldValue; indexPort = baseAddr; dataPort = baseAddr + 1; outb(LOGICAL_DEVICE_NUMBER, indexPort); outb(FDC, dataPort); outb(FDD_MODE_REGISTER, indexPort); oldValue = inb(dataPort); oldValue |= 0x0E; /* Enable burst mode */ outb(oldValue, dataPort); outb(INTERRUPT_SEL, indexPort); /* Primary interrupt select */ outb(0x06, dataPort ); outb(DMA_CHANNEL_SEL, indexPort); /* DMA channel select */ outb(0x02, dataPort); outb(ACTIVATE, indexPort); outb(DEVICE_ON, dataPort); } #if SMC_DEBUG static void __init SMCReportDeviceStatus(unsigned long baseAddr) { unsigned long indexPort; unsigned long dataPort; unsigned char currentControl; indexPort = baseAddr; dataPort = baseAddr + 1; outb(POWER_CONTROL, indexPort); currentControl = inb(dataPort); printk(currentControl & (1 << FDC) ? "\t+FDC Enabled\n" : "\t-FDC Disabled\n"); printk(currentControl & (1 << IDE1) ? "\t+IDE1 Enabled\n" : "\t-IDE1 Disabled\n"); printk(currentControl & (1 << IDE2) ? "\t+IDE2 Enabled\n" : "\t-IDE2 Disabled\n"); printk(currentControl & (1 << PARP) ? "\t+PARP Enabled\n" : "\t-PARP Disabled\n"); printk(currentControl & (1 << SER1) ? "\t+SER1 Enabled\n" : "\t-SER1 Disabled\n"); printk(currentControl & (1 << SER2) ? 
"\t+SER2 Enabled\n" : "\t-SER2 Disabled\n"); printk( "\n" ); } #endif int __init SMC93x_Init(void) { unsigned long SMCUltraBase; unsigned long flags; local_irq_save(flags); if ((SMCUltraBase = SMCDetectUltraIO()) != 0UL) { #if SMC_DEBUG SMCReportDeviceStatus(SMCUltraBase); #endif SMCEnableDevice(SMCUltraBase, SER1, COM1_BASE, COM1_INTERRUPT); DBG_DEVS(("SMC FDC37C93X: SER1 done\n")); SMCEnableDevice(SMCUltraBase, SER2, COM2_BASE, COM2_INTERRUPT); DBG_DEVS(("SMC FDC37C93X: SER2 done\n")); SMCEnableDevice(SMCUltraBase, PARP, PARP_BASE, PARP_INTERRUPT); DBG_DEVS(("SMC FDC37C93X: PARP done\n")); /* On PC164, IDE on the SMC is not enabled; CMD646 (PCI) on MB */ SMCEnableKYBD(SMCUltraBase); DBG_DEVS(("SMC FDC37C93X: KYB done\n")); SMCEnableFDC(SMCUltraBase); DBG_DEVS(("SMC FDC37C93X: FDC done\n")); #if SMC_DEBUG SMCReportDeviceStatus(SMCUltraBase); #endif SMCRunState(SMCUltraBase); local_irq_restore(flags); printk("SMC FDC37C93X Ultra I/O Controller found @ 0x%lx\n", SMCUltraBase); return 1; } else { local_irq_restore(flags); DBG_DEVS(("No SMC FDC37C93X Ultra I/O Controller found\n")); return 0; } }
linux-master
arch/alpha/kernel/smc37c93x.c
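Every helper in the SMC probe above speaks the same two-port protocol: key the chip into configuration mode, write a register number to the index port, move data through the data port at index + 1, and key back out. The sketch below shows just the device-ID check from SMCConfigState(); the local outb()/inb() are logging stand-ins so it runs anywhere, and probe_sketch() is an illustrative name rather than anything in the kernel.

#include <stdio.h>

#define CONFIG_ON_KEY	0x55
#define CONFIG_OFF_KEY	0xaa
#define DEVICE_ID	0x20
#define VALID_DEVICE_ID	2

static void outb(unsigned char v, unsigned long port)	/* stand-in */
{
	printf("outb 0x%02x -> 0x%lx\n", v, port);
}

static unsigned char inb(unsigned long port)		/* stand-in */
{
	printf("inb       <- 0x%lx\n", port);
	return VALID_DEVICE_ID;		/* pretend the chip answered */
}

static int probe_sketch(unsigned long base)
{
	unsigned long index = base, data = base + 1;
	unsigned char id;

	outb(CONFIG_ON_KEY, base);	/* key sequence: enter config mode */
	outb(CONFIG_ON_KEY, base);
	outb(DEVICE_ID, index);		/* select the DEVICE_ID register */
	id = inb(data);			/* read it back via the data port */
	outb(CONFIG_OFF_KEY, base);	/* return the chip to run mode */
	return id == VALID_DEVICE_ID;
}

int main(void)
{
	printf("chip %s at 0x3f0\n", probe_sketch(0x3f0) ? "found" : "absent");
	return 0;
}

SMCEnableDevice() and friends use the identical index/data dance, only with writes to ADDR_HI, ADDR_LO, INTERRUPT_SEL and ACTIVATE instead of a read of DEVICE_ID.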
// SPDX-License-Identifier: GPL-2.0 /* * arch/alpha/kernel/traps.c * * (C) Copyright 1994 Linus Torvalds */ /* * This file initializes the trap entry points */ #include <linux/jiffies.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/delay.h> #include <linux/extable.h> #include <linux/kallsyms.h> #include <linux/ratelimit.h> #include <asm/gentrap.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include <asm/sysinfo.h> #include <asm/hwrpb.h> #include <asm/mmu_context.h> #include <asm/special_insns.h> #include "proto.h" /* Work-around for some SRMs which mishandle opDEC faults. */ static int opDEC_fix; static void opDEC_check(void) { __asm__ __volatile__ ( /* Load the address of... */ " br $16, 1f\n" /* A stub instruction fault handler. Just add 4 to the pc and continue. */ " ldq $16, 8($sp)\n" " addq $16, 4, $16\n" " stq $16, 8($sp)\n" " call_pal %[rti]\n" /* Install the instruction fault handler. */ "1: lda $17, 3\n" " call_pal %[wrent]\n" /* With that in place, the fault from the round-to-minf fp insn will arrive either at the "lda 4" insn (bad) or one past that (good). This places the correct fixup in %0. */ " lda %[fix], 0\n" " cvttq/svm $f31,$f31\n" " lda %[fix], 4" : [fix] "=r" (opDEC_fix) : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent) : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25"); if (opDEC_fix) printk("opDEC fixup enabled.\n"); } void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15) { printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", regs->pc, regs->r26, regs->ps, print_tainted()); printk("pc is at %pSR\n", (void *)regs->pc); printk("ra is at %pSR\n", (void *)regs->r26); printk("v0 = %016lx t0 = %016lx t1 = %016lx\n", regs->r0, regs->r1, regs->r2); printk("t2 = %016lx t3 = %016lx t4 = %016lx\n", regs->r3, regs->r4, regs->r5); printk("t5 = %016lx t6 = %016lx t7 = %016lx\n", regs->r6, regs->r7, regs->r8); if (r9_15) { printk("s0 = %016lx s1 = %016lx s2 = %016lx\n", r9_15[9], r9_15[10], r9_15[11]); printk("s3 = %016lx s4 = %016lx s5 = %016lx\n", r9_15[12], r9_15[13], r9_15[14]); printk("s6 = %016lx\n", r9_15[15]); } printk("a0 = %016lx a1 = %016lx a2 = %016lx\n", regs->r16, regs->r17, regs->r18); printk("a3 = %016lx a4 = %016lx a5 = %016lx\n", regs->r19, regs->r20, regs->r21); printk("t8 = %016lx t9 = %016lx t10= %016lx\n", regs->r22, regs->r23, regs->r24); printk("t11= %016lx pv = %016lx at = %016lx\n", regs->r25, regs->r27, regs->r28); printk("gp = %016lx sp = %p\n", regs->gp, regs+1); #if 0 __halt(); #endif } #if 0 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"}; #endif static void dik_show_code(unsigned int *pc) { long i; printk("Code:"); for (i = -6; i < 2; i++) { unsigned int insn; if (__get_user(insn, (unsigned int __user *)pc + i)) break; printk("%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); } printk("\n"); } static void dik_show_trace(unsigned long *sp, const char *loglvl) { long i = 0; printk("%sTrace:\n", loglvl); while (0x1ff8 & (unsigned long) sp) { extern char _stext[], _etext[]; unsigned long tmp = *sp; sp++; if (!is_kernel_text(tmp)) continue; printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp); if (i > 40) { printk("%s ...", loglvl); break; } } printk("%s\n", loglvl); } static int kstack_depth_to_print = 24; void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { unsigned long *stack; int i; /* * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the * back trace for this cpu. */ if(sp==NULL) sp=(unsigned long*)&sp; stack = sp; for(i=0; i < kstack_depth_to_print; i++) { if (((long) stack & (THREAD_SIZE-1)) == 0) break; if ((i % 4) == 0) { if (i) pr_cont("\n"); printk("%s ", loglvl); } else { pr_cont(" "); } pr_cont("%016lx", *stack++); } pr_cont("\n"); dik_show_trace(sp, loglvl); } void die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) { if (regs->ps & 8) return; #ifdef CONFIG_SMP printk("CPU %d ", hard_smp_processor_id()); #endif printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); dik_show_regs(regs, r9_15); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); dik_show_code((unsigned int *)regs->pc); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); local_irq_enable(); while (1); } make_task_dead(SIGSEGV); } #ifndef CONFIG_MATHEMU static long dummy_emul(void) { return 0; } long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul; EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise); long (*alpha_fp_emul) (unsigned long pc) = (void *)dummy_emul; EXPORT_SYMBOL_GPL(alpha_fp_emul); #else long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); long alpha_fp_emul (unsigned long pc); #endif asmlinkage void do_entArith(unsigned long summary, unsigned long write_mask, struct pt_regs *regs) { long si_code = FPE_FLTINV; if (summary & 1) { /* Software-completion summary bit is set, so try to emulate the instruction. If the processor supports precise exceptions, we don't have to search. */ if (!amask(AMASK_PRECISE_TRAP)) si_code = alpha_fp_emul(regs->pc - 4); else si_code = alpha_fp_emul_imprecise(regs, write_mask); if (si_code == 0) return; } die_if_kernel("Arithmetic fault", regs, 0, NULL); send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current); } asmlinkage void do_entIF(unsigned long type, struct pt_regs *regs) { int signo, code; if (type == 3) { /* FEN fault */ /* Irritating users can call PAL_clrfen to disable the FPU for the process. The kernel will then trap in do_switch_stack and undo_switch_stack when we try to save and restore the FP registers. Given that GCC by default generates code that uses the FP registers, PAL_clrfen is not useful except for DoS attacks. So turn the bleeding FPU back on and be done with it. */ current_thread_info()->pcb.flags |= 1; __reload_thread(&current_thread_info()->pcb); return; } if (!user_mode(regs)) { if (type == 1) { const unsigned int *data = (const unsigned int *) regs->pc; printk("Kernel bug at %s:%d\n", (const char *)(data[1] | (long)data[2] << 32), data[0]); } #ifdef CONFIG_ALPHA_WTINT if (type == 4) { /* If CALL_PAL WTINT is totally unsupported by the PALcode, e.g. MILO, "emulate" it by overwriting the insn. 
*/ unsigned int *pinsn = (unsigned int *) regs->pc - 1; if (*pinsn == PAL_wtint) { *pinsn = 0x47e01400; /* mov 0,$0 */ imb(); regs->r0 = 0; return; } } #endif /* ALPHA_WTINT */ die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), regs, type, NULL); } switch (type) { case 0: /* breakpoint */ if (ptrace_cancel_bpt(current)) { regs->pc -= 4; /* make pc point to former bpt */ } send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, current); return; case 1: /* bugcheck */ send_sig_fault_trapno(SIGTRAP, TRAP_UNK, (void __user *) regs->pc, 0, current); return; case 2: /* gentrap */ switch ((long) regs->r16) { case GEN_INTOVF: signo = SIGFPE; code = FPE_INTOVF; break; case GEN_INTDIV: signo = SIGFPE; code = FPE_INTDIV; break; case GEN_FLTOVF: signo = SIGFPE; code = FPE_FLTOVF; break; case GEN_FLTDIV: signo = SIGFPE; code = FPE_FLTDIV; break; case GEN_FLTUND: signo = SIGFPE; code = FPE_FLTUND; break; case GEN_FLTINV: signo = SIGFPE; code = FPE_FLTINV; break; case GEN_FLTINE: signo = SIGFPE; code = FPE_FLTRES; break; case GEN_ROPRAND: signo = SIGFPE; code = FPE_FLTUNK; break; case GEN_DECOVF: case GEN_DECDIV: case GEN_DECINV: case GEN_ASSERTERR: case GEN_NULPTRERR: case GEN_STKOVF: case GEN_STRLENERR: case GEN_SUBSTRERR: case GEN_RANGERR: case GEN_SUBRNG: case GEN_SUBRNG1: case GEN_SUBRNG2: case GEN_SUBRNG3: case GEN_SUBRNG4: case GEN_SUBRNG5: case GEN_SUBRNG6: case GEN_SUBRNG7: default: signo = SIGTRAP; code = TRAP_UNK; break; } send_sig_fault_trapno(signo, code, (void __user *) regs->pc, regs->r16, current); return; case 4: /* opDEC */ if (implver() == IMPLVER_EV4) { long si_code; /* The some versions of SRM do not handle the opDEC properly - they return the PC of the opDEC fault, not the instruction after as the Alpha architecture requires. Here we fix it up. We do this by intentionally causing an opDEC fault during the boot sequence and testing if we get the correct PC. If not, we set a flag to correct it every time through. */ regs->pc += opDEC_fix; /* EV4 does not implement anything except normal rounding. Everything else will come here as an illegal instruction. Emulate them. */ si_code = alpha_fp_emul(regs->pc - 4); if (si_code == 0) return; if (si_code > 0) { send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current); return; } } break; case 5: /* illoc */ default: /* unexpected instruction-fault type */ ; } send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current); } /* There is an ifdef in the PALcode in MILO that enables a "kernel debugging entry point" as an unprivileged call_pal. We don't want to have anything to do with it, but unfortunately several versions of MILO included in distributions have it enabled, and if we don't put something on the entry point we'll oops. */ asmlinkage void do_entDbg(struct pt_regs *regs) { die_if_kernel("Instruction fault", regs, 0, NULL); force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc); } /* * entUna has a different register layout to be reasonably simple. It * needs access to all the integer registers (the kernel doesn't use * fp-regs), and it needs to have them in order for simpler access. * * Due to the non-standard register layout (and because we don't want * to handle floating-point regs), user-mode unaligned accesses are * handled separately by do_entUnaUser below. * * Oh, btw, we don't handle the "gp" register correctly, but if we fault * on a gp-register unaligned load/store, something is _very_ wrong * in the kernel anyway.. 
*/ struct allregs { unsigned long regs[32]; unsigned long ps, pc, gp, a0, a1, a2; }; struct unaligned_stat { unsigned long count, va, pc; } unaligned[2]; /* Macro for exception fixup code to access integer registers. */ #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)]) asmlinkage void do_entUna(void * va, unsigned long opcode, unsigned long reg, struct allregs *regs) { long error, tmp1, tmp2, tmp3, tmp4; unsigned long pc = regs->pc - 4; unsigned long *_regs = regs->regs; const struct exception_table_entry *fixup; unaligned[0].count++; unaligned[0].va = (unsigned long) va; unaligned[0].pc = pc; /* We don't want to use the generic get/put unaligned macros as we want to trap exceptions. Only if we actually get an exception will we decide whether we should have caught it. */ switch (opcode) { case 0x0c: /* ldwu */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,1(%3)\n" " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = tmp1|tmp2; return; case 0x28: /* ldl */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = (int)(tmp1|tmp2); return; case 0x29: /* ldq */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = tmp1|tmp2; return; /* Note that the store sequences do not indicate that they change memory because it _should_ be affecting nothing in this context. (Otherwise we have other, much larger, problems.) */ case 0x0d: /* stw */ __asm__ __volatile__( "1: ldq_u %2,1(%5)\n" "2: ldq_u %1,0(%5)\n" " inswh %6,%5,%4\n" " inswl %6,%5,%3\n" " mskwh %2,%5,%2\n" " mskwl %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; case 0x2c: /* stl */ __asm__ __volatile__( "1: ldq_u %2,3(%5)\n" "2: ldq_u %1,0(%5)\n" " inslh %6,%5,%4\n" " insll %6,%5,%3\n" " msklh %2,%5,%2\n" " mskll %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; case 0x2d: /* stq */ __asm__ __volatile__( "1: ldq_u %2,7(%5)\n" "2: ldq_u %1,0(%5)\n" " insqh %6,%5,%4\n" " insql %6,%5,%3\n" " mskqh %2,%5,%2\n" " mskql %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; } printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); make_task_dead(SIGSEGV); got_exception: /* Ok, we caught the exception, but we don't want it. Is there someone to pass it along to? 
*/ if ((fixup = search_exception_tables(pc)) != 0) { unsigned long newpc; newpc = fixup_exception(una_reg, fixup, pc); printk("Forwarding unaligned exception at %lx (%lx)\n", pc, newpc); regs->pc = newpc; return; } /* * Yikes! No one to forward the exception to. * Since the registers are in a weird format, dump them ourselves. */ printk("%s(%d): unhandled unaligned exception\n", current->comm, task_pid_nr(current)); printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", pc, una_reg(26), regs->ps); printk("r0 = %016lx r1 = %016lx r2 = %016lx\n", una_reg(0), una_reg(1), una_reg(2)); printk("r3 = %016lx r4 = %016lx r5 = %016lx\n", una_reg(3), una_reg(4), una_reg(5)); printk("r6 = %016lx r7 = %016lx r8 = %016lx\n", una_reg(6), una_reg(7), una_reg(8)); printk("r9 = %016lx r10= %016lx r11= %016lx\n", una_reg(9), una_reg(10), una_reg(11)); printk("r12= %016lx r13= %016lx r14= %016lx\n", una_reg(12), una_reg(13), una_reg(14)); printk("r15= %016lx\n", una_reg(15)); printk("r16= %016lx r17= %016lx r18= %016lx\n", una_reg(16), una_reg(17), una_reg(18)); printk("r19= %016lx r20= %016lx r21= %016lx\n", una_reg(19), una_reg(20), una_reg(21)); printk("r22= %016lx r23= %016lx r24= %016lx\n", una_reg(22), una_reg(23), una_reg(24)); printk("r25= %016lx r27= %016lx r28= %016lx\n", una_reg(25), una_reg(27), una_reg(28)); printk("gp = %016lx sp = %p\n", regs->gp, regs+1); dik_show_code((unsigned int *)pc); dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); local_irq_enable(); while (1); } make_task_dead(SIGSEGV); } /* * Convert an s-floating point value in memory format to the * corresponding value in register format. The exponent * needs to be remapped to preserve non-finite values * (infinities, not-a-numbers, denormals). */ static inline unsigned long s_mem_to_reg (unsigned long s_mem) { unsigned long frac = (s_mem >> 0) & 0x7fffff; unsigned long sign = (s_mem >> 31) & 0x1; unsigned long exp_msb = (s_mem >> 30) & 0x1; unsigned long exp_low = (s_mem >> 23) & 0x7f; unsigned long exp; exp = (exp_msb << 10) | exp_low; /* common case */ if (exp_msb) { if (exp_low == 0x7f) { exp = 0x7ff; } } else { if (exp_low == 0x00) { exp = 0x000; } else { exp |= (0x7 << 7); } } return (sign << 63) | (exp << 52) | (frac << 29); } /* * Convert an s-floating point value in register format to the * corresponding value in memory format. */ static inline unsigned long s_reg_to_mem (unsigned long s_reg) { return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34); } /* * Handle user-level unaligned fault. Handling user-level unaligned * faults is *extremely* slow and produces nasty messages. A user * program *should* fix unaligned faults ASAP. * * Notice that we have (almost) the regular kernel stack layout here, * so finding the appropriate registers is a little more difficult * than in the kernel case. * * Finally, we handle regular integer load/stores only. In * particular, load-linked/store-conditionally and floating point * load/stores are not supported. The former make no sense with * unaligned faults (they are guaranteed to fail) and I don't think * the latter will occur in any decent program. * * Sigh. We *do* have to handle some FP operations, because GCC will * uses them as temporary storage for integer memory to memory copies. * However, we need to deal with stt/ldt and sts/lds only. 
*/ #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \ | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \ | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \ | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */ #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \ | 1L << 0x2c | 1L << 0x2d /* stl stq */ \ | 1L << 0x0d | 1L << 0x0e ) /* stw stb */ #define R(x) ((size_t) &((struct pt_regs *)0)->x) static int unauser_reg_offsets[32] = { R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), /* r9 ... r15 are stored in front of regs. */ -56, -48, -40, -32, -24, -16, -8, R(r16), R(r17), R(r18), R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), R(r27), R(r28), R(gp), 0, 0 }; #undef R asmlinkage void do_entUnaUser(void __user * va, unsigned long opcode, unsigned long reg, struct pt_regs *regs) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long fake_reg, *reg_addr = &fake_reg; int si_code; long error; /* Check the UAC bits to decide what the user wants us to do with the unaligned access. */ if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { if (__ratelimit(&ratelimit)) { printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", current->comm, task_pid_nr(current), regs->pc - 4, va, opcode, reg); } } if ((current_thread_info()->status & TS_UAC_SIGBUS)) goto give_sigbus; /* Not sure why you'd want to use this, but... */ if ((current_thread_info()->status & TS_UAC_NOFIX)) return; /* Don't bother reading ds in the access check since we already know that this came from the user. Also rely on the fact that the page at TASK_SIZE is unmapped and so can't be touched anyway. */ if ((unsigned long)va >= TASK_SIZE) goto give_sigsegv; ++unaligned[1].count; unaligned[1].va = (unsigned long)va; unaligned[1].pc = regs->pc - 4; if ((1L << opcode) & OP_INT_MASK) { /* it's an integer load/store */ if (reg < 30) { reg_addr = (unsigned long *) ((char *)regs + unauser_reg_offsets[reg]); } else if (reg == 30) { /* usp in PAL regs */ fake_reg = rdusp(); } else { /* zero "register" */ fake_reg = 0; } } /* We don't want to use the generic get/put unaligned macros as we want to trap exceptions. Only if we actually get an exception will we decide whether we should have caught it. 
*/ switch (opcode) { case 0x0c: /* ldwu */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,1(%3)\n" " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = tmp1|tmp2; break; case 0x22: /* lds */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2))); return; case 0x23: /* ldt */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; alpha_write_fp_reg(reg, tmp1|tmp2); return; case 0x28: /* ldl */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = (int)(tmp1|tmp2); break; case 0x29: /* ldq */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" EXC(1b,3b,%1,%0) EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = tmp1|tmp2; break; /* Note that the store sequences do not indicate that they change memory because it _should_ be affecting nothing in this context. (Otherwise we have other, much larger, problems.) */ case 0x0d: /* stw */ __asm__ __volatile__( "1: ldq_u %2,1(%5)\n" "2: ldq_u %1,0(%5)\n" " inswh %6,%5,%4\n" " inswl %6,%5,%3\n" " mskwh %2,%5,%2\n" " mskwl %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; case 0x26: /* sts */ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); fallthrough; case 0x2c: /* stl */ __asm__ __volatile__( "1: ldq_u %2,3(%5)\n" "2: ldq_u %1,0(%5)\n" " inslh %6,%5,%4\n" " insll %6,%5,%3\n" " msklh %2,%5,%2\n" " mskll %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; case 0x27: /* stt */ fake_reg = alpha_read_fp_reg(reg); fallthrough; case 0x2d: /* stq */ __asm__ __volatile__( "1: ldq_u %2,7(%5)\n" "2: ldq_u %1,0(%5)\n" " insqh %6,%5,%4\n" " insql %6,%5,%3\n" " mskqh %2,%5,%2\n" " mskql %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" EXC(1b,5b,%2,%0) EXC(2b,5b,%1,%0) EXC(3b,5b,$31,%0) EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; default: /* What instruction were you trying to use, exactly? */ goto give_sigbus; } /* Only integer loads should get here; everyone else returns early. 
*/ if (reg == 30) wrusp(fake_reg); return; give_sigsegv: regs->pc -= 4; /* make pc point to faulting insn */ /* We need to replicate some of the logic in mm/fault.c, since we don't have access to the fault code in the exception handling return path. */ if ((unsigned long)va >= TASK_SIZE) si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; mmap_read_lock(mm); if (find_vma(mm, (unsigned long)va)) si_code = SEGV_ACCERR; else si_code = SEGV_MAPERR; mmap_read_unlock(mm); } send_sig_fault(SIGSEGV, si_code, va, current); return; give_sigbus: regs->pc -= 4; send_sig_fault(SIGBUS, BUS_ADRALN, va, current); return; } void trap_init(void) { /* Tell PAL-code what global pointer we want in the kernel. */ register unsigned long gptr __asm__("$29"); wrkgp(gptr); /* Hack for Multia (UDB) and JENSEN: some of their SRMs have a bug in the handling of the opDEC fault. Fix it up if so. */ if (implver() == IMPLVER_EV4) opDEC_check(); wrent(entArith, 1); wrent(entMM, 2); wrent(entIF, 3); wrent(entUna, 4); wrent(entSys, 5); wrent(entDbg, 6); }
linux-master
arch/alpha/kernel/traps.c
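Note on the traps.c record above: the exponent remapping done by s_mem_to_reg()/s_reg_to_mem() is the same widening an S-float undergoes when loaded into an FP register, which is why ldt/stt can stand in for lds/sts fixups. Below is a minimal user-space sketch (my own illustration, not part of the kernel file; the test value is arbitrary) that mirrors the two helpers and, assuming an IEEE host, checks that the register image equals the bit pattern of the widened double and that the round trip is exact.

/*
 * Stand-alone sketch, not kernel code: mirrors s_mem_to_reg() and
 * s_reg_to_mem() from traps.c and checks two properties on an IEEE host:
 * the 64-bit register image equals the bit pattern of the widened double,
 * and the memory -> register -> memory round trip is exact.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t s_mem_to_reg(uint64_t s_mem)
{
	uint64_t frac    = (s_mem >>  0) & 0x7fffff;
	uint64_t sign    = (s_mem >> 31) & 0x1;
	uint64_t exp_msb = (s_mem >> 30) & 0x1;
	uint64_t exp_low = (s_mem >> 23) & 0x7f;
	uint64_t exp     = (exp_msb << 10) | exp_low;

	if (exp_msb) {
		if (exp_low == 0x7f)
			exp = 0x7ff;		/* Inf/NaN keeps an all-ones exponent */
	} else {
		if (exp_low == 0x00)
			exp = 0x000;		/* zero/denormal */
		else
			exp |= 0x7 << 7;	/* rebias: 127 -> 1023 */
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}

static uint64_t s_reg_to_mem(uint64_t s_reg)
{
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}

int main(void)
{
	float f = -1.75f;
	double d = (double)f;
	uint32_t mem;
	uint64_t reg, dbits;

	memcpy(&mem, &f, sizeof(mem));
	memcpy(&dbits, &d, sizeof(dbits));

	reg = s_mem_to_reg(mem);
	assert(reg == dbits);			/* remapped exponent == float->double widening */
	assert(s_reg_to_mem(reg) == mem);	/* round trip loses nothing */
	printf("mem %08x -> reg %016llx\n", (unsigned)mem, (unsigned long long)reg);
	return 0;
}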
// SPDX-License-Identifier: GPL-2.0
/*
 * Generate definitions needed by assembly language modules.
 * This code generates raw asm output which is post-processed to extract
 * and format the required data.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/kbuild.h>
#include <asm/io.h>

void foo(void)
{
	DEFINE(TI_TASK, offsetof(struct thread_info, task));
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
	DEFINE(TI_FP, offsetof(struct thread_info, fp));
	DEFINE(TI_STATUS, offsetof(struct thread_info, status));
	BLANK();

	DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
	DEFINE(TASK_CRED, offsetof(struct task_struct, cred));
	DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
	DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader));
	DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
	BLANK();

	DEFINE(CRED_UID, offsetof(struct cred, uid));
	DEFINE(CRED_EUID, offsetof(struct cred, euid));
	DEFINE(CRED_GID, offsetof(struct cred, gid));
	DEFINE(CRED_EGID, offsetof(struct cred, egid));
	BLANK();

	DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
	DEFINE(PT_PTRACED, PT_PTRACED);
	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
	DEFINE(SIGCHLD, SIGCHLD);
	BLANK();

	DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
	DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
}
linux-master
arch/alpha/kernel/asm-offsets.c
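Note on the asm-offsets.c record above: the file is only ever compiled with -S; the DEFINE()/BLANK() macros from <linux/kbuild.h> plant "->NAME value" strings in the assembler output, and a kbuild sed script rewrites those lines into a header of #define constants usable from assembly. A reduced sketch of the trick follows (my own example; the exact marker format of the real DEFINE() is an assumption for illustration). Compile it with "gcc -S demo.c" and grep the .s file for "->"; never link or run it.

/*
 * Reduced sketch of the asm-offsets trick.  Compile with -S only: the
 * .ascii directives land inside foo()'s text, so the object must never
 * actually be executed.
 */
#include <stddef.h>

struct thread_info_demo {
	unsigned long flags;
	int cpu;
};

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

void foo(void)
{
	DEFINE(TI_DEMO_FLAGS, offsetof(struct thread_info_demo, flags));
	DEFINE(TI_DEMO_CPU,   offsetof(struct thread_info_demo, cpu));
}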
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/pci.c * * Extruded from code written by * Dave Rusling ([email protected]) * David Mosberger ([email protected]) */ /* 2.3.x PCI/resources, 1999 Andrea Arcangeli <[email protected]> */ /* * Nov 2000, Ivan Kokshaysky <[email protected]> * PCI-PCI bridges cleanup */ #include <linux/string.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/module.h> #include <linux/cache.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <asm/machvec.h> #include "proto.h" #include "pci_impl.h" /* * Some string constants used by the various core logics. */ const char *const pci_io_names[] = { "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3", "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7" }; const char *const pci_mem_names[] = { "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3", "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7" }; const char pci_hae0_name[] = "HAE0"; /* * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource * assignments. */ /* * The PCI controller list. */ struct pci_controller *hose_head, **hose_tail = &hose_head; struct pci_controller *pci_isa_hose; /* * Quirks. */ static void quirk_isa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_ISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); static void quirk_cypress(struct pci_dev *dev) { /* The Notorious Cy82C693 chip. */ /* The generic legacy mode IDE fixup in drivers/pci/probe.c doesn't work correctly with the Cypress IDE controller as it has non-standard register layout. Fix that. */ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) { dev->resource[2].start = dev->resource[3].start = 0; dev->resource[2].end = dev->resource[3].end = 0; dev->resource[2].flags = dev->resource[3].flags = 0; if (PCI_FUNC(dev->devfn) == 2) { dev->resource[0].start = 0x170; dev->resource[0].end = 0x177; dev->resource[1].start = 0x376; dev->resource[1].end = 0x376; } } /* The Cypress bridge responds on the PCI bus in the address range 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no way to turn this off. The bridge also supports several extended BIOS ranges (disabled after power-up), and some consoles do turn them on. So if we use a large direct-map window, or a large SG window, we must avoid the entire 0xfff00000-0xffffffff region. */ if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) { if (__direct_map_base + __direct_map_size >= 0xfff00000UL) __direct_map_size = 0xfff00000UL - __direct_map_base; else { struct pci_controller *hose = dev->sysdata; struct pci_iommu_arena *pci = hose->sg_pci; if (pci && pci->dma_base + pci->size >= 0xfff00000UL) pci->size = 0xfff00000UL - pci->dma_base; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress); /* Called for each device after PCI setup is done. 
*/ static void pcibios_fixup_final(struct pci_dev *dev) { unsigned int class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) { dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1; isa_bridge = dev; } } DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); /* Just declaring that the power-of-ten prefixes are actually the power-of-two ones doesn't make it true :) */ #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; struct pci_controller *hose = dev->sysdata; unsigned long alignto; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { /* Make sure we start at our min on all hoses */ if (start - hose->io_space->start < PCIBIOS_MIN_IO) start = PCIBIOS_MIN_IO + hose->io_space->start; /* * Put everything into 0x00-0xff region modulo 0x400 */ if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) start = PCIBIOS_MIN_MEM + hose->mem_space->start; /* * The following holds at least for the Low Cost * Alpha implementation of the PCI interface: * * In sparse memory address space, the first * octant (16MB) of every 128MB segment is * aliased to the very first 16 MB of the * address space (i.e., it aliases the ISA * memory address space). Thus, we try to * avoid allocating PCI devices in that range. * Can be allocated in 2nd-7th octant only. * Devices that need more than 112MB of * address space must be accessed through * dense memory space only! */ /* Align to multiple of size of minimum base. */ alignto = max_t(resource_size_t, 0x1000, align); start = ALIGN(start, alignto); if (hose->sparse_mem_base && size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { start &= ~(128*MB - 1); start += 16*MB; start = ALIGN(start, alignto); } if (start/(128*MB) != (start + size - 1)/(128*MB)) { start &= ~(128*MB - 1); start += (128 + 16)*MB; start = ALIGN(start, alignto); } } } return start; } #undef KB #undef MB #undef GB static int __init pcibios_init(void) { if (alpha_mv.init_pci) alpha_mv.init_pci(); return 0; } subsys_initcall(pcibios_init); #ifdef ALPHA_RESTORE_SRM_SETUP /* Store PCI device configuration left by SRM here. */ struct pdev_srm_saved_conf { struct pdev_srm_saved_conf *next; struct pci_dev *dev; }; static struct pdev_srm_saved_conf *srm_saved_configs; static void pdev_save_srm_config(struct pci_dev *dev) { struct pdev_srm_saved_conf *tmp; static int printed = 0; if (!alpha_using_srm || pci_has_flag(PCI_PROBE_ONLY)) return; if (!printed) { printk(KERN_INFO "pci: enabling save/restore of SRM state\n"); printed = 1; } tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) { printk(KERN_ERR "%s: kmalloc() failed!\n", __func__); return; } tmp->next = srm_saved_configs; tmp->dev = dev; pci_save_state(dev); srm_saved_configs = tmp; } void pci_restore_srm_config(void) { struct pdev_srm_saved_conf *tmp; /* No need to restore if probed only. */ if (pci_has_flag(PCI_PROBE_ONLY)) return; /* Restore SRM config. 
*/ for (tmp = srm_saved_configs; tmp; tmp = tmp->next) { pci_restore_state(tmp->dev); } } #else #define pdev_save_srm_config(dev) do {} while (0) #endif void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev = bus->self; if (pci_has_flag(PCI_PROBE_ONLY) && dev && (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { pci_read_bridge_bases(bus); } list_for_each_entry(dev, &bus->devices, bus_list) { pdev_save_srm_config(dev); } } /* * If we set up a device for bus mastering, we need to check the latency * timer as certain firmware forgets to set it properly, as seen * on SX164 and LX164 with SRM. */ void pcibios_set_master(struct pci_dev *dev) { u8 lat; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat >= 16) return; printk("PCI: Setting latency timer of device %s to 64\n", pci_name(dev)); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); } void __init pcibios_claim_one_bus(struct pci_bus *b) { struct pci_dev *dev; struct pci_bus *child_bus; list_for_each_entry(dev, &b->devices, bus_list) { struct resource *r; int i; pci_dev_for_each_resource(dev, r, i) { if (r->parent || !r->start || !r->flags) continue; if (pci_has_flag(PCI_PROBE_ONLY) || (r->flags & IORESOURCE_PCI_FIXED)) { if (pci_claim_resource(dev, i) == 0) continue; pci_claim_bridge_resource(dev, i); } } } list_for_each_entry(child_bus, &b->children, node) pcibios_claim_one_bus(child_bus); } static void __init pcibios_claim_console_setup(void) { struct pci_bus *b; list_for_each_entry(b, &pci_root_buses, node) pcibios_claim_one_bus(b); } void __init common_init_pci(void) { struct pci_controller *hose; struct list_head resources; struct pci_host_bridge *bridge; struct pci_bus *bus; int ret, next_busno; int need_domain_info = 0; u32 pci_mem_end; u32 sg_base; unsigned long end; /* Scan all of the recorded PCI controllers. */ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0; /* Adjust hose mem_space limit to prevent PCI allocations in the iommu windows. */ pci_mem_end = min((u32)__direct_map_base, sg_base) - 1; end = hose->mem_space->start + pci_mem_end; if (hose->mem_space->end > end) hose->mem_space->end = end; INIT_LIST_HEAD(&resources); pci_add_resource_offset(&resources, hose->io_space, hose->io_space->start); pci_add_resource_offset(&resources, hose->mem_space, hose->mem_space->start); bridge = pci_alloc_host_bridge(0); if (!bridge) continue; list_splice_init(&resources, &bridge->windows); bridge->dev.parent = NULL; bridge->sysdata = hose; bridge->busnr = next_busno; bridge->ops = alpha_mv.pci_ops; bridge->swizzle_irq = alpha_mv.pci_swizzle; bridge->map_irq = alpha_mv.pci_map_irq; ret = pci_scan_root_bus_bridge(bridge); if (ret) { pci_free_host_bridge(bridge); continue; } bus = hose->bus = bridge->bus; hose->need_domain_info = need_domain_info; next_busno = bus->busn_res.end + 1; /* Don't allow 8-bit bus number overflow inside the hose - reserve some space for bridges. 
*/ if (next_busno > 224) { next_busno = 0; need_domain_info = 1; } } pcibios_claim_console_setup(); pci_assign_unassigned_resources(); for (hose = hose_head; hose; hose = hose->next) { bus = hose->bus; if (bus) pci_bus_add_devices(bus); } } struct pci_controller * __init alloc_pci_controller(void) { struct pci_controller *hose; hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); if (!hose) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; return hose; } struct resource * __init alloc_resource(void) { void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); if (!ptr) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(struct resource)); return ptr; } /* Provide information on locations of various I/O regions in physical memory. Do this on a per-card basis so that we choose the right hose. */ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, unsigned long, dfn) { struct pci_controller *hose; struct pci_dev *dev; /* from hose or from bus.devfn */ if (which & IOBASE_FROM_HOSE) { for(hose = hose_head; hose; hose = hose->next) if (hose->index == bus) break; if (!hose) return -ENODEV; } else { /* Special hook for ISA access. */ if (bus == 0 && dfn == 0) { hose = pci_isa_hose; } else { dev = pci_get_domain_bus_and_slot(0, bus, dfn); if (!dev) return -ENODEV; hose = dev->sysdata; pci_dev_put(dev); } } switch (which & ~IOBASE_FROM_HOSE) { case IOBASE_HOSE: return hose->index; case IOBASE_SPARSE_MEM: return hose->sparse_mem_base; case IOBASE_DENSE_MEM: return hose->dense_mem_base; case IOBASE_SPARSE_IO: return hose->sparse_io_base; case IOBASE_DENSE_IO: return hose->dense_io_base; case IOBASE_ROOT_BUS: return hose->bus->number; } return -EOPNOTSUPP; } /* Destroy an __iomem token. Not copied from lib/iomap.c. */ void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { if (__is_mmio(addr)) iounmap(addr); } EXPORT_SYMBOL(pci_iounmap); /* FIXME: Some boxes have multiple ISA bridges! */ struct pci_dev *isa_bridge; EXPORT_SYMBOL(isa_bridge);
linux-master
arch/alpha/kernel/pci.c
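Note on the pci.c record above: the long comment in pcibios_align_resource() describes two placement rules for sparse PCI memory - a window of at most 112MB must not start in the first 16MB octant of a 128MB segment (that octant aliases ISA memory) and must not straddle a 128MB segment boundary. A small stand-alone sketch of those two fixups (the helper names and the sample BAR are mine, not from the kernel):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define KB 1024ULL
#define MB (1024 * KB)
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Mirror of the two sparse-memory fixups in pcibios_align_resource(). */
static uint64_t sparse_fixup(uint64_t start, uint64_t size, uint64_t alignto)
{
	if (size > 7 * 16 * MB)
		return start;	/* too big for sparse space; dense only */

	/* Rule 1: never start in the first 16MB octant of a 128MB segment. */
	if (((start / (16 * MB)) & 0x7) == 0) {
		start &= ~(128 * MB - 1);
		start += 16 * MB;
		start = ALIGN_UP(start, alignto);
	}
	/* Rule 2: never cross a 128MB segment boundary. */
	if (start / (128 * MB) != (start + size - 1) / (128 * MB)) {
		start &= ~(128 * MB - 1);
		start += (128 + 16) * MB;
		start = ALIGN_UP(start, alignto);
	}
	return start;
}

int main(void)
{
	/* A 32MB BAR proposed at 120MB would spill across the 128MB line;
	   the fixup pushes it to 144MB: second segment, past the alias. */
	uint64_t s = sparse_fixup(120 * MB, 32 * MB, 0x1000);

	printf("relocated to %lluMB\n", (unsigned long long)(s / MB));
	assert(s == 144 * MB);
	assert((s / (16 * MB)) % 8 != 0);	/* not in an aliased octant */
	return 0;
}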
// SPDX-License-Identifier: GPL-2.0 #include <linux/ioport.h> #include <asm/io.h> #include "pc873xx.h" static unsigned pc873xx_probelist[] = {0x398, 0x26e, 0}; static char *pc873xx_names[] = { "PC87303", "PC87306", "PC87312", "PC87332", "PC87334" }; static unsigned int base, model; unsigned int __init pc873xx_get_base(void) { return base; } char *__init pc873xx_get_model(void) { return pc873xx_names[model]; } static unsigned char __init pc873xx_read(unsigned int base, int reg) { outb(reg, base); return inb(base + 1); } static void __init pc873xx_write(unsigned int base, int reg, unsigned char data) { unsigned long flags; local_irq_save(flags); outb(reg, base); outb(data, base + 1); outb(data, base + 1); /* Must be written twice */ local_irq_restore(flags); } int __init pc873xx_probe(void) { int val, index = 0; while ((base = pc873xx_probelist[index++])) { if (request_region(base, 2, "Super IO PC873xx") == NULL) continue; val = pc873xx_read(base, REG_SID); if ((val & 0xf0) == 0x10) { model = PC87332; break; } else if ((val & 0xf8) == 0x70) { model = PC87306; break; } else if ((val & 0xf8) == 0x50) { model = PC87334; break; } else if ((val & 0xf8) == 0x40) { model = PC87303; break; } release_region(base, 2); } return (base == 0) ? -1 : 1; } void __init pc873xx_enable_epp19(void) { unsigned char data; printk(KERN_INFO "PC873xx enabling EPP v1.9\n"); data = pc873xx_read(base, REG_PCR); pc873xx_write(base, REG_PCR, (data & 0xFC) | 0x02); } void __init pc873xx_enable_ide(void) { unsigned char data; printk(KERN_INFO "PC873xx enabling IDE interrupt\n"); data = pc873xx_read(base, REG_FER); pc873xx_write(base, REG_FER, data | 0x40); }
linux-master
arch/alpha/kernel/pc873xx.c
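Note on the pc873xx.c record above: pc873xx_probe() identifies the Super I/O chip by reading its SID register through the index/data port pair and masking the revision bits. A hypothetical user-space rendering of that decode follows; the enum values are assumptions inferred from the order of pc873xx_names[], and the real constants live in pc873xx.h, which is not shown here.

#include <stdio.h>

/* Assumed to mirror the order of pc873xx_names[] in the driver. */
enum { PC87303, PC87306, PC87312, PC87332, PC87334, PC873_UNKNOWN };

static const char *names[] = {
	"PC87303", "PC87306", "PC87312", "PC87332", "PC87334", "unknown"
};

/* Same mask/compare sequence as pc873xx_probe(). */
static int decode_sid(unsigned char sid)
{
	if ((sid & 0xf0) == 0x10) return PC87332;
	if ((sid & 0xf8) == 0x70) return PC87306;
	if ((sid & 0xf8) == 0x50) return PC87334;
	if ((sid & 0xf8) == 0x40) return PC87303;
	return PC873_UNKNOWN;
}

int main(void)
{
	unsigned char samples[] = { 0x1c, 0x73, 0x51, 0x42, 0xff };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("SID 0x%02x -> %s\n", samples[i],
		       names[decode_sid(samples[i])]);
	return 0;
}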
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_mcpcia.c * * Based on code written by David A Rusling ([email protected]). * * Code common to all MCbus-PCI Adaptor core logic chipsets */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_mcpcia.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/ptrace.h> #include "proto.h" #include "pci_impl.h" /* * NOTE: Herein lie back-to-back mb instructions. They are magic. * One plausible explanation is that the i/o controller does not properly * handle the system transaction. Another involves timing. Ho hum. */ /* * BIOS32-style PCI interface: */ #define DEBUG_CFG 0 #if DEBUG_CFG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the MCPCIA_HAXR2 register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. * 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static unsigned int conf_read(unsigned long addr, unsigned char type1, struct pci_controller *hose) { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); unsigned int stat0, value, cpu; cpu = smp_processor_id(); local_irq_save(flags); DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n", addr, type1, mid)); /* Reset status register to avoid losing errors. */ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); mb(); draina(); mcheck_expected(cpu) = 1; mcheck_taken(cpu) = 0; mcheck_extra(cpu) = mid; mb(); /* Access configuration space. 
*/ value = *((vuip)addr); mb(); mb(); /* magic */ if (mcheck_taken(cpu)) { mcheck_taken(cpu) = 0; value = 0xffffffffU; mb(); } mcheck_expected(cpu) = 0; mb(); DBG_CFG(("conf_read(): finished\n")); local_irq_restore(flags); return value; } static void conf_write(unsigned long addr, unsigned int value, unsigned char type1, struct pci_controller *hose) { unsigned long flags; unsigned long mid = MCPCIA_HOSE2MID(hose->index); unsigned int stat0, cpu; cpu = smp_processor_id(); local_irq_save(flags); /* avoid getting hit by machine check */ /* Reset status register to avoid losing errors. */ stat0 = *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); *(vuip)MCPCIA_CAP_ERR(mid); DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); draina(); mcheck_expected(cpu) = 1; mcheck_extra(cpu) = mid; mb(); /* Access configuration space. */ *((vuip)addr) = value; mb(); mb(); /* magic */ *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ mcheck_expected(cpu) = 0; mb(); DBG_CFG(("conf_write(): finished\n")); local_irq_restore(flags); } static int mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where, struct pci_controller *hose, unsigned long *pci_addr, unsigned char *type1) { u8 bus = pbus->number; unsigned long addr; DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x," " pci_addr=0x%p, type1=0x%p)\n", bus, devfn, hose->index, where, pci_addr, type1)); /* Type 1 configuration cycle for *ALL* busses. */ *type1 = 1; if (!pbus->parent) /* No parent means peer PCI bus. */ bus = 0; addr = (bus << 16) | (devfn << 8) | (where); addr <<= 5; /* swizzle for SPARSE */ addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { struct pci_controller *hose = bus->sysdata; unsigned long addr, w; unsigned char type1; if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; addr |= (size - 1) * 8; w = conf_read(addr, type1, hose); switch (size) { case 1: *value = __kernel_extbl(w, where & 3); break; case 2: *value = __kernel_extwl(w, where & 3); break; case 4: *value = w; break; } return PCIBIOS_SUCCESSFUL; } static int mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct pci_controller *hose = bus->sysdata; unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; addr |= (size - 1) * 8; value = __kernel_insql(value, where & 3); conf_write(addr, value, type1, hose); return PCIBIOS_SUCCESSFUL; } struct pci_ops mcpcia_pci_ops = { .read = mcpcia_read_config, .write = mcpcia_write_config, }; void mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0; mb(); } static int __init mcpcia_probe_hose(int h) { int cpu = smp_processor_id(); int mid = MCPCIA_HOSE2MID(h); unsigned int pci_rev; /* Gotta be REAL careful. If hose is absent, we get an mcheck. */ mb(); mb(); draina(); wrmces(7); mcheck_expected(cpu) = 2; /* indicates probing */ mcheck_taken(cpu) = 0; mcheck_extra(cpu) = mid; mb(); /* Access the bus revision word. 
*/ pci_rev = *(vuip)MCPCIA_REV(mid); mb(); mb(); /* magic */ if (mcheck_taken(cpu)) { mcheck_taken(cpu) = 0; pci_rev = 0xffffffff; mb(); } mcheck_expected(cpu) = 0; mb(); return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST; } static void __init mcpcia_new_hose(int h) { struct pci_controller *hose; struct resource *io, *mem, *hae_mem; int mid = MCPCIA_HOSE2MID(h); hose = alloc_pci_controller(); if (h == 0) pci_isa_hose = hose; io = alloc_resource(); mem = alloc_resource(); hae_mem = alloc_resource(); hose->io_space = io; hose->mem_space = hae_mem; hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR; hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR; hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR; hose->dense_io_base = 0; hose->config_space_base = MCPCIA_CONF(mid); hose->index = h; io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS; io->end = io->start + 0xffff; io->name = pci_io_names[h]; io->flags = IORESOURCE_IO; mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS; mem->end = mem->start + 0xffffffff; mem->name = pci_mem_names[h]; mem->flags = IORESOURCE_MEM; hae_mem->start = mem->start; hae_mem->end = mem->start + MCPCIA_MEM_MASK; hae_mem->name = pci_hae0_name; hae_mem->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, io) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", h); if (request_resource(&iomem_resource, mem) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", h); if (request_resource(mem, hae_mem) < 0) printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h); } static void mcpcia_pci_clr_err(int mid) { *(vuip)MCPCIA_CAP_ERR(mid); *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */ mb(); *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */ } static void __init mcpcia_startup_hose(struct pci_controller *hose) { int mid = MCPCIA_HOSE2MID(hose->index); unsigned int tmp; mcpcia_pci_clr_err(mid); /* * Set up error reporting. */ tmp = *(vuip)MCPCIA_CAP_ERR(mid); tmp |= 0x0006; /* master/target abort */ *(vuip)MCPCIA_CAP_ERR(mid) = tmp; mb(); tmp = *(vuip)MCPCIA_CAP_ERR(mid); /* * Set up the PCI->physical memory translation windows. * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci) * Window 2 is direct access 2GB at 2GB */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); hose->sg_pci = iommu_arena_new(hose, 0x40000000, size_for_memory(0x40000000), SMP_CACHE_BYTES); __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3; *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000; *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8; *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3; *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000; *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8; *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1; *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000; *(vuip)MCPCIA_T2_BASE(mid) = 0; *(vuip)MCPCIA_W3_BASE(mid) = 0x0; mcpcia_pci_tbi(hose, 0, -1); *(vuip)MCPCIA_HBASE(mid) = 0x0; mb(); *(vuip)MCPCIA_HAE_MEM(mid) = 0U; mb(); *(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */ *(vuip)MCPCIA_HAE_IO(mid) = 0; mb(); *(vuip)MCPCIA_HAE_IO(mid); /* read it back. */ } void __init mcpcia_init_arch(void) { /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* Allocate hose 0. 
That's the one that all the ISA junk hangs off of, from which we'll be registering stuff here in a bit. Other hose detection is done in mcpcia_init_hoses, which is called from init_IRQ. */ mcpcia_new_hose(0); } /* This is called from init_IRQ, since we cannot take interrupts before then. Which means we cannot do this in init_arch. */ void __init mcpcia_init_hoses(void) { struct pci_controller *hose; int hose_count; int h; /* First, find how many hoses we have. */ hose_count = 0; for (h = 0; h < MCPCIA_MAX_HOSES; ++h) { if (mcpcia_probe_hose(h)) { if (h != 0) mcpcia_new_hose(h); hose_count++; } } printk("mcpcia_init_hoses: found %d hoses\n", hose_count); /* Now do init for each hose. */ for (hose = hose_head; hose; hose = hose->next) mcpcia_startup_hose(hose); } static void mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout) { struct el_common_EV5_uncorrectable_mcheck *frame; int i; frame = &logout->procdata; /* Print PAL fields */ for (i = 0; i < 24; i += 2) { printk(" paltmp[%d-%d] = %16lx %16lx\n", i, i+1, frame->paltemp[i], frame->paltemp[i+1]); } for (i = 0; i < 8; i += 2) { printk(" shadow[%d-%d] = %16lx %16lx\n", i, i+1, frame->shadow[i], frame->shadow[i+1]); } printk(" Addr of excepting instruction = %16lx\n", frame->exc_addr); printk(" Summary of arithmetic traps = %16lx\n", frame->exc_sum); printk(" Exception mask = %16lx\n", frame->exc_mask); printk(" Base address for PALcode = %16lx\n", frame->pal_base); printk(" Interrupt Status Reg = %16lx\n", frame->isr); printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n", frame->icsr); printk(" I-CACHE Reg %s parity error = %16lx\n", (frame->ic_perr_stat & 0x800L) ? "Data" : "Tag", frame->ic_perr_stat); printk(" D-CACHE error Reg = %16lx\n", frame->dc_perr_stat); if (frame->dc_perr_stat & 0x2) { switch (frame->dc_perr_stat & 0x03c) { case 8: printk(" Data error in bank 1\n"); break; case 4: printk(" Data error in bank 0\n"); break; case 20: printk(" Tag error in bank 1\n"); break; case 10: printk(" Tag error in bank 0\n"); break; } } printk(" Effective VA = %16lx\n", frame->va); printk(" Reason for D-stream = %16lx\n", frame->mm_stat); printk(" EV5 SCache address = %16lx\n", frame->sc_addr); printk(" EV5 SCache TAG/Data parity = %16lx\n", frame->sc_stat); printk(" EV5 BC_TAG_ADDR = %16lx\n", frame->bc_tag_addr); printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n", frame->ei_addr); printk(" Fill Syndrome = %16lx\n", frame->fill_syndrome); printk(" EI_STAT reg = %16lx\n", frame->ei_stat); printk(" LD_LOCK = %16lx\n", frame->ld_lock); } static void mcpcia_print_system_area(unsigned long la_ptr) { struct el_common *frame; struct pci_controller *hose; struct IOD_subpacket { unsigned long base; unsigned int whoami; unsigned int rsvd1; unsigned int pci_rev; unsigned int cap_ctrl; unsigned int hae_mem; unsigned int hae_io; unsigned int int_ctl; unsigned int int_reg; unsigned int int_mask0; unsigned int int_mask1; unsigned int mc_err0; unsigned int mc_err1; unsigned int cap_err; unsigned int rsvd2; unsigned int pci_err1; unsigned int mdpa_stat; unsigned int mdpa_syn; unsigned int mdpb_stat; unsigned int mdpb_syn; unsigned int rsvd3; unsigned int rsvd4; unsigned int rsvd5; } *iodpp; frame = (struct el_common *)la_ptr; iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset); for (hose = hose_head; hose; hose = hose->next, iodpp++) { printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n", hose->index, iodpp->base); printk(" WHOAMI = %8x\n", iodpp->whoami); printk(" PCI_REV = %8x\n", iodpp->pci_rev); printk(" CAP_CTRL 
= %8x\n", iodpp->cap_ctrl); printk(" HAE_MEM = %8x\n", iodpp->hae_mem); printk(" HAE_IO = %8x\n", iodpp->hae_io); printk(" INT_CTL = %8x\n", iodpp->int_ctl); printk(" INT_REG = %8x\n", iodpp->int_reg); printk(" INT_MASK0 = %8x\n", iodpp->int_mask0); printk(" INT_MASK1 = %8x\n", iodpp->int_mask1); printk(" MC_ERR0 = %8x\n", iodpp->mc_err0); printk(" MC_ERR1 = %8x\n", iodpp->mc_err1); printk(" CAP_ERR = %8x\n", iodpp->cap_err); printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1); printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat); printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn); printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat); printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn); } } void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; unsigned int cpu = smp_processor_id(); int expected; mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; expected = mcheck_expected(cpu); mb(); mb(); /* magic */ draina(); switch (expected) { case 0: { /* FIXME: how do we figure out which hose the error was on? */ struct pci_controller *hose; for (hose = hose_head; hose; hose = hose->next) mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index)); break; } case 1: mcpcia_pci_clr_err(mcheck_extra(cpu)); break; default: /* Otherwise, we're being called from mcpcia_probe_hose and there's no hose clear an error from. */ break; } wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "MCPCIA", expected != 0); if (!expected && vector != 0x620 && vector != 0x630) { mcpcia_print_uncorrectable(mchk_logout); mcpcia_print_system_area(la_ptr); } }
linux-master
arch/alpha/kernel/core_mcpcia.c
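Note on the core_mcpcia.c record above: mk_conf_addr() packs the (bus, devfn, where) triple from the Type 1 diagram, shifts it left 5 bits for sparse space, and mcpcia_read_config() then encodes the transfer size as (size - 1) * 8 in the low bits. A small sketch of that encoding (my own helper; the per-hose config_space_base and the machine-check handling around the actual access are left out):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the MCPCIA sparse config-space offset built by
   mk_conf_addr() + mcpcia_read_config(). */
static uint64_t mcpcia_sparse_off(unsigned bus, unsigned devfn,
				  unsigned where, unsigned size)
{
	uint64_t addr = ((uint64_t)bus << 16) | (devfn << 8) | where;

	addr <<= 5;			/* swizzle for SPARSE space */
	addr |= (size - 1) * 8;		/* 1/2/4-byte cycle encoding */
	return addr;
}

int main(void)
{
	/* 16-bit read of register 0x04 on bus 1, device 2, function 0. */
	unsigned devfn = (2 << 3) | 0;	/* PCI_DEVFN(2, 0) */
	uint64_t off = mcpcia_sparse_off(1, devfn, 0x04, 2);

	printf("sparse offset = 0x%llx\n", (unsigned long long)off);
	/* The longword that comes back still has to be picked apart with
	   extwl(value, where & 3), exactly as mcpcia_read_config() does. */
	return 0;
}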
// SPDX-License-Identifier: GPL-2.0-or-later /* Kernel module help for Alpha. Copyright (C) 2002 Richard Henderson. */ #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/slab.h> #if 0 #define DEBUGP printk #else #define DEBUGP(fmt...) #endif /* Allocate the GOT at the end of the core sections. */ struct got_entry { struct got_entry *next; Elf64_Sxword r_addend; int got_offset; }; static inline void process_reloc_for_got(Elf64_Rela *rela, struct got_entry *chains, Elf64_Xword *poffset) { unsigned long r_sym = ELF64_R_SYM (rela->r_info); unsigned long r_type = ELF64_R_TYPE (rela->r_info); Elf64_Sxword r_addend = rela->r_addend; struct got_entry *g; if (r_type != R_ALPHA_LITERAL) return; for (g = chains + r_sym; g ; g = g->next) if (g->r_addend == r_addend) { if (g->got_offset == 0) { g->got_offset = *poffset; *poffset += 8; } goto found_entry; } g = kmalloc (sizeof (*g), GFP_KERNEL); g->next = chains[r_sym].next; g->r_addend = r_addend; g->got_offset = *poffset; *poffset += 8; chains[r_sym].next = g; found_entry: /* Trick: most of the ELF64_R_TYPE field is unused. There are 42 valid relocation types, and a 32-bit field. Co-opt the bits above 256 to store the got offset for this reloc. */ rela->r_info |= g->got_offset << 8; } int module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, char *secstrings, struct module *me) { struct got_entry *chains; Elf64_Rela *rela; Elf64_Shdr *esechdrs, *symtab, *s, *got; unsigned long nsyms, nrela, i; esechdrs = sechdrs + hdr->e_shnum; symtab = got = NULL; /* Find out how large the symbol table is. Allocate one got_entry head per symbol. Normally this will be enough, but not always. We'll chain different offsets for the symbol down each head. */ for (s = sechdrs; s < esechdrs; ++s) if (s->sh_type == SHT_SYMTAB) symtab = s; else if (!strcmp(".got", secstrings + s->sh_name)) { got = s; me->arch.gotsecindex = s - sechdrs; } if (!symtab) { printk(KERN_ERR "module %s: no symbol table\n", me->name); return -ENOEXEC; } if (!got) { printk(KERN_ERR "module %s: no got section\n", me->name); return -ENOEXEC; } nsyms = symtab->sh_size / sizeof(Elf64_Sym); chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL); if (!chains) { printk(KERN_ERR "module %s: no memory for symbol chain buffer\n", me->name); return -ENOMEM; } got->sh_size = 0; got->sh_addralign = 8; got->sh_type = SHT_NOBITS; /* Examine all LITERAL relocations to find out what GOT entries are required. This sizes the GOT section as well. */ for (s = sechdrs; s < esechdrs; ++s) if (s->sh_type == SHT_RELA) { nrela = s->sh_size / sizeof(Elf64_Rela); rela = (void *)hdr + s->sh_offset; for (i = 0; i < nrela; ++i) process_reloc_for_got(rela+i, chains, &got->sh_size); } /* Free the memory we allocated. 
*/ for (i = 0; i < nsyms; ++i) { struct got_entry *g, *n; for (g = chains[i].next; g ; g = n) { n = g->next; kfree(g); } } kfree(chains); return 0; } int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); Elf64_Sym *symtab, *sym; void *base, *location; unsigned long got, gp; DEBUGP("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; got = sechdrs[me->arch.gotsecindex].sh_addr; gp = got + 0x8000; for (i = 0; i < n; i++) { unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); unsigned long r_type = ELF64_R_TYPE (rela[i].r_info); unsigned long r_got_offset = r_type >> 8; unsigned long value, hi, lo; r_type &= 0xff; /* This is where to make the change. */ location = base + rela[i].r_offset; /* This is the symbol it is referring to. Note that all unresolved symbols have been resolved. */ sym = symtab + r_sym; value = sym->st_value + rela[i].r_addend; switch (r_type) { case R_ALPHA_NONE: break; case R_ALPHA_REFLONG: *(u32 *)location = value; break; case R_ALPHA_REFQUAD: /* BUG() can produce misaligned relocations. */ ((u32 *)location)[0] = value; ((u32 *)location)[1] = value >> 32; break; case R_ALPHA_GPREL32: value -= gp; if ((int)value != value) goto reloc_overflow; *(u32 *)location = value; break; case R_ALPHA_LITERAL: hi = got + r_got_offset; lo = hi - gp; if ((short)lo != lo) goto reloc_overflow; *(u16 *)location = lo; *(u64 *)hi = value; break; case R_ALPHA_LITUSE: break; case R_ALPHA_GPDISP: value = gp - (u64)location; lo = (short)value; hi = (int)(value - lo); if (hi + lo != value) goto reloc_overflow; *(u16 *)location = hi >> 16; *(u16 *)(location + rela[i].r_addend) = lo; break; case R_ALPHA_BRSGP: /* BRSGP is only allowed to bind to local symbols. If the section is undef, this means that the value was resolved from somewhere else. */ if (sym->st_shndx == SHN_UNDEF) goto reloc_overflow; if ((sym->st_other & STO_ALPHA_STD_GPLOAD) == STO_ALPHA_STD_GPLOAD) /* Omit the prologue. */ value += 8; fallthrough; case R_ALPHA_BRADDR: value -= (u64)location + 4; if (value & 3) goto reloc_overflow; value = (long)value >> 2; if (value + (1<<21) >= 1<<22) goto reloc_overflow; value &= 0x1fffff; value |= *(u32 *)location & ~0x1fffff; *(u32 *)location = value; break; case R_ALPHA_HINT: break; case R_ALPHA_SREL32: value -= (u64)location; if ((int)value != value) goto reloc_overflow; *(u32 *)location = value; break; case R_ALPHA_SREL64: value -= (u64)location; *(u64 *)location = value; break; case R_ALPHA_GPRELHIGH: value = (long)(value - gp + 0x8000) >> 16; if ((short) value != value) goto reloc_overflow; *(u16 *)location = value; break; case R_ALPHA_GPRELLOW: value -= gp; *(u16 *)location = value; break; case R_ALPHA_GPREL16: value -= gp; if ((short) value != value) goto reloc_overflow; *(u16 *)location = value; break; default: printk(KERN_ERR "module %s: Unknown relocation: %lu\n", me->name, r_type); return -ENOEXEC; reloc_overflow: if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION) printk(KERN_ERR "module %s: Relocation (type %lu) overflow vs section %d\n", me->name, r_type, sym->st_shndx); else printk(KERN_ERR "module %s: Relocation (type %lu) overflow vs %s\n", me->name, r_type, strtab + sym->st_name); return -ENOEXEC; } } return 0; }
linux-master
arch/alpha/kernel/module.c
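Note on the module.c record above: the R_ALPHA_GPDISP case splits a 32-bit GP displacement into an ldah/lda pair, and because the low half is sign-extended at run time the high half must absorb the carry from a negative low half - that is what the "hi + lo != value" overflow check guards. A short stand-alone sketch of that split (helper names and the sample displacement are mine):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the hi/lo split used for R_ALPHA_GPDISP: the displacement is
   rebuilt at run time as (hi << 16) + sign_extend(lo). */
static void split_gpdisp(int64_t disp, int16_t *hi16, int16_t *lo16)
{
	int64_t lo = (int16_t)disp;		/* sign-extended low 16 bits */
	int64_t hi = (int32_t)(disp - lo);	/* compensating high part */

	assert(hi + lo == disp);	/* same overflow check as the loader */
	*lo16 = (int16_t)lo;
	*hi16 = (int16_t)(hi >> 16);
}

int main(void)
{
	int16_t hi, lo;
	int64_t disp = 0x12fff0;	/* low half 0xfff0 is negative as s16 */

	split_gpdisp(disp, &hi, &lo);
	printf("hi=0x%04x lo=0x%04x\n", (uint16_t)hi, (uint16_t)lo);

	/* ldah/lda reconstruction: (hi << 16) + sign-extended lo. */
	assert(((int64_t)hi << 16) + lo == disp);
	return 0;
}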
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_marvel.c * * Code common to all Marvel based systems. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_marvel.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/mc146818rtc.h> #include <linux/rtc.h> #include <linux/module.h> #include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/gct.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "pci_impl.h" /* * Debug helpers */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Private data */ static struct io7 *io7_head = NULL; /* * Helper functions */ static unsigned long __attribute__ ((unused)) read_ev7_csr(int pe, unsigned long offset) { ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); unsigned long q; mb(); q = ev7csr->csr; mb(); return q; } static void __attribute__ ((unused)) write_ev7_csr(int pe, unsigned long offset, unsigned long q) { ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset); mb(); ev7csr->csr = q; mb(); } static char * __init mk_resource_name(int pe, int port, char *str) { char tmp[80]; char *name; sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); if (!name) panic("%s: Failed to allocate %zu bytes\n", __func__, strlen(tmp) + 1); strcpy(name, tmp); return name; } inline struct io7 * marvel_next_io7(struct io7 *prev) { return (prev ? prev->next : io7_head); } struct io7 * marvel_find_io7(int pe) { struct io7 *io7; for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next) continue; return io7; } static struct io7 * __init alloc_io7(unsigned int pe) { struct io7 *io7; struct io7 *insp; int h; if (marvel_find_io7(pe)) { printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe); return NULL; } io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); if (!io7) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*io7)); io7->pe = pe; raw_spin_lock_init(&io7->irq_lock); for (h = 0; h < 4; h++) { io7->ports[h].io7 = io7; io7->ports[h].port = h; io7->ports[h].enabled = 0; /* default to disabled */ } /* * Insert in pe sorted order. */ if (NULL == io7_head) /* empty list */ io7_head = io7; else if (io7_head->pe > io7->pe) { /* insert at head */ io7->next = io7_head; io7_head = io7; } else { /* insert at position */ for (insp = io7_head; insp; insp = insp->next) { if (insp->pe == io7->pe) { printk(KERN_ERR "Too many IO7s at PE %d\n", io7->pe); return NULL; } if (NULL == insp->next || insp->next->pe > io7->pe) { /* insert here */ io7->next = insp->next; insp->next = io7; break; } } if (NULL == insp) { /* couldn't insert ?!? */ printk(KERN_WARNING "Failed to insert IO7 at PE %d " " - adding at head of list\n", io7->pe); io7->next = io7_head; io7_head = io7; } } return io7; } void io7_clear_errors(struct io7 *io7) { io7_port7_csrs *p7csrs; io7_ioport_csrs *csrs; int port; /* * First the IO ports. */ for (port = 0; port < 4; port++) { csrs = IO7_CSRS_KERN(io7->pe, port); csrs->POx_ERR_SUM.csr = -1UL; csrs->POx_TLB_ERR.csr = -1UL; csrs->POx_SPL_COMPLT.csr = -1UL; csrs->POx_TRANS_SUM.csr = -1UL; } /* * Then the common ones. */ p7csrs = IO7_PORT7_CSRS_KERN(io7->pe); p7csrs->PO7_ERROR_SUM.csr = -1UL; p7csrs->PO7_UNCRR_SYM.csr = -1UL; p7csrs->PO7_CRRCT_SYM.csr = -1UL; } /* * IO7 PCI, PCI/X, AGP configuration. 
*/ static void __init io7_init_hose(struct io7 *io7, int port) { static int hose_index = 0; struct pci_controller *hose = alloc_pci_controller(); struct io7_port *io7_port = &io7->ports[port]; io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port); int i; hose->index = hose_index++; /* arbitrary */ /* * We don't have an isa or legacy hose, but glibc expects to be * able to use the bus == 0 / dev == 0 form of the iobase syscall * to determine information about the i/o system. Since XFree86 * relies on glibc's determination to tell whether or not to use * sparse access, we need to point the pci_isa_hose at a real hose * so at least that determination is correct. */ if (hose->index == 0) pci_isa_hose = hose; io7_port->csrs = csrs; io7_port->hose = hose; hose->sysdata = io7_port; hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* * Base addresses for userland consumption. Since these are going * to be mapped, they are pure physical addresses. */ hose->sparse_mem_base = hose->sparse_io_base = 0; hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port); hose->dense_io_base = IO7_IO_PHYS(io7->pe, port); /* * Base addresses and resource ranges for kernel consumption. */ hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port); hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port); hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1; hose->io_space->name = mk_resource_name(io7->pe, port, "IO"); hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port); hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1; hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM"); hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", hose->index); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", hose->index); /* * Save the existing DMA window settings for later restoration. */ for (i = 0; i < 4; i++) { io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr; io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr; io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr; } /* * Set up the PCI to main memory translation windows. * * Window 0 is scatter-gather 8MB at 8MB * Window 1 is direct access 1GB at 2GB * Window 2 is scatter-gather (up-to) 1GB at 3GB * Window 3 is disabled */ /* * TBIA before modifying windows. */ marvel_pci_tbi(hose, 0, -1); /* * Set up window 0 for scatter-gather 8MB at 8MB. */ hose->sg_isa = iommu_arena_new_node(0, hose, 0x00800000, 0x00800000, 0); hose->sg_isa->align_entry = 8; /* cache line boundary */ csrs->POx_WBASE[0].csr = hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg; csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr; csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes); /* * Set up window 1 for direct-mapped 1GB at 2GB. */ csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena; csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr; csrs->POx_TBASE[1].csr = 0; /* * Set up window 2 for scatter-gather (up-to) 1GB at 3GB. */ hose->sg_pci = iommu_arena_new_node(0, hose, 0xc0000000, 0x40000000, 0); hose->sg_pci->align_entry = 8; /* cache line boundary */ csrs->POx_WBASE[2].csr = hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg; csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr; csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes); /* * Disable window 3. 
*/ csrs->POx_WBASE[3].csr = 0; /* * Make sure that the AGP Monster Window is disabled. */ csrs->POx_CTRL.csr &= ~(1UL << 61); #if 1 printk("FIXME: disabling master aborts\n"); csrs->POx_MSK_HEI.csr &= ~(3UL << 14); #endif /* * TBIA after modifying windows. */ marvel_pci_tbi(hose, 0, -1); } static void __init marvel_init_io7(struct io7 *io7) { int i; printk("Initializing IO7 at PID %d\n", io7->pe); /* * Get the Port 7 CSR pointer. */ io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe); /* * Init this IO7's hoses. */ for (i = 0; i < IO7_NUM_PORTS; i++) { io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i); if (csrs->POx_CACHE_CTL.csr == 8) { io7->ports[i].enabled = 1; io7_init_hose(io7, i); } } } void __init marvel_io7_present(gct6_node *node) { int pe; if (node->type != GCT_TYPE_HOSE || node->subtype != GCT_SUBTYPE_IO_PORT_MODULE) return; pe = (node->id >> 8) & 0xff; printk("Found an IO7 at PID %d\n", pe); alloc_io7(pe); } static void __init marvel_find_console_vga_hose(void) { #ifdef CONFIG_VGA_HOSE u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); if (pu64[7] == 3) { /* TERM_TYPE == graphics */ struct pci_controller *hose = NULL; int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */ struct io7 *io7; int pid, port; /* FIXME - encoding is going to have to change for Marvel * since hose will be able to overflow a byte... * need to fix this decode when the console * changes its encoding */ printk("console graphics is on hose %d (console)\n", h); /* * The console's hose numbering is: * * hose<n:2>: PID * hose<1:0>: PORT * * We need to find the hose at that pid and port */ pid = h >> 2; port = h & 3; if ((io7 = marvel_find_io7(pid))) hose = io7->ports[port].hose; if (hose) { printk("Console graphics on hose %d\n", hose->index); pci_vga_hose = hose; } } #endif } gct6_search_struct gct_wanted_node_list[] __initdata = { { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present }, { 0, 0, NULL } }; /* * In case the GCT is not complete, let the user specify PIDs with IO7s * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal) * where IO7s are connected */ static int __init marvel_specify_io7(char *str) { unsigned long pid; struct io7 *io7; char *pchar; do { pid = simple_strtoul(str, &pchar, 0); if (pchar != str) { printk("User-specified IO7 at PID %lu\n", pid); io7 = alloc_io7(pid); if (io7) marvel_init_io7(io7); } if (pchar == str) pchar++; str = pchar; } while(*str); return 1; } __setup("io7=", marvel_specify_io7); void __init marvel_init_arch(void) { struct io7 *io7; /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; /* PCI DMA Direct Mapping is 1GB at 2GB. */ __direct_map_base = 0x80000000; __direct_map_size = 0x40000000; /* Parse the config tree. */ gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list); /* Init the io7s. */ for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) marvel_init_io7(io7); /* Check for graphic console location (if any). 
*/ marvel_find_console_vga_hose(); } void marvel_kill_arch(int mode) { } /* * PCI Configuration Space access functions * * Configuration space addresses have the following format: * * |2 2 2 2|1 1 1 1|1 1 1 1|1 1 * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * n:24 reserved for hose base * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * IO7 determines whether to use a type 0 or type 1 config cycle * based on the bus number. Therefore the bus number must be set * to 0 for the root bus on any hose. * * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * */ static inline unsigned long build_conf_addr(struct pci_controller *hose, u8 bus, unsigned int devfn, int where) { return (hose->config_space_base | (bus << 16) | (devfn << 8) | where); } static unsigned long mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where) { struct pci_controller *hose = pbus->sysdata; struct io7_port *io7_port; unsigned long addr = 0; u8 bus = pbus->number; if (!hose) return addr; /* Check for enabled. */ io7_port = hose->sysdata; if (!io7_port->enabled) return addr; if (!pbus->parent) { /* No parent means peer PCI bus. */ /* Don't support idsel > 20 on primary bus. */ if (devfn >= PCI_DEVFN(21, 0)) return addr; bus = 0; } addr = build_conf_addr(hose, bus, devfn, where); DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return addr; } static int marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; if (0 == (addr = mk_conf_addr(bus, devfn, where))) return PCIBIOS_DEVICE_NOT_FOUND; switch(size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return PCIBIOS_SUCCESSFUL; } static int marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; if (0 == (addr = mk_conf_addr(bus, devfn, where))) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return PCIBIOS_SUCCESSFUL; } struct pci_ops marvel_pci_ops = { .read = marvel_read_config, .write = marvel_write_config, }; /* * Other PCI helper functions. 
 */
void
marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;

	wmb();
	csrs->POx_SG_TBIA.csr = 0;
	mb();
	csrs->POx_SG_TBIA.csr;
}

/*
 * RTC Support
 */
struct marvel_rtc_access_info {
	unsigned long function;
	unsigned long index;
	unsigned long data;
};

static void
__marvel_access_rtc(void *info)
{
	struct marvel_rtc_access_info *rtc_access = info;

	register unsigned long __r0 __asm__("$0");
	register unsigned long __r16 __asm__("$16") = rtc_access->function;
	register unsigned long __r17 __asm__("$17") = rtc_access->index;
	register unsigned long __r18 __asm__("$18") = rtc_access->data;

	__asm__ __volatile__(
		"call_pal %4 # cserve rtc"
		: "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
		: "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
		: "$1", "$22", "$23", "$24", "$25");

	rtc_access->data = __r0;
}

static u8
__marvel_rtc_io(u8 b, unsigned long addr, int write)
{
	static u8 index = 0;

	struct marvel_rtc_access_info rtc_access;
	u8 ret = 0;

	switch(addr) {
	case 0x70:					/* RTC_PORT(0) */
		if (write) index = b;
		ret = index;
		break;

	case 0x71:					/* RTC_PORT(1) */
		rtc_access.index = index;
		rtc_access.data = bcd2bin(b);
		rtc_access.function = 0x48 + !write;	/* GET/PUT_TOY */

		__marvel_access_rtc(&rtc_access);

		ret = bin2bcd(rtc_access.data);
		break;

	default:
		printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
		break;
	}

	return ret;
}

/*
 * IO map support.
 */
void __iomem *
marvel_ioremap(unsigned long addr, unsigned long size)
{
	struct pci_controller *hose;
	unsigned long baddr, last;
	struct vm_struct *area;
	unsigned long vaddr;
	unsigned long *ptes;
	unsigned long pfn;

	/*
	 * Adjust the address.
	 */
	FIXUP_MEMADDR_VGA(addr);

	/*
	 * Find the hose.
	 */
	for (hose = hose_head; hose; hose = hose->next) {
		if ((addr >> 32) == (hose->mem_space->start >> 32))
			break;
	}
	if (!hose)
		return NULL;

	/*
	 * We have the hose - calculate the bus limits.
	 */
	baddr = addr - hose->mem_space->start;
	last = baddr + size - 1;

	/*
	 * Is it direct-mapped?
	 */
	if ((baddr >= __direct_map_base) &&
	    ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
		addr = IDENT_ADDR | (baddr - __direct_map_base);
		return (void __iomem *) addr;
	}

	/*
	 * Check the scatter-gather arena.
	 */
	if (hose->sg_pci &&
	    baddr >= (unsigned long)hose->sg_pci->dma_base &&
	    last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {

		/*
		 * Adjust the limits (mappings must be page aligned)
		 */
		baddr -= hose->sg_pci->dma_base;
		last -= hose->sg_pci->dma_base;
		baddr &= PAGE_MASK;
		size = PAGE_ALIGN(last) - baddr;

		/*
		 * Map it.
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		ptes = hose->sg_pci->ptes;
		for (vaddr = (unsigned long)area->addr;
		     baddr <= last;
		     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
			pfn = ptes[baddr >> PAGE_SHIFT];
			if (!(pfn & 1)) {
				printk("ioremap failed... pte not valid...\n");
				vfree(area->addr);
				return NULL;
			}
			pfn >>= 1;	/* make it a true pfn */

			if (__alpha_remap_area_pages(vaddr,
						     pfn << PAGE_SHIFT,
						     PAGE_SIZE, 0)) {
				printk("FAILED to map...\n");
				vfree(area->addr);
				return NULL;
			}
		}

		flush_tlb_all();

		vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);

		return (void __iomem *) vaddr;
	}

	/* Assume it was already a reasonable address */
	vaddr = baddr + hose->mem_space->start;
	return (void __iomem *) vaddr;
}

void
marvel_iounmap(volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		vfree((void *)(PAGE_MASK & addr));
}

int
marvel_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (addr >= VMALLOC_START)
		return 1;
	else
		return (addr & 0xFF000000UL) == 0;
}

#define __marvel_is_port_kbd(a)	(((a) == 0x60) || ((a) == 0x64))
#define __marvel_is_port_rtc(a)	(((a) == 0x70) || ((a) == 0x71))

void __iomem *marvel_ioportmap (unsigned long addr)
{
	FIXUP_IOADDR_VGA(addr);
	return (void __iomem *)addr;
}

u8
marvel_ioread8(const void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (__marvel_is_port_kbd(addr))
		return 0;
	else if (__marvel_is_port_rtc(addr))
		return __marvel_rtc_io(0, addr, 0);
	else if (marvel_is_ioaddr(addr))
		return __kernel_ldbu(*(vucp)addr);
	else
		/* this should catch other legacy addresses
		   that would normally fail on MARVEL,
		   because there really is nothing there...
		*/
		return ~0;
}

void
marvel_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;

	if (__marvel_is_port_kbd(addr))
		return;
	else if (__marvel_is_port_rtc(addr))
		__marvel_rtc_io(b, addr, 1);
	else if (marvel_is_ioaddr(addr))
		__kernel_stb(b, *(vucp)addr);
}

#ifndef CONFIG_ALPHA_GENERIC
EXPORT_SYMBOL(marvel_ioremap);
EXPORT_SYMBOL(marvel_iounmap);
EXPORT_SYMBOL(marvel_is_mmio);
EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif

/*
 * AGP GART Support.
 */
#include <linux/agp_backend.h>
#include <asm/agp_backend.h>
#include <linux/slab.h>
#include <linux/delay.h>

struct marvel_agp_aperture {
	struct pci_iommu_arena *arena;
	long pg_start;
	long pg_count;
};

static int
marvel_agp_setup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper;

	if (!alpha_agpgart_size)
		return -ENOMEM;

	aper = kmalloc(sizeof(*aper), GFP_KERNEL);
	if (aper == NULL)
		return -ENOMEM;

	aper->arena = agp->hose->sg_pci;
	aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
	aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
				       aper->pg_count - 1);

	if (aper->pg_start < 0) {
		printk(KERN_ERR "Failed to reserve AGP memory\n");
		kfree(aper);
		return -ENOMEM;
	}

	agp->aperture.bus_base =
		aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
	agp->aperture.size = aper->pg_count * PAGE_SIZE;
	agp->aperture.sysdata = aper;

	return 0;
}

static void
marvel_agp_cleanup(alpha_agp_info *agp)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	int status;

	status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
	if (status == -EBUSY) {
		printk(KERN_WARNING
		       "Attempted to release bound AGP memory - unbinding\n");
		iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
		status = iommu_release(aper->arena, aper->pg_start,
				       aper->pg_count);
	}
	if (status < 0)
		printk(KERN_ERR "Failed to release AGP memory\n");

	kfree(aper);
	kfree(agp);
}

static int
marvel_agp_configure(alpha_agp_info *agp)
{
	io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
	struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
	unsigned int new_rate = 0;
	unsigned long agp_pll;

	/*
	 * Check the requested mode against the PLL setting.
	 * The agpgart_be code has not programmed the card yet,
	 * so we can still tweak mode here.
	 */
	agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
	switch(IO7_PLL_RNGB(agp_pll)) {
	case 0x4:				/* 2x only */
		/*
		 * The PLL is only programmed for 2x, so adjust the
		 * rate to 2x, if necessary.
		 */
		if (agp->mode.bits.rate != 2)
			new_rate = 2;
		break;

	case 0x6:				/* 1x / 4x */
		/*
		 * The PLL is programmed for 1x or 4x.  Don't go faster
		 * than requested, so if the requested rate is 2x, use 1x.
		 */
		if (agp->mode.bits.rate == 2)
			new_rate = 1;
		break;

	default:				/* ??????? */
		/*
		 * Don't know what this PLL setting is, take the requested
		 * rate, but warn the user.
		 */
		printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
		       __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
		break;
	}

	/*
	 * Set the new rate, if necessary.
	 */
	if (new_rate) {
		printk("Requested AGP Rate %dX not compatible "
		       "with PLL setting - using %dX\n",
		       agp->mode.bits.rate,
		       new_rate);

		agp->mode.bits.rate = new_rate;
	}

	printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
	       agp->hose->index, agp->mode.bits.rate,
	       agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);

	csrs->AGP_CMD.csr = agp->mode.lw;

	return 0;
}

static int
marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_bind(aper->arena, aper->pg_start + pg_start,
			  mem->page_count, mem->pages);
}

static int
marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	return iommu_unbind(aper->arena, aper->pg_start + pg_start,
			    mem->page_count);
}

static unsigned long
marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
{
	struct marvel_agp_aperture *aper = agp->aperture.sysdata;
	unsigned long baddr = addr - aper->arena->dma_base;
	unsigned long pte;

	if (addr < agp->aperture.bus_base ||
	    addr >= agp->aperture.bus_base + agp->aperture.size) {
		printk("%s: addr out of range\n", __func__);
		return -EINVAL;
	}

	pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
	if (!(pte & 1)) {
		printk("%s: pte not valid\n", __func__);
		return -EINVAL;
	}

	return (pte >> 1) << PAGE_SHIFT;
}

struct alpha_agp_ops marvel_agp_ops =
{
	.setup		= marvel_agp_setup,
	.cleanup	= marvel_agp_cleanup,
	.configure	= marvel_agp_configure,
	.bind		= marvel_agp_bind_memory,
	.unbind		= marvel_agp_unbind_memory,
	.translate	= marvel_agp_translate
};

alpha_agp_info *
marvel_agp_info(void)
{
	struct pci_controller *hose;
	io7_ioport_csrs *csrs;
	alpha_agp_info *agp;
	struct io7 *io7;

	/*
	 * Find the first IO7 with an AGP card.
	 *
	 * FIXME -- there should be a better way (we want to be able to
	 * specify and what if the agp card is not video???)
	 */
	hose = NULL;
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
		struct pci_controller *h;
		vuip addr;

		if (!io7->ports[IO7_AGP_PORT].enabled)
			continue;

		h = io7->ports[IO7_AGP_PORT].hose;
		addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);

		if (*addr != 0xffffffffu) {
			hose = h;
			break;
		}
	}

	if (!hose || !hose->sg_pci)
		return NULL;

	printk("MARVEL - using hose %d as AGP\n", hose->index);

	/*
	 * Get the csrs from the hose.
	 */
	csrs = ((struct io7_port *)hose->sysdata)->csrs;

	/*
	 * Allocate the info structure.
	 */
	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
	if (!agp)
		return NULL;

	/*
	 * Fill it in.
	 */
	agp->hose = hose;
	agp->private = NULL;
	agp->ops = &marvel_agp_ops;

	/*
	 * Aperture - not configured until ops.setup().
	 */
	agp->aperture.bus_base = 0;
	agp->aperture.size = 0;
	agp->aperture.sysdata = NULL;

	/*
	 * Capabilities.
	 *
	 * NOTE: IO7 reports through AGP_STAT that it can support a read queue
	 *       depth of 17 (rq = 0x10). It actually only supports a depth of
	 *       16 (rq = 0xf).
	 */
	agp->capability.lw = csrs->AGP_STAT.csr;
	agp->capability.bits.rq = 0xf;

	/*
	 * Mode.
	 */
	agp->mode.lw = csrs->AGP_CMD.csr;

	return agp;
}
linux-master
arch/alpha/kernel/core_marvel.c
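/*
 * Editor's sketch for the core_marvel.c entry above (illustrative only,
 * not part of the kernel source; the helper name is hypothetical and it
 * assumes it would live in the same file so the static __marvel_rtc_io()
 * is visible).  It shows the legacy RTC access pattern the driver
 * emulates: write the register index to port 0x70, then read the data
 * through port 0x71, which round-trips the value through the PALcode
 * cserve call and hands it back in BCD form, as a real MC146818 would.
 */
static u8 marvel_rtc_read_sketch(u8 reg)
{
	__marvel_rtc_io(reg, 0x70, 1);		/* select the register via the index port */
	return __marvel_rtc_io(0, 0x71, 0);	/* fetch its value via the data port */
}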
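/*
 * Editor's sketch (illustrative only, hypothetical helper): both
 * marvel_ioremap() and marvel_agp_translate() decode IO7 scatter-gather
 * PTEs the same way -- bit 0 is the valid bit and the page frame number
 * is stored shifted up by one, so a valid PTE yields the physical
 * address below.  PAGE_SHIFT here is alpha's 13 (8KB pages).
 */
static unsigned long marvel_sg_pte_to_phys_sketch(unsigned long pte)
{
	if (!(pte & 1))
		return ~0UL;			/* no valid mapping behind this PTE */
	return (pte >> 1) << PAGE_SHIFT;	/* true pfn -> physical address */
}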
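/*
 * Editor's note (illustrative arithmetic, not from the original source):
 * marvel_agp_setup() carves the AGP aperture out of the hose's PCI
 * scatter-gather arena, so the window the card sees is
 *
 *	bus_base = arena->dma_base + pg_start * PAGE_SIZE
 *	size     = pg_count * PAGE_SIZE
 *
 * Assuming, for example, a 16MB alpha_agpgart_size and alpha's 8KB
 * pages, iommu_reserve() is asked for pg_count = 16MB / 8KB = 2048
 * arena pages, aligned to pg_count - 1.
 */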
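/*
 * Editor's sketch (illustrative only, hypothetical helper): the PLL
 * check in marvel_agp_configure() reduces to a small pure function --
 * RNGB == 0x4 means the PLL is programmed for 2x only, RNGB == 0x6
 * means 1x/4x (so a 2x request is lowered to 1x), and any other
 * setting keeps the requested rate after warning the user.
 */
static unsigned int marvel_agp_effective_rate_sketch(unsigned long rngb,
						     unsigned int requested)
{
	if (rngb == 0x4 && requested != 2)
		return 2;		/* PLL only runs 2x */
	if (rngb == 0x6 && requested == 2)
		return 1;		/* don't go faster than requested: use 1x */
	return requested;
}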