// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/loongson.h>
#include <asm/setup.h>
DEFINE_PER_CPU(unsigned long, irq_stack);
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
struct acpi_vector_group pch_group[MAX_IO_PICS];
struct acpi_vector_group msi_group[MAX_IO_PICS];
/*
* 'What should we do if we get a hw irq event on an illegal vector?'
* Each architecture has to answer this itself.
*/
void ack_bad_irq(unsigned int irq)
{
pr_warn("Unexpected IRQ # %d\n", irq);
}
atomic_t irq_err_count;
asmlinkage void spurious_interrupt(void)
{
atomic_inc(&irq_err_count);
}
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
{
struct acpi_table_mcfg *mcfg;
struct acpi_mcfg_allocation *mptr;
int i, n;
if (header->length < sizeof(struct acpi_table_mcfg))
return -EINVAL;
n = (header->length - sizeof(struct acpi_table_mcfg)) /
sizeof(struct acpi_mcfg_allocation);
mcfg = (struct acpi_table_mcfg *)header;
mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
for (i = 0; i < n; i++, mptr++) {
msi_group[i].pci_segment = mptr->pci_segment;
pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
}
return 0;
}
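/*
 * Illustrative sketch (not part of the original file): the loop above derives
 * the NUMA node from bits 47:44 of each ECAM base address, assuming the
 * Loongson physical-address convention used by this file. The helper name is
 * made up for illustration.
 */
static inline int mcfg_addr_to_node(u64 ecam_base)
{
	/* Same shift and mask as early_pci_mcfg_parse() above */
	return (ecam_base >> 44) & 0xf;
}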
static void __init init_vec_parent_group(void)
{
int i;
for (i = 0; i < MAX_IO_PICS; i++) {
msi_group[i].pci_segment = -1;
msi_group[i].node = -1;
pch_group[i].node = -1;
}
acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
}
static int __init get_ipi_irq(void)
{
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
if (d)
return irq_create_mapping(d, INT_IPI);
return -EINVAL;
}
void __init init_IRQ(void)
{
int i;
#ifdef CONFIG_SMP
int r, ipi_irq;
static int ipi_dummy_dev;
#endif
unsigned int order = get_order(IRQ_STACK_SIZE);
struct page *page;
clear_csr_ecfg(ECFG0_IM);
clear_csr_estat(ESTATF_IP);
init_vec_parent_group();
irqchip_init();
#ifdef CONFIG_SMP
ipi_irq = get_ipi_irq();
if (ipi_irq < 0)
panic("IPI IRQ mapping failed\n");
irq_set_percpu_devid(ipi_irq);
r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
if (r < 0)
panic("IPI IRQ request failed\n");
#endif
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
for_each_possible_cpu(i) {
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
per_cpu(irq_stack, i) = (unsigned long)page_address(page);
pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
}
set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
}
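/*
 * Illustrative sketch (not part of the original file): get_order() above maps
 * the IRQ stack size to a page-allocation order, i.e. the smallest n such
 * that (PAGE_SIZE << n) >= size. A minimal equivalent, with a made-up name:
 */
static inline unsigned int irq_stack_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;	/* e.g. a 16KB stack with 4KB pages gives order 2 */
}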
| linux-master | arch/loongarch/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
/*
* Copy data from IO memory space to "real" memory space.
*/
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
while (count && !IS_ALIGNED((unsigned long)from, 8)) {
*(u8 *)to = __raw_readb(from);
from++;
to++;
count--;
}
while (count >= 8) {
*(u64 *)to = __raw_readq(from);
from += 8;
to += 8;
count -= 8;
}
while (count) {
*(u8 *)to = __raw_readb(from);
from++;
to++;
count--;
}
}
EXPORT_SYMBOL(__memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
*/
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
while (count && !IS_ALIGNED((unsigned long)to, 8)) {
__raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
}
while (count >= 8) {
__raw_writeq(*(u64 *)from, to);
from += 8;
to += 8;
count -= 8;
}
while (count) {
__raw_writeb(*(u8 *)from, to);
from++;
to++;
count--;
}
}
EXPORT_SYMBOL(__memcpy_toio);
/*
* "memset" on IO memory space.
*/
void __memset_io(volatile void __iomem *dst, int c, size_t count)
{
u64 qc = (u8)c;
qc |= qc << 8;
qc |= qc << 16;
qc |= qc << 32;
while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
__raw_writeb(c, dst);
dst++;
count--;
}
while (count >= 8) {
__raw_writeq(qc, dst);
dst += 8;
count -= 8;
}
while (count) {
__raw_writeb(c, dst);
dst++;
count--;
}
}
EXPORT_SYMBOL(__memset_io);
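/*
 * Illustrative sketch (not part of the original file): __memset_io() widens
 * the fill byte into a 64-bit pattern by repeated doubling, so one
 * __raw_writeq() stores eight copies of 'c' at once. The helper name is made
 * up for illustration.
 */
static inline u64 memset_io_pattern(int c)
{
	u64 qc = (u8)c;

	qc |= qc << 8;	/* 0x00000000000000cc -> 0x000000000000cccc */
	qc |= qc << 16;	/* -> 0x00000000cccccccc */
	qc |= qc << 32;	/* -> 0xcccccccccccccccc */
	return qc;
}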
| linux-master | arch/loongarch/kernel/io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Hanlu Li <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*/
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>
static void init_fp_ctx(struct task_struct *target)
{
/* The target already has context */
if (tsk_used_math(target))
return;
/* Begin with data registers set to all 1s... */
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
set_stopped_child_used_math(target);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* Don't load the watchpoint registers for the ex-child. */
clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/* regset get/set implementations */
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int r;
struct pt_regs *regs = task_pt_regs(target);
r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
r = membuf_write(&to, &regs->csr_era, sizeof(u64));
r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
return r;
}
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int err;
int a0_start = sizeof(u64) * GPR_NUM;
int era_start = a0_start + sizeof(u64);
int badvaddr_start = era_start + sizeof(u64);
struct pt_regs *regs = task_pt_regs(target);
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->regs,
0, a0_start);
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->orig_a0,
a0_start, a0_start + sizeof(u64));
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->csr_era,
era_start, era_start + sizeof(u64));
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->csr_badvaddr,
badvaddr_start, badvaddr_start + sizeof(u64));
return err;
}
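/*
 * Illustrative sketch (not part of the original file): the byte layout that
 * gpr_get() writes and gpr_set() reads back, assuming GPR_NUM == 32. The
 * struct name is made up for illustration.
 */
struct gpr_regset_layout_example {
	u64 regs[32];		/* offsets 0 .. 31 * 8 */
	u64 orig_a0;		/* offset a0_start == 32 * 8 */
	u64 csr_era;		/* offset era_start == 33 * 8 */
	u64 csr_badvaddr;	/* offset badvaddr_start == 34 * 8 */
};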
/*
* Get the general floating-point registers.
*/
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
return membuf_write(to, &target->thread.fpu.fpr,
sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}
static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
int i, r;
u64 fpr_val;
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
for (i = 0; i < NUM_FPU_REGS; i++) {
fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
}
return r;
}
/*
* Choose the appropriate helper for general registers, and then copy
* the FCC and FCSR registers separately.
*/
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int r;
save_fpu_regs(target);
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
r = gfpr_get(target, &to);
else
r = gfpr_get_simd(target, &to);
r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
return r;
}
static int gfpr_set(struct task_struct *target,
unsigned int *pos, unsigned int *count,
const void **kbuf, const void __user **ubuf)
{
return user_regset_copyin(pos, count, kbuf, ubuf,
&target->thread.fpu.fpr,
0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
static int gfpr_set_simd(struct task_struct *target,
unsigned int *pos, unsigned int *count,
const void **kbuf, const void __user **ubuf)
{
int i, err;
u64 fpr_val;
BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
err = user_regset_copyin(pos, count, kbuf, ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
if (err)
return err;
set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
}
return 0;
}
/*
* Choose the appropriate helper for general registers, and then copy
* the FCC register separately.
*/
static int fpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
const int fcsr_start = fcc_start + sizeof(u64);
int err;
BUG_ON(count % sizeof(elf_fpreg_t));
if (pos + count > sizeof(elf_fpregset_t))
return -EIO;
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
else
err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
if (err)
return err;
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fcc, fcc_start,
fcc_start + sizeof(u64));
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fcsr, fcsr_start,
fcsr_start + sizeof(u32));
return err;
}
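/*
 * Illustrative sketch (not part of the original file): the layout assumed by
 * fpr_get()/fpr_set() above -- the FP data registers followed by a 64-bit FCC
 * and a 32-bit FCSR -- assuming NUM_FPU_REGS == 32 and an 8-byte elf_fpreg_t.
 * The struct name is made up for illustration.
 */
struct fpr_regset_layout_example {
	u64 fpr[32];	/* offsets 0 .. 31 * 8 */
	u64 fcc;	/* offset fcc_start  == 32 * 8 */
	u32 fcsr;	/* offset fcsr_start == 32 * 8 + 8 */
};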
static int cfg_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int i, r;
u32 cfg_val;
i = 0;
while (to.left > 0) {
cfg_val = read_cpucfg(i++);
r = membuf_write(&to, &cfg_val, sizeof(u32));
}
return r;
}
/*
* CFG registers are read-only.
*/
static int cfg_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return 0;
}
#ifdef CONFIG_CPU_HAS_LSX
static void copy_pad_fprs(struct task_struct *target,
const struct user_regset *regset,
struct membuf *to, unsigned int live_sz)
{
int i, j;
unsigned long long fill = ~0ull;
unsigned int cp_sz, pad_sz;
cp_sz = min(regset->size, live_sz);
pad_sz = regset->size - cp_sz;
WARN_ON(pad_sz % sizeof(fill));
for (i = 0; i < NUM_FPU_REGS; i++) {
membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
membuf_store(to, fill);
}
}
}
static int simd_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
save_fpu_regs(target);
if (!tsk_used_math(target)) {
/* The task hasn't used FP or LSX, fill with 0xff */
copy_pad_fprs(target, regset, &to, 0);
} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
/* Copy scalar FP context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
/* Copy LSX 128 Bit context, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, 16);
#endif
} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
membuf_write(&to, &target->thread.fpu.fpr, wr_size);
} else {
/* Copy as much context as possible, fill the rest with 0xff */
copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
}
return 0;
}
static int simd_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const unsigned int wr_size = NUM_FPU_REGS * regset->size;
unsigned int cp_sz;
int i, err, start;
init_fp_ctx(target);
if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
/* Trivially copy the vector registers */
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr,
0, wr_size);
} else {
/* Copy as much context as possible */
cp_sz = min_t(unsigned int, regset->size,
sizeof(target->thread.fpu.fpr[0]));
i = start = err = 0;
for (; i < NUM_FPU_REGS; i++, start += regset->size) {
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.fpr[i],
start, start + cp_sz);
}
}
return err;
}
#endif /* CONFIG_CPU_HAS_LSX */
#ifdef CONFIG_CPU_HAS_LBT
static int lbt_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int r;
r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
return r;
}
static int lbt_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int err = 0;
const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
const int ftop_start = eflags_start + sizeof(u32);
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.lbt.scr0,
0, 4 * sizeof(target->thread.lbt.scr0));
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.lbt.eflags,
eflags_start, ftop_start);
err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpu.ftop,
ftop_start, ftop_start + sizeof(u32));
return err;
}
#endif /* CONFIG_CPU_HAS_LBT */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Handle hitting a HW-breakpoint.
*/
static void ptrace_hbptriggered(struct perf_event *bp,
struct perf_sample_data *data,
struct pt_regs *regs)
{
int i;
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
if (current->thread.hbp_break[i] == bp)
break;
for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
if (current->thread.hbp_watch[i] == bp)
break;
force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}
static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx)
{
struct perf_event *bp;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
if (idx >= LOONGARCH_MAX_BRP)
return ERR_PTR(-EINVAL);
idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
bp = tsk->thread.hbp_break[idx];
break;
case NT_LOONGARCH_HW_WATCH:
if (idx >= LOONGARCH_MAX_WRP)
return ERR_PTR(-EINVAL);
idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
bp = tsk->thread.hbp_watch[idx];
break;
}
return bp;
}
static int ptrace_hbp_set_event(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx,
struct perf_event *bp)
{
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
if (idx >= LOONGARCH_MAX_BRP)
return -EINVAL;
idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
tsk->thread.hbp_break[idx] = bp;
break;
case NT_LOONGARCH_HW_WATCH:
if (idx >= LOONGARCH_MAX_WRP)
return -EINVAL;
idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
tsk->thread.hbp_watch[idx] = bp;
break;
}
return 0;
}
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx)
{
int err, type;
struct perf_event *bp;
struct perf_event_attr attr;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
type = HW_BREAKPOINT_X;
break;
case NT_LOONGARCH_HW_WATCH:
type = HW_BREAKPOINT_RW;
break;
default:
return ERR_PTR(-EINVAL);
}
ptrace_breakpoint_init(&attr);
/*
* Initialise fields to sane defaults
* (i.e. values that will pass validation).
*/
attr.bp_addr = 0;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = type;
attr.disabled = 1;
bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
if (IS_ERR(bp))
return bp;
err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
if (err)
return ERR_PTR(err);
return bp;
}
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
int err, len, type, offset;
err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
if (err)
return err;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
break;
case NT_LOONGARCH_HW_WATCH:
if ((type & HW_BREAKPOINT_RW) != type)
return -EINVAL;
break;
default:
return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
attr->bp_addr += offset;
return 0;
}
static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
u8 num;
u64 reg = 0;
switch (note_type) {
case NT_LOONGARCH_HW_BREAK:
num = hw_breakpoint_slots(TYPE_INST);
break;
case NT_LOONGARCH_HW_WATCH:
num = hw_breakpoint_slots(TYPE_DATA);
break;
default:
return -EINVAL;
}
*info = reg | num;
return 0;
}
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx)
{
struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
if (!bp)
bp = ptrace_hbp_create(note_type, tsk, idx);
return bp;
}
static int ptrace_hbp_get_ctrl(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u32 *ctrl)
{
struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
return 0;
}
static int ptrace_hbp_get_mask(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u64 *mask)
{
struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
*mask = bp ? counter_arch_bp(bp)->mask : 0;
return 0;
}
static int ptrace_hbp_get_addr(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u64 *addr)
{
struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
*addr = bp ? counter_arch_bp(bp)->address : 0;
return 0;
}
static int ptrace_hbp_set_ctrl(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u32 uctrl)
{
int err;
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint_ctrl ctrl;
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
attr = bp->attr;
decode_ctrl_reg(uctrl, &ctrl);
err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
if (err)
return err;
return modify_user_hw_breakpoint(bp, &attr);
}
static int ptrace_hbp_set_mask(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u64 mask)
{
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint *info;
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
attr = bp->attr;
info = counter_arch_bp(bp);
info->mask = mask;
return modify_user_hw_breakpoint(bp, &attr);
}
static int ptrace_hbp_set_addr(unsigned int note_type,
struct task_struct *tsk,
unsigned long idx, u64 addr)
{
struct perf_event *bp;
struct perf_event_attr attr;
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
attr = bp->attr;
attr.bp_addr = addr;
return modify_user_hw_breakpoint(bp, &attr);
}
#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_MASK_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_PAD_SZ sizeof(u32)
static int hw_break_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
u64 info;
u32 ctrl;
u64 addr, mask;
int ret, idx = 0;
unsigned int note_type = regset->core_note_type;
/* Resource info */
ret = ptrace_hbp_get_resource_info(note_type, &info);
if (ret)
return ret;
membuf_write(&to, &info, sizeof(info));
/* (address, mask, ctrl) registers */
while (to.left) {
ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
if (ret)
return ret;
ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
if (ret)
return ret;
ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
if (ret)
return ret;
membuf_store(&to, addr);
membuf_store(&to, mask);
membuf_store(&to, ctrl);
membuf_zero(&to, sizeof(u32));
idx++;
}
return 0;
}
static int hw_break_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u32 ctrl;
u64 addr, mask;
int ret, idx = 0, offset, limit;
unsigned int note_type = regset->core_note_type;
/* Resource info */
offset = offsetof(struct user_watch_state, dbg_regs);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
/* (address, mask, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
if (count < PTRACE_HBP_ADDR_SZ)
return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
return ret;
ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
if (ret)
return ret;
offset += PTRACE_HBP_ADDR_SZ;
if (!count)
break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
offset, offset + PTRACE_HBP_MASK_SZ);
if (ret)
return ret;
ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
if (ret)
return ret;
offset += PTRACE_HBP_MASK_SZ;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
return ret;
ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
if (ret)
return ret;
offset += PTRACE_HBP_CTRL_SZ;
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
offset, offset + PTRACE_HBP_PAD_SZ);
offset += PTRACE_HBP_PAD_SZ;
idx++;
}
return 0;
}
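/*
 * Illustrative sketch (not part of the original file): the record format that
 * hw_break_get() emits and hw_break_set() parses -- one u64 resource-info
 * word followed by an (addr, mask, ctrl, pad) tuple per breakpoint or
 * watchpoint, matching the PTRACE_HBP_*_SZ sizes above. Names are made up
 * for illustration.
 */
struct hw_break_regset_layout_example {
	u64 dbg_info;		/* resource info */
	struct {
		u64 addr;	/* PTRACE_HBP_ADDR_SZ */
		u64 mask;	/* PTRACE_HBP_MASK_SZ */
		u32 ctrl;	/* PTRACE_HBP_CTRL_SZ */
		u32 pad;	/* PTRACE_HBP_PAD_SZ  */
	} dbg_regs[];
};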
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0, regs[0]),
REG_OFFSET_NAME(r1, regs[1]),
REG_OFFSET_NAME(r2, regs[2]),
REG_OFFSET_NAME(r3, regs[3]),
REG_OFFSET_NAME(r4, regs[4]),
REG_OFFSET_NAME(r5, regs[5]),
REG_OFFSET_NAME(r6, regs[6]),
REG_OFFSET_NAME(r7, regs[7]),
REG_OFFSET_NAME(r8, regs[8]),
REG_OFFSET_NAME(r9, regs[9]),
REG_OFFSET_NAME(r10, regs[10]),
REG_OFFSET_NAME(r11, regs[11]),
REG_OFFSET_NAME(r12, regs[12]),
REG_OFFSET_NAME(r13, regs[13]),
REG_OFFSET_NAME(r14, regs[14]),
REG_OFFSET_NAME(r15, regs[15]),
REG_OFFSET_NAME(r16, regs[16]),
REG_OFFSET_NAME(r17, regs[17]),
REG_OFFSET_NAME(r18, regs[18]),
REG_OFFSET_NAME(r19, regs[19]),
REG_OFFSET_NAME(r20, regs[20]),
REG_OFFSET_NAME(r21, regs[21]),
REG_OFFSET_NAME(r22, regs[22]),
REG_OFFSET_NAME(r23, regs[23]),
REG_OFFSET_NAME(r24, regs[24]),
REG_OFFSET_NAME(r25, regs[25]),
REG_OFFSET_NAME(r26, regs[26]),
REG_OFFSET_NAME(r27, regs[27]),
REG_OFFSET_NAME(r28, regs[28]),
REG_OFFSET_NAME(r29, regs[29]),
REG_OFFSET_NAME(r30, regs[30]),
REG_OFFSET_NAME(r31, regs[31]),
REG_OFFSET_NAME(orig_a0, orig_a0),
REG_OFFSET_NAME(csr_era, csr_era),
REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
REG_OFFSET_NAME(csr_crmd, csr_crmd),
REG_OFFSET_NAME(csr_prmd, csr_prmd),
REG_OFFSET_NAME(csr_euen, csr_euen),
REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
REG_OFFSET_NAME(csr_estat, csr_estat),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
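/*
 * Illustrative usage sketch (not part of the original file): looking up a GPR
 * offset in struct pt_regs by its symbolic name. The wrapper name is made up
 * for illustration.
 */
static inline int example_a0_offset(void)
{
	/* "r4" is the first argument register (a0); this returns
	 * offsetof(struct pt_regs, regs[4]), or -EINVAL for unknown names. */
	return regs_query_register_offset("r4");
}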
enum loongarch_regset {
REGSET_GPR,
REGSET_FPR,
REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
REGSET_HW_BREAK,
REGSET_HW_WATCH,
#endif
};
static const struct user_regset loongarch64_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(elf_greg_t),
.align = sizeof(elf_greg_t),
.regset_get = gpr_get,
.set = gpr_set,
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG,
.n = ELF_NFPREG,
.size = sizeof(elf_fpreg_t),
.align = sizeof(elf_fpreg_t),
.regset_get = fpr_get,
.set = fpr_set,
},
[REGSET_CPUCFG] = {
.core_note_type = NT_LOONGARCH_CPUCFG,
.n = 64,
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = cfg_get,
.set = cfg_set,
},
#ifdef CONFIG_CPU_HAS_LSX
[REGSET_LSX] = {
.core_note_type = NT_LOONGARCH_LSX,
.n = NUM_FPU_REGS,
.size = 16,
.align = 16,
.regset_get = simd_get,
.set = simd_set,
},
#endif
#ifdef CONFIG_CPU_HAS_LASX
[REGSET_LASX] = {
.core_note_type = NT_LOONGARCH_LASX,
.n = NUM_FPU_REGS,
.size = 32,
.align = 32,
.regset_get = simd_get,
.set = simd_set,
},
#endif
#ifdef CONFIG_CPU_HAS_LBT
[REGSET_LBT] = {
.core_note_type = NT_LOONGARCH_LBT,
.n = 5,
.size = sizeof(u64),
.align = sizeof(u64),
.regset_get = lbt_get,
.set = lbt_set,
},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
.core_note_type = NT_LOONGARCH_HW_BREAK,
.n = sizeof(struct user_watch_state) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
.set = hw_break_set,
},
[REGSET_HW_WATCH] = {
.core_note_type = NT_LOONGARCH_HW_WATCH,
.n = sizeof(struct user_watch_state) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.regset_get = hw_break_get,
.set = hw_break_set,
},
#endif
};
static const struct user_regset_view user_loongarch64_view = {
.name = "loongarch64",
.e_machine = ELF_ARCH,
.regsets = loongarch64_regsets,
.n = ARRAY_SIZE(loongarch64_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_loongarch64_view;
}
static inline int read_user(struct task_struct *target, unsigned long addr,
unsigned long __user *data)
{
unsigned long tmp = 0;
switch (addr) {
case 0 ... 31:
tmp = task_pt_regs(target)->regs[addr];
break;
case ARG0:
tmp = task_pt_regs(target)->orig_a0;
break;
case PC:
tmp = task_pt_regs(target)->csr_era;
break;
case BADVADDR:
tmp = task_pt_regs(target)->csr_badvaddr;
break;
default:
return -EIO;
}
return put_user(tmp, data);
}
static inline int write_user(struct task_struct *target, unsigned long addr,
unsigned long data)
{
switch (addr) {
case 0 ... 31:
task_pt_regs(target)->regs[addr] = data;
break;
case ARG0:
task_pt_regs(target)->orig_a0 = data;
break;
case PC:
task_pt_regs(target)->csr_era = data;
break;
case BADVADDR:
task_pt_regs(target)->csr_badvaddr = data;
break;
default:
return -EIO;
}
return 0;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (void __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
ret = read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
ret = write_user(child, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
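/*
 * Illustrative userspace sketch (not part of the original file): how a tracer
 * could read one GPR through the PTRACE_PEEKUSR path handled by read_user()
 * above, where addresses 0..31 select the GPRs directly. Shown as a comment
 * only; error handling is omitted and the glibc spelling PTRACE_PEEKUSER is
 * assumed.
 *
 *	#include <sys/ptrace.h>
 *
 *	long read_gpr(pid_t pid, int n)		// n in [0, 31]
 *	{
 *		return ptrace(PTRACE_PEEKUSER, pid, (void *)(long)n, NULL);
 *	}
 */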
#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct perf_event *bp;
struct perf_event_attr attr;
struct arch_hw_breakpoint *info;
struct thread_struct *thread = &tsk->thread;
bp = thread->hbp_break[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_8;
attr.bp_type = HW_BREAKPOINT_X;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
NULL, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->hbp_break[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* Reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
}
info = counter_arch_bp(bp);
info->mask = TASK_SIZE - 1;
return 0;
}
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
struct thread_info *ti = task_thread_info(task);
set_single_step(task, task_pt_regs(task)->csr_era);
task->thread.single_step = task_pt_regs(task)->csr_era;
set_ti_thread_flag(ti, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif
| linux-master | arch/loongarch/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>
#include "access-helper.h"
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
unsigned long addr;
struct unwind_state state;
struct pt_regs *pregs = (struct pt_regs *)regs;
if (!task)
task = current;
printk("%sCall Trace:", loglvl);
for (unwind_start(&state, task, pregs);
!unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
print_ip_sym(loglvl, addr);
}
printk("%s\n", loglvl);
}
static void show_stacktrace(struct task_struct *task,
const struct pt_regs *regs, const char *loglvl, bool user)
{
int i;
const int field = 2 * sizeof(unsigned long);
unsigned long stackdata;
unsigned long *sp = (unsigned long *)regs->regs[3];
printk("%sStack :", loglvl);
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
printk("%s ", loglvl);
}
if (i > 39) {
pr_cont(" ...");
break;
}
if (__get_addr(&stackdata, sp++, user)) {
pr_cont(" (Bad stack address)");
break;
}
pr_cont(" %0*lx", field, stackdata);
i++;
}
pr_cont("\n");
show_backtrace(task, regs, loglvl, user);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
regs.csr_crmd = 0;
if (sp) {
regs.csr_era = 0;
regs.regs[1] = 0;
regs.regs[3] = (unsigned long)sp;
} else {
if (!task || task == current)
prepare_frametrace(&regs);
else {
regs.csr_era = task->thread.reg01;
regs.regs[1] = 0;
regs.regs[3] = task->thread.reg03;
regs.regs[22] = task->thread.reg22;
}
}
show_stacktrace(task, &regs, loglvl, false);
}
static void show_code(unsigned int *pc, bool user)
{
long i;
unsigned int insn;
printk("Code:");
for(i = -3 ; i < 6 ; i++) {
if (__get_inst(&insn, pc + i, user)) {
pr_cont(" (Bad address in era)\n");
break;
}
pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
}
pr_cont("\n");
}
static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
/* e.g. "+PG", "-DA" */
pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}
static void print_plv_fragment(const char *key, int val)
{
/* e.g. "PLV0", "PPLV3" */
pr_cont("%s%d", key, val);
}
static void print_memory_type_fragment(const char *key, unsigned long val)
{
const char *humanized_type;
switch (val) {
case 0:
humanized_type = "SUC";
break;
case 1:
humanized_type = "CC";
break;
case 2:
humanized_type = "WUC";
break;
default:
pr_cont(" %s=Reserved(%lu)", key, val);
return;
}
/* e.g. " DATM=WUC" */
pr_cont(" %s=%s", key, humanized_type);
}
static void print_intr_fragment(const char *key, unsigned long val)
{
/* e.g. "LIE=0-1,3,5-7" */
pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}
static void print_crmd(unsigned long x)
{
printk(" CRMD: %08lx (", x);
print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
pr_cont(")\n");
}
static void print_prmd(unsigned long x)
{
printk(" PRMD: %08lx (", x);
print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
pr_cont(")\n");
}
static void print_euen(unsigned long x)
{
printk(" EUEN: %08lx (", x);
print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
pr_cont(")\n");
}
static void print_ecfg(unsigned long x)
{
printk(" ECFG: %08lx (", x);
print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}
static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
/*
* LoongArch users and developers are probably more familiar with the
* exception names found in the ISA manual, so print those out rather
* than the raw exception codes. This requires some mapping.
*/
switch (ecode) {
case EXCCODE_RSV: return "INT";
case EXCCODE_TLBL: return "PIL";
case EXCCODE_TLBS: return "PIS";
case EXCCODE_TLBI: return "PIF";
case EXCCODE_TLBM: return "PME";
case EXCCODE_TLBNR: return "PNR";
case EXCCODE_TLBNX: return "PNX";
case EXCCODE_TLBPE: return "PPI";
case EXCCODE_ADE:
switch (esubcode) {
case EXSUBCODE_ADEF: return "ADEF";
case EXSUBCODE_ADEM: return "ADEM";
}
break;
case EXCCODE_ALE: return "ALE";
case EXCCODE_BCE: return "BCE";
case EXCCODE_SYS: return "SYS";
case EXCCODE_BP: return "BRK";
case EXCCODE_INE: return "INE";
case EXCCODE_IPE: return "IPE";
case EXCCODE_FPDIS: return "FPD";
case EXCCODE_LSXDIS: return "SXD";
case EXCCODE_LASXDIS: return "ASXD";
case EXCCODE_FPE:
switch (esubcode) {
case EXCSUBCODE_FPE: return "FPE";
case EXCSUBCODE_VFPE: return "VFPE";
}
break;
case EXCCODE_WATCH:
switch (esubcode) {
case EXCSUBCODE_WPEF: return "WPEF";
case EXCSUBCODE_WPEM: return "WPEM";
}
break;
case EXCCODE_BTDIS: return "BTD";
case EXCCODE_BTE: return "BTE";
case EXCCODE_GSPR: return "GSPR";
case EXCCODE_HVC: return "HVC";
case EXCCODE_GCM:
switch (esubcode) {
case EXCSUBCODE_GCSC: return "GCSC";
case EXCSUBCODE_GCHC: return "GCHC";
}
break;
/*
* The manual does not mention the EXCCODE_SE case, but print it out
* nevertheless.
*/
case EXCCODE_SE: return "SE";
}
return "???";
}
static void print_estat(unsigned long x)
{
unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);
printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}
static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);
show_regs_print_info(KERN_DEFAULT);
/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));
/* The slot for $zero is reused as the syscall restart flag */
if (regs->regs[0])
printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));
if (user_mode(regs)) {
printk(" ra: %0*lx\n", GPR_FIELD(1));
printk(" ERA: %0*lx\n", field, regs->csr_era);
} else {
printk(" ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
printk(" ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
}
#undef GPR_FIELD
/* Print saved important CSRs */
print_crmd(regs->csr_crmd);
print_prmd(regs->csr_prmd);
print_euen(regs->csr_euen);
print_ecfg(regs->csr_ecfg);
print_estat(regs->csr_estat);
if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);
printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
cpu_family_string(), cpu_full_name_string());
}
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
dump_stack();
}
void show_registers(struct pt_regs *regs)
{
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
current->comm, current->pid, current_thread_info(), current);
show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
show_code((void *)regs->csr_era, user_mode(regs));
printk("\n");
}
static DEFINE_RAW_SPINLOCK(die_lock);
void die(const char *str, struct pt_regs *regs)
{
int ret;
static int die_counter;
oops_enter();
ret = notify_die(DIE_OOPS, str, regs, 0,
current->thread.trap_nr, SIGSEGV);
console_verbose();
raw_spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
raw_spin_unlock_irq(&die_lock);
oops_exit();
if (ret == NOTIFY_STOP)
return;
if (regs && kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
make_task_dead(SIGSEGV);
}
static inline void setup_vint_size(unsigned int size)
{
unsigned int vs;
vs = ilog2(size/4);
if (vs == 0 || vs > 7)
panic("vint_size %d Not support yet", vs);
csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}
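/*
 * Worked example (not part of the original file): ECFG.VS is taken here as
 * log2 of the number of instructions per vector slot. Assuming a 512-byte
 * VECSIZE:
 *
 *	vs = ilog2(512 / 4) = ilog2(128) = 7
 *
 * and any size whose encoding falls outside 1..7 makes setup_vint_size()
 * panic.
 */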
/*
* Send SIGFPE according to FCSR Cause bits, which must have already
* been masked against Enable bits. This is important as Inexact can
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
static void force_fcsr_sig(unsigned long fcsr,
void __user *fault_addr, struct task_struct *tsk)
{
int si_code = FPE_FLTUNK;
if (fcsr & FPU_CSR_INV_X)
si_code = FPE_FLTINV;
else if (fcsr & FPU_CSR_DIV_X)
si_code = FPE_FLTDIV;
else if (fcsr & FPU_CSR_OVF_X)
si_code = FPE_FLTOVF;
else if (fcsr & FPU_CSR_UDF_X)
si_code = FPE_FLTUND;
else if (fcsr & FPU_CSR_INE_X)
si_code = FPE_FLTRES;
force_sig_fault(SIGFPE, si_code, fault_addr);
}
static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
int si_code;
switch (sig) {
case 0:
return 0;
case SIGFPE:
force_fcsr_sig(fcsr, fault_addr, current);
return 1;
case SIGBUS:
force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
return 1;
case SIGSEGV:
mmap_read_lock(current->mm);
if (vma_lookup(current->mm, (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
mmap_read_unlock(current->mm);
force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
default:
force_sig(sig);
return 1;
}
}
/*
* Delayed fp exceptions when doing a lazy ctx switch
*/
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
int sig;
void __user *fault_addr;
irqentry_state_t state = irqentry_enter(regs);
if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
SIGFPE) == NOTIFY_STOP)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
sig = SIGFPE;
fault_addr = (void __user *) regs->csr_era;
/* Send a signal if required. */
process_fpemu_return(sig, fault_addr, fcsr);
out:
local_irq_disable();
irqentry_exit(regs, state);
}
asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
die_if_kernel("Kernel ade access", regs);
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
irqentry_exit(regs, state);
}
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
#ifndef CONFIG_ARCH_STRICT_ALIGN
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
unsigned int *pc;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
*/
if (regs->csr_badvaddr == regs->csr_era)
goto sigbus;
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (!unaligned_enabled)
goto sigbus;
if (!no_unaligned_warning)
show_registers(regs);
pc = (unsigned int *)exception_era(regs);
emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);
goto out;
sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
irqentry_exit(regs, state);
}
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_GENERIC_BUG */
static void bug_handler(struct pt_regs *regs)
{
switch (report_bug(regs->csr_era, regs)) {
case BUG_TRAP_TYPE_BUG:
case BUG_TRAP_TYPE_NONE:
die_if_kernel("Oops - BUG", regs);
force_sig(SIGTRAP);
break;
case BUG_TRAP_TYPE_WARN:
/* Skip the BUG instruction and continue */
regs->csr_era += LOONGARCH_INSN_SIZE;
break;
}
}
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
die_if_kernel("Bounds check error in kernel code", regs);
/*
* Pull out the address that failed bounds checking, and the lower /
* upper bound, by minimally looking at the faulting instruction word
* and reading from the correct register.
*/
if (__get_inst(&insn.word, (u32 *)era, user))
goto bad_era;
switch (insn.reg3_format.opcode) {
case asrtle_op:
if (insn.reg3_format.rd != 0)
break; /* not asrtle */
badv = regs->regs[insn.reg3_format.rj];
upper = regs->regs[insn.reg3_format.rk];
break;
case asrtgt_op:
if (insn.reg3_format.rd != 0)
break; /* not asrtgt */
badv = regs->regs[insn.reg3_format.rj];
lower = regs->regs[insn.reg3_format.rk];
break;
case ldleb_op:
case ldleh_op:
case ldlew_op:
case ldled_op:
case stleb_op:
case stleh_op:
case stlew_op:
case stled_op:
case fldles_op:
case fldled_op:
case fstles_op:
case fstled_op:
badv = regs->regs[insn.reg3_format.rj];
upper = regs->regs[insn.reg3_format.rk];
break;
case ldgtb_op:
case ldgth_op:
case ldgtw_op:
case ldgtd_op:
case stgtb_op:
case stgth_op:
case stgtw_op:
case stgtd_op:
case fldgts_op:
case fldgtd_op:
case fstgts_op:
case fstgtd_op:
badv = regs->regs[insn.reg3_format.rj];
lower = regs->regs[insn.reg3_format.rk];
break;
}
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_disable();
irqentry_exit(regs, state);
return;
bad_era:
/*
* Cannot pull out the instruction word, hence cannot provide more
* info than a regular SIGSEGV in this case.
*/
force_sig(SIGSEGV);
goto out;
}
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
goto out_sigsegv;
bcode = (opcode & 0x7fff);
/*
* Notify the kprobe handlers, if the instruction is likely to
* pertain to them.
*/
switch (bcode) {
case BRK_KDB:
if (kgdb_breakpoint_handler(regs))
goto out;
else
break;
case BRK_KPROBE_BP:
if (kprobe_breakpoint_handler(regs))
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
if (kprobe_singlestep_handler(regs))
goto out;
else
break;
case BRK_UPROBE_BP:
if (uprobe_breakpoint_handler(regs))
goto out;
else
break;
case BRK_UPROBE_XOLBP:
if (uprobe_singlestep_handler(regs))
goto out;
else
break;
default:
current->thread.trap_nr = read_csr_excode();
if (notify_die(DIE_TRAP, "Break", regs, bcode,
current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
goto out;
else
break;
}
switch (bcode) {
case BRK_BUG:
bug_handler(regs);
break;
case BRK_DIVZERO:
die_if_kernel("Break instruction in kernel code", regs);
force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
break;
case BRK_OVERFLOW:
die_if_kernel("Break instruction in kernel code", regs);
force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
break;
default:
die_if_kernel("Break instruction in kernel code", regs);
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
break;
}
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_disable();
irqentry_exit(regs, state);
return;
out_sigsegv:
force_sig(SIGSEGV);
goto out;
}
asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
#ifndef CONFIG_HAVE_HW_BREAKPOINT
pr_warn("Hardware watch point handler not implemented!\n");
#else
if (kgdb_breakpoint_handler(regs))
goto out;
if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
unsigned long pc = instruction_pointer(regs);
union loongarch_instruction *ip = (union loongarch_instruction *)pc;
if (llbit) {
/*
* When the ll-sc combo is encountered, it is regarded as a single
* instruction. So don't clear llbit and reset CSR.FWPS.Skip until
* the llsc execution is completed.
*/
csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
goto out;
}
if (pc == current->thread.single_step) {
/*
* Certain insns are occasionally not skipped when CSR.FWPS.Skip is
* set, such as fld.d/fst.d. So single-stepping needs to check whether
* csr_era equals the value recorded when the single step was last set.
*/
if (!is_self_loop_ins(ip, regs)) {
/*
* Check whether the target pc of the given instruction equals the
* current pc (a self-loop). If so, do not set the CSR.FWPS.Skip
* bit, or the original instruction stream would be broken.
*/
csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
goto out;
}
}
} else {
breakpoint_handler(regs);
watchpoint_handler(regs);
}
force_sig(SIGTRAP);
out:
#endif
irqentry_exit(regs, state);
}
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
int status = SIGILL;
unsigned int __maybe_unused opcode;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
SIGILL) == NOTIFY_STOP)
goto out;
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(get_user(opcode, era) < 0)) {
status = SIGSEGV;
current->thread.error_code = 1;
}
force_sig(status);
out:
local_irq_disable();
irqentry_exit(regs, state);
}
static void init_restore_fp(void)
{
if (!used_math()) {
/* First time FP context user. */
init_fpu();
} else {
/* This task has formerly used the FP context */
if (!is_fpu_owner())
own_fpu_inatomic(1);
}
BUG_ON(!is_fp_enabled());
}
static void init_restore_lsx(void)
{
enable_lsx();
if (!thread_lsx_context_live()) {
/* First time LSX context user */
init_restore_fp();
init_lsx_upper();
set_thread_flag(TIF_LSX_CTX_LIVE);
} else {
if (!is_simd_owner()) {
if (is_fpu_owner()) {
restore_lsx_upper(current);
} else {
__own_fpu();
restore_lsx(current);
}
}
}
set_thread_flag(TIF_USEDSIMD);
BUG_ON(!is_fp_enabled());
BUG_ON(!is_lsx_enabled());
}
static void init_restore_lasx(void)
{
enable_lasx();
if (!thread_lasx_context_live()) {
/* First time LASX context user */
init_restore_lsx();
init_lasx_upper();
set_thread_flag(TIF_LASX_CTX_LIVE);
} else {
if (is_fpu_owner() || is_simd_owner()) {
init_restore_lsx();
restore_lasx_upper(current);
} else {
__own_fpu();
enable_lsx();
restore_lasx(current);
}
}
set_thread_flag(TIF_USEDSIMD);
BUG_ON(!is_fp_enabled());
BUG_ON(!is_lsx_enabled());
BUG_ON(!is_lasx_enabled());
}
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
die_if_kernel("do_fpu invoked from kernel context!", regs);
BUG_ON(is_lsx_enabled());
BUG_ON(is_lasx_enabled());
preempt_disable();
init_restore_fp();
preempt_enable();
local_irq_disable();
irqentry_exit(regs, state);
}
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
if (!cpu_has_lsx) {
force_sig(SIGILL);
goto out;
}
die_if_kernel("do_lsx invoked from kernel context!", regs);
BUG_ON(is_lasx_enabled());
preempt_disable();
init_restore_lsx();
preempt_enable();
out:
local_irq_disable();
irqentry_exit(regs, state);
}
asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
if (!cpu_has_lasx) {
force_sig(SIGILL);
goto out;
}
die_if_kernel("do_lasx invoked from kernel context!", regs);
preempt_disable();
init_restore_lasx();
preempt_enable();
out:
local_irq_disable();
irqentry_exit(regs, state);
}
static void init_restore_lbt(void)
{
if (!thread_lbt_context_live()) {
/* First time LBT context user */
init_lbt();
set_thread_flag(TIF_LBT_CTX_LIVE);
} else {
if (!is_lbt_owner())
own_lbt_inatomic(1);
}
BUG_ON(!is_lbt_enabled());
}
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
/*
* BTD (Binary Translation Disable exception) can be triggered
* during FP save/restore if TM (Top Mode) is on, which may
* cause irq_enable during 'switch_to'. To avoid this situation
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_enable();
if (!cpu_has_lbt) {
force_sig(SIGILL);
goto out;
}
BUG_ON(is_lbt_enabled());
preempt_disable();
init_restore_lbt();
preempt_enable();
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
local_irq_disable();
irqentry_exit(regs, state);
}
asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
local_irq_enable();
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a fatal error after another hardware/software error.
*/
pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
read_csr_excode(), current->pid, current->comm);
die_if_kernel("do_reserved exception", regs);
force_sig(SIGUNUSED);
local_irq_disable();
irqentry_exit(regs, state);
}
asmlinkage void cache_parity_error(void)
{
/* For the moment, report the problem and hang. */
pr_err("Cache error exception:\n");
pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
panic("Can't handle the cache error!");
}
asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
struct pt_regs *old_regs;
irq_enter_rcu();
old_regs = set_irq_regs(regs);
handle_arch_irq(regs);
set_irq_regs(old_regs);
irq_exit_rcu();
}
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
register int cpu;
register unsigned long stack;
irqentry_state_t state = irqentry_enter(regs);
cpu = smp_processor_id();
if (on_irq_stack(cpu, sp))
handle_loongarch_irq(regs);
else {
stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
/* Save task's sp on IRQ stack for unwinding */
*(unsigned long *)stack = sp;
__asm__ __volatile__(
"move $s0, $sp \n" /* Preserve sp */
"move $sp, %[stk] \n" /* Switch stack */
"move $a0, %[regs] \n"
"bl handle_loongarch_irq \n"
"move $sp, $s0 \n" /* Restore sp */
: /* No outputs */
: [stk] "r" (stack), [regs] "r" (regs)
: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
"$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
"memory");
}
irqentry_exit(regs, state);
}
unsigned long eentry;
unsigned long tlbrentry;
long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
static void configure_exception_vector(void)
{
eentry = (unsigned long)exception_handlers;
tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
csr_write64(eentry, LOONGARCH_CSR_EENTRY);
csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
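/*
 * Illustrative sketch (not part of the original file): the single
 * 64KB-aligned exception_handlers[] array above is carved into VECSIZE-byte
 * slots, one per exception/interrupt code, with the TLB refill entry placed
 * 80 slots in:
 *
 *	handler address = eentry + exccode * VECSIZE	(see set_handler())
 *	tlbrentry       = eentry + 80 * VECSIZE
 *
 * so EENTRY, MERRENTRY and TLBRENTRY all point into the same array.
 */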
void per_cpu_trap_init(int cpu)
{
unsigned int i;
setup_vint_size(VECSIZE);
configure_exception_vector();
if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
/* Initialise exception handlers */
if (cpu == 0)
for (i = 0; i < 64; i++)
set_handler(i * VECSIZE, handle_reserved, VECSIZE);
tlb_init(cpu);
cpu_cache_init();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(eentry + offset), addr, size);
local_flush_icache_range(eentry + offset, eentry + offset + size);
}
static const char panic_null_cerr[] =
"Trying to set NULL cache error exception handler\n";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
if (!addr)
panic(panic_null_cerr);
memcpy((void *)(uncached_eentry + offset), addr, size);
}
void __init trap_init(void)
{
long i;
/* Set interrupt vector handler */
for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
set_handler(i * VECSIZE, handle_vint, VECSIZE);
set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
cache_error_setup();
local_flush_icache_range(eentry, eentry + 0x400);
}
| linux-master | arch/loongarch/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* asm-offsets.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/cpu-info.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/ftrace.h>
void output_ptreg_defines(void)
{
COMMENT("LoongArch pt_regs offsets.");
OFFSET(PT_R0, pt_regs, regs[0]);
OFFSET(PT_R1, pt_regs, regs[1]);
OFFSET(PT_R2, pt_regs, regs[2]);
OFFSET(PT_R3, pt_regs, regs[3]);
OFFSET(PT_R4, pt_regs, regs[4]);
OFFSET(PT_R5, pt_regs, regs[5]);
OFFSET(PT_R6, pt_regs, regs[6]);
OFFSET(PT_R7, pt_regs, regs[7]);
OFFSET(PT_R8, pt_regs, regs[8]);
OFFSET(PT_R9, pt_regs, regs[9]);
OFFSET(PT_R10, pt_regs, regs[10]);
OFFSET(PT_R11, pt_regs, regs[11]);
OFFSET(PT_R12, pt_regs, regs[12]);
OFFSET(PT_R13, pt_regs, regs[13]);
OFFSET(PT_R14, pt_regs, regs[14]);
OFFSET(PT_R15, pt_regs, regs[15]);
OFFSET(PT_R16, pt_regs, regs[16]);
OFFSET(PT_R17, pt_regs, regs[17]);
OFFSET(PT_R18, pt_regs, regs[18]);
OFFSET(PT_R19, pt_regs, regs[19]);
OFFSET(PT_R20, pt_regs, regs[20]);
OFFSET(PT_R21, pt_regs, regs[21]);
OFFSET(PT_R22, pt_regs, regs[22]);
OFFSET(PT_R23, pt_regs, regs[23]);
OFFSET(PT_R24, pt_regs, regs[24]);
OFFSET(PT_R25, pt_regs, regs[25]);
OFFSET(PT_R26, pt_regs, regs[26]);
OFFSET(PT_R27, pt_regs, regs[27]);
OFFSET(PT_R28, pt_regs, regs[28]);
OFFSET(PT_R29, pt_regs, regs[29]);
OFFSET(PT_R30, pt_regs, regs[30]);
OFFSET(PT_R31, pt_regs, regs[31]);
OFFSET(PT_CRMD, pt_regs, csr_crmd);
OFFSET(PT_PRMD, pt_regs, csr_prmd);
OFFSET(PT_EUEN, pt_regs, csr_euen);
OFFSET(PT_ECFG, pt_regs, csr_ecfg);
OFFSET(PT_ESTAT, pt_regs, csr_estat);
OFFSET(PT_ERA, pt_regs, csr_era);
OFFSET(PT_BVADDR, pt_regs, csr_badvaddr);
OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
void output_task_defines(void)
{
COMMENT("LoongArch task_struct offsets.");
OFFSET(TASK_STATE, task_struct, __state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
OFFSET(TASK_PID, task_struct, pid);
#if defined(CONFIG_STACKPROTECTOR)
OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
#endif
DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
BLANK();
}
void output_thread_info_defines(void)
{
COMMENT("LoongArch thread_info offsets.");
OFFSET(TI_TASK, thread_info, task);
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
BLANK();
}
void output_thread_defines(void)
{
COMMENT("LoongArch specific thread_struct offsets.");
OFFSET(THREAD_REG01, task_struct, thread.reg01);
OFFSET(THREAD_REG03, task_struct, thread.reg03);
OFFSET(THREAD_REG22, task_struct, thread.reg22);
OFFSET(THREAD_REG23, task_struct, thread.reg23);
OFFSET(THREAD_REG24, task_struct, thread.reg24);
OFFSET(THREAD_REG25, task_struct, thread.reg25);
OFFSET(THREAD_REG26, task_struct, thread.reg26);
OFFSET(THREAD_REG27, task_struct, thread.reg27);
OFFSET(THREAD_REG28, task_struct, thread.reg28);
OFFSET(THREAD_REG29, task_struct, thread.reg29);
OFFSET(THREAD_REG30, task_struct, thread.reg30);
OFFSET(THREAD_REG31, task_struct, thread.reg31);
OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
OFFSET(THREAD_CSRCRMD, task_struct,
thread.csr_crmd);
OFFSET(THREAD_CSRPRMD, task_struct,
thread.csr_prmd);
OFFSET(THREAD_CSREUEN, task_struct,
thread.csr_euen);
OFFSET(THREAD_CSRECFG, task_struct,
thread.csr_ecfg);
OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_BVADDR, task_struct, \
thread.csr_badvaddr);
OFFSET(THREAD_ECODE, task_struct, \
thread.error_code);
OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
BLANK();
}
void output_thread_fpu_defines(void)
{
OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]);
OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]);
OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]);
OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]);
OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]);
OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]);
OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]);
OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]);
OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]);
OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]);
OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]);
OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]);
OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]);
OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]);
OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]);
OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]);
OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]);
OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]);
OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]);
OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]);
OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]);
OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]);
OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]);
OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]);
OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]);
OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]);
OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]);
OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]);
OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]);
OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]);
OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]);
OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]);
OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
OFFSET(THREAD_FCC, loongarch_fpu, fcc);
OFFSET(THREAD_FTOP, loongarch_fpu, ftop);
BLANK();
}
void output_thread_lbt_defines(void)
{
OFFSET(THREAD_SCR0, loongarch_lbt, scr0);
OFFSET(THREAD_SCR1, loongarch_lbt, scr1);
OFFSET(THREAD_SCR2, loongarch_lbt, scr2);
OFFSET(THREAD_SCR3, loongarch_lbt, scr3);
OFFSET(THREAD_EFLAGS, loongarch_lbt, eflags);
BLANK();
}
void output_mm_defines(void)
{
COMMENT("Size of struct page");
DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
BLANK();
COMMENT("Linux mm_struct offsets.");
OFFSET(MM_USERS, mm_struct, mm_users);
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
BLANK();
DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
BLANK();
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
DEFINE(_PAGE_SIZE, PAGE_SIZE);
BLANK();
}
void output_sc_defines(void)
{
COMMENT("Linux sigcontext offsets.");
OFFSET(SC_REGS, sigcontext, sc_regs);
OFFSET(SC_PC, sigcontext, sc_pc);
BLANK();
}
void output_signal_defines(void)
{
COMMENT("Linux signal numbers.");
DEFINE(_SIGHUP, SIGHUP);
DEFINE(_SIGINT, SIGINT);
DEFINE(_SIGQUIT, SIGQUIT);
DEFINE(_SIGILL, SIGILL);
DEFINE(_SIGTRAP, SIGTRAP);
DEFINE(_SIGIOT, SIGIOT);
DEFINE(_SIGABRT, SIGABRT);
DEFINE(_SIGFPE, SIGFPE);
DEFINE(_SIGKILL, SIGKILL);
DEFINE(_SIGBUS, SIGBUS);
DEFINE(_SIGSEGV, SIGSEGV);
DEFINE(_SIGSYS, SIGSYS);
DEFINE(_SIGPIPE, SIGPIPE);
DEFINE(_SIGALRM, SIGALRM);
DEFINE(_SIGTERM, SIGTERM);
DEFINE(_SIGUSR1, SIGUSR1);
DEFINE(_SIGUSR2, SIGUSR2);
DEFINE(_SIGCHLD, SIGCHLD);
DEFINE(_SIGPWR, SIGPWR);
DEFINE(_SIGWINCH, SIGWINCH);
DEFINE(_SIGURG, SIGURG);
DEFINE(_SIGIO, SIGIO);
DEFINE(_SIGSTOP, SIGSTOP);
DEFINE(_SIGTSTP, SIGTSTP);
DEFINE(_SIGCONT, SIGCONT);
DEFINE(_SIGTTIN, SIGTTIN);
DEFINE(_SIGTTOU, SIGTTOU);
DEFINE(_SIGVTALRM, SIGVTALRM);
DEFINE(_SIGPROF, SIGPROF);
DEFINE(_SIGXCPU, SIGXCPU);
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
#ifdef CONFIG_SMP
void output_smpboot_defines(void)
{
COMMENT("Linux smp cpu boot offsets.");
OFFSET(CPU_BOOT_STACK, secondary_data, stack);
OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
BLANK();
}
#endif
#ifdef CONFIG_HIBERNATION
void output_pbe_defines(void)
{
COMMENT("Linux struct pbe offsets.");
OFFSET(PBE_ADDRESS, pbe, address);
OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
OFFSET(PBE_NEXT, pbe, next);
DEFINE(PBE_SIZE, sizeof(struct pbe));
BLANK();
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void output_fgraph_ret_regs_defines(void)
{
COMMENT("LoongArch fgraph_ret_regs offsets.");
OFFSET(FGRET_REGS_A0, fgraph_ret_regs, regs[0]);
OFFSET(FGRET_REGS_A1, fgraph_ret_regs, regs[1]);
OFFSET(FGRET_REGS_FP, fgraph_ret_regs, fp);
DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs));
BLANK();
}
#endif
| linux-master | arch/loongarch/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
#include <asm/bootinfo.h>
#include <acpi/processor.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU
int arch_register_cpu(int cpu)
{
int ret;
struct cpu *c = &per_cpu(cpu_devices, cpu);
c->hotpluggable = 1;
ret = register_cpu(c, cpu);
if (ret < 0)
pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
return ret;
}
EXPORT_SYMBOL(arch_register_cpu);
void arch_unregister_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
c->hotpluggable = 0;
unregister_cpu(c);
}
EXPORT_SYMBOL(arch_unregister_cpu);
#endif
static int __init topology_init(void)
{
int i, ret;
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
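/* CPUs acting as an I/O interrupt master must stay online, so mark them non-hotpluggable */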
c->hotpluggable = !io_master(i);
ret = register_cpu(c, i);
if (ret < 0)
pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
}
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/loongarch/kernel/topology.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/sections.h>
void __init memblock_init(void)
{
u32 mem_type;
u64 mem_start, mem_end, mem_size;
efi_memory_desc_t *md;
/* Parse memory information */
for_each_efi_memory_desc(md) {
mem_type = md->type;
mem_start = md->phys_addr;
mem_size = md->num_pages << EFI_PAGE_SHIFT;
mem_end = mem_start + mem_size;
switch (mem_type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
memblock_add(mem_start, mem_size);
if (max_low_pfn < (mem_end >> PAGE_SHIFT))
max_low_pfn = mem_end >> PAGE_SHIFT;
break;
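/* Keep these ranges visible in the memory map but unavailable for allocation: add, then fall through to reserve */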
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
case EFI_ACPI_RECLAIM_MEMORY:
memblock_add(mem_start, mem_size);
fallthrough;
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
case EFI_RUNTIME_SERVICES_DATA:
case EFI_MEMORY_MAPPED_IO:
case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
memblock_reserve(mem_start, mem_size);
break;
}
}
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
/* Reserve the first 2MB */
memblock_reserve(PHYS_OFFSET, 0x200000);
/* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
}
| linux-master | arch/loongarch/kernel/mem.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Kernel relocation at boot time
*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/panic_notifier.h>
#include <linux/start_kernel.h>
#include <asm/bootinfo.h>
#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/sections.h>
#include <asm/setup.h>
#define RELOCATED(x) ((void *)((long)x + reloc_offset))
#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))
static unsigned long reloc_offset;
static inline void __init relocate_relative(void)
{
Elf64_Rela *rela, *rela_end;
rela = (Elf64_Rela *)&__rela_dyn_begin;
rela_end = (Elf64_Rela *)&__rela_dyn_end;
for ( ; rela < rela_end; rela++) {
Elf64_Addr addr = rela->r_offset;
Elf64_Addr relocated_addr = rela->r_addend;
if (rela->r_info != R_LARCH_RELATIVE)
continue;
if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
*(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
}
}
static inline void __init relocate_absolute(long random_offset)
{
void *begin, *end;
struct rela_la_abs *p;
begin = RELOCATED_KASLR(&__la_abs_begin);
end = RELOCATED_KASLR(&__la_abs_end);
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
uint32_t lu12iw, ori, lu32id, lu52id;
union loongarch_instruction *insn = (void *)p - p->offset;
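/* A la.abs sequence is lu12i.w, ori, lu32i.d, lu52i.d; split the symbol value across their immediate fields */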
lu12iw = (v >> 12) & 0xfffff;
ori = v & 0xfff;
lu32id = (v >> 32) & 0xfffff;
lu52id = v >> 52;
insn[0].reg1i20_format.immediate = lu12iw;
insn[1].reg2i12_format.immediate = ori;
insn[2].reg1i20_format.immediate = lu32id;
insn[3].reg2i12_format.immediate = lu52id;
}
}
#ifdef CONFIG_RANDOMIZE_BASE
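/* Fold the bytes of @area into @hash with a rotate-and-XOR mix */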
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
size_t i, diff;
const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
diff = (void *)ptr - area;
if (size < diff + sizeof(hash))
return hash;
size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */
hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
hash ^= ptr[i];
}
return hash;
}
static inline __init unsigned long get_random_boot(void)
{
unsigned long hash = 0;
unsigned long entropy = random_get_entropy();
/* Attempt to create a simple but unpredictable starting entropy. */
hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
/* Add in any runtime entropy we can get */
hash = rotate_xor(hash, &entropy, sizeof(entropy));
return hash;
}
static inline __init bool kaslr_disabled(void)
{
char *str;
const char *builtin_cmdline = CONFIG_CMDLINE;
str = strstr(builtin_cmdline, "nokaslr");
if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
return true;
str = strstr(boot_command_line, "nokaslr");
if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
return true;
return false;
}
/* Choose a new address for the kernel */
static inline void __init *determine_relocation_address(void)
{
unsigned long kernel_length;
unsigned long random_offset;
void *destination = _text;
if (kaslr_disabled())
return destination;
kernel_length = (long)_end - (long)_text;
random_offset = get_random_boot() << 16;
random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
if (random_offset < kernel_length)
random_offset += ALIGN(kernel_length, 0xffff);
return RELOCATED_KASLR(destination);
}
static inline int __init relocation_addr_valid(void *location_new)
{
if ((unsigned long)location_new & 0x00000ffff)
return 0; /* Inappropriately aligned new location */
if ((unsigned long)location_new < (unsigned long)_end)
return 0; /* New location overlaps original kernel */
return 1;
}
#endif
static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
{
unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
*new_addr = (unsigned long)reloc_offset;
}
unsigned long __init relocate_kernel(void)
{
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
#ifdef CONFIG_RANDOMIZE_BASE
location_new = determine_relocation_address();
/* Sanity check relocation address */
if (relocation_addr_valid(location_new))
random_offset = (unsigned long)location_new - (unsigned long)(_text);
#endif
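/* Offset between the link-time address (VMLINUX_LOAD_ADDRESS) and where the kernel actually runs */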
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
if (random_offset) {
kernel_length = (long)(_end) - (long)(_text);
/* Copy the kernel to its new location */
memcpy(location_new, _text, kernel_length);
/* Sync the caches ready for execution of new kernel */
__asm__ __volatile__ (
"ibar 0 \t\n"
"dbar 0 \t\n"
::: "memory");
reloc_offset += random_offset;
/* The current thread is now within the relocated kernel */
__current_thread_info = RELOCATED_KASLR(__current_thread_info);
update_reloc_offset(&reloc_offset, random_offset);
}
if (reloc_offset)
relocate_relative();
relocate_absolute(random_offset);
return random_offset;
}
/*
* Show relocation information on panic.
*/
static void show_kernel_relocation(const char *level)
{
if (reloc_offset > 0) {
printk(level);
pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
pr_cont(" .text @ 0x%px\n", _text);
pr_cont(" .data @ 0x%px\n", _sdata);
pr_cont(" .bss @ 0x%px\n", __bss_start);
}
}
static int kernel_location_notifier_fn(struct notifier_block *self,
unsigned long v, void *p)
{
show_kernel_relocation(KERN_EMERG);
return NOTIFY_DONE;
}
static struct notifier_block kernel_location_notifier = {
.notifier_call = kernel_location_notifier_fn
};
static int __init register_kernel_offset_dumper(void)
{
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_location_notifier);
return 0;
}
arch_initcall(register_kernel_offset_dumper);
| linux-master | arch/loongarch/kernel/relocate.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Author: Hanlu Li <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "kmod: " fmt
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/alternative.h>
#include <asm/inst.h>
static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top >= RELA_STACK_DEPTH)
return -ENOEXEC;
rela_stack[(*rela_stack_top)++] = stack_value;
pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
return 0;
}
static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
{
if (*rela_stack_top == 0)
return -ENOEXEC;
*stack_value = rela_stack[--(*rela_stack_top)];
pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
return 0;
}
static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return 0;
}
static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
return -EINVAL;
}
static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*location = v;
return 0;
}
static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
*(Elf_Addr *)location = v;
return 0;
}
static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
return rela_stack_push(v, rela_stack, rela_stack_top);
}
static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
return err;
err = rela_stack_push(opr1, rela_stack, rela_stack_top);
if (err)
return err;
err = rela_stack_push(opr1, rela_stack, rela_stack_top);
if (err)
return err;
return 0;
}
static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
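/* The 26-bit PC-relative branch only reaches +/-128MB; route farther targets through a PLT entry */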
if (offset >= SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
if (offset < -SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
}
static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1, opr2, opr3;
if (type == R_LARCH_SOP_IF_ELSE) {
err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
if (err)
return err;
}
err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
if (err)
return err;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
return err;
switch (type) {
case R_LARCH_SOP_AND:
err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
break;
case R_LARCH_SOP_ADD:
err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
break;
case R_LARCH_SOP_SUB:
err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
break;
case R_LARCH_SOP_SL:
err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
break;
case R_LARCH_SOP_SR:
err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
break;
case R_LARCH_SOP_IF_ELSE:
err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return err;
}
static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
int err = 0;
s64 opr1;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
if (err)
return err;
switch (type) {
case R_LARCH_SOP_POP_32_U_10_12:
if (!unsigned_imm_check(opr1, 12))
goto overflow;
/* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
insn->reg2i12_format.immediate = opr1 & 0xfff;
return 0;
case R_LARCH_SOP_POP_32_S_10_12:
if (!signed_imm_check(opr1, 12))
goto overflow;
insn->reg2i12_format.immediate = opr1 & 0xfff;
return 0;
case R_LARCH_SOP_POP_32_S_10_16:
if (!signed_imm_check(opr1, 16))
goto overflow;
insn->reg2i16_format.immediate = opr1 & 0xffff;
return 0;
case R_LARCH_SOP_POP_32_S_10_16_S2:
if (opr1 % 4)
goto unaligned;
if (!signed_imm_check(opr1, 18))
goto overflow;
insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
return 0;
case R_LARCH_SOP_POP_32_S_5_20:
if (!signed_imm_check(opr1, 20))
goto overflow;
insn->reg1i20_format.immediate = (opr1) & 0xfffff;
return 0;
case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
if (opr1 % 4)
goto unaligned;
if (!signed_imm_check(opr1, 23))
goto overflow;
opr1 >>= 2;
insn->reg1i21_format.immediate_l = opr1 & 0xffff;
insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
return 0;
case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
if (opr1 % 4)
goto unaligned;
if (!signed_imm_check(opr1, 28))
goto overflow;
opr1 >>= 2;
insn->reg0i26_format.immediate_l = opr1 & 0xffff;
insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
return 0;
case R_LARCH_SOP_POP_32_U:
if (!unsigned_imm_check(opr1, 32))
goto overflow;
/* (*(uint32_t *) PC) = opr */
*location = (u32)opr1;
return 0;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
overflow:
pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
unaligned:
pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
mod->name, opr1, __func__, type);
return -ENOEXEC;
}
static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
switch (type) {
case R_LARCH_ADD32:
*(s32 *)location += v;
return 0;
case R_LARCH_ADD64:
*(s64 *)location += v;
return 0;
case R_LARCH_SUB32:
*(s32 *)location -= v;
return 0;
case R_LARCH_SUB64:
*(s64 *)location -= v;
return 0;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
}
static int apply_r_larch_b26(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
ptrdiff_t offset = (void *)v - (void *)location;
union loongarch_instruction *insn = (union loongarch_instruction *)location;
if (offset >= SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
if (offset < -SZ_128M)
v = module_emit_plt_entry(mod, sechdrs, v);
offset = (void *)v - (void *)location;
if (offset & 3) {
pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n",
mod->name, (long long)offset, type);
return -ENOEXEC;
}
if (!signed_imm_check(offset, 28)) {
pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n",
mod->name, (long long)offset, type);
return -ENOEXEC;
}
offset >>= 2;
insn->reg0i26_format.immediate_l = offset & 0xffff;
insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff;
return 0;
}
static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
union loongarch_instruction *insn = (union loongarch_instruction *)location;
/* Use s32 for a sign-extension deliberately. */
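/* The +0x800 bias rounds the hi20 part so the sign-extended lo12 offset still reaches v. */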
s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
(void *)((Elf_Addr)location & ~0xfff);
Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
ptrdiff_t offset_rem = (void *)v - (void *)anchor;
switch (type) {
case R_LARCH_PCALA_LO12:
insn->reg2i12_format.immediate = v & 0xfff;
break;
case R_LARCH_PCALA_HI20:
v = offset_hi20 >> 12;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
case R_LARCH_PCALA64_LO20:
v = offset_rem >> 32;
insn->reg1i20_format.immediate = v & 0xfffff;
break;
case R_LARCH_PCALA64_HI12:
v = offset_rem >> 52;
insn->reg2i12_format.immediate = v & 0xfff;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return 0;
}
static int apply_r_larch_got_pc(struct module *mod,
Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
{
Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
if (!got)
return -EINVAL;
switch (type) {
case R_LARCH_GOT_PC_LO12:
type = R_LARCH_PCALA_LO12;
break;
case R_LARCH_GOT_PC_HI20:
type = R_LARCH_PCALA_HI20;
break;
default:
pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
return -EINVAL;
}
return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
}
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @v: the value of the reloc, with addend for RELA-style
* @rela_stack: the stack used to store relocation info, LOCAL to THIS module
* @rela_stack_top: where the stack operations (pop/push) apply
*
* Return: 0 upon success, else -ERRNO
*/
typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32,
[R_LARCH_64] = apply_r_larch_64,
[R_LARCH_MARK_LA] = apply_r_larch_none,
[R_LARCH_MARK_PCREL] = apply_r_larch_none,
[R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel,
[R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute,
[R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup,
[R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *mod)
{
int i, err;
unsigned int type;
s64 rela_stack[RELA_STACK_DEPTH];
size_t rela_stack_top = 0;
reloc_rela_handler handler;
void *location;
Elf_Addr v;
Elf_Sym *sym;
Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
sechdrs[relsec].sh_info);
rela_stack_top = 0;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
if (IS_ERR_VALUE(sym->st_value)) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
return -ENOENT;
}
type = ELF_R_TYPE(rel[i].r_info);
if (type < ARRAY_SIZE(reloc_rela_handlers))
handler = reloc_rela_handlers[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n", mod->name, type);
return -EINVAL;
}
pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
(int)ELF_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)location);
v = sym->st_value + rel[i].r_addend;
switch (type) {
case R_LARCH_B26:
err = apply_r_larch_b26(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12:
err = apply_r_larch_got_pc(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
case R_LARCH_SOP_PUSH_PLT_PCREL:
err = apply_r_larch_sop_push_plt_pcrel(mod, sechdrs, location,
v, rela_stack, &rela_stack_top, type);
break;
default:
err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
}
if (err)
return err;
}
return 0;
}
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
}
static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
#ifdef CONFIG_DYNAMIC_FTRACE
struct plt_entry *ftrace_plts;
ftrace_plts = (void *)sechdrs->sh_addr;
ftrace_plts[FTRACE_PLT_IDX] = emit_plt_entry(FTRACE_ADDR);
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
ftrace_plts[FTRACE_REGS_PLT_IDX] = emit_plt_entry(FTRACE_REGS_ADDR);
mod->arch.ftrace_trampolines = ftrace_plts;
#endif
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *mod)
{
const Elf_Shdr *s, *se;
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (!strcmp(".altinstructions", secstrs + s->sh_name))
apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
module_init_ftrace_plt(hdr, s, mod);
}
return 0;
}
| linux-master | arch/loongarch/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, sechdrs, got_sec);
if (got)
return (Elf_Addr)got;
/* There is no GOT entry for val yet, create a new one. */
got = (struct got_entry *)sechdrs[got_sec->shndx].sh_addr;
got[i] = emit_got_entry(val);
got_sec->num_entries++;
if (got_sec->num_entries > got_sec->max_entries) {
/*
* This may happen when the module contains a GOT_HI20 without
* a paired GOT_LO12. Such a module is broken, reject it.
*/
pr_err("%s: module contains bad GOT relocation\n", mod->name);
return 0;
}
return (Elf_Addr)&got[i];
}
Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
int nr;
struct mod_section *plt_sec = &mod->arch.plt;
struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
struct plt_entry *plt = get_plt_entry(val, sechdrs, plt_sec, plt_idx_sec);
struct plt_idx_entry *plt_idx;
if (plt)
return (Elf_Addr)plt;
nr = plt_sec->num_entries;
/* There is no duplicate entry, create a new one */
plt = (struct plt_entry *)sechdrs[plt_sec->shndx].sh_addr;
plt[nr] = emit_plt_entry(val);
plt_idx = (struct plt_idx_entry *)sechdrs[plt_idx_sec->shndx].sh_addr;
plt_idx[nr] = emit_plt_idx_entry(val);
plt_sec->num_entries++;
plt_idx_sec->num_entries++;
BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
return (Elf_Addr)&plt[nr];
}
static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
{
return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
static bool duplicate_rela(const Elf_Rela *rela, int idx)
{
int i;
for (i = 0; i < idx; i++) {
if (is_rela_equal(&rela[i], &rela[idx]))
return true;
}
return false;
}
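/* Count the PLT and GOT entries needed, skipping relocations that duplicate an earlier one */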
static void count_max_entries(Elf_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
unsigned int i, type;
for (i = 0; i < num; i++) {
type = ELF_R_TYPE(relas[i].r_info);
switch (type) {
case R_LARCH_SOP_PUSH_PLT_PCREL:
case R_LARCH_B26:
if (!duplicate_rela(relas, i))
(*plts)++;
break;
case R_LARCH_GOT_PC_HI20:
if (!duplicate_rela(relas, i))
(*gots)++;
break;
default:
break; /* Do nothing. */
}
}
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned int i, num_plts = 0, num_gots = 0;
Elf_Shdr *got_sec, *plt_sec, *plt_idx_sec, *tramp = NULL;
/*
* Find the empty .got, .plt, .plt.idx and .ftrace_trampoline sections.
*/
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
mod->arch.got.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.plt.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
mod->arch.plt_idx.shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".ftrace_trampoline"))
tramp = sechdrs + i;
}
if (!mod->arch.got.shndx) {
pr_err("%s: module GOT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.plt.shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.plt_idx.shndx) {
pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
return -ENOEXEC;
}
/* Calculate the maximum number of entries */
for (i = 0; i < ehdr->e_shnum; i++) {
int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
/* ignore relocations that operate on non-exec sections */
if (!(dst_sec->sh_flags & SHF_EXECINSTR))
continue;
count_max_entries(relas, num_rela, &num_plts, &num_gots);
}
got_sec = sechdrs + mod->arch.got.shndx;
got_sec->sh_type = SHT_NOBITS;
got_sec->sh_flags = SHF_ALLOC;
got_sec->sh_addralign = L1_CACHE_BYTES;
got_sec->sh_size = (num_gots + 1) * sizeof(struct got_entry);
mod->arch.got.num_entries = 0;
mod->arch.got.max_entries = num_gots;
plt_sec = sechdrs + mod->arch.plt.shndx;
plt_sec->sh_type = SHT_NOBITS;
plt_sec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
plt_sec->sh_addralign = L1_CACHE_BYTES;
plt_sec->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
mod->arch.plt.num_entries = 0;
mod->arch.plt.max_entries = num_plts;
plt_idx_sec = sechdrs + mod->arch.plt_idx.shndx;
plt_idx_sec->sh_type = SHT_NOBITS;
plt_idx_sec->sh_flags = SHF_ALLOC;
plt_idx_sec->sh_addralign = L1_CACHE_BYTES;
plt_idx_sec->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
mod->arch.plt_idx.num_entries = 0;
mod->arch.plt_idx.max_entries = num_plts;
if (tramp) {
tramp->sh_type = SHT_NOBITS;
tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
tramp->sh_addralign = __alignof__(struct plt_entry);
tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
}
return 0;
}
| linux-master | arch/loongarch/kernel/module-sections.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <asm/page.h>
#include <asm/vdso.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>
#include <generated/vdso-offsets.h>
extern char vdso_start[], vdso_end[];
/* Kernel-provided data used by the VDSO. */
static union {
u8 page[PAGE_SIZE];
struct vdso_data data[CS_BASES];
} generic_vdso_data __page_aligned_data;
static union {
u8 page[LOONGARCH_VDSO_DATA_SIZE];
struct loongarch_vdso_data vdata;
} loongarch_vdso_data __page_aligned_data;
static struct page *vdso_pages[] = { NULL };
struct vdso_data *vdso_data = generic_vdso_data.data;
struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
current->mm->context.vdso = (void *)(new_vma->vm_start);
return 0;
}
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
unsigned long pfn;
struct page *timens_page = find_timens_vvar_page(vma);
switch (vmf->pgoff) {
case VVAR_GENERIC_PAGE_OFFSET:
if (!timens_page)
pfn = sym_to_pfn(vdso_data);
else
pfn = page_to_pfn(timens_page);
break;
#ifdef CONFIG_TIME_NS
case VVAR_TIMENS_PAGE_OFFSET:
/*
* If a task belongs to a time namespace then a namespace specific
* VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
* VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
* See also the comment near timens_setup_vdso_data().
*/
if (!timens_page)
return VM_FAULT_SIGBUS;
else
pfn = sym_to_pfn(vdso_data);
break;
#endif /* CONFIG_TIME_NS */
case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
break;
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_pfn(vma, vmf->address, pfn);
}
struct loongarch_vdso_info vdso_info = {
.vdso = vdso_start,
.size = PAGE_SIZE,
.code_mapping = {
.name = "[vdso]",
.pages = vdso_pages,
.mremap = vdso_mremap,
},
.data_mapping = {
.name = "[vvar]",
.fault = vvar_fault,
},
.offset_sigreturn = vdso_offset_sigreturn,
};
static int __init init_vdso(void)
{
unsigned long i, cpu, pfn;
BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
BUG_ON(!PAGE_ALIGNED(vdso_info.size));
for_each_possible_cpu(cpu)
vdso_pdata[cpu].node = cpu_to_node(cpu);
pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
return 0;
}
subsys_initcall(init_vdso);
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
return (struct vdso_data *)(vvar_page);
}
/*
* The vvar mapping contains data for a specific time namespace, so when a
* task changes namespace we must unmap its vvar data for the old namespace.
* Subsequent faults will map in data for the new namespace.
*
* For more details see timens_setup_vdso_data().
*/
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
for_each_vma(vmi, vma) {
if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
zap_vma_pages(vma);
}
mmap_read_unlock(mm);
return 0;
}
#endif
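/* Pick a base address for the vvar/vdso mapping, randomized above STACK_TOP when ASLR is enabled */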
static unsigned long vdso_base(void)
{
unsigned long base = STACK_TOP;
if (current->flags & PF_RANDOMIZE) {
base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
base = PAGE_ALIGN(base);
}
return base;
}
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long size, data_addr, vdso_addr;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct loongarch_vdso_info *info = current->thread.vdso;
if (mmap_write_lock_killable(mm))
return -EINTR;
/*
* Determine total area size. This includes the VDSO code itself
* and the vvar data pages.
*/
size = VVAR_SIZE + info->size;
data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(data_addr)) {
ret = data_addr;
goto out;
}
vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
VM_READ | VM_MAYREAD | VM_PFNMAP,
&info->data_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
vdso_addr = data_addr + VVAR_SIZE;
vma = _install_special_mapping(mm, vdso_addr, info->size,
VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
&info->code_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out;
}
mm->context.vdso = (void *)vdso_addr;
ret = 0;
out:
mmap_write_unlock(mm);
return ret;
}
| linux-master | arch/loongarch/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <asm/unwind.h>
unsigned long unwind_get_return_address(struct unwind_state *state)
{
return __unwind_get_return_address(state);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
void unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs)
{
__unwind_start(state, task, regs);
if (!unwind_done(state) && !__kernel_text_address(state->pc))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);
bool unwind_next_frame(struct unwind_state *state)
{
return default_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
| linux-master | arch/loongarch/kernel/unwind_guess.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;
if (!csize)
return 0;
vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
if (!vaddr)
return -ENOMEM;
csize = copy_to_iter(vaddr + offset, csize, iter);
memunmap(vaddr);
return csize;
}
| linux-master | arch/loongarch/kernel/crash_dump.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <asm/sections.h>
int __read_mostly alternatives_patched;
EXPORT_SYMBOL_GPL(alternatives_patched);
#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
static int __initdata_or_module debug_alternative;
static int __init debug_alt(char *str)
{
debug_alternative = 1;
return 1;
}
__setup("debug-alternative", debug_alt);
#define DPRINTK(fmt, args...) \
do { \
if (debug_alternative) \
printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
} while (0)
#define DUMP_WORDS(buf, count, fmt, args...) \
do { \
if (unlikely(debug_alternative)) { \
int _j; \
union loongarch_instruction *_buf = buf; \
\
if (!(count)) \
break; \
\
printk(KERN_DEBUG fmt, ##args); \
for (_j = 0; _j < count - 1; _j++) \
printk(KERN_CONT "<%08x> ", _buf[_j].word); \
printk(KERN_CONT "<%08x>\n", _buf[_j].word); \
} \
} while (0)
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
{
while (count--) {
insn->word = INSN_NOP;
insn++;
}
}
/* Is the jump addr in local .altinstructions */
static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
{
return jump >= (unsigned long)start && jump < (unsigned long)end;
}
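/* Re-encode a PC-relative branch copied from @src so that, when run at @dest, it still reaches its original target (unless the target lies inside the alternative block). */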
static void __init_or_module recompute_jump(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src,
void *start, void *end)
{
unsigned int si, si_l, si_h;
unsigned long cur_pc, jump_addr, pc;
long offset;
cur_pc = (unsigned long)src;
pc = (unsigned long)dest;
si_l = src->reg0i26_format.immediate_l;
si_h = src->reg0i26_format.immediate_h;
switch (src->reg0i26_format.opcode) {
case b_op:
case bl_op:
jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
offset >>= 2;
buf->reg0i26_format.immediate_h = offset >> 16;
buf->reg0i26_format.immediate_l = offset;
return;
}
si_l = src->reg1i21_format.immediate_l;
si_h = src->reg1i21_format.immediate_h;
switch (src->reg1i21_format.opcode) {
case bceqz_op: /* bceqz_op = bcnez_op */
BUG_ON(buf->reg1i21_format.rj & BIT(4));
fallthrough;
case beqz_op:
case bnez_op:
jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
offset >>= 2;
buf->reg1i21_format.immediate_h = offset >> 16;
buf->reg1i21_format.immediate_l = offset;
return;
}
si = src->reg2i16_format.immediate;
switch (src->reg2i16_format.opcode) {
case beq_op:
case bne_op:
case blt_op:
case bge_op:
case bltu_op:
case bgeu_op:
jump_addr = cur_pc + sign_extend64(si << 2, 17);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
offset >>= 2;
buf->reg2i16_format.immediate = offset;
return;
}
}
static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
{
int i;
for (i = 0; i < nr; i++) {
buf[i].word = src[i].word;
if (is_pc_ins(&src[i])) {
pr_err("Not support pcrel instruction at present!");
return -EINVAL;
}
if (is_branch_ins(&src[i]) &&
src[i].reg2i16_format.opcode != jirl_op) {
recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
}
}
return 0;
}
/*
* text_poke_early - Update instructions on a live kernel at boot time
*
* When you use this code to patch more than one byte of an instruction
* you need to make sure that other CPUs cannot execute this code in parallel.
* Also no thread must be currently preempted in the middle of these
* instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
union loongarch_instruction *buf, unsigned int nr)
{
int i;
unsigned long flags;
local_irq_save(flags);
for (i = 0; i < nr; i++)
insn[i].word = buf[i].word;
local_irq_restore(flags);
wbflush();
flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
return insn;
}
/*
* Replace instructions with better alternatives for this CPU type. This runs
* before SMP is initialized to avoid SMP problems with self modifying code.
* This implies that asymmetric systems where APs have less capabilities than
* the boot processor are not handled. Tough. Make sure you disable such
* features by hand.
*/
void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
struct alt_instr *a;
unsigned int nr_instr, nr_repl, nr_insnbuf;
union loongarch_instruction *instr, *replacement;
union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
DPRINTK("alt table %px, -> %px", start, end);
/*
* The scan order should be from start to end: alternative code scanned
* later can overwrite alternative code scanned earlier.
* Some kernel functions (e.g. memcpy, memset, etc) use this order to
* patch code.
*
* So be careful if you want to change the scan order to any other
* order.
*/
for (a = start; a < end; a++) {
nr_insnbuf = 0;
instr = (void *)&a->instr_offset + a->instr_offset;
replacement = (void *)&a->replace_offset + a->replace_offset;
BUG_ON(a->instrlen > sizeof(insnbuf));
BUG_ON(a->instrlen & 0x3);
BUG_ON(a->replacementlen & 0x3);
nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
if (!cpu_has(a->feature)) {
DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
continue;
}
DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
a->feature, instr, a->instrlen,
replacement, a->replacementlen);
DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
copy_alt_insns(insnbuf, instr, replacement, nr_repl);
nr_insnbuf = nr_repl;
if (nr_instr > nr_repl) {
add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
nr_insnbuf += nr_instr - nr_repl;
}
DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
text_poke_early(instr, insnbuf, nr_insnbuf);
}
}
void __init alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
alternatives_patched = 1;
}
| linux-master | arch/loongarch/kernel/alternative.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Author: Hanlu Li <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/capability.h>
#include <linux/entry-common.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/asm.h>
#include <asm/exception.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
prot, unsigned long, flags, unsigned long, fd, off_t, offset)
{
if (offset & ~PAGE_MASK)
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
}
void *sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
typedef long (*sys_call_fn)(unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long, unsigned long);
void noinstr do_syscall(struct pt_regs *regs)
{
unsigned long nr;
sys_call_fn syscall_fn;
nr = regs->regs[11];
/* Set for syscall restarting */
if (nr < NR_syscalls)
regs->regs[0] = nr + 1;
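/* Step over the 4-byte syscall instruction so the return lands on the next instruction */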
regs->csr_era += 4;
regs->orig_a0 = regs->regs[4];
regs->regs[4] = -ENOSYS;
nr = syscall_enter_from_user_mode(regs, nr);
if (nr < NR_syscalls) {
syscall_fn = sys_call_table[nr];
regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
regs->regs[7], regs->regs[8], regs->regs[9]);
}
syscall_exit_to_user_mode(regs);
}
| linux-master | arch/loongarch/kernel/syscall.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/types.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <acpi/reboot.h>
#include <asm/idle.h>
#include <asm/loongarch.h>
#include <asm/loongson.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void machine_halt(void)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
local_irq_disable();
clear_csr_ecfg(ECFG0_IM);
pr_notice("\n\n** You can safely turn off the power now **\n\n");
console_flush_on_panic(CONSOLE_FLUSH_PENDING);
while (true) {
__arch_cpu_idle();
}
}
void machine_power_off(void)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
#ifdef CONFIG_PM
if (!acpi_disabled)
enable_pci_wakeup();
#endif
do_kernel_power_off();
#ifdef CONFIG_EFI
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
#endif
while (true) {
__arch_cpu_idle();
}
}
void machine_restart(char *command)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
do_kernel_restart(command);
#ifdef CONFIG_EFI
if (efi_capsule_pending(NULL))
efi_reboot(REBOOT_WARM, NULL);
else
efi_reboot(REBOOT_COLD, NULL);
#endif
if (!acpi_disabled)
acpi_reboot();
while (true) {
__arch_cpu_idle();
}
}
| linux-master | arch/loongarch/kernel/reset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* EFI initialization
*
* Author: Jianmin Lv <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/memblock.h>
#include <linux/reboot.h>
#include <linux/screen_info.h>
#include <linux/uaccess.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#include <asm/loongson.h>
static unsigned long efi_nr_tables;
static unsigned long efi_config_table;
static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR;
static efi_system_table_t *efi_systab;
static efi_config_table_type_t arch_tables[] __initdata = {
{LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" },
{DEVICE_TREE_GUID, &fdt_pointer, "FDTPTR" },
{},
};
void __init *efi_fdt_pointer(void)
{
if (!efi_systab)
return NULL;
if (fdt_pointer == EFI_INVALID_TABLE_ADDR)
return NULL;
return early_memremap_ro(fdt_pointer, SZ_64K);
}
void __init efi_runtime_init(void)
{
if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime)
return;
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return;
}
efi.runtime = (efi_runtime_services_t *)efi_systab->runtime;
efi.runtime_version = (unsigned int)efi.runtime->hdr.revision;
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
static void __init init_screen_info(void)
{
struct screen_info *si;
if (screen_info_table == EFI_INVALID_TABLE_ADDR)
return;
si = early_memremap(screen_info_table, sizeof(*si));
if (!si) {
pr_err("Could not map screen_info config table\n");
return;
}
screen_info = *si;
memset(si, 0, sizeof(*si));
early_memunmap(si, sizeof(*si));
memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
}
void __init efi_init(void)
{
int size;
void *config_tables;
struct efi_boot_memmap *tbl;
if (!efi_system_table)
return;
efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab));
if (!efi_systab) {
pr_err("Can't find EFI system table.\n");
return;
}
efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
set_bit(EFI_64BIT, &efi.flags);
efi_nr_tables = efi_systab->nr_tables;
efi_config_table = (unsigned long)efi_systab->tables;
size = sizeof(efi_config_table_t);
config_tables = early_memremap(efi_config_table, efi_nr_tables * size);
efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
early_memunmap(config_tables, efi_nr_tables * size);
set_bit(EFI_CONFIG_TABLES, &efi.flags);
init_screen_info();
if (boot_memmap == EFI_INVALID_TABLE_ADDR)
return;
tbl = early_memremap_ro(boot_memmap, sizeof(*tbl));
if (tbl) {
struct efi_memory_map_data data;
data.phys_map = boot_memmap + sizeof(*tbl);
data.size = tbl->map_size;
data.desc_size = tbl->desc_size;
data.desc_version = tbl->desc_ver;
if (efi_memmap_init_early(&data) < 0)
panic("Unable to map EFI memory map.\n");
early_memunmap(tbl, sizeof(*tbl));
}
}
| linux-master | arch/loongarch/kernel/efi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic return hook for LoongArch.
*/
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include "rethook.h"
/* This is called from arch_rethook_trampoline() */
unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
{
return rethook_trampoline_handler(regs, 0);
}
NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount)
{
rhn->frame = 0;
rhn->ret_addr = regs->regs[1];
/* replace return addr with trampoline */
regs->regs[1] = (unsigned long)arch_rethook_trampoline;
}
NOKPROBE_SYMBOL(arch_rethook_prepare);
/* ASM function that handles the rethook must not be probed itself */
NOKPROBE_SYMBOL(arch_rethook_trampoline);
| linux-master | arch/loongarch/kernel/rethook.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022-2023 Loongson Technology Corporation Limited
*/
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <asm/unwind.h>
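/* Guess-based fallback unwinding: scan stack slots for values that look like kernel text addresses */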
bool default_next_frame(struct unwind_state *state)
{
struct stack_info *info = &state->stack_info;
unsigned long addr;
if (unwind_done(state))
return false;
do {
for (state->sp += sizeof(unsigned long);
state->sp < info->end; state->sp += sizeof(unsigned long)) {
addr = *(unsigned long *)(state->sp);
state->pc = unwind_graph_addr(state, addr, state->sp + 8);
if (__kernel_text_address(state->pc))
return true;
}
state->sp = info->next_sp;
} while (!get_stack_info(state->sp, state->task, info));
state->error = true;
return false;
}
| linux-master | arch/loongarch/kernel/unwind.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1995 Linus Torvalds
* Copyright (C) 1995 Waldorf Electronics
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <linux/suspend.h>
#include <linux/swiotlb.h>
#include <asm/addrspace.h>
#include <asm/alternative.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/time.h>
#define SMBIOS_BIOSSIZE_OFFSET 0x09
#define SMBIOS_BIOSEXTERN_OFFSET 0x13
#define SMBIOS_FREQLOW_OFFSET 0x16
#define SMBIOS_FREQHIGH_OFFSET 0x17
#define SMBIOS_FREQLOW_MASK 0xFF
#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
#define LOONGSON_EFI_ENABLE (1 << 3)
struct screen_info screen_info __section(".data");
unsigned long fw_arg0, fw_arg1, fw_arg2;
DEFINE_PER_CPU(unsigned long, kernelsp);
struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
struct loongson_board_info b_info;
static const char dmi_empty_string[] = " ";
/*
* Setup information
*
* These are initialized so they are in the .data section
*/
char init_command_line[COMMAND_LINE_SIZE] __initdata;
static int num_standard_resources;
static struct resource *standard_resources;
static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
const char *get_system_type(void)
{
return "generic-loongson-machine";
}
void __init arch_cpu_finalize_init(void)
{
alternative_instructions();
}
static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
if (s) {
s--;
while (s > 0 && *bp) {
bp += strlen(bp) + 1;
s--;
}
if (*bp != 0) {
size_t len = strlen(bp)+1;
size_t cmp_len = len > 8 ? 8 : len;
if (!memcmp(bp, dmi_empty_string, cmp_len))
return dmi_empty_string;
return bp;
}
}
return "";
}
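/* Parse the SMBIOS Processor Information entry: CPU clock frequency, model name and cores per package. */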
static void __init parse_cpu_table(const struct dmi_header *dm)
{
long freq_temp = 0;
char *dmi_data = (char *)dm;
freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
cpu_clock_freq = freq_temp * 1000000;
loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
pr_info("CpuClock = %llu\n", cpu_clock_freq);
}
static void __init parse_bios_table(const struct dmi_header *dm)
{
char *dmi_data = (char *)dm;
b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
}
static void __init find_tokens(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0x0: /* BIOS Information */
parse_bios_table(dm);
break;
case 0x4: /* Processor Information */
parse_cpu_table(dm);
break;
}
}
static void __init smbios_parse(void)
{
b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
dmi_walk(find_tokens, NULL);
}
#ifdef CONFIG_ARCH_WRITECOMBINE
pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
#else
pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
#endif
EXPORT_SYMBOL(pgprot_wc);
static int __init setup_writecombine(char *p)
{
if (!strcmp(p, "on"))
pgprot_wc = PAGE_KERNEL_WUC;
else if (!strcmp(p, "off"))
pgprot_wc = PAGE_KERNEL_SUC;
else
pr_warn("Unknown writecombine setting \"%s\".\n", p);
return 0;
}
early_param("writecombine", setup_writecombine);
static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
phys_addr_t start, size;
if (!p) {
pr_err("mem parameter is empty, do nothing\n");
return -EINVAL;
}
/*
* If a user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
memblock_remove(memblock_start_of_DRAM(),
memblock_end_of_DRAM() - memblock_start_of_DRAM());
}
start = 0;
size = memparse(p, &p);
if (*p == '@')
start = memparse(p + 1, &p);
else {
pr_err("Invalid format!\n");
return -EINVAL;
}
if (!IS_ENABLED(CONFIG_NUMA))
memblock_add(start, size);
else
memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
return 0;
}
early_param("mem", early_parse_mem);
static void __init arch_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
u64 i;
phys_addr_t start, end;
if (!is_kdump_kernel())
return;
if (!elfcorehdr_size) {
for_each_mem_range(i, &start, &end) {
if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
/*
* Reserve the range from the ELF core header to the end of
* the memory segment; all of that should be kdump reserved
* memory.
*/
elfcorehdr_size = end - elfcorehdr_addr;
break;
}
}
}
if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
pr_warn("elfcorehdr is overlapped\n");
return;
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
#endif
}
/* 2MB alignment for crash kernel regions */
#define CRASH_ALIGN SZ_2M
#define CRASH_ADDR_MAX SZ_4G
static void __init arch_parse_crashkernel(void)
{
#ifdef CONFIG_KEXEC
int ret;
unsigned long long total_mem;
unsigned long long crash_base, crash_size;
total_mem = memblock_phys_mem_size();
ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
if (ret < 0 || crash_size <= 0)
return;
if (crash_base <= 0) {
crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
if (!crash_base) {
pr_warn("crashkernel reservation failed - No suitable area found.\n");
return;
}
} else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
#endif
}
static void __init fdt_setup(void)
{
#ifdef CONFIG_OF_EARLY_FLATTREE
void *fdt_pointer;
/* ACPI-based systems do not require parsing fdt */
if (acpi_os_get_root_pointer())
return;
/* Look for a device tree configuration table entry */
fdt_pointer = efi_fdt_pointer();
if (!fdt_pointer || fdt_check_header(fdt_pointer))
return;
early_init_dt_scan(fdt_pointer);
early_init_fdt_reserve_self();
max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
#endif
}
static void __init bootcmdline_init(char **cmdline_p)
{
/*
* If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
* is trivial - we simply use the built-in command line unconditionally &
* unmodified.
*/
if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
goto out;
}
#ifdef CONFIG_OF_FLATTREE
/*
* If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based system,
* the boot_command_line will be overwritten by early_init_dt_scan_chosen().
* So we need to append init_command_line (the original copy of boot_command_line)
* to boot_command_line.
*/
if (initial_boot_params) {
if (boot_command_line[0])
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
goto out;
}
#endif
/*
* Append built-in command line to the bootloader command line if
* CONFIG_CMDLINE_EXTEND is enabled.
*/
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
}
/*
* Use built-in command line if the bootloader command line is empty.
*/
if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
out:
*cmdline_p = boot_command_line;
}
void __init platform_init(void)
{
arch_reserve_vmcore();
arch_parse_crashkernel();
#ifdef CONFIG_ACPI_TABLE_UPGRADE
acpi_table_upgrade();
#endif
#ifdef CONFIG_ACPI
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
unflatten_and_copy_device_tree();
#ifdef CONFIG_NUMA
init_numa_memory();
#endif
dmi_setup();
smbios_parse();
pr_info("The BIOS Version: %s\n", b_info.bios_version);
efi_runtime_init();
}
static void __init check_kernel_sections_mem(void)
{
phys_addr_t start = __pa_symbol(&_text);
phys_addr_t size = __pa_symbol(&_end) - start;
if (!memblock_is_region_memory(start, size)) {
pr_info("Kernel sections are not in the memory maps\n");
memblock_add(start, size);
}
}
/*
* arch_mem_init - initialize memory management subsystem
*/
static void __init arch_mem_init(char **cmdline_p)
{
if (usermem)
pr_info("User-defined physical RAM map overwrite\n");
check_kernel_sections_mem();
early_init_fdt_scan_reserved_mem();
/*
* To reduce the possibility of a kernel panic when swiotlb_init()
* fails to get IO TLB memory under CONFIG_SWIOTLB, allocate as little
* low memory as possible beforehand, so have sparse_init() use
* top-down allocation.
*/
memblock_set_bottom_up(false);
sparse_init();
memblock_set_bottom_up(true);
swiotlb_init(true, SWIOTLB_VERBOSE);
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
/* Reserve for hibernation. */
register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
PFN_UP(__pa_symbol(&__nosave_end)));
memblock_dump_all();
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
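/*
 * Register kernel code/data/bss and all memblock memory regions as
 * resources under /proc/iomem, plus the crashkernel region if any.
 */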
static void __init resource_init(void)
{
long i = 0;
size_t res_size;
struct resource *res;
struct memblock_region *region;
code_resource.start = __pa_symbol(&_text);
code_resource.end = __pa_symbol(&_etext) - 1;
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
bss_resource.start = __pa_symbol(&__bss_start);
bss_resource.end = __pa_symbol(&__bss_stop) - 1;
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
for_each_mem_region(region) {
res = &standard_resources[i++];
if (!memblock_is_nomap(region)) {
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
} else {
res->name = "Reserved";
res->flags = IORESOURCE_MEM;
res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
}
request_resource(&iomem_resource, res);
/*
* We don't know which RAM region contains kernel data,
* so we try it repeatedly and let the resource manager
* test it.
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
}
#ifdef CONFIG_KEXEC
if (crashk_res.start < crashk_res.end) {
insert_resource(&iomem_resource, &crashk_res);
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
(unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#endif
}
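/*
 * Register a legacy ISA I/O range with logic PIO and map it at the
 * beginning of PCI_IOBASE.
 */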
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
resource_size_t hw_start, resource_size_t size)
{
int ret = 0;
unsigned long vaddr;
struct logic_pio_hwaddr *range;
range = kzalloc(sizeof(*range), GFP_ATOMIC);
if (!range)
return -ENOMEM;
range->fwnode = fwnode;
range->size = size = round_up(size, PAGE_SIZE);
range->hw_start = hw_start;
range->flags = LOGIC_PIO_CPU_MMIO;
ret = logic_pio_register_range(range);
if (ret) {
kfree(range);
return ret;
}
/* Legacy ISA must be placed at the start of PCI_IOBASE */
if (range->io_start != 0) {
logic_pio_unregister_range(range);
kfree(range);
return -EINVAL;
}
vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
return 0;
}
static __init int arch_reserve_pio_range(void)
{
struct device_node *np;
for_each_node_by_name(np, "isa") {
struct of_range range;
struct of_range_parser parser;
pr_info("ISA Bridge: %pOF\n", np);
if (of_range_parser_init(&parser, np)) {
pr_info("Failed to parse resources.\n");
of_node_put(np);
break;
}
for_each_of_range(&parser, &range) {
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
pr_warn("Failed to reserve legacy IO in Logic PIO\n");
break;
case IORESOURCE_MEM:
pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
range.cpu_addr,
range.cpu_addr + range.size - 1,
range.bus_addr);
break;
}
}
}
return 0;
}
arch_initcall(arch_reserve_pio_range);
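/*
 * Split out memblock-reserved ranges as "Reserved" child resources of
 * the standard memory resources registered above.
 */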
static int __init reserve_memblock_reserved_regions(void)
{
u64 i, j;
for (i = 0; i < num_standard_resources; ++i) {
struct resource *mem = &standard_resources[i];
phys_addr_t r_start, r_end, mem_size = resource_size(mem);
if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
for_each_reserved_mem_range(j, &r_start, &r_end) {
resource_size_t start, end;
start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
if (start > mem->end || end < mem->start)
continue;
reserve_region_with_split(mem, start, end, "Reserved");
}
}
return 0;
}
arch_initcall(reserve_memblock_reserved_regions);
#ifdef CONFIG_SMP
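/*
 * Derive cpu_possible_mask from the detected plus disabled (hotpluggable)
 * CPUs, capped at nr_cpu_ids.
 */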
static void __init prefill_possible_map(void)
{
int i, possible;
possible = num_processors + disabled_cpus;
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max((possible - num_processors), 0));
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
set_nr_cpu_ids(possible);
}
#endif
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
init_environ();
efi_init();
fdt_setup();
memblock_init();
pagetable_init();
bootcmdline_init(cmdline_p);
parse_early_param();
reserve_initrd_mem();
platform_init();
arch_mem_init(cmdline_p);
resource_init();
#ifdef CONFIG_SMP
plat_smp_setup();
prefill_possible_map();
#endif
paging_init();
#ifdef CONFIG_KASAN
kasan_init();
#endif
}
| linux-master | arch/loongarch/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LoongArch cacheinfo support
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/cacheinfo.h>
#include <linux/topology.h>
#include <asm/bootinfo.h>
#include <asm/cpu-info.h>
int init_cache_level(unsigned int cpu)
{
int cache_present = current_cpu_data.cache_leaves_present;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
this_cpu_ci->num_levels =
current_cpu_data.cache_leaves[cache_present - 1].level;
this_cpu_ci->num_leaves = cache_present;
return 0;
}
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
{
return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
&& !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
}
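/*
 * Build shared_cpu_map for every cache leaf: leaves not marked
 * CACHE_PRIVATE are shared between CPUs of the same node.
 */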
static void cache_cpumap_setup(unsigned int cpu)
{
unsigned int index;
struct cacheinfo *this_leaf, *sib_leaf;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (index = 0; index < this_cpu_ci->num_leaves; index++) {
unsigned int i;
this_leaf = this_cpu_ci->info_list + index;
/* skip if shared_cpu_map is already populated */
if (!cpumask_empty(&this_leaf->shared_cpu_map))
continue;
cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
for_each_online_cpu(i) {
struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
if (i == cpu || !sib_cpu_ci->info_list ||
(cpu_to_node(i) != cpu_to_node(cpu)))
continue;
sib_leaf = sib_cpu_ci->info_list + index;
if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
}
}
}
}
int populate_cache_leaves(unsigned int cpu)
{
int i, cache_present = current_cpu_data.cache_leaves_present;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;
for (i = 0; i < cache_present; i++) {
cd = cdesc + i;
this_leaf->type = cd->type;
this_leaf->level = cd->level;
this_leaf->coherency_line_size = cd->linesz;
this_leaf->number_of_sets = cd->sets;
this_leaf->ways_of_associativity = cd->ways;
this_leaf->size = cd->linesz * cd->sets * cd->ways;
this_leaf->priv = &cd->flags;
this_leaf++;
}
cache_cpumap_setup(cpu);
this_cpu_ci->cpu_map_populated = true;
return 0;
}
| linux-master | arch/loongarch/kernel/cacheinfo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/dma-direct.h>
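/*
 * Derive the device's DMA range map and bus/coherent DMA masks from
 * the DMA ranges described by ACPI.
 */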
void acpi_arch_dma_setup(struct device *dev)
{
int ret;
u64 mask, end = 0;
const struct bus_dma_region *map = NULL;
ret = acpi_dma_get_range(dev, &map);
if (!ret && map) {
const struct bus_dma_region *r = map;
for (end = 0; r->size; r++) {
if (r->dma_start + r->size - 1 > end)
end = r->dma_start + r->size - 1;
}
mask = DMA_BIT_MASK(ilog2(end) + 1);
dev->bus_dma_limit = end;
dev->dma_range_map = map;
dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
*dev->dma_mask = min(*dev->dma_mask, mask);
}
}
| linux-master | arch/loongarch/kernel/dma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Handle unaligned accesses by emulation.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include "access-helper.h"
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions_user;
static u32 unaligned_instructions_kernel;
#endif
static inline unsigned long read_fpr(unsigned int idx)
{
#define READ_FPR(idx, __value) \
__asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
unsigned long __value;
switch (idx) {
case 0:
READ_FPR(0, __value);
break;
case 1:
READ_FPR(1, __value);
break;
case 2:
READ_FPR(2, __value);
break;
case 3:
READ_FPR(3, __value);
break;
case 4:
READ_FPR(4, __value);
break;
case 5:
READ_FPR(5, __value);
break;
case 6:
READ_FPR(6, __value);
break;
case 7:
READ_FPR(7, __value);
break;
case 8:
READ_FPR(8, __value);
break;
case 9:
READ_FPR(9, __value);
break;
case 10:
READ_FPR(10, __value);
break;
case 11:
READ_FPR(11, __value);
break;
case 12:
READ_FPR(12, __value);
break;
case 13:
READ_FPR(13, __value);
break;
case 14:
READ_FPR(14, __value);
break;
case 15:
READ_FPR(15, __value);
break;
case 16:
READ_FPR(16, __value);
break;
case 17:
READ_FPR(17, __value);
break;
case 18:
READ_FPR(18, __value);
break;
case 19:
READ_FPR(19, __value);
break;
case 20:
READ_FPR(20, __value);
break;
case 21:
READ_FPR(21, __value);
break;
case 22:
READ_FPR(22, __value);
break;
case 23:
READ_FPR(23, __value);
break;
case 24:
READ_FPR(24, __value);
break;
case 25:
READ_FPR(25, __value);
break;
case 26:
READ_FPR(26, __value);
break;
case 27:
READ_FPR(27, __value);
break;
case 28:
READ_FPR(28, __value);
break;
case 29:
READ_FPR(29, __value);
break;
case 30:
READ_FPR(30, __value);
break;
case 31:
READ_FPR(31, __value);
break;
default:
panic("unexpected idx '%d'", idx);
}
#undef READ_FPR
return __value;
}
static inline void write_fpr(unsigned int idx, unsigned long value)
{
#define WRITE_FPR(idx, value) \
__asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
switch (idx) {
case 0:
WRITE_FPR(0, value);
break;
case 1:
WRITE_FPR(1, value);
break;
case 2:
WRITE_FPR(2, value);
break;
case 3:
WRITE_FPR(3, value);
break;
case 4:
WRITE_FPR(4, value);
break;
case 5:
WRITE_FPR(5, value);
break;
case 6:
WRITE_FPR(6, value);
break;
case 7:
WRITE_FPR(7, value);
break;
case 8:
WRITE_FPR(8, value);
break;
case 9:
WRITE_FPR(9, value);
break;
case 10:
WRITE_FPR(10, value);
break;
case 11:
WRITE_FPR(11, value);
break;
case 12:
WRITE_FPR(12, value);
break;
case 13:
WRITE_FPR(13, value);
break;
case 14:
WRITE_FPR(14, value);
break;
case 15:
WRITE_FPR(15, value);
break;
case 16:
WRITE_FPR(16, value);
break;
case 17:
WRITE_FPR(17, value);
break;
case 18:
WRITE_FPR(18, value);
break;
case 19:
WRITE_FPR(19, value);
break;
case 20:
WRITE_FPR(20, value);
break;
case 21:
WRITE_FPR(21, value);
break;
case 22:
WRITE_FPR(22, value);
break;
case 23:
WRITE_FPR(23, value);
break;
case 24:
WRITE_FPR(24, value);
break;
case 25:
WRITE_FPR(25, value);
break;
case 26:
WRITE_FPR(26, value);
break;
case 27:
WRITE_FPR(27, value);
break;
case 28:
WRITE_FPR(28, value);
break;
case 29:
WRITE_FPR(29, value);
break;
case 30:
WRITE_FPR(30, value);
break;
case 31:
WRITE_FPR(31, value);
break;
default:
panic("unexpected idx '%d'", idx);
}
#undef WRITE_FPR
}
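/*
 * Decode the faulting load/store instruction and emulate the unaligned
 * access in software, writing back to either a GPR or an FPR.
 */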
void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc)
{
bool fp = false;
bool sign, write;
bool user = user_mode(regs);
unsigned int res, size = 0;
unsigned long value = 0;
union loongarch_instruction insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
__get_inst(&insn.word, pc, user);
switch (insn.reg2i12_format.opcode) {
case ldh_op:
size = 2;
sign = true;
write = false;
break;
case ldhu_op:
size = 2;
sign = false;
write = false;
break;
case sth_op:
size = 2;
sign = true;
write = true;
break;
case ldw_op:
size = 4;
sign = true;
write = false;
break;
case ldwu_op:
size = 4;
sign = false;
write = false;
break;
case stw_op:
size = 4;
sign = true;
write = true;
break;
case ldd_op:
size = 8;
sign = true;
write = false;
break;
case std_op:
size = 8;
sign = true;
write = true;
break;
case flds_op:
size = 4;
fp = true;
sign = true;
write = false;
break;
case fsts_op:
size = 4;
fp = true;
sign = true;
write = true;
break;
case fldd_op:
size = 8;
fp = true;
sign = true;
write = false;
break;
case fstd_op:
size = 8;
fp = true;
sign = true;
write = true;
break;
}
switch (insn.reg2i14_format.opcode) {
case ldptrw_op:
size = 4;
sign = true;
write = false;
break;
case stptrw_op:
size = 4;
sign = true;
write = true;
break;
case ldptrd_op:
size = 8;
sign = true;
write = false;
break;
case stptrd_op:
size = 8;
sign = true;
write = true;
break;
}
switch (insn.reg3_format.opcode) {
case ldxh_op:
size = 2;
sign = true;
write = false;
break;
case ldxhu_op:
size = 2;
sign = false;
write = false;
break;
case stxh_op:
size = 2;
sign = true;
write = true;
break;
case ldxw_op:
size = 4;
sign = true;
write = false;
break;
case ldxwu_op:
size = 4;
sign = false;
write = false;
break;
case stxw_op:
size = 4;
sign = true;
write = true;
break;
case ldxd_op:
size = 8;
sign = true;
write = false;
break;
case stxd_op:
size = 8;
sign = true;
write = true;
break;
case fldxs_op:
size = 4;
fp = true;
sign = true;
write = false;
break;
case fstxs_op:
size = 4;
fp = true;
sign = true;
write = true;
break;
case fldxd_op:
size = 8;
fp = true;
sign = true;
write = false;
break;
case fstxd_op:
size = 8;
fp = true;
sign = true;
write = true;
break;
}
if (!size)
goto sigbus;
if (user && !access_ok(addr, size))
goto sigbus;
if (!write) {
res = unaligned_read(addr, &value, size, sign);
if (res)
goto fault;
/* Rd is the same field in any formats */
if (!fp)
regs->regs[insn.reg3_format.rd] = value;
else {
if (is_fpu_owner())
write_fpr(insn.reg3_format.rd, value);
else
set_fpr64(¤t->thread.fpu.fpr[insn.reg3_format.rd], 0, value);
}
} else {
/* Rd is the same field in any formats */
if (!fp)
value = regs->regs[insn.reg3_format.rd];
else {
if (is_fpu_owner())
value = read_fpr(insn.reg3_format.rd);
else
value = get_fpr64(¤t->thread.fpu.fpr[insn.reg3_format.rd], 0);
}
res = unaligned_write(addr, value, size);
if (res)
goto fault;
}
#ifdef CONFIG_DEBUG_FS
if (user)
unaligned_instructions_user++;
else
unaligned_instructions_kernel++;
#endif
compute_return_era(regs);
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS);
return;
}
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
struct dentry *d;
d = debugfs_create_dir("loongarch", NULL);
debugfs_create_u32("unaligned_instructions_user",
S_IRUGO, d, &unaligned_instructions_user);
debugfs_create_u32("unaligned_instructions_kernel",
S_IRUGO, d, &unaligned_instructions_kernel);
return 0;
}
arch_initcall(debugfs_unaligned);
#endif
| linux-master | arch/loongarch/kernel/unaligned.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <asm/early_ioremap.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/setup.h>
u64 efi_system_table;
struct loongson_system_configuration loongson_sysconf;
EXPORT_SYMBOL(loongson_sysconf);
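/* Capture the firmware boot arguments: EFI boot flag, kernel command line and EFI system table pointer. */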
void __init init_environ(void)
{
int efi_boot = fw_arg0;
char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE);
if (efi_boot)
set_bit(EFI_BOOT, &efi.flags);
else
clear_bit(EFI_BOOT, &efi.flags);
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
strscpy(init_command_line, cmdline, COMMAND_LINE_SIZE);
early_memunmap(cmdline, COMMAND_LINE_SIZE);
efi_system_table = fw_arg2;
}
static int __init init_cpu_fullname(void)
{
int cpu;
if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
for (cpu = 0; cpu < NR_CPUS; cpu++)
__cpu_full_name[cpu] = loongson_sysconf.cpuname;
}
return 0;
}
arch_initcall(init_cpu_fullname);
static ssize_t boardinfo_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf,
"BIOS Information\n"
"Vendor\t\t\t: %s\n"
"Version\t\t\t: %s\n"
"ROM Size\t\t: %d KB\n"
"Release Date\t\t: %s\n\n"
"Board Information\n"
"Manufacturer\t\t: %s\n"
"Board Name\t\t: %s\n"
"Family\t\t\t: LOONGSON64\n\n",
b_info.bios_vendor, b_info.bios_version,
b_info.bios_size, b_info.bios_release_date,
b_info.board_vendor, b_info.board_name);
}
static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
boardinfo_show, NULL);
static int __init boardinfo_init(void)
{
struct kobject *loongson_kobj;
loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
}
late_initcall(boardinfo_init);
| linux-master | arch/loongarch/kernel/env.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
static DEFINE_RAW_SPINLOCK(patch_lock);
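/*
 * Emulate a PC-relative instruction (pcaddi, pcaddu12i, pcaddu18i,
 * pcalau12i) and advance the exception return address.
 */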
void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
{
unsigned long pc = regs->csr_era;
unsigned int rd = insn.reg1i20_format.rd;
unsigned int imm = insn.reg1i20_format.immediate;
if (pc & 3) {
pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
return;
}
switch (insn.reg1i20_format.opcode) {
case pcaddi_op:
regs->regs[rd] = pc + sign_extend64(imm << 2, 21);
break;
case pcaddu12i_op:
regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
break;
case pcaddu18i_op:
regs->regs[rd] = pc + sign_extend64(imm << 18, 37);
break;
case pcalau12i_op:
regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
regs->regs[rd] &= ~((1 << 12) - 1);
break;
default:
pr_info("%s: unknown opcode\n", __func__);
return;
}
regs->csr_era += LOONGARCH_INSN_SIZE;
}
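/*
 * Emulate a branch instruction, updating csr_era and, where the
 * instruction defines one (bl, jirl), the link register.
 */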
void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
{
unsigned int imm, imm_l, imm_h, rd, rj;
unsigned long pc = regs->csr_era;
if (pc & 3) {
pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
return;
}
imm_l = insn.reg0i26_format.immediate_l;
imm_h = insn.reg0i26_format.immediate_h;
switch (insn.reg0i26_format.opcode) {
case b_op:
regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
return;
case bl_op:
regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
return;
}
imm_l = insn.reg1i21_format.immediate_l;
imm_h = insn.reg1i21_format.immediate_h;
rj = insn.reg1i21_format.rj;
switch (insn.reg1i21_format.opcode) {
case beqz_op:
if (regs->regs[rj] == 0)
regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
return;
case bnez_op:
if (regs->regs[rj] != 0)
regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
return;
}
imm = insn.reg2i16_format.immediate;
rj = insn.reg2i16_format.rj;
rd = insn.reg2i16_format.rd;
switch (insn.reg2i16_format.opcode) {
case beq_op:
if (regs->regs[rj] == regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case bne_op:
if (regs->regs[rj] != regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case blt_op:
if ((long)regs->regs[rj] < (long)regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case bge_op:
if ((long)regs->regs[rj] >= (long)regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case bltu_op:
if (regs->regs[rj] < regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case bgeu_op:
if (regs->regs[rj] >= regs->regs[rd])
regs->csr_era = pc + sign_extend64(imm << 2, 17);
else
regs->csr_era = pc + LOONGARCH_INSN_SIZE;
break;
case jirl_op:
regs->csr_era = regs->regs[rj] + sign_extend64(imm << 2, 17);
regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
break;
default:
pr_info("%s: unknown opcode\n", __func__);
return;
}
}
bool insns_not_supported(union loongarch_instruction insn)
{
switch (insn.reg3_format.opcode) {
case amswapw_op ... ammindbdu_op:
pr_notice("atomic memory access instructions are not supported\n");
return true;
}
switch (insn.reg2i14_format.opcode) {
case llw_op:
case lld_op:
case scw_op:
case scd_op:
pr_notice("ll and sc instructions are not supported\n");
return true;
}
switch (insn.reg1i21_format.opcode) {
case bceqz_op:
pr_notice("bceqz and bcnez instructions are not supported\n");
return true;
}
return false;
}
bool insns_need_simulation(union loongarch_instruction insn)
{
if (is_pc_ins(&insn))
return true;
if (is_branch_ins(&insn))
return true;
return false;
}
void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs)
{
if (is_pc_ins(&insn))
simu_pc(regs, insn);
else if (is_branch_ins(&insn))
simu_branch(regs, insn);
}
int larch_insn_read(void *addr, u32 *insnp)
{
int ret;
u32 val;
ret = copy_from_kernel_nofault(&val, addr, LOONGARCH_INSN_SIZE);
if (!ret)
*insnp = val;
return ret;
}
int larch_insn_write(void *addr, u32 insn)
{
int ret;
unsigned long flags = 0;
raw_spin_lock_irqsave(&patch_lock, flags);
ret = copy_to_kernel_nofault(addr, &insn, LOONGARCH_INSN_SIZE);
raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
int larch_insn_patch_text(void *addr, u32 insn)
{
int ret;
u32 *tp = addr;
if ((unsigned long)tp & 3)
return -EINVAL;
ret = larch_insn_write(tp, insn);
if (!ret)
flush_icache_range((unsigned long)tp,
(unsigned long)tp + LOONGARCH_INSN_SIZE);
return ret;
}
u32 larch_insn_gen_nop(void)
{
return INSN_NOP;
}
u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
{
long offset = dest - pc;
union loongarch_instruction insn;
if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
pr_warn("The generated b instruction is out of range.\n");
return INSN_BREAK;
}
emit_b(&insn, offset >> 2);
return insn.word;
}
u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
{
long offset = dest - pc;
union loongarch_instruction insn;
if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
pr_warn("The generated bl instruction is out of range.\n");
return INSN_BREAK;
}
emit_bl(&insn, offset >> 2);
return insn.word;
}
u32 larch_insn_gen_break(int imm)
{
union loongarch_instruction insn;
if (imm < 0 || imm >= SZ_32K) {
pr_warn("The generated break instruction is out of range.\n");
return INSN_BREAK;
}
emit_break(&insn, imm);
return insn.word;
}
u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk)
{
union loongarch_instruction insn;
emit_or(&insn, rd, rj, rk);
return insn.word;
}
u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj)
{
return larch_insn_gen_or(rd, rj, 0);
}
u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm)
{
union loongarch_instruction insn;
if (imm < -SZ_512K || imm >= SZ_512K) {
pr_warn("The generated lu12i.w instruction is out of range.\n");
return INSN_BREAK;
}
emit_lu12iw(&insn, rd, imm);
return insn.word;
}
u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
{
union loongarch_instruction insn;
if (imm < -SZ_512K || imm >= SZ_512K) {
pr_warn("The generated lu32i.d instruction is out of range.\n");
return INSN_BREAK;
}
emit_lu32id(&insn, rd, imm);
return insn.word;
}
u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
{
union loongarch_instruction insn;
if (imm < -SZ_2K || imm >= SZ_2K) {
pr_warn("The generated lu52i.d instruction is out of range.\n");
return INSN_BREAK;
}
emit_lu52id(&insn, rd, rj, imm);
return insn.word;
}
u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
{
union loongarch_instruction insn;
if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
pr_warn("The generated jirl instruction is out of range.\n");
return INSN_BREAK;
}
emit_jirl(&insn, rj, rd, imm >> 2);
return insn.word;
}
| linux-master | arch/loongarch/kernel/inst.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common time service routines for LoongArch machines.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>
#include <linux/spinlock.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>
#include <asm/time.h>
u64 cpu_clock_freq;
EXPORT_SYMBOL(cpu_clock_freq);
u64 const_clock_freq;
EXPORT_SYMBOL(const_clock_freq);
static DEFINE_RAW_SPINLOCK(state_lock);
static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);
static void constant_event_handler(struct clock_event_device *dev)
{
}
static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
int cpu = smp_processor_id();
struct clock_event_device *cd;
/* Clear Timer Interrupt */
write_csr_tintclear(CSR_TINTCLR_TI);
cd = &per_cpu(constant_clockevent_device, cpu);
cd->event_handler(cd);
return IRQ_HANDLED;
}
static int constant_set_state_oneshot(struct clock_event_device *evt)
{
unsigned long timer_config;
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config |= CSR_TCFG_EN;
timer_config &= ~CSR_TCFG_PERIOD;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
return 0;
}
static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
{
unsigned long timer_config;
raw_spin_lock(&state_lock);
timer_config = csr_read64(LOONGARCH_CSR_TCFG);
timer_config &= ~CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
return 0;
}
static int constant_set_state_periodic(struct clock_event_device *evt)
{
unsigned long period;
unsigned long timer_config;
raw_spin_lock(&state_lock);
period = const_clock_freq / HZ;
timer_config = period & CSR_TCFG_VAL;
timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
raw_spin_unlock(&state_lock);
return 0;
}
static int constant_set_state_shutdown(struct clock_event_device *evt)
{
return 0;
}
static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
unsigned long timer_config;
delta &= CSR_TCFG_VAL;
timer_config = delta | CSR_TCFG_EN;
csr_write64(timer_config, LOONGARCH_CSR_TCFG);
return 0;
}
static unsigned long __init get_loops_per_jiffy(void)
{
unsigned long lpj = (unsigned long)const_clock_freq;
do_div(lpj, HZ);
return lpj;
}
static long init_offset __nosavedata;
void save_counter(void)
{
init_offset = drdtime();
}
void sync_counter(void)
{
/* Ensure the counter begins at 0 */
csr_write64(init_offset, LOONGARCH_CSR_CNTC);
}
static int get_timer_irq(void)
{
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
if (d)
return irq_create_mapping(d, INT_TI);
return -EINVAL;
}
int constant_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned long min_delta = 0x600;
unsigned long max_delta = (1UL << 48) - 1;
struct clock_event_device *cd;
static int irq = 0, timer_irq_installed = 0;
if (!timer_irq_installed) {
irq = get_timer_irq();
if (irq < 0)
pr_err("Failed to map irq %d (timer)\n", irq);
}
cd = &per_cpu(constant_clockevent_device, cpu);
cd->name = "Constant";
cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;
cd->irq = irq;
cd->rating = 320;
cd->cpumask = cpumask_of(cpu);
cd->set_state_oneshot = constant_set_state_oneshot;
cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
cd->set_state_periodic = constant_set_state_periodic;
cd->set_state_shutdown = constant_set_state_shutdown;
cd->set_next_event = constant_timer_next_event;
cd->event_handler = constant_event_handler;
clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);
if (timer_irq_installed)
return 0;
timer_irq_installed = 1;
sync_counter();
if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
pr_err("Failed to request irq %d (timer)\n", irq);
lpj_fine = get_loops_per_jiffy();
pr_info("Constant clock event device register\n");
return 0;
}
static u64 read_const_counter(struct clocksource *clk)
{
return drdtime();
}
static noinstr u64 sched_clock_read(void)
{
return drdtime();
}
static struct clocksource clocksource_const = {
.name = "Constant",
.rating = 400,
.read = read_const_counter,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.vdso_clock_mode = VDSO_CLOCKMODE_CPU,
};
int __init constant_clocksource_init(void)
{
int res;
unsigned long freq = const_clock_freq;
res = clocksource_register_hz(&clocksource_const, freq);
sched_clock_register(sched_clock_read, 64, freq);
pr_info("Constant clock source device register\n");
return res;
}
void __init time_init(void)
{
if (!cpu_has_cpucfg)
const_clock_freq = cpu_clock_freq;
else
const_clock_freq = calc_const_freq();
init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
constant_clockevent_init();
constant_clocksource_init();
}
| linux-master | arch/loongarch/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* machine_kexec.c for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/libfdt.h>
#include <linux/mm.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
static unsigned long reboot_code_buffer;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug("\ttype: %d\n", kimage->type);
pr_debug("\tstart: %lx\n", kimage->start);
pr_debug("\thead: %lx\n", kimage->head);
pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug("\t segment[%lu]: %016lx - %016lx", i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz);
pr_debug("\t\t0x%lx bytes, %lu pages\n",
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
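/*
 * Record boot parameters for the new kernel, locate its command line
 * segment, and copy the relocation code to a known-safe page.
 */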
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
kexec_image_info(kimage);
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
/* Find the command line */
for (i = 0; i < kimage->nr_segments; i++) {
if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
break;
}
}
if (!kimage->arch.cmdline_ptr) {
pr_err("Command line not included in the provided image\n");
return -EINVAL;
}
/* kexec/kdump need a safe page to save reboot_code_buffer */
kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
#ifdef CONFIG_SMP
/* All secondary cpus may now jump to the kexec_smp_wait loop */
relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif
return 0;
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void kexec_reboot(void)
{
do_kexec_t do_kexec = NULL;
/*
* We know we were online, and there will be no incoming IPIs at
* this point. Mark online again before rebooting so that the crash
* analysis tool will see us correctly.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
/*
* Make sure we get correct instructions written by the
* machine_kexec_prepare() CPU.
*/
__asm__ __volatile__ ("\tibar 0\n"::);
#ifdef CONFIG_SMP
/* All secondary cpus go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
unreachable();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
unreachable();
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
static void crash_shutdown_secondary(void *passed_regs)
{
int cpu = smp_processor_id();
struct pt_regs *regs = passed_regs;
/*
* If we are passed registers, use those. Otherwise get the
* regs from the last interrupt, which should be correct, as
* we are in an interrupt. But if the regs are not there,
* pull them from the top of the stack. They are probably
* wrong, but we need something to keep from crashing again.
*/
if (!regs)
regs = get_irq_regs();
if (!regs)
regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
cpumask_set_cpu(cpu, &cpus_in_crash);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
void crash_smp_send_stop(void)
{
unsigned int ncpus;
unsigned long timeout;
static int cpus_stopped;
/*
* This function can be called twice in panic path, but obviously
* we should execute this only once.
*/
if (cpus_stopped)
return;
cpus_stopped = 1;
/* Excluding the panic cpu */
ncpus = num_online_cpus() - 1;
smp_call_function(crash_shutdown_secondary, NULL, 0);
smp_wmb();
/*
* The crash CPU sends an IPI and waits for the other CPUs to
* respond, giving them up to 10 seconds to check in.
*/
timeout = MSEC_PER_SEC * 10;
pr_emerg("Sending IPI to other cpus...\n");
while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
mdelay(1);
cpu_relax();
}
}
#endif /* defined(CONFIG_SMP) */
void machine_shutdown(void)
{
int cpu;
/* All CPUs go to reboot_code_buffer */
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
void machine_crash_shutdown(struct pt_regs *regs)
{
int crashing_cpu;
local_irq_disable();
crashing_cpu = smp_processor_id();
crash_save_cpu(regs, crashing_cpu);
#ifdef CONFIG_SMP
crash_smp_send_stop();
#endif
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
pr_info("Starting crashdump kernel...\n");
}
void machine_kexec(struct kimage *image)
{
unsigned long entry, *ptr;
struct kimage_arch *internal = &image->arch;
efi_boot = internal->efi_boot;
cmdline_ptr = internal->cmdline_ptr;
systable_ptr = internal->systable_ptr;
start_addr = (unsigned long)phys_to_virt(image->start);
first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
(unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
/*
* The generic kexec code builds a page list with physical
* addresses. They are directly accessible through XKPRANGE,
* hence the phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline before disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/* We do not want to be bothered. */
local_irq_disable();
pr_notice("EFI boot flag 0x%lx\n", efi_boot);
pr_notice("Command line at 0x%lx\n", cmdline_ptr);
pr_notice("System table at 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
flush_cache_all();
#ifdef CONFIG_SMP
atomic_set(&kexec_ready_to_reboot, 1);
#endif
kexec_reboot();
}
| linux-master | arch/loongarch/kernel/machine_kexec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Linux performance counter support for LoongArch.
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 2010 MIPS Technologies, Inc.
* Copyright (C) 2011 Cavium Networks, Inc.
* Author: Deng-Cheng Zhu
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/sched/task_stack.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long
user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
{
unsigned long err;
unsigned long __user *user_frame_tail;
struct stack_frame buftail;
user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
/* Also check accessibility of one struct stack_frame beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
pagefault_enable();
if (err || (unsigned long)user_frame_tail >= buftail.fp)
return 0;
perf_callchain_store(entry, buftail.ra);
return buftail.fp;
}
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long fp;
if (perf_guest_state()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->csr_era);
fp = regs->regs[22];
while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
fp = user_backtrace(entry, fp);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
for (unwind_start(&state, current, regs);
!unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
}
}
#define LOONGARCH_MAX_HWEVENTS 32
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[LOONGARCH_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
*/
unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of LoongArch performance events. */
struct loongarch_perf_event {
unsigned int event_id;
};
static struct loongarch_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define C(x) PERF_COUNT_HW_CACHE_##x
#define HW_OP_UNSUPPORTED 0xffffffff
#define CACHE_OP_UNSUPPORTED 0xffffffff
#define PERF_MAP_ALL_UNSUPPORTED \
[0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
[0 ... C(MAX) - 1] = { \
[0 ... C(OP_MAX) - 1] = { \
[0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED}, \
}, \
}
struct loongarch_pmu {
u64 max_period;
u64 valid_count;
u64 overflow;
const char *name;
unsigned int num_counters;
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
const struct loongarch_perf_event *(*map_raw_event)(u64 config);
const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct loongarch_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
};
static struct loongarch_pmu loongarch_pmu;
#define M_PERFCTL_EVENT(event) (event & CSR_PERFCTRL_EVENT)
#define M_PERFCTL_COUNT_EVENT_WHENEVER (CSR_PERFCTRL_PLV0 | \
CSR_PERFCTRL_PLV1 | \
CSR_PERFCTRL_PLV2 | \
CSR_PERFCTRL_PLV3 | \
CSR_PERFCTRL_IE)
#define M_PERFCTL_CONFIG_MASK 0x1f0000
static void pause_local_counters(void);
static void resume_local_counters(void);
static u64 loongarch_pmu_read_counter(unsigned int idx)
{
u64 val = -1;
switch (idx) {
case 0:
val = read_csr_perfcntr0();
break;
case 1:
val = read_csr_perfcntr1();
break;
case 2:
val = read_csr_perfcntr2();
break;
case 3:
val = read_csr_perfcntr3();
break;
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
return val;
}
static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
{
switch (idx) {
case 0:
write_csr_perfcntr0(val);
return;
case 1:
write_csr_perfcntr1(val);
return;
case 2:
write_csr_perfcntr2(val);
return;
case 3:
write_csr_perfcntr3(val);
return;
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return;
}
}
static unsigned int loongarch_pmu_read_control(unsigned int idx)
{
unsigned int val = -1;
switch (idx) {
case 0:
val = read_csr_perfctrl0();
break;
case 1:
val = read_csr_perfctrl1();
break;
case 2:
val = read_csr_perfctrl2();
break;
case 3:
val = read_csr_perfctrl3();
break;
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return 0;
}
return val;
}
static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
{
switch (idx) {
case 0:
write_csr_perfctrl0(val);
return;
case 1:
write_csr_perfctrl1(val);
return;
case 2:
write_csr_perfctrl2(val);
return;
case 3:
write_csr_perfctrl3(val);
return;
default:
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
return;
}
}
static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
int i;
for (i = 0; i < loongarch_pmu.num_counters; i++) {
if (!test_and_set_bit(i, cpuc->used_mask))
return i;
}
return -EAGAIN;
}
static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
unsigned int cpu;
struct perf_event *event = container_of(evt, struct perf_event, hw);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
/* Make sure the interrupt is enabled. */
cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
(evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;
cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
/*
* We do not actually let the counter run. Leave it until start().
*/
pr_debug("Enabling perf counter for CPU%d\n", cpu);
}
static void loongarch_pmu_disable_event(int idx)
{
unsigned long flags;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
local_irq_save(flags);
cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
~M_PERFCTL_COUNT_EVENT_WHENEVER;
loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
local_irq_restore(flags);
}
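/*
 * Program the counter so that it overflows after "left" events, where
 * "left" is derived from the sampling period and clamped to max_period.
 */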
static int loongarch_pmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
int ret = 0;
u64 left = local64_read(&hwc->period_left);
u64 period = hwc->sample_period;
if (unlikely((left + period) & (1ULL << 63))) {
/* left underflowed by more than period. */
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
} else if (unlikely((left + period) <= period)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > loongarch_pmu.max_period) {
left = loongarch_pmu.max_period;
local64_set(&hwc->period_left, left);
}
local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
perf_event_update_userpage(event);
return ret;
}
static void loongarch_pmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
u64 delta;
u64 prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = loongarch_pmu.read_counter(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
}
static void loongarch_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
loongarch_pmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
loongarch_pmu_enable_event(hwc, hwc->idx);
}
static void loongarch_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
loongarch_pmu_disable_event(hwc->idx);
barrier();
loongarch_pmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int loongarch_pmu_add(struct perf_event *event, int flags)
{
int idx, err = 0;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
perf_pmu_disable(event->pmu);
/* Look for a free counter for this event. */
idx = loongarch_pmu_alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
loongarch_pmu_disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
loongarch_pmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void loongarch_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
loongarch_pmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void loongarch_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
loongarch_pmu_event_update(event, hwc, hwc->idx);
}
static void loongarch_pmu_enable(struct pmu *pmu)
{
resume_local_counters();
}
static void loongarch_pmu_disable(struct pmu *pmu)
{
pause_local_counters();
}
static DEFINE_MUTEX(pmu_reserve_mutex);
static atomic_t active_events = ATOMIC_INIT(0);
static int get_pmc_irq(void)
{
struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
if (d)
return irq_create_mapping(d, INT_PCOV);
return -EINVAL;
}
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
on_each_cpu(reset_counters, NULL, 1);
free_irq(get_pmc_irq(), &loongarch_pmu);
mutex_unlock(&pmu_reserve_mutex);
}
}
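/*
 * Service one overflowed counter from the PMU interrupt: fold the
 * counter into the event, re-arm the sample period and pass the sample
 * to the perf core, disabling the counter if perf_event_overflow()
 * requests it.
 */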
static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
loongarch_pmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!loongarch_pmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, data, regs))
loongarch_pmu_disable_event(idx);
}
static irqreturn_t pmu_handle_irq(int irq, void *dev)
{
int n;
int handled = IRQ_NONE;
uint64_t counter;
struct pt_regs *regs;
struct perf_sample_data data;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
/*
* First we pause the local counters, so that when we are locked
* here, the counters are all paused. When it gets locked due to
* perf_disable(), the timer interrupt handler will be delayed.
*
* See also loongarch_pmu_start().
*/
pause_local_counters();
regs = get_irq_regs();
perf_sample_data_init(&data, 0, 0);
for (n = 0; n < loongarch_pmu.num_counters; n++) {
if (test_bit(n, cpuc->used_mask)) {
counter = loongarch_pmu.read_counter(n);
if (counter & loongarch_pmu.overflow) {
handle_associated_event(cpuc, n, &data, regs);
handled = IRQ_HANDLED;
}
}
}
resume_local_counters();
/*
* Do all the work for the pending perf events. We can do this
* in here because the performance counter interrupt is a regular
* interrupt, not NMI.
*/
if (handled == IRQ_HANDLED)
irq_work_run();
return handled;
}
static int loongarch_pmu_event_init(struct perf_event *event)
{
int r, irq;
unsigned long flags;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
/* Init it to avoid false validate_group */
event->hw.event_base = 0xffffffff;
return -ENOENT;
}
if (event->cpu >= 0 && !cpu_online(event->cpu))
return -ENODEV;
irq = get_pmc_irq();
flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
if (!atomic_inc_not_zero(&active_events)) {
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0) {
r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu);
if (r < 0) {
mutex_unlock(&pmu_reserve_mutex);
pr_warn("PMU IRQ request failed\n");
return -ENODEV;
}
}
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
return __hw_perf_event_init(event);
}
static struct pmu pmu = {
.pmu_enable = loongarch_pmu_enable,
.pmu_disable = loongarch_pmu_disable,
.event_init = loongarch_pmu_event_init,
.add = loongarch_pmu_add,
.del = loongarch_pmu_del,
.start = loongarch_pmu_start,
.stop = loongarch_pmu_stop,
.read = loongarch_pmu_read,
};
static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
{
return M_PERFCTL_EVENT(pev->event_id);
}
static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
{
const struct loongarch_perf_event *pev;
pev = &(*loongarch_pmu.general_event_map)[idx];
if (pev->event_id == HW_OP_UNSUPPORTED)
return ERR_PTR(-ENOENT);
return pev;
}
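/*
 * Generalized cache events are encoded in attr.config as
 * (type | (op << 8) | (result << 16)); map that triple onto the
 * corresponding entry of the Loongson cache event table.
 */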
static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct loongarch_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*loongarch_pmu.cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->event_id == CACHE_OP_UNSUPPORTED)
return ERR_PTR(-ENOENT);
return pev;
}
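/*
 * Dry-run counter allocation for the whole group on a zeroed fake
 * cpu_hw_events to make sure the group can be scheduled together.
 */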
static int validate_group(struct perf_event *event)
{
struct cpu_hw_events fake_cpuc;
struct perf_event *sibling, *leader = event->group_leader;
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
for_each_sibling_event(sibling, leader) {
if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;
}
static void reset_counters(void *arg)
{
int n;
int counters = loongarch_pmu.num_counters;
for (n = 0; n < counters; n++) {
loongarch_pmu_write_control(n, 0);
loongarch_pmu.write_counter(n, 0);
}
}
static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
[PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
};
static const struct loongarch_perf_event loongson_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
PERF_CACHE_MAP_ALL_UNSUPPORTED,
[C(L1D)] = {
/*
* Like some other architectures (e.g. ARM), the performance
* counters don't differentiate between read and write
* accesses/misses, so this isn't strictly correct, but it's the
* best we can do. Writes and reads get combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x8 },
[C(RESULT_MISS)] = { 0x9 },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x8 },
[C(RESULT_MISS)] = { 0x9 },
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = { 0xaa },
[C(RESULT_MISS)] = { 0xa9 },
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x6 },
[C(RESULT_MISS)] = { 0x7 },
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0xc },
[C(RESULT_MISS)] = { 0xd },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0xc },
[C(RESULT_MISS)] = { 0xd },
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_MISS)] = { 0x3b },
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x4 },
[C(RESULT_MISS)] = { 0x3c },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x4 },
[C(RESULT_MISS)] = { 0x3c },
},
},
[C(BPU)] = {
/* Using the same code for *HW_BRANCH* */
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x02 },
[C(RESULT_MISS)] = { 0x03 },
},
},
};
static int __hw_perf_event_init(struct perf_event *event)
{
int err;
struct hw_perf_event *hwc = &event->hw;
struct perf_event_attr *attr = &event->attr;
const struct loongarch_perf_event *pev;
/* Return the LoongArch event descriptor for a generic perf event. */
if (PERF_TYPE_HARDWARE == event->attr.type) {
if (event->attr.config >= PERF_COUNT_HW_MAX)
return -EINVAL;
pev = loongarch_pmu_map_general_event(event->attr.config);
} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
pev = loongarch_pmu_map_cache_event(event->attr.config);
} else if (PERF_TYPE_RAW == event->attr.type) {
/* We are working on the global raw event. */
mutex_lock(&raw_event_mutex);
pev = loongarch_pmu.map_raw_event(event->attr.config);
} else {
/* The event type is not (yet) supported. */
return -EOPNOTSUPP;
}
if (IS_ERR(pev)) {
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
return PTR_ERR(pev);
}
/*
 * Allow maximum flexibility in how each individual counter shared
 * by the single CPU operates (privilege-mode exclusion and range).
*/
hwc->config_base = CSR_PERFCTRL_IE;
hwc->event_base = loongarch_pmu_perf_event_encode(pev);
if (PERF_TYPE_RAW == event->attr.type)
mutex_unlock(&raw_event_mutex);
if (!attr->exclude_user) {
hwc->config_base |= CSR_PERFCTRL_PLV3;
hwc->config_base |= CSR_PERFCTRL_PLV2;
}
if (!attr->exclude_kernel) {
hwc->config_base |= CSR_PERFCTRL_PLV0;
}
if (!attr->exclude_hv) {
hwc->config_base |= CSR_PERFCTRL_PLV1;
}
hwc->config_base &= M_PERFCTL_CONFIG_MASK;
/*
* The event can belong to another cpu. We do not assign a local
* counter for it for now.
*/
hwc->idx = -1;
hwc->config = 0;
if (!hwc->sample_period) {
hwc->sample_period = loongarch_pmu.max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
err = 0;
if (event->group_leader != event)
err = validate_group(event);
event->destroy = hw_perf_event_destroy;
if (err)
event->destroy(event);
return err;
}
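/*
 * Save each counter's control register and clear its count-enable bits
 * so counting stops; resume_local_counters() restores the saved state.
 */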
static void pause_local_counters(void)
{
unsigned long flags;
int ctr = loongarch_pmu.num_counters;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
local_irq_save(flags);
do {
ctr--;
cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
~M_PERFCTL_COUNT_EVENT_WHENEVER);
} while (ctr > 0);
local_irq_restore(flags);
}
static void resume_local_counters(void)
{
int ctr = loongarch_pmu.num_counters;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
do {
ctr--;
loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
} while (ctr > 0);
}
static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
{
raw_event.event_id = M_PERFCTL_EVENT(config);
return &raw_event;
}
static int __init init_hw_perf_events(void)
{
int counters;
if (!cpu_has_pmp)
return -ENODEV;
pr_info("Performance counters: ");
counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
loongarch_pmu.num_counters = counters;
loongarch_pmu.max_period = (1ULL << 63) - 1;
loongarch_pmu.valid_count = (1ULL << 63) - 1;
loongarch_pmu.overflow = 1ULL << 63;
loongarch_pmu.name = "loongarch/loongson64";
loongarch_pmu.read_counter = loongarch_pmu_read_counter;
loongarch_pmu.write_counter = loongarch_pmu_write_counter;
loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
loongarch_pmu.general_event_map = &loongson_event_map;
loongarch_pmu.cache_event_map = &loongson_cache_map;
on_each_cpu(reset_counters, NULL, 1);
pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
loongarch_pmu.name, counters, 64);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
| linux-master | arch/loongarch/kernel/perf_event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*
* Based on arch/arm64/kernel/jump_label.c
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/inst.h>
void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)
{
u32 insn;
void *addr = (void *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
insn = larch_insn_gen_b(jump_entry_code(entry), jump_entry_target(entry));
else
insn = larch_insn_gen_nop();
larch_insn_patch_text(addr, insn);
}
| linux-master | arch/loongarch/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>
#define UPROBE_TRAP_NR UINT_MAX
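/*
 * Validate the probed instruction and prepare the XOL (execute out of
 * line) slot: instructions that cannot be stepped out of line are
 * marked for simulation and replaced by a NOP, everything else is
 * copied verbatim; both are followed by the XOL breakpoint.
 */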
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
struct mm_struct *mm, unsigned long addr)
{
int idx;
union loongarch_instruction insn;
if (addr & 0x3)
return -EILSEQ;
for (idx = ARRAY_SIZE(auprobe->insn) - 1; idx >= 0; idx--) {
insn.word = auprobe->insn[idx];
if (insns_not_supported(insn))
return -EINVAL;
}
if (insns_need_simulation(insn)) {
auprobe->ixol[0] = larch_insn_gen_nop();
auprobe->simulate = true;
} else {
auprobe->ixol[0] = auprobe->insn[0];
auprobe->simulate = false;
}
auprobe->ixol[1] = UPROBE_XOLBP_INSN;
return 0;
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr;
if (auprobe->simulate)
instruction_pointer_set(regs, auprobe->resume_era);
else
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
user_disable_single_step(current);
return 0;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
if (t->thread.trap_nr != UPROBE_TRAP_NR)
return true;
return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
union loongarch_instruction insn;
if (!auprobe->simulate)
return false;
insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs);
auprobe->resume_era = regs->csr_era;
return true;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
struct pt_regs *regs)
{
unsigned long ra = regs->regs[1];
regs->regs[1] = trampoline_vaddr;
return ra;
}
bool arch_uretprobe_is_alive(struct return_instance *ret,
enum rp_check ctx, struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return regs->regs[3] <= ret->stack;
else
return regs->regs[3] < ret->stack;
}
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
if (uprobe_pre_sstep_notifier(regs))
return true;
return false;
}
bool uprobe_singlestep_handler(struct pt_regs *regs)
{
if (uprobe_post_sstep_notifier(regs))
return true;
return false;
}
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
void *kaddr = kmap_local_page(page);
void *dst = kaddr + (vaddr & ~PAGE_MASK);
memcpy(dst, src, len);
flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
kunmap_local(kaddr);
}
| linux-master | arch/loongarch/kernel/uprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Xiang Gao <[email protected]>
* Huacai Chen <[email protected]>
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <asm/bootinfo.h>
#include <asm/loongson.h>
#include <asm/numa.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/time.h>
int numa_off;
struct pglist_data *node_data[MAX_NUMNODES];
unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(node_distances);
static struct numa_meminfo numa_meminfo;
cpumask_t cpus_on_node[MAX_NUMNODES];
cpumask_t phys_cpus_on_node[MAX_NUMNODES];
EXPORT_SYMBOL(cpus_on_node);
/*
* apicid, cpu, node mappings
*/
s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
[0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
};
EXPORT_SYMBOL(__cpuid_to_node);
nodemask_t numa_nodes_parsed __initdata;
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static int __init pcpu_cpu_to_node(int cpu)
{
return early_cpu_to_node(cpu);
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
}
void __init pcpu_populate_pte(unsigned long addr)
{
populate_kernel_pte(addr);
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc = -EINVAL;
if (pcpu_chosen_fc == PCPU_FC_AUTO) {
if (nr_node_ids >= 8)
pcpu_chosen_fc = PCPU_FC_PAGE;
else
pcpu_chosen_fc = PCPU_FC_EMBED;
}
/*
* Always reserve area for module percpu variables. That's
* what the legacy allocator did.
*/
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
pcpu_cpu_distance, pcpu_cpu_to_node);
if (rc < 0)
pr_warn("%s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)
__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
/*
 * Get the node id from a logical cpu number.
 * __cpuid_to_node maps a physical cpu id to a node, so we
 * must use cpu_logical_map(cpu) to index it.
 *
 * This routine is only used in the early boot phase; once
 * setup_per_cpu_areas() has run and numa_node is initialized,
 * cpu_to_node() is used instead.
*/
int early_cpu_to_node(int cpu)
{
int physid = cpu_logical_map(cpu);
if (physid < 0)
return NUMA_NO_NODE;
return __cpuid_to_node[physid];
}
void __init early_numa_add_cpu(int cpuid, s16 node)
{
int cpu = __cpu_number_map[cpuid];
if (cpu < 0)
return;
cpumask_set_cpu(cpu, &cpus_on_node[node]);
cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
}
void numa_add_cpu(unsigned int cpu)
{
int nid = cpu_to_node(cpu);
cpumask_set_cpu(cpu, &cpus_on_node[nid]);
}
void numa_remove_cpu(unsigned int cpu)
{
int nid = cpu_to_node(cpu);
cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
}
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
struct numa_meminfo *mi)
{
/* ignore zero length blks */
if (start == end)
return 0;
/* whine about and ignore invalid blks */
if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
nid, start, end - 1);
return 0;
}
if (mi->nr_blks >= NR_NODE_MEMBLKS) {
pr_err("NUMA: too many memblk ranges\n");
return -EINVAL;
}
mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
mi->blk[mi->nr_blks].nid = nid;
mi->nr_blks++;
return 0;
}
/**
* numa_add_memblk - Add one numa_memblk to numa_meminfo
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
* @end: End address of the new memblk
*
* Add a new memblk to the default numa_meminfo.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
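/* Allocate the pg_data_t for a node from memblock, preferably from node-local memory. */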
static void __init alloc_node_data(int nid)
{
void *nd;
unsigned long nd_pa;
size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
return;
}
nd = __va(nd_pa);
node_data[nid] = nd;
memset(nd, 0, sizeof(pg_data_t));
}
static void __init node_mem_init(unsigned int node)
{
unsigned long start_pfn, end_pfn;
unsigned long node_addrspace_offset;
node_addrspace_offset = nid_to_addrbase(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
node, node_addrspace_offset);
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
alloc_node_data(node);
}
#ifdef CONFIG_ACPI_NUMA
/*
* Sanity check to catch more bad NUMA configurations (they are amazingly
* common). Make sure the nodes cover all memory.
*/
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
int i;
u64 numaram, biosram;
numaram = 0;
for (i = 0; i < mi->nr_blks; i++) {
u64 s = mi->blk[i].start >> PAGE_SHIFT;
u64 e = mi->blk[i].end >> PAGE_SHIFT;
numaram += e - s;
numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
if ((s64)numaram < 0)
numaram = 0;
}
max_pfn = max_low_pfn;
biosram = max_pfn - absent_pages_in_range(0, max_pfn);
BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
return true;
}
static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
{
static unsigned long num_physpages;
num_physpages += (size >> PAGE_SHIFT);
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
node, type, start, size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
memblock_set_node(start, size, &memblock.memory, node);
}
/*
* add_numamem_region
*
 * Add a usable memory region described by the BIOS. The
 * routine takes each intersection between the BIOS region
 * and a node's region and adds it to that node's memblock
 * pool.
*/
static void __init add_numamem_region(u64 start, u64 end, u32 type)
{
u32 i;
u64 ofs = start;
if (start >= end) {
pr_debug("Invalid region: %016llx-%016llx\n", start, end);
return;
}
for (i = 0; i < numa_meminfo.nr_blks; i++) {
struct numa_memblk *mb = &numa_meminfo.blk[i];
if (ofs > mb->end)
continue;
if (end > mb->end) {
add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
ofs = mb->end;
} else {
add_node_intersection(mb->nid, ofs, end - ofs, type);
break;
}
}
}
static void __init init_node_memblock(void)
{
u32 mem_type;
u64 mem_end, mem_start, mem_size;
efi_memory_desc_t *md;
/* Parse memory information and activate */
for_each_efi_memory_desc(md) {
mem_type = md->type;
mem_start = md->phys_addr;
mem_size = md->num_pages << EFI_PAGE_SHIFT;
mem_end = mem_start + mem_size;
switch (mem_type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_PERSISTENT_MEMORY:
case EFI_CONVENTIONAL_MEMORY:
add_numamem_region(mem_start, mem_end, mem_type);
break;
case EFI_PAL_CODE:
case EFI_UNUSABLE_MEMORY:
case EFI_ACPI_RECLAIM_MEMORY:
add_numamem_region(mem_start, mem_end, mem_type);
fallthrough;
case EFI_RESERVED_TYPE:
case EFI_RUNTIME_SERVICES_CODE:
case EFI_RUNTIME_SERVICES_DATA:
case EFI_MEMORY_MAPPED_IO:
case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
mem_type, mem_start, mem_size);
break;
}
}
}
static void __init numa_default_distance(void)
{
int row, col;
for (row = 0; row < MAX_NUMNODES; row++)
for (col = 0; col < MAX_NUMNODES; col++) {
if (col == row)
node_distances[row][col] = LOCAL_DISTANCE;
else
/*
 * We assume one node per package here!
 *
 * A SLIT should be provided for systems with multiple
 * nodes per package to override this default.
 */
node_distances[row][col] = REMOTE_DISTANCE;
}
}
/*
* fake_numa_init() - For Non-ACPI systems
* Return: 0 on success, -errno on failure.
*/
static int __init fake_numa_init(void)
{
phys_addr_t start = memblock_start_of_DRAM();
phys_addr_t end = memblock_end_of_DRAM() - 1;
node_set(0, numa_nodes_parsed);
pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
return numa_add_memblk(0, start, end + 1);
}
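/*
 * Top-level NUMA setup: parse SRAT/SLIT (or fake a single node when
 * ACPI is disabled), assign memblock regions to nodes, then initialize
 * and online every possible node.
 */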
int __init init_numa_memory(void)
{
int i;
int ret;
int node;
for (i = 0; i < NR_CPUS; i++)
set_cpuid_to_node(i, NUMA_NO_NODE);
numa_default_distance();
nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
/* Parse SRAT and SLIT if provided by firmware. */
ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
if (ret < 0)
return ret;
node_possible_map = numa_nodes_parsed;
if (WARN_ON(nodes_empty(node_possible_map)))
return -EINVAL;
init_node_memblock();
if (numa_meminfo_cover_memory(&numa_meminfo) == false)
return -EINVAL;
for_each_node_mask(node, node_possible_map) {
node_mem_init(node);
node_set_online(node);
}
max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
setup_nr_node_ids();
loongson_sysconf.nr_nodes = nr_node_ids;
loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);
return 0;
}
#endif
void __init paging_init(void)
{
unsigned int node;
unsigned long zones_size[MAX_NR_ZONES] = {0, };
for_each_online_node(node) {
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
if (end_pfn > max_low_pfn)
max_low_pfn = end_pfn;
}
#ifdef CONFIG_ZONE_DMA32
zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init(zones_size);
}
void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
}
int pcibus_to_node(struct pci_bus *bus)
{
return dev_to_node(&bus->dev);
}
EXPORT_SYMBOL(pcibus_to_node);
| linux-master | arch/loongarch/kernel/numa.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
unsigned long addr;
struct pt_regs dummyregs;
struct unwind_state state;
if (!regs) {
regs = &dummyregs;
if (task == current) {
regs->regs[3] = (unsigned long)__builtin_frame_address(0);
regs->csr_era = (unsigned long)__builtin_return_address(0);
} else {
regs->regs[3] = thread_saved_fp(task);
regs->csr_era = thread_saved_ra(task);
}
regs->regs[1] = 0;
}
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || !consume_entry(cookie, addr))
break;
}
}
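/*
 * Safely copy a user-space stack frame (the fp/ra pair stored just
 * below fp) with page faults disabled; returns 0 if the frame is
 * inaccessible or the frame pointer does not advance to a higher
 * address.
 */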
static int
copy_stack_frame(unsigned long fp, struct stack_frame *frame)
{
int ret = 1;
unsigned long err;
unsigned long __user *user_frame_tail;
user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
if (!access_ok(user_frame_tail, sizeof(*frame)))
return 0;
pagefault_disable();
err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
if (err || (unsigned long)user_frame_tail >= frame->fp)
ret = 0;
pagefault_enable();
return ret;
}
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs)
{
unsigned long fp = regs->regs[22];
while (fp && !((unsigned long)fp & 0xf)) {
struct stack_frame frame;
frame.fp = 0;
frame.ra = 0;
if (!copy_stack_frame(fp, &frame))
break;
if (!frame.ra)
break;
if (!consume_entry(cookie, frame.ra))
break;
fp = frame.fp;
}
}
| linux-master | arch/loongarch/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/idle.h>
#include <asm/processor.h>
#include <asm/time.h>
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
}
int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
/*
* For the first processor also print the system type
*/
if (n == 0)
seq_printf(m, "system type\t\t: %s\n\n", get_system_type());
seq_printf(m, "processor\t\t: %ld\n", n);
seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
(lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
if (cpu_has_loongarch32)
seq_printf(m, " loongarch32");
if (cpu_has_loongarch64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
seq_printf(m, "Features\t\t:");
if (cpu_has_cpucfg) seq_printf(m, " cpucfg");
if (cpu_has_lam) seq_printf(m, " lam");
if (cpu_has_ual) seq_printf(m, " ual");
if (cpu_has_fpu) seq_printf(m, " fpu");
if (cpu_has_lsx) seq_printf(m, " lsx");
if (cpu_has_lasx) seq_printf(m, " lasx");
if (cpu_has_crc32) seq_printf(m, " crc32");
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
seq_printf(m, "\n");
seq_printf(m, "Hardware Watchpoint\t: %s",
cpu_has_watch ? "yes, " : "no\n");
if (cpu_has_watch) {
seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
}
proc_cpuinfo_notifier_args.m = m;
proc_cpuinfo_notifier_args.n = n;
raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
&proc_cpuinfo_notifier_args);
seq_printf(m, "\n");
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
unsigned long i = *pos;
return i < nr_cpu_ids ? (void *)(i + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
| linux-master | arch/loongarch/kernel/proc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/cpu.h>
#include <linux/init.h>
#include <asm/fpu.h>
#include <asm/smp.h>
static unsigned int euen_mask = CSR_EUEN_FPEN;
/*
* The critical section between kernel_fpu_begin() and kernel_fpu_end()
* is non-reentrant. It is the caller's responsibility to avoid reentrance.
* See drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c as an example.
*/
static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current);
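/*
 * Enable FP/SIMD in CSR.EUEN for kernel use and save the widest live
 * user context (LASX, then LSX, then scalar FP) so that kernel code
 * may clobber the FP registers until kernel_fpu_end().
 */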
void kernel_fpu_begin(void)
{
unsigned int *euen_curr;
preempt_disable();
WARN_ON(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
euen_curr = this_cpu_ptr(&euen_current);
*euen_curr = csr_xchg32(euen_mask, euen_mask, LOONGARCH_CSR_EUEN);
#ifdef CONFIG_CPU_HAS_LASX
if (*euen_curr & CSR_EUEN_LASXEN)
_save_lasx(¤t->thread.fpu);
else
#endif
#ifdef CONFIG_CPU_HAS_LSX
if (*euen_curr & CSR_EUEN_LSXEN)
_save_lsx(¤t->thread.fpu);
else
#endif
if (*euen_curr & CSR_EUEN_FPEN)
_save_fp(¤t->thread.fpu);
write_fcsr(LOONGARCH_FCSR0, 0);
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
unsigned int *euen_curr;
WARN_ON(!this_cpu_read(in_kernel_fpu));
euen_curr = this_cpu_ptr(&euen_current);
#ifdef CONFIG_CPU_HAS_LASX
if (*euen_curr & CSR_EUEN_LASXEN)
_restore_lasx(¤t->thread.fpu);
else
#endif
#ifdef CONFIG_CPU_HAS_LSX
if (*euen_curr & CSR_EUEN_LSXEN)
_restore_lsx(¤t->thread.fpu);
else
#endif
if (*euen_curr & CSR_EUEN_FPEN)
_restore_fp(¤t->thread.fpu);
*euen_curr = csr_xchg32(*euen_curr, euen_mask, LOONGARCH_CSR_EUEN);
this_cpu_write(in_kernel_fpu, false);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
static int __init init_euen_mask(void)
{
if (cpu_has_lsx)
euen_mask |= CSR_EUEN_LSXEN;
if (cpu_has_lasx)
euen_mask |= CSR_EUEN_LASXEN;
return 0;
}
arch_initcall(init_euen_mask);
| linux-master | arch/loongarch/kernel/kfpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Processor capabilities determination functions.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
#include <asm/cpu-features.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/pgtable-bits.h>
#include <asm/setup.h>
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
/*
* Determine the FCSR mask for FPU hardware.
*/
static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
{
unsigned long sr, mask, fcsr, fcsr0, fcsr1;
fcsr = c->fpu_csr0;
mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
sr = read_csr_euen();
enable_fpu();
fcsr0 = fcsr & mask;
write_fcsr(LOONGARCH_FCSR0, fcsr0);
fcsr0 = read_fcsr(LOONGARCH_FCSR0);
fcsr1 = fcsr | ~mask;
write_fcsr(LOONGARCH_FCSR0, fcsr1);
fcsr1 = read_fcsr(LOONGARCH_FCSR0);
write_fcsr(LOONGARCH_FCSR0, fcsr);
write_csr_euen(sr);
c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
}
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
__elf_platform = plat;
}
/* MAP BASE */
unsigned long vm_map_base;
EXPORT_SYMBOL(vm_map_base);
static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
{
#ifdef __NEED_ADDRBITS_PROBE
c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
vm_map_base = 0UL - (1UL << c->vabits);
#endif
}
static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa)
{
switch (isa) {
case LOONGARCH_CPU_ISA_LA64:
c->isa_level |= LOONGARCH_CPU_ISA_LA64;
fallthrough;
case LOONGARCH_CPU_ISA_LA32S:
c->isa_level |= LOONGARCH_CPU_ISA_LA32S;
fallthrough;
case LOONGARCH_CPU_ISA_LA32R:
c->isa_level |= LOONGARCH_CPU_ISA_LA32R;
break;
}
}
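/*
 * Probe the architectural CPUCFG and IOCSR registers to fill in the CPU
 * option and ELF hwcap bits, the ASID mask, the usable KSAVE registers
 * and the TLB geometry.
 */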
static void cpu_probe_common(struct cpuinfo_loongarch *c)
{
unsigned int config;
unsigned long asid_mask;
c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
}
if (config & CPUCFG1_CRC32) {
c->options |= LOONGARCH_CPU_CRC32;
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
}
config = read_cpucfg(LOONGARCH_CPUCFG2);
if (config & CPUCFG2_LAM) {
c->options |= LOONGARCH_CPU_LAM;
elf_hwcap |= HWCAP_LOONGARCH_LAM;
}
if (config & CPUCFG2_FP) {
c->options |= LOONGARCH_CPU_FPU;
elf_hwcap |= HWCAP_LOONGARCH_FPU;
}
#ifdef CONFIG_CPU_HAS_LSX
if (config & CPUCFG2_LSX) {
c->options |= LOONGARCH_CPU_LSX;
elf_hwcap |= HWCAP_LOONGARCH_LSX;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
if (config & CPUCFG2_LASX) {
c->options |= LOONGARCH_CPU_LASX;
elf_hwcap |= HWCAP_LOONGARCH_LASX;
}
#endif
if (config & CPUCFG2_COMPLEX) {
c->options |= LOONGARCH_CPU_COMPLEX;
elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
}
if (config & CPUCFG2_CRYPTO) {
c->options |= LOONGARCH_CPU_CRYPTO;
elf_hwcap |= HWCAP_LOONGARCH_CRYPTO;
}
if (config & CPUCFG2_PTW) {
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
}
#ifdef CONFIG_CPU_HAS_LBT
if (config & CPUCFG2_X86BT) {
c->options |= LOONGARCH_CPU_LBT_X86;
elf_hwcap |= HWCAP_LOONGARCH_LBT_X86;
}
if (config & CPUCFG2_ARMBT) {
c->options |= LOONGARCH_CPU_LBT_ARM;
elf_hwcap |= HWCAP_LOONGARCH_LBT_ARM;
}
if (config & CPUCFG2_MIPSBT) {
c->options |= LOONGARCH_CPU_LBT_MIPS;
elf_hwcap |= HWCAP_LOONGARCH_LBT_MIPS;
}
#endif
config = read_cpucfg(LOONGARCH_CPUCFG6);
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
if (config & IOCSRF_CSRIPI)
c->options |= LOONGARCH_CPU_CSRIPI;
if (config & IOCSRF_EXTIOI)
c->options |= LOONGARCH_CPU_EXTIOI;
if (config & IOCSRF_FREQSCALE)
c->options |= LOONGARCH_CPU_SCALEFREQ;
if (config & IOCSRF_FLATMODE)
c->options |= LOONGARCH_CPU_FLATMODE;
if (config & IOCSRF_EIODECODE)
c->options |= LOONGARCH_CPU_EIODECODE;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
set_cpu_asid_mask(c, asid_mask);
config = read_csr_prcfg1();
c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
config = read_csr_prcfg3();
switch (config & CSR_CONF3_TLBTYPE) {
case 0:
c->tlbsizemtlb = 0;
c->tlbsizestlbsets = 0;
c->tlbsizestlbways = 0;
c->tlbsize = 0;
break;
case 1:
c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
c->tlbsizestlbsets = 0;
c->tlbsizestlbways = 0;
c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
break;
case 2:
c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT);
c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1;
c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
break;
default:
pr_warn("Warning: unknown TLB type\n");
}
}
#define MAX_NAME_LEN 32
#define VENDOR_OFFSET 0
#define CPUNAME_OFFSET 9
static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
if (!__cpu_full_name[cpu])
__cpu_full_name[cpu] = cpu_full_name;
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
switch (c->processor_id & PRID_SERIES_MASK) {
case PRID_SERIES_LA132:
c->cputype = CPU_LOONGSON32;
set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
break;
case PRID_SERIES_LA264:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
break;
case PRID_SERIES_LA364:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
break;
case PRID_SERIES_LA464:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
break;
case PRID_SERIES_LA664:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
break;
default: /* Default to 64 bit */
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
}
#ifdef CONFIG_64BIT
/* For use by uaccess.h */
u64 __ua_limit;
EXPORT_SYMBOL(__ua_limit);
#endif
const char *__cpu_family[NR_CPUS];
const char *__cpu_full_name[NR_CPUS];
const char *__elf_platform;
static void cpu_report(void)
{
struct cpuinfo_loongarch *c = ¤t_cpu_data;
pr_info("CPU%d revision is: %08x (%s)\n",
smp_processor_id(), c->processor_id, cpu_family_string());
if (c->options & LOONGARCH_CPU_FPU)
pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
}
void cpu_probe(void)
{
unsigned int cpu = smp_processor_id();
struct cpuinfo_loongarch *c = ¤t_cpu_data;
/*
 * Set a default ELF platform; the CPU probe may later
 * overwrite it with a more precise value.
*/
set_elf_platform(cpu, "loongarch");
c->cputype = CPU_UNKNOWN;
c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
c->fpu_csr0 = FPU_CSR_RN;
c->fpu_mask = FPU_CSR_RSVD;
cpu_probe_common(c);
per_cpu_trap_init(cpu);
switch (c->processor_id & PRID_COMP_MASK) {
case PRID_COMP_LOONGSON:
cpu_probe_loongson(c, cpu);
break;
}
BUG_ON(!__cpu_family[cpu]);
BUG_ON(c->cputype == CPU_UNKNOWN);
cpu_probe_addrbits(c);
#ifdef CONFIG_64BIT
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vabits) - 1);
#endif
cpu_report();
}
| linux-master | arch/loongarch/kernel/cpu-probe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Author: Hanlu Li <[email protected]>
* Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef DEBUG_SIG
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif
/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
#ifdef CONFIG_CPU_HAS_LBT
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _save_ftop_context(void __user *ftop);
extern asmlinkage int _restore_ftop_context(void __user *ftop);
#endif
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
};
struct _ctx_layout {
struct sctx_info *addr;
unsigned int size;
};
struct extctx_layout {
unsigned long size;
unsigned int flags;
struct _ctx_layout fpu;
struct _ctx_layout lsx;
struct _ctx_layout lasx;
struct _ctx_layout lbt;
struct _ctx_layout end;
};
static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
return (void __user *)((char *)info + sizeof(struct sctx_info));
}
/*
 * Copy thread-saved context to/from a signal context presumed to be on the
 * user stack, and therefore accessed with the appropriate macros from uaccess.h.
*/
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
int i;
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |=
__put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
®s[i]);
}
err |= __put_user(current->thread.fpu.fcc, fcc);
err |= __put_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
int i;
int err = 0;
u64 fpr_val;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, ®s[i]);
set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
}
err |= __get_user(current->thread.fpu.fcc, fcc);
err |= __get_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
int i;
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
®s[2*i]);
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1),
®s[2*i+1]);
}
err |= __put_user(current->thread.fpu.fcc, fcc);
err |= __put_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
int i;
int err = 0;
u64 fpr_val;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, ®s[2*i]);
set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
err |= __get_user(fpr_val, ®s[2*i+1]);
set_fpr64(¤t->thread.fpu.fpr[i], 1, fpr_val);
}
err |= __get_user(current->thread.fpu.fcc, fcc);
err |= __get_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
int i;
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
®s[4*i]);
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1),
®s[4*i+1]);
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 2),
®s[4*i+2]);
err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 3),
®s[4*i+3]);
}
err |= __put_user(current->thread.fpu.fcc, fcc);
err |= __put_user(current->thread.fpu.fcsr, fcsr);
return err;
}
static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
int i;
int err = 0;
u64 fpr_val;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(fpr_val, ®s[4*i]);
set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
err |= __get_user(fpr_val, ®s[4*i+1]);
set_fpr64(¤t->thread.fpu.fpr[i], 1, fpr_val);
err |= __get_user(fpr_val, ®s[4*i+2]);
set_fpr64(¤t->thread.fpu.fpr[i], 2, fpr_val);
err |= __get_user(fpr_val, ®s[4*i+3]);
set_fpr64(¤t->thread.fpu.fpr[i], 3, fpr_val);
}
err |= __get_user(current->thread.fpu.fcc, fcc);
err |= __get_user(current->thread.fpu.fcsr, fcsr);
return err;
}
#ifdef CONFIG_CPU_HAS_LBT
static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
{
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
err |= __put_user(current->thread.lbt.scr0, ®s[0]);
err |= __put_user(current->thread.lbt.scr1, ®s[1]);
err |= __put_user(current->thread.lbt.scr2, ®s[2]);
err |= __put_user(current->thread.lbt.scr3, ®s[3]);
err |= __put_user(current->thread.lbt.eflags, eflags);
return err;
}
static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
{
int err = 0;
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
err |= __get_user(current->thread.lbt.scr0, ®s[0]);
err |= __get_user(current->thread.lbt.scr1, ®s[1]);
err |= __get_user(current->thread.lbt.scr2, ®s[2]);
err |= __get_user(current->thread.lbt.scr3, ®s[3]);
err |= __get_user(current->thread.lbt.eflags, eflags);
return err;
}
static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
{
uint32_t __user *ftop = &ctx->ftop;
return __put_user(current->thread.fpu.ftop, ftop);
}
static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
{
uint32_t __user *ftop = &ctx->ftop;
return __get_user(current->thread.fpu.ftop, ftop);
}
#endif
/*
* Wrappers for the assembly _{save,restore}_fp_context functions.
*/
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _save_fp_context(regs, fcc, fcsr);
}
static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _restore_fp_context(regs, fcc, fcsr);
}
static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _save_lsx_context(regs, fcc, fcsr);
}
static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _restore_lsx_context(regs, fcc, fcsr);
}
static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _save_lasx_context(regs, fcc, fcsr);
}
static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint64_t __user *fcc = &ctx->fcc;
uint32_t __user *fcsr = &ctx->fcsr;
return _restore_lasx_context(regs, fcc, fcsr);
}
/*
* Wrappers for the assembly _{save,restore}_lbt_context functions.
*/
#ifdef CONFIG_CPU_HAS_LBT
static int save_hw_lbt_context(struct lbt_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
return _save_lbt_context(regs, eflags);
}
static int restore_hw_lbt_context(struct lbt_context __user *ctx)
{
uint64_t __user *regs = (uint64_t *)&ctx->regs;
uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
return _restore_lbt_context(regs, eflags);
}
static int save_hw_ftop_context(struct lbt_context __user *ctx)
{
uint32_t __user *ftop = &ctx->ftop;
return _save_ftop_context(ftop);
}
static int restore_hw_ftop_context(struct lbt_context __user *ctx)
{
uint32_t __user *ftop = &ctx->ftop;
return _restore_ftop_context(ftop);
}
#endif
static int fcsr_pending(unsigned int __user *fcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fcsr);
enabled = ((csr & FPU_CSR_ALL_E) << 24);
/*
 * If the signal handler set some FPU exceptions, clear them and
 * send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fcsr);
sig = SIGFPE;
}
return err ?: sig;
}
/*
* Helper routines
*/
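/*
 * The protected_{save,restore}_*() helpers below access the extended
 * context directly in user memory. If that faults, they "touch" the
 * first/last words of the area with __put_user()/__get_user() to fault
 * the pages in and retry; a persistent failure means the sigframe is
 * genuinely bad.
 */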
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->fpu.addr;
struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
uint64_t __user *fcc = &fpu_ctx->fcc;
uint32_t __user *fcsr = &fpu_ctx->fcsr;
while (1) {
lock_fpu_owner();
if (is_fpu_owner())
err = save_hw_fpu_context(fpu_ctx);
else
err = copy_fpu_to_sigcontext(fpu_ctx);
unlock_fpu_owner();
err |= __put_user(FPU_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->fpu.size, &info->size);
if (likely(!err))
break;
/* Touch the FPU context and try again */
err = __put_user(0, ®s[0]) |
__put_user(0, ®s[31]) |
__put_user(0, fcc) |
__put_user(0, fcsr);
if (err)
return err; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
int err = 0, sig = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->fpu.addr;
struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
uint64_t __user *fcc = &fpu_ctx->fcc;
uint32_t __user *fcsr = &fpu_ctx->fcsr;
err = sig = fcsr_pending(fcsr);
if (err < 0)
return err;
while (1) {
lock_fpu_owner();
if (is_fpu_owner())
err = restore_hw_fpu_context(fpu_ctx);
else
err = copy_fpu_from_sigcontext(fpu_ctx);
unlock_fpu_owner();
if (likely(!err))
break;
/* Touch the FPU context and try again */
err = __get_user(tmp, ®s[0]) |
__get_user(tmp, ®s[31]) |
__get_user(tmp, fcc) |
__get_user(tmp, fcsr);
if (err)
break; /* really bad sigcontext */
}
return err ?: sig;
}
static int protected_save_lsx_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->lsx.addr;
struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
uint64_t __user *fcc = &lsx_ctx->fcc;
uint32_t __user *fcsr = &lsx_ctx->fcsr;
while (1) {
lock_fpu_owner();
if (is_lsx_enabled())
err = save_hw_lsx_context(lsx_ctx);
else {
if (is_fpu_owner())
save_fp(current);
err = copy_lsx_to_sigcontext(lsx_ctx);
}
unlock_fpu_owner();
err |= __put_user(LSX_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->lsx.size, &info->size);
if (likely(!err))
break;
/* Touch the LSX context and try again */
err = __put_user(0, ®s[0]) |
__put_user(0, ®s[32*2-1]) |
__put_user(0, fcc) |
__put_user(0, fcsr);
if (err)
return err; /* really bad sigcontext */
}
return err;
}
static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
int err = 0, sig = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->lsx.addr;
struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
uint64_t __user *fcc = &lsx_ctx->fcc;
uint32_t __user *fcsr = &lsx_ctx->fcsr;
err = sig = fcsr_pending(fcsr);
if (err < 0)
return err;
while (1) {
lock_fpu_owner();
if (is_lsx_enabled())
err = restore_hw_lsx_context(lsx_ctx);
else {
err = copy_lsx_from_sigcontext(lsx_ctx);
if (is_fpu_owner())
restore_fp(current);
}
unlock_fpu_owner();
if (likely(!err))
break;
/* Touch the LSX context and try again */
err = __get_user(tmp, ®s[0]) |
__get_user(tmp, ®s[32*2-1]) |
__get_user(tmp, fcc) |
__get_user(tmp, fcsr);
if (err)
break; /* really bad sigcontext */
}
return err ?: sig;
}
static int protected_save_lasx_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->lasx.addr;
struct lasx_context __user *lasx_ctx =
(struct lasx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
uint64_t __user *fcc = &lasx_ctx->fcc;
uint32_t __user *fcsr = &lasx_ctx->fcsr;
while (1) {
lock_fpu_owner();
if (is_lasx_enabled())
err = save_hw_lasx_context(lasx_ctx);
else {
if (is_lsx_enabled())
save_lsx(current);
else if (is_fpu_owner())
save_fp(current);
err = copy_lasx_to_sigcontext(lasx_ctx);
}
unlock_fpu_owner();
err |= __put_user(LASX_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->lasx.size, &info->size);
if (likely(!err))
break;
/* Touch the LASX context and try again */
err = __put_user(0, ®s[0]) |
__put_user(0, ®s[32*4-1]) |
__put_user(0, fcc) |
__put_user(0, fcsr);
if (err)
return err; /* really bad sigcontext */
}
return err;
}
static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
int err = 0, sig = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->lasx.addr;
struct lasx_context __user *lasx_ctx =
(struct lasx_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
uint64_t __user *fcc = &lasx_ctx->fcc;
uint32_t __user *fcsr = &lasx_ctx->fcsr;
err = sig = fcsr_pending(fcsr);
if (err < 0)
return err;
while (1) {
lock_fpu_owner();
if (is_lasx_enabled())
err = restore_hw_lasx_context(lasx_ctx);
else {
err = copy_lasx_from_sigcontext(lasx_ctx);
if (is_lsx_enabled())
restore_lsx(current);
else if (is_fpu_owner())
restore_fp(current);
}
unlock_fpu_owner();
if (likely(!err))
break;
/* Touch the LASX context and try again */
err = __get_user(tmp, ®s[0]) |
__get_user(tmp, ®s[32*4-1]) |
__get_user(tmp, fcc) |
__get_user(tmp, fcsr);
if (err)
break; /* really bad sigcontext */
}
return err ?: sig;
}
#ifdef CONFIG_CPU_HAS_LBT
static int protected_save_lbt_context(struct extctx_layout *extctx)
{
int err = 0;
struct sctx_info __user *info = extctx->lbt.addr;
struct lbt_context __user *lbt_ctx =
(struct lbt_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
while (1) {
lock_lbt_owner();
if (is_lbt_owner())
err |= save_hw_lbt_context(lbt_ctx);
else
err |= copy_lbt_to_sigcontext(lbt_ctx);
if (is_fpu_owner())
err |= save_hw_ftop_context(lbt_ctx);
else
err |= copy_ftop_to_sigcontext(lbt_ctx);
unlock_lbt_owner();
err |= __put_user(LBT_CTX_MAGIC, &info->magic);
err |= __put_user(extctx->lbt.size, &info->size);
if (likely(!err))
break;
/* Touch the LBT context and try again */
err = __put_user(0, ®s[0]) | __put_user(0, eflags);
if (err)
return err;
}
return err;
}
static int protected_restore_lbt_context(struct extctx_layout *extctx)
{
int err = 0, tmp __maybe_unused;
struct sctx_info __user *info = extctx->lbt.addr;
struct lbt_context __user *lbt_ctx =
(struct lbt_context *)get_ctx_through_ctxinfo(info);
uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
while (1) {
lock_lbt_owner();
if (is_lbt_owner())
err |= restore_hw_lbt_context(lbt_ctx);
else
err |= copy_lbt_from_sigcontext(lbt_ctx);
if (is_fpu_owner())
err |= restore_hw_ftop_context(lbt_ctx);
else
err |= copy_ftop_from_sigcontext(lbt_ctx);
unlock_lbt_owner();
if (likely(!err))
break;
/* Touch the LBT context and try again */
		err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);
if (err)
return err;
}
return err;
}
#endif
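/*
 * Fill the base sigcontext (PC, flags, GPRs), then whichever extended
 * context was laid out (LASX, else LSX, else FPU, plus LBT), and finally
 * the zero "end" record.
 */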
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
struct extctx_layout *extctx)
{
int i, err = 0;
struct sctx_info __user *info;
err |= __put_user(regs->csr_era, &sc->sc_pc);
err |= __put_user(extctx->flags, &sc->sc_flags);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
if (extctx->lasx.addr)
err |= protected_save_lasx_context(extctx);
else if (extctx->lsx.addr)
err |= protected_save_lsx_context(extctx);
else if (extctx->fpu.addr)
err |= protected_save_fpu_context(extctx);
#ifdef CONFIG_CPU_HAS_LBT
if (extctx->lbt.addr)
err |= protected_save_lbt_context(extctx);
#endif
/* Set the "end" magic */
info = (struct sctx_info *)extctx->end.addr;
err |= __put_user(0, &info->magic);
err |= __put_user(0, &info->size);
return err;
}
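/*
 * Walk the extended-context records that follow the base sigcontext,
 * recording the address of each known context type, until the zero "end"
 * record. Undersized or unknown records invalidate the whole sigcontext.
 */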
static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
int err = 0;
unsigned int magic, size;
struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
	while (1) {
err |= __get_user(magic, &info->magic);
err |= __get_user(size, &info->size);
if (err)
return err;
switch (magic) {
case 0: /* END */
goto done;
case FPU_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct fpu_context)))
goto invalid;
extctx->fpu.addr = info;
break;
case LSX_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct lsx_context)))
goto invalid;
extctx->lsx.addr = info;
break;
case LASX_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct lasx_context)))
goto invalid;
extctx->lasx.addr = info;
break;
case LBT_CTX_MAGIC:
if (size < (sizeof(struct sctx_info) +
sizeof(struct lbt_context)))
goto invalid;
extctx->lbt.addr = info;
break;
default:
goto invalid;
}
info = (struct sctx_info *)((char *)info + size);
}
done:
return 0;
invalid:
return -EINVAL;
}
static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int i, err = 0;
struct extctx_layout extctx;
memset(&extctx, 0, sizeof(struct extctx_layout));
err = __get_user(extctx.flags, &sc->sc_flags);
if (err)
goto bad;
err = parse_extcontext(sc, &extctx);
if (err)
goto bad;
conditional_used_math(extctx.flags & SC_USED_FP);
/*
* The signal handler may have used FPU; give it up if the program
* doesn't want it following sigreturn.
*/
if (!(extctx.flags & SC_USED_FP))
lose_fpu(0);
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->csr_era, &sc->sc_pc);
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
if (extctx.lasx.addr)
err |= protected_restore_lasx_context(&extctx);
else if (extctx.lsx.addr)
err |= protected_restore_lsx_context(&extctx);
else if (extctx.fpu.addr)
err |= protected_restore_fpu_context(&extctx);
#ifdef CONFIG_CPU_HAS_LBT
if (extctx.lbt.addr)
err |= protected_restore_lbt_context(&extctx);
#endif
bad:
return err;
}
static unsigned int handle_flags(void)
{
unsigned int flags = 0;
flags = used_math() ? SC_USED_FP : 0;
switch (current->thread.error_code) {
case 1:
flags |= SC_ADDRERR_RD;
break;
case 2:
flags |= SC_ADDRERR_WR;
break;
}
return flags;
}
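/*
 * Carve one extended-context record out of the downward-growing signal
 * frame: align its payload to at least 16 bytes and reserve room for the
 * struct sctx_info header in front of it.
 */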
static unsigned long extframe_alloc(struct extctx_layout *extctx,
struct _ctx_layout *layout,
size_t size, unsigned int align, unsigned long base)
{
unsigned long new_base = base - size;
new_base = round_down(new_base, (align < 16 ? 16 : align));
new_base -= sizeof(struct sctx_info);
layout->addr = (void *)new_base;
layout->size = (unsigned int)(base - new_base);
extctx->size += layout->size;
return new_base;
}
static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
unsigned long new_sp = sp;
memset(extctx, 0, sizeof(struct extctx_layout));
extctx->flags = handle_flags();
/* Grow down, alloc "end" context info first. */
new_sp -= sizeof(struct sctx_info);
extctx->end.addr = (void *)new_sp;
extctx->end.size = (unsigned int)sizeof(struct sctx_info);
extctx->size += extctx->end.size;
if (extctx->flags & SC_USED_FP) {
if (cpu_has_lasx && thread_lasx_context_live())
new_sp = extframe_alloc(extctx, &extctx->lasx,
sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
else if (cpu_has_lsx && thread_lsx_context_live())
new_sp = extframe_alloc(extctx, &extctx->lsx,
sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
else if (cpu_has_fpu)
new_sp = extframe_alloc(extctx, &extctx->fpu,
sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
}
#ifdef CONFIG_CPU_HAS_LBT
if (cpu_has_lbt && thread_lbt_context_live()) {
new_sp = extframe_alloc(extctx, &extctx->lbt,
sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
}
#endif
return new_sp;
}
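/*
 * Find room on the user stack (or the alternate signal stack) for the
 * rt_sigframe plus any extended context, keeping 16-byte alignment.
 */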
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
struct extctx_layout *extctx)
{
unsigned long sp;
/* Default to using normal stack */
sp = regs->regs[3];
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) &&
!likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
return (void __user __force *)(-1UL);
sp = sigsp(sp, ksig);
sp = round_down(sp, 16);
sp = setup_extcontext(extctx, sp);
sp -= sizeof(struct rt_sigframe);
if (!IS_ALIGNED(sp, 16))
BUG();
return (void __user *)sp;
}
/*
 * Atomically swap in the saved signal mask and restore the signal context.
*/
SYSCALL_DEFINE0(rt_sigreturn)
{
int sig;
sigset_t set;
struct pt_regs *regs;
struct rt_sigframe __user *frame;
regs = current_pt_regs();
frame = (struct rt_sigframe __user *)regs->regs[3];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
regs->regs[0] = 0; /* No syscall restarting */
if (restore_altstack(&frame->rs_uctx.uc_stack))
goto badframe;
return regs->regs[4];
badframe:
force_sig(SIGSEGV);
return 0;
}
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
int err = 0;
struct extctx_layout extctx;
struct rt_sigframe __user *frame;
frame = get_sigframe(ksig, regs, &extctx);
if (!access_ok(frame, sizeof(*frame) + extctx.size))
return -EFAULT;
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->rs_uctx.uc_flags);
err |= __put_user(NULL, &frame->rs_uctx.uc_link);
err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = pointer to siginfo
* a2 = pointer to ucontext
*
	 * csr_era points to the signal handler, $r3 (sp) points to
* the struct rt_sigframe.
*/
regs->regs[4] = ksig->sig;
regs->regs[5] = (unsigned long) &frame->rs_info;
regs->regs[6] = (unsigned long) &frame->rs_uctx;
regs->regs[3] = (unsigned long) frame;
regs->regs[1] = (unsigned long) sig_return;
regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->csr_era, regs->regs[1]);
return 0;
}
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
int ret;
sigset_t *oldset = sigmask_to_save();
void *vdso = current->mm->context.vdso;
/* Are we from a system call? */
if (regs->regs[0]) {
switch (regs->regs[4]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->regs[4] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
regs->regs[4] = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
regs->regs[4] = regs->orig_a0;
regs->csr_era -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
rseq_signal_deliver(ksig, regs);
ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
signal_setup_done(ret, ksig, 0);
}
void arch_do_signal_or_restart(struct pt_regs *regs)
{
struct ksignal ksig;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
/* Are we from a system call? */
if (regs->regs[0]) {
switch (regs->regs[4]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->regs[4] = regs->orig_a0;
regs->csr_era -= 4;
break;
case -ERESTART_RESTARTBLOCK:
regs->regs[4] = regs->orig_a0;
regs->regs[11] = __NR_restart_syscall;
regs->csr_era -= 4;
break;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
restore_saved_sigmask();
}
| linux-master | arch/loongarch/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* LoongArch KGDB support
*
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
#include <linux/hw_breakpoint.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/processor.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/hw_breakpoint.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
int kgdb_watch_activated;
static unsigned int stepped_opcode;
static unsigned long stepped_address;
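/*
 * GDB register description table. For the GPRs, orig_a0, pc and badv the
 * third field is a byte offset into struct pt_regs; for the FPR/FCC/FCSR
 * entries it is an index into the thread's saved FPU state instead.
 */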
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
{ "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
{ "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
{ "f0", GDB_SIZEOF_REG, 0 },
{ "f1", GDB_SIZEOF_REG, 1 },
{ "f2", GDB_SIZEOF_REG, 2 },
{ "f3", GDB_SIZEOF_REG, 3 },
{ "f4", GDB_SIZEOF_REG, 4 },
{ "f5", GDB_SIZEOF_REG, 5 },
{ "f6", GDB_SIZEOF_REG, 6 },
{ "f7", GDB_SIZEOF_REG, 7 },
{ "f8", GDB_SIZEOF_REG, 8 },
{ "f9", GDB_SIZEOF_REG, 9 },
{ "f10", GDB_SIZEOF_REG, 10 },
{ "f11", GDB_SIZEOF_REG, 11 },
{ "f12", GDB_SIZEOF_REG, 12 },
{ "f13", GDB_SIZEOF_REG, 13 },
{ "f14", GDB_SIZEOF_REG, 14 },
{ "f15", GDB_SIZEOF_REG, 15 },
{ "f16", GDB_SIZEOF_REG, 16 },
{ "f17", GDB_SIZEOF_REG, 17 },
{ "f18", GDB_SIZEOF_REG, 18 },
{ "f19", GDB_SIZEOF_REG, 19 },
{ "f20", GDB_SIZEOF_REG, 20 },
{ "f21", GDB_SIZEOF_REG, 21 },
{ "f22", GDB_SIZEOF_REG, 22 },
{ "f23", GDB_SIZEOF_REG, 23 },
{ "f24", GDB_SIZEOF_REG, 24 },
{ "f25", GDB_SIZEOF_REG, 25 },
{ "f26", GDB_SIZEOF_REG, 26 },
{ "f27", GDB_SIZEOF_REG, 27 },
{ "f28", GDB_SIZEOF_REG, 28 },
{ "f29", GDB_SIZEOF_REG, 29 },
{ "f30", GDB_SIZEOF_REG, 30 },
{ "f31", GDB_SIZEOF_REG, 31 },
{ "fcc0", 1, 0 },
{ "fcc1", 1, 1 },
{ "fcc2", 1, 2 },
{ "fcc3", 1, 3 },
{ "fcc4", 1, 4 },
{ "fcc5", 1, 5 },
{ "fcc6", 1, 6 },
{ "fcc7", 1, 7 },
{ "fcsr", 4, 0 },
};
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
int reg_offset, reg_size;
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return NULL;
reg_offset = dbg_reg_def[regno].offset;
reg_size = dbg_reg_def[regno].size;
if (reg_offset == -1)
goto out;
/* Handle general-purpose/orig_a0/pc/badv registers */
if (regno <= DBG_PT_REGS_END) {
memcpy(mem, (void *)regs + reg_offset, reg_size);
goto out;
}
if (!(regs->csr_euen & CSR_EUEN_FPEN))
goto out;
save_fp(current);
/* Handle FP registers */
switch (regno) {
case DBG_FCSR: /* Process the fcsr */
		memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
break;
case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
		memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
break;
case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
		memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
break;
default:
break;
}
out:
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
int reg_offset, reg_size;
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return -EINVAL;
reg_offset = dbg_reg_def[regno].offset;
reg_size = dbg_reg_def[regno].size;
if (reg_offset == -1)
return 0;
/* Handle general-purpose/orig_a0/pc/badv registers */
if (regno <= DBG_PT_REGS_END) {
memcpy((void *)regs + reg_offset, mem, reg_size);
return 0;
}
if (!(regs->csr_euen & CSR_EUEN_FPEN))
return 0;
/* Handle FP registers */
switch (regno) {
case DBG_FCSR: /* Process the fcsr */
		memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
break;
case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
		memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
break;
case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
		memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
break;
default:
break;
}
restore_fp(current);
return 0;
}
/*
 * Similar to regs_to_gdb_regs() except that the process is sleeping, so
* we may not be able to get all the info.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
/* Initialize to zero */
memset((char *)gdb_regs, 0, NUMREGBYTES);
gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
gdb_regs[DBG_LOONGARCH_TP] = (long)p;
gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
/* S0 - S8 */
gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
/*
	 * The PC uses the return address (RA), i.e. the moment after return from __switch_to()
*/
gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->csr_era = pc;
}
void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ( \
".globl kgdb_breakinst\n\t" \
"nop\n" \
"kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
}
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
*/
static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
if (regs->csr_era == (unsigned long)&kgdb_breakinst)
regs->csr_era += LOONGARCH_INSN_SIZE;
return NOTIFY_STOP;
}
bool kgdb_breakpoint_handler(struct pt_regs *regs)
{
struct die_args args = {
.regs = regs,
.str = "Break",
.err = BRK_KDB,
.trapnr = read_csr_excode(),
.signr = SIGTRAP,
};
return (kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP) ? true : false;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_loongarch_notify,
};
static inline void kgdb_arch_update_addr(struct pt_regs *regs,
char *remcom_in_buffer)
{
unsigned long addr;
char *ptr;
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->csr_era = addr;
}
/* Calculate the new address for after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
char cj_val;
unsigned int si, si_l, si_h, rd, rj, cj;
unsigned long pc = instruction_pointer(regs);
union loongarch_instruction *ip = (union loongarch_instruction *)pc;
if (pc & 3) {
pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
return -EINVAL;
}
*next_addr = pc + LOONGARCH_INSN_SIZE;
si_h = ip->reg0i26_format.immediate_h;
si_l = ip->reg0i26_format.immediate_l;
switch (ip->reg0i26_format.opcode) {
case b_op:
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
return 0;
case bl_op:
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
return 0;
}
rj = ip->reg1i21_format.rj;
cj = (rj & 0x07) + DBG_FCC_BASE;
si_l = ip->reg1i21_format.immediate_l;
si_h = ip->reg1i21_format.immediate_h;
dbg_get_reg(cj, &cj_val, regs);
switch (ip->reg1i21_format.opcode) {
case beqz_op:
if (regs->regs[rj] == 0)
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
return 0;
case bnez_op:
if (regs->regs[rj] != 0)
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
return 0;
case bceqz_op: /* bceqz_op = bcnez_op */
if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
*next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
return 0;
}
rj = ip->reg2i16_format.rj;
rd = ip->reg2i16_format.rd;
si = ip->reg2i16_format.immediate;
switch (ip->reg2i16_format.opcode) {
case beq_op:
if (regs->regs[rj] == regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case bne_op:
if (regs->regs[rj] != regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case blt_op:
if ((long)regs->regs[rj] < (long)regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case bge_op:
if ((long)regs->regs[rj] >= (long)regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case bltu_op:
if (regs->regs[rj] < regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case bgeu_op:
if (regs->regs[rj] >= regs->regs[rd])
*next_addr = pc + sign_extend64(si << 2, 17);
return 0;
case jirl_op:
regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
*next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
return 0;
}
return 0;
}
static int do_single_step(struct pt_regs *regs)
{
int error = 0;
	unsigned long addr = 0; /* Where execution continues after the stepped instruction */
error = get_step_address(regs, &addr);
if (error)
return error;
/* Store the opcode in the stepped address */
error = get_kernel_nofault(stepped_opcode, (void *)addr);
if (error)
return error;
stepped_address = addr;
/* Replace the opcode with the break instruction */
error = copy_to_kernel_nofault((void *)stepped_address,
arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
if (error) {
stepped_opcode = 0;
stepped_address = 0;
} else {
kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
}
return error;
}
/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
if (stepped_opcode) {
copy_to_kernel_nofault((void *)stepped_address,
(void *)&stepped_opcode, BREAK_INSTR_SIZE);
flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
}
stepped_opcode = 0;
stepped_address = 0;
kgdb_single_step = 0;
atomic_set(&kgdb_cpu_doing_single_step, -1);
}
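/*
 * Handle the gdb continue/step/detach/kill packets: revert any pending
 * software single step, update the PC if a resume address was supplied,
 * and for 's' plant a temporary break at the next instruction.
 */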
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
int ret = 0;
undo_single_step(regs);
regs->csr_prmd |= CSR_PRMD_PWE;
switch (remcom_in_buffer[0]) {
case 'D':
case 'k':
regs->csr_prmd &= ~CSR_PRMD_PWE;
fallthrough;
case 'c':
kgdb_arch_update_addr(regs, remcom_in_buffer);
break;
case 's':
kgdb_arch_update_addr(regs, remcom_in_buffer);
ret = do_single_step(regs);
break;
default:
ret = -1;
}
return ret;
}
static struct hw_breakpoint {
unsigned int enabled;
unsigned long addr;
int len;
int type;
struct perf_event * __percpu *pev;
} breakinfo[LOONGARCH_MAX_BRP];
static int hw_break_reserve_slot(int breakno)
{
int cpu, cnt = 0;
struct perf_event **pevent;
for_each_online_cpu(cpu) {
cnt++;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_reserve_bp_slot(*pevent))
goto fail;
}
return 0;
fail:
for_each_online_cpu(cpu) {
cnt--;
if (!cnt)
break;
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
dbg_release_bp_slot(*pevent);
}
return -1;
}
static int hw_break_release_slot(int breakno)
{
int cpu;
struct perf_event **pevent;
if (dbg_is_early)
return 0;
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
if (dbg_release_bp_slot(*pevent))
/*
			 * The debugger is responsible for handling the retry on
* remove failure.
*/
return -1;
}
return 0;
}
static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < LOONGARCH_MAX_BRP; i++)
if (!breakinfo[i].enabled)
break;
if (i == LOONGARCH_MAX_BRP)
return -1;
switch (bptype) {
case BP_HARDWARE_BREAKPOINT:
breakinfo[i].type = HW_BREAKPOINT_X;
break;
case BP_READ_WATCHPOINT:
breakinfo[i].type = HW_BREAKPOINT_R;
break;
case BP_WRITE_WATCHPOINT:
breakinfo[i].type = HW_BREAKPOINT_W;
break;
case BP_ACCESS_WATCHPOINT:
breakinfo[i].type = HW_BREAKPOINT_RW;
break;
default:
return -1;
}
switch (len) {
case 1:
breakinfo[i].len = HW_BREAKPOINT_LEN_1;
break;
case 2:
breakinfo[i].len = HW_BREAKPOINT_LEN_2;
break;
case 4:
breakinfo[i].len = HW_BREAKPOINT_LEN_4;
break;
case 8:
breakinfo[i].len = HW_BREAKPOINT_LEN_8;
break;
default:
return -1;
}
breakinfo[i].addr = addr;
if (hw_break_reserve_slot(i)) {
breakinfo[i].addr = 0;
return -1;
}
breakinfo[i].enabled = 1;
return 0;
}
static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
int i;
for (i = 0; i < LOONGARCH_MAX_BRP; i++)
if (breakinfo[i].addr == addr && breakinfo[i].enabled)
break;
if (i == LOONGARCH_MAX_BRP)
return -1;
if (hw_break_release_slot(i)) {
pr_err("Cannot remove hw breakpoint at %lx\n", addr);
return -1;
}
breakinfo[i].enabled = 0;
return 0;
}
static void kgdb_disable_hw_break(struct pt_regs *regs)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled == 1)
continue;
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
}
/* Disable hardware debugging while we are in kgdb */
csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
}
static void kgdb_remove_all_hw_break(void)
{
int i;
int cpu = raw_smp_processor_id();
struct perf_event *bp;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (!bp->attr.disabled) {
arch_uninstall_hw_breakpoint(bp);
bp->attr.disabled = 1;
continue;
}
if (hw_break_release_slot(i))
pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
breakinfo[i].enabled = 0;
}
csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
kgdb_watch_activated = 0;
}
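/*
 * Re-install every enabled breakpoint/watchpoint that was disabled while
 * inside the debugger, and set CSR.CRMD.WE only if at least one is active.
 */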
static void kgdb_correct_hw_break(void)
{
int i, activated = 0;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
struct perf_event *bp;
int val;
int cpu = raw_smp_processor_id();
if (!breakinfo[i].enabled)
continue;
bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
if (bp->attr.disabled != 1)
continue;
bp->attr.bp_addr = breakinfo[i].addr;
bp->attr.bp_len = breakinfo[i].len;
bp->attr.bp_type = breakinfo[i].type;
val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
if (val)
return;
val = arch_install_hw_breakpoint(bp);
if (!val)
bp->attr.disabled = 0;
activated = 1;
}
csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
kgdb_watch_activated = activated;
}
const struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
.flags = KGDB_HW_BREAKPOINT,
.set_hw_breakpoint = kgdb_set_hw_break,
.remove_hw_breakpoint = kgdb_remove_hw_break,
.disable_hw_break = kgdb_disable_hw_break,
.remove_all_hw_break = kgdb_remove_all_hw_break,
.correct_hw_break = kgdb_correct_hw_break,
};
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
void kgdb_arch_late(void)
{
int i, cpu;
struct perf_event_attr attr;
struct perf_event **pevent;
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
if (breakinfo[i].pev)
continue;
breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
if (IS_ERR((void * __force)breakinfo[i].pev)) {
pr_err("kgdb: Could not allocate hw breakpoints.\n");
breakinfo[i].pev = NULL;
return;
}
for_each_online_cpu(cpu) {
pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
if (pevent[0]->destroy) {
pevent[0]->destroy = NULL;
release_bp_slot(*pevent);
}
}
}
}
void kgdb_arch_exit(void)
{
int i;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
if (breakinfo[i].pev) {
unregister_wide_hw_breakpoint(breakinfo[i].pev);
breakinfo[i].pev = NULL;
}
}
unregister_die_notifier(&kgdb_notifier);
}
| linux-master | arch/loongarch/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_32BIT
u64 perf_reg_abi(struct task_struct *tsk)
{
return PERF_SAMPLE_REGS_ABI_32;
}
#else /* Must be CONFIG_64BIT */
u64 perf_reg_abi(struct task_struct *tsk)
{
if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
#endif /* CONFIG_32BIT */
int perf_reg_validate(u64 mask)
{
if (!mask)
return -EINVAL;
if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
return -EINVAL;
return 0;
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
return 0;
if ((u32)idx == PERF_REG_LOONGARCH_PC)
return regs->csr_era;
return regs->regs[idx];
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
| linux-master | arch/loongarch/kernel/perf_regs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* LoongArch idle loop support.
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/cpu.h>
#include <linux/irqflags.h>
#include <asm/cpu.h>
#include <asm/idle.h>
void __cpuidle arch_cpu_idle(void)
{
raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */
raw_local_irq_disable();
}
| linux-master | arch/loongarch/kernel/idle.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <asm/break.h>
#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP)
#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void arch_prepare_ss_slot(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
p->ainsn.insn[1] = KPROBE_SSTEPBP_INSN;
p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE;
}
NOKPROBE_SYMBOL(arch_prepare_ss_slot);
static void arch_prepare_simulate(struct kprobe *p)
{
p->ainsn.restore = 0;
}
NOKPROBE_SYMBOL(arch_prepare_simulate);
int arch_prepare_kprobe(struct kprobe *p)
{
union loongarch_instruction insn;
if ((unsigned long)p->addr & 0x3)
return -EILSEQ;
/* copy instruction */
p->opcode = *p->addr;
insn.word = p->opcode;
/* decode instruction */
if (insns_not_supported(insn))
return -EINVAL;
if (insns_need_simulation(insn)) {
p->ainsn.insn = NULL;
} else {
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
}
/* prepare the instruction */
if (p->ainsn.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
/* Install breakpoint in text */
void arch_arm_kprobe(struct kprobe *p)
{
*p->addr = KPROBE_BP_INSN;
flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
/* Remove breakpoint from text */
void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
NOKPROBE_SYMBOL(save_previous_kprobe);
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(restore_previous_kprobe);
static void set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(set_current_kprobe);
/*
* Interrupts need to be disabled before single-step mode is set,
* and not reenabled until after single-step mode ends.
 * If interrupts stay enabled on the local CPU, one may arrive between
 * the exception return and the start of the out-of-line single step,
 * and we would then wrongly single-step into the interrupt handler.
*/
static void save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_status = regs->csr_prmd;
regs->csr_prmd &= ~CSR_PRMD_PIE;
}
NOKPROBE_SYMBOL(save_local_irqflag);
static void restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
regs->csr_prmd = kcb->saved_status;
}
NOKPROBE_SYMBOL(restore_local_irqflag);
static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
	/* Restore the instruction pointer if the insn was single-stepped (non-branching) */
if (cur->ainsn.restore != 0)
instruction_pointer_set(regs, cur->ainsn.restore);
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
preempt_enable_no_resched();
return;
}
/*
	 * Update the kcb status even if cur->post_handler is
	 * not set, because reset_current_kprobe() doesn't update kcb.
*/
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler)
cur->post_handler(cur, regs, 0);
reset_current_kprobe();
preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(post_kprobe_handler);
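/*
 * Execute the probed instruction: either jump to the out-of-line slot for a
 * hardware-assisted single step, or simulate it directly and run the post
 * handler immediately.
 */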
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
union loongarch_instruction insn;
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
kcb->kprobe_status = KPROBE_HIT_SS;
}
if (p->ainsn.insn) {
/* IRQs and single stepping do not mix well */
save_local_irqflag(kcb, regs);
/* set ip register to prepare for single stepping */
regs->csr_era = (unsigned long)p->ainsn.insn;
} else {
		/* simulate single stepping */
insn.word = p->opcode;
arch_simulate_insn(insn, regs);
/* now go for post processing */
post_kprobe_handler(p, kcb, regs);
}
}
NOKPROBE_SYMBOL(setup_singlestep);
static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_REENTER:
pr_warn("Failed to recover from reentered kprobes.\n");
dump_kprobe(p);
WARN_ON_ONCE(1);
break;
default:
WARN_ON(1);
return false;
}
return true;
}
NOKPROBE_SYMBOL(reenter_kprobe);
bool kprobe_breakpoint_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb;
struct kprobe *p, *cur_kprobe;
kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing.
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
cur_kprobe = kprobe_running();
p = get_kprobe(addr);
if (p) {
if (cur_kprobe) {
if (reenter_kprobe(p, regs, kcb))
return true;
} else {
/* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it has modified
			 * the execution path and there is no need for single
			 * stepping. Let's just reset the current kprobe and exit.
			 *
			 * The pre_handler can itself hit a breakpoint and step
			 * through it before returning.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs, kcb, 0);
} else {
reset_current_kprobe();
preempt_enable_no_resched();
}
return true;
}
}
if (*addr != KPROBE_BP_INSN) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
		 * Return to the original instruction and continue.
*/
regs->csr_era = (unsigned long)addr;
preempt_enable_no_resched();
return true;
}
preempt_enable_no_resched();
return false;
}
NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
bool kprobe_singlestep_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long addr = instruction_pointer(regs);
if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
((unsigned long)&cur->ainsn.insn[1] == addr)) {
restore_local_irqflag(kcb, regs);
post_kprobe_handler(cur, kcb, regs);
return true;
}
preempt_enable_no_resched();
return false;
}
NOKPROBE_SYMBOL(kprobe_singlestep_handler);
bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address, and
		 * allow the page fault handler to continue as for a
		 * normal page fault.
*/
regs->csr_era = (unsigned long)cur->addr;
WARN_ON_ONCE(!instruction_pointer(regs));
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
} else {
restore_local_irqflag(kcb, regs);
reset_current_kprobe();
}
preempt_enable_no_resched();
break;
}
return false;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
*/
int __init arch_populate_kprobe_blacklist(void)
{
return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
(unsigned long)__irqentry_text_end);
}
int __init arch_init_kprobes(void)
{
return 0;
}
int arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
| linux-master | arch/loongarch/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author: Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
bool is_interp, struct arch_elf_state *state)
{
return 0;
}
int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
struct arch_elf_state *state)
{
return 0;
}
void loongarch_set_personality_fcsr(struct arch_elf_state *state)
{
current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
}
| linux-master | arch/loongarch/kernel/elf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 2000, 2001 Kanoj Sarcar
* Copyright (C) 2000, 2001 Ralf Baechle
* Copyright (C) 2000, 2001 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2003 Broadcom Corporation
*/
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);
/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
 * A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNCTION,
};
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNCTION] = "Function call interrupts",
};
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
}
}
/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
uint64_t val;
/* Send high 32 bits */
val = IOCSR_MBUF_SEND_BLOCKING;
val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
val |= (data & IOCSR_MBUF_SEND_H32_MASK);
iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
/* Send low 32 bits */
val = IOCSR_MBUF_SEND_BLOCKING;
val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
};
static u32 ipi_read_clear(int cpu)
{
u32 action;
/* Load the ipi register to figure out what we're supposed to do */
action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
/* Clear the ipi register to clear the interrupt */
iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
wbflush();
return action;
}
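/* Send each bit set in 'action' as a separate IPI vector to the target physical CPU */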
static void ipi_write_action(int cpu, u32 action)
{
unsigned int irq = 0;
while ((irq = ffs(action))) {
uint32_t val = IOCSR_IPI_SEND_BLOCKING;
val |= (irq - 1);
val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
action &= ~BIT(irq - 1);
}
}
void loongson_send_ipi_single(int cpu, unsigned int action)
{
ipi_write_action(cpu_logical_map(cpu), (u32)action);
}
void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
ipi_write_action(cpu_logical_map(i), (u32)action);
}
/*
* This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
void arch_smp_send_reschedule(int cpu)
{
loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
unsigned int action;
unsigned int cpu = smp_processor_id();
action = ipi_read_clear(cpu_logical_map(cpu));
if (action & SMP_RESCHEDULE) {
scheduler_ipi();
per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
}
if (action & SMP_CALL_FUNCTION) {
generic_smp_call_function_interrupt();
per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
}
return IRQ_HANDLED;
}
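/*
 * Enumerate CPUs from the device tree: the boot CPU keeps logical id 0,
 * the others take the next free slot, and the logical<->physical id maps
 * are filled in both directions.
 */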
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
unsigned int cpu, cpuid;
struct device_node *node = NULL;
for_each_of_cpu_node(node) {
if (!of_device_is_available(node))
continue;
cpuid = of_get_cpu_hwid(node, 0);
if (cpuid >= nr_cpu_ids)
continue;
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
num_processors++;
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
}
loongson_sysconf.nr_cpus = num_processors;
set_bit(0, &(loongson_sysconf.cores_io_master));
#endif
}
void __init loongson_smp_setup(void)
{
fdt_smp_setup();
cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
int i = 0;
parse_acpi_topology();
for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
set_cpu_present(i, true);
csr_mail_send(0, __cpu_logical_map[i], 0);
cpu_data[i].global_id = __cpu_logical_map[i];
}
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
/*
 * Set up the PC, SP, and TP of a secondary processor and start it running!
*/
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long entry;
pr_info("Booting CPU#%d...\n", cpu);
entry = __pa_symbol((unsigned long)&smpboot_entry);
cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
csr_mail_send(entry, cpu_logical_map(cpu), 0);
loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}
/*
* SMP init and finish on secondary CPUs
*/
void loongson_init_secondary(void)
{
unsigned int cpu = smp_processor_id();
unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
change_csr_ecfg(ECFG0_IM, imask);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
#ifdef CONFIG_NUMA
numa_add_cpu(cpu);
#endif
per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_data[cpu].package =
cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
}
void loongson_smp_finish(void)
{
local_irq_enable();
iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
pr_info("CPU#%d finished\n", smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
int loongson_cpu_disable(void)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
if (io_master(cpu))
return -EBUSY;
#ifdef CONFIG_NUMA
numa_remove_cpu(cpu);
#endif
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
local_irq_save(flags);
irq_migrate_all_off_this_cpu();
clear_csr_ecfg(ECFG0_IM);
local_irq_restore(flags);
local_flush_tlb_all();
return 0;
}
void loongson_cpu_die(unsigned int cpu)
{
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu_relax();
mb();
}
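/*
 * Parking loop for an offlined CPU: wait in 'idle' (with the IPI interrupt
 * enabled) until the boot CPU deposits a new entry address in mailbox 0,
 * then jump to it.
 */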
void __noreturn arch_cpu_idle_dead(void)
{
register uint64_t addr;
register void (*init_fn)(void);
idle_task_exit();
local_irq_enable();
set_csr_ecfg(ECFGF_IPI);
__this_cpu_write(cpu_state, CPU_DEAD);
__smp_mb();
do {
__asm__ __volatile__("idle 0\n\t");
addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
} while (addr == 0);
init_fn = (void *)TO_CACHE(addr);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
init_fn();
BUG();
}
#endif
/*
* Power management
*/
#ifdef CONFIG_PM
static int loongson_ipi_suspend(void)
{
return 0;
}
static void loongson_ipi_resume(void)
{
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}
static struct syscore_ops loongson_ipi_syscore_ops = {
.resume = loongson_ipi_resume,
.suspend = loongson_ipi_suspend,
};
/*
* Enable boot cpu ipi before enabling nonboot cpus
* during syscore_resume.
*/
static int __init ipi_pm_init(void)
{
register_syscore_ops(&loongson_ipi_syscore_ops);
return 0;
}
core_initcall(ipi_pm_init);
#endif
static inline void set_cpu_sibling_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpus_are_siblings(cpu, i)) {
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
}
}
static inline void set_cpu_core_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_core_setup_map);
for_each_cpu(i, &cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpumask_set_cpu(cpu, &cpu_core_map[i]);
}
}
}
/*
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
if (cpus_are_siblings(i, k))
core_present = 1;
if (!core_present)
cpumask_set_cpu(i, &temp_foreign_map);
}
for_each_online_cpu(i)
cpumask_andnot(&cpu_foreign_map[i],
&temp_foreign_map, &cpu_sibling_map[i]);
}
/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
unsigned int cpu, node, rr_node;
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {
node = early_cpu_to_node(cpu);
/*
* The mapping between present cpus and nodes has been
* built during MADT and SRAT parsing.
*
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return a valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), then for possible
		 * but not present cpus, early_cpu_to_node will return
		 * NUMA_NO_NODE, and we just map them to online nodes in a
		 * round-robin way. Once hotplugged, the correct mapping will
		 * be built for them.
*/
if (node != NUMA_NO_NODE)
set_cpu_numa_node(cpu, node);
else {
set_cpu_numa_node(cpu, rr_node);
rr_node = next_node_in(rr_node, node_online_map);
}
}
}
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
loongson_prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
loongson_boot_secondary(cpu, tidle);
/* Wait for CPU to start and be ready to sync counters */
if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(5000))) {
pr_crit("CPU%u: failed to start\n", cpu);
return -EIO;
}
/* Wait for CPU to finish startup & mark itself online before return */
wait_for_completion(&cpu_running);
return 0;
}
/*
* First C code run on the secondary CPUs after being started up by
* the master.
*/
asmlinkage void start_secondary(void)
{
unsigned int cpu;
sync_counter();
cpu = smp_processor_id();
set_my_cpu_offset(per_cpu_offset(cpu));
cpu_probe();
constant_clockevent_init();
loongson_init_secondary();
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
notify_cpu_starting(cpu);
/* Notify boot CPU that we're starting */
complete(&cpu_starting);
/* The CPU is running, now mark it online */
set_cpu_online(cpu, true);
calculate_cpu_foreign_map();
/*
* Notify boot CPU that we're up & online and it can safely return
* from __cpu_up()
*/
complete(&cpu_running);
/*
	 * IRQs will be enabled in loongson_smp_finish(); enabling them too
	 * early is dangerous.
*/
WARN_ON_ONCE(!irqs_disabled());
loongson_smp_finish();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
static void stop_this_cpu(void *dummy)
{
set_cpu_online(smp_processor_id(), false);
calculate_cpu_foreign_map();
local_irq_disable();
while (true);
}
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
#endif
static void flush_tlb_all_ipi(void *info)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
local_flush_tlb_mm((struct mm_struct *)mm);
}
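/*
 * If the mm may be in use on other CPUs, broadcast the flush via IPI;
 * otherwise just invalidate the other CPUs' stale contexts (so they get a
 * new ASID on next use) and flush locally.
 */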
void flush_tlb_mm(struct mm_struct *mm)
{
if (atomic_read(&mm->mm_users) == 0)
return; /* happens as a result of exit_mmap() */
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
}
local_flush_tlb_mm(mm);
}
preempt_enable();
}
struct flush_tlb_data {
struct vm_area_struct *vma;
unsigned long addr1;
unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
.addr2 = end,
};
on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
cpu_context(cpu, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
}
preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct flush_tlb_data fd = {
.addr1 = start,
.addr2 = end,
};
on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
cpu_context(cpu, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
}
preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
static void flush_tlb_one_ipi(void *info)
{
unsigned long vaddr = (unsigned long) info;
local_flush_tlb_one(vaddr);
}
void flush_tlb_one(unsigned long vaddr)
{
on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);
| linux-master | arch/loongarch/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022-2023 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "hw-breakpoint: " fmt
#include <linux/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/hw_breakpoint.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[LOONGARCH_MAX_BRP]);
/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[LOONGARCH_MAX_WRP]);
int hw_breakpoint_slots(int type)
{
/*
* We can be called early, so don't rely on
* our static variables being initialised.
*/
switch (type) {
case TYPE_INST:
return get_num_brps();
case TYPE_DATA:
return get_num_wrps();
default:
pr_warn("unknown slot type: %d\n", type);
return 0;
}
}
#define READ_WB_REG_CASE(OFF, N, REG, T, VAL) \
case (OFF + N): \
LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL); \
break
#define WRITE_WB_REG_CASE(OFF, N, REG, T, VAL) \
case (OFF + N): \
LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL); \
break
#define GEN_READ_WB_REG_CASES(OFF, REG, T, VAL) \
READ_WB_REG_CASE(OFF, 0, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 1, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 2, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 3, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 1, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 2, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 3, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
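/*
 * Accessors for the n-th watch register of a given class (ADDR/MASK/
 * CTRL/ASID); t selects the instruction (0) or data (1) register bank.
 */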
static u64 read_wb_reg(int reg, int n, int t)
{
u64 val = 0;
switch (reg + n) {
GEN_READ_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
GEN_READ_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
GEN_READ_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
default:
pr_warn("Attempt to read from unknown breakpoint register %d\n", n);
}
return val;
}
NOKPROBE_SYMBOL(read_wb_reg);
static void write_wb_reg(int reg, int n, int t, u64 val)
{
switch (reg + n) {
GEN_WRITE_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
GEN_WRITE_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
GEN_WRITE_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
default:
pr_warn("Attempt to write to unknown breakpoint register %d\n", n);
}
}
NOKPROBE_SYMBOL(write_wb_reg);
enum hw_breakpoint_ops {
HW_BREAKPOINT_INSTALL,
HW_BREAKPOINT_UNINSTALL,
};
/*
* hw_breakpoint_slot_setup - Find and setup a perf slot according to operations
*
* @slots: pointer to array of slots
* @max_slots: max number of slots
* @bp: perf_event to setup
* @ops: operation to be carried out on the slot
*
* Return:
* slot index on success
* -ENOSPC if no slot is available/matches
* -EINVAL on wrong operations parameter
*/
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
struct perf_event *bp, enum hw_breakpoint_ops ops)
{
int i;
struct perf_event **slot;
for (i = 0; i < max_slots; ++i) {
slot = &slots[i];
switch (ops) {
case HW_BREAKPOINT_INSTALL:
if (!*slot) {
*slot = bp;
return i;
}
break;
case HW_BREAKPOINT_UNINSTALL:
if (*slot == bp) {
*slot = NULL;
return i;
}
break;
default:
pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
return -EINVAL;
}
}
return -ENOSPC;
}
void ptrace_hw_copy_thread(struct task_struct *tsk)
{
memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break));
memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch));
}
/*
* Unregister breakpoints from this task and reset the pointers in the thread_struct.
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
if (t->hbp_break[i]) {
unregister_hw_breakpoint(t->hbp_break[i]);
t->hbp_break[i] = NULL;
}
}
for (i = 0; i < LOONGARCH_MAX_WRP; i++) {
if (t->hbp_watch[i]) {
unregister_hw_breakpoint(t->hbp_watch[i]);
t->hbp_watch[i] = NULL;
}
}
}
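/*
 * Program or clear a hardware breakpoint/watchpoint: find a free (or the
 * matching) per-CPU slot, write or reset the corresponding watch registers,
 * and enable CSR.CRMD.WE when installing.
 */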
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
u32 ctrl;
int i, max_slots, enable;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
max_slots = boot_cpu_data.watch_ireg_count;
} else {
/* Watchpoint */
slots = this_cpu_ptr(wp_on_reg);
max_slots = boot_cpu_data.watch_dreg_count;
}
i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
return i;
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
} else {
ctrl = encode_ctrl_reg(info->ctrl);
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
write_wb_reg(CSR_CFG_MASK, i, 0, 0);
write_wb_reg(CSR_CFG_MASK, i, 1, 0);
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
write_wb_reg(CSR_CFG_ASID, i, 0, 0);
write_wb_reg(CSR_CFG_ASID, i, 1, 0);
break;
}
return 0;
}
/*
* Install a perf counter breakpoint.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
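/* Translate the hardware length encoding into a length in bytes. */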
static int get_hbp_len(u8 hbp_len)
{
unsigned int len_in_bytes = 0;
switch (hbp_len) {
case LOONGARCH_BREAKPOINT_LEN_1:
len_in_bytes = 1;
break;
case LOONGARCH_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
case LOONGARCH_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
case LOONGARCH_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
}
return len_in_bytes;
}
/*
* Check whether bp virtual address is in kernel space.
*/
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
va = hw->address;
len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
* Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
* Hopefully this will disappear when ptrace can bypass the conversion
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
int *gen_len, int *gen_type, int *offset)
{
/* Type */
switch (ctrl.type) {
case LOONGARCH_BREAKPOINT_EXECUTE:
*gen_type = HW_BREAKPOINT_X;
break;
case LOONGARCH_BREAKPOINT_LOAD:
*gen_type = HW_BREAKPOINT_R;
break;
case LOONGARCH_BREAKPOINT_STORE:
*gen_type = HW_BREAKPOINT_W;
break;
case LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE:
*gen_type = HW_BREAKPOINT_RW;
break;
default:
return -EINVAL;
}
if (!ctrl.len)
return -EINVAL;
*offset = __ffs(ctrl.len);
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case LOONGARCH_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
case LOONGARCH_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
case LOONGARCH_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
static int arch_build_bp_info(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
/* Type */
switch (attr->bp_type) {
case HW_BREAKPOINT_X:
hw->ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
hw->ctrl.type = LOONGARCH_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Address */
hw->address = attr->bp_addr;
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
int ret;
u64 alignment_mask, offset;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
alignment_mask = 0x7;
else
alignment_mask = 0x3;
offset = hw->address & alignment_mask;
hw->address &= ~alignment_mask;
hw->ctrl.len <<= offset;
return 0;
}
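/*
* Enable or disable all currently installed slots of the given type
* (0: breakpoints, 1: watchpoints) and update PRMD.PWE to match.
*/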
static void update_bp_registers(struct pt_regs *regs, int enable, int type)
{
u32 ctrl;
int i, max_slots;
struct perf_event **slots;
struct arch_hw_breakpoint *info;
switch (type) {
case 0:
slots = this_cpu_ptr(bp_on_reg);
max_slots = boot_cpu_data.watch_ireg_count;
break;
case 1:
slots = this_cpu_ptr(wp_on_reg);
max_slots = boot_cpu_data.watch_dreg_count;
break;
default:
return;
}
for (i = 0; i < max_slots; ++i) {
if (!slots[i])
continue;
info = counter_arch_bp(slots[i]);
if (enable) {
if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
} else {
ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
ctrl |= 0x1 << MWPnCFG3_LoadEn;
if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
ctrl |= 0x1 << MWPnCFG3_StoreEn;
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
}
regs->csr_prmd |= CSR_PRMD_PWE;
} else {
if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
} else {
ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
ctrl &= ~0x1 << MWPnCFG3_LoadEn;
if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
ctrl &= ~0x1 << MWPnCFG3_StoreEn;
write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
}
regs->csr_prmd &= ~CSR_PRMD_PWE;
}
}
}
NOKPROBE_SYMBOL(update_bp_registers);
/*
* Debug exception handlers.
*/
void breakpoint_handler(struct pt_regs *regs)
{
int i;
struct perf_event *bp, **slots;
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
bp = slots[i];
if (bp == NULL)
continue;
perf_bp_event(bp, regs);
}
update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
void watchpoint_handler(struct pt_regs *regs)
{
int i;
struct perf_event *wp, **slots;
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
wp = slots[i];
if (wp == NULL)
continue;
perf_bp_event(wp, regs);
}
update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
static int __init arch_hw_breakpoint_init(void)
{
int cpu;
boot_cpu_data.watch_ireg_count = get_num_brps();
boot_cpu_data.watch_dreg_count = get_num_wrps();
pr_info("Found %d breakpoint and %d watchpoint registers.\n",
boot_cpu_data.watch_ireg_count, boot_cpu_data.watch_dreg_count);
for (cpu = 1; cpu < NR_CPUS; cpu++) {
cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count;
cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count;
}
return 0;
}
arch_initcall(arch_hw_breakpoint_init);
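/*
* On context switch: if the incoming task is single-stepping, skip the
* fetch watchpoint that matches its resume address and keep PRMD.PWE
* set; otherwise re-enable its breakpoints and watchpoints.
*/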
void hw_breakpoint_thread_switch(struct task_struct *next)
{
u64 addr, mask;
struct pt_regs *regs = task_pt_regs(next);
if (test_tsk_thread_flag(next, TIF_SINGLESTEP)) {
addr = read_wb_reg(CSR_CFG_ADDR, 0, 0);
mask = read_wb_reg(CSR_CFG_MASK, 0, 0);
if (!((regs->csr_era ^ addr) & ~mask))
csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
regs->csr_prmd |= CSR_PRMD_PWE;
} else {
/* Update breakpoints */
update_bp_registers(regs, 1, 0);
/* Update watchpoints */
update_bp_registers(regs, 1, 1);
}
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
/*
* Dummy function to register with die_notifier.
*/
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
| linux-master | arch/loongarch/kernel/hw_breakpoint.c |
// SPDX-License-Identifier: GPL-2.0
/*
* acpi.c - Architecture-Specific Low-Level ACPI Boot Support
*
* Author: Jianmin Lv <[email protected]>
* Huacai Chen <[email protected]>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;
u64 acpi_saved_sp;
#define MAX_CORE_PIC 256
#define PREFIX "ACPI: "
struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
if (!phys || !size)
return NULL;
return early_memremap(phys, size);
}
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
if (!map || !size)
return;
early_memunmap(map, size);
}
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
if (!memblock_is_memory(phys))
return ioremap(phys, size);
else
return ioremap_cache(phys, size);
}
#ifdef CONFIG_SMP
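/* Map an ACPI core id to a logical CPU and mark it possible/present. */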
static int set_processor_mask(u32 id, u32 flags)
{
int cpu, cpuid = id;
if (num_processors >= nr_cpu_ids) {
pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
return -ENODEV;
}
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
else
cpu = cpumask_next_zero(-1, cpu_present_mask);
if (flags & ACPI_MADT_ENABLED) {
num_processors++;
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
} else
disabled_cpus++;
return cpu;
}
#endif
static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_core_pic *processor = NULL;
processor = (struct acpi_madt_core_pic *)header;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
acpi_core_pic[processor->core_id] = *processor;
set_processor_mask(processor->core_id, processor->flags);
#endif
return 0;
}
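/* Record the first core of the EIO PIC's node as an I/O master core. */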
static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
static int core = 0;
struct acpi_madt_eio_pic *eiointc = NULL;
eiointc = (struct acpi_madt_eio_pic *)header;
if (BAD_MADT_ENTRY(eiointc, end))
return -EINVAL;
core = eiointc->node * CORES_PER_EIO_NODE;
set_bit(core, &(loongson_sysconf.cores_io_master));
return 0;
}
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
int i;
for (i = 0; i < NR_CPUS; i++) {
__cpu_number_map[i] = -1;
__cpu_logical_map[i] = -1;
}
#endif
acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
acpi_parse_processor, MAX_CORE_PIC);
acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
acpi_parse_eio_master, MAX_IO_PICS);
loongson_sysconf.nr_cpus = num_processors;
}
int pptt_enabled;
int __init parse_acpi_topology(void)
{
int cpu, topology_id;
for_each_possible_cpu(cpu) {
topology_id = find_acpi_cpu_topology(cpu, 0);
if (topology_id < 0) {
pr_warn("Invalid BIOS PPTT\n");
return -ENOENT;
}
if (acpi_pptt_cpu_is_thread(cpu) <= 0)
cpu_data[cpu].core = topology_id;
else {
topology_id = find_acpi_cpu_topology(cpu, 1);
if (topology_id < 0)
return -ENOENT;
cpu_data[cpu].core = topology_id;
}
}
pptt_enabled = 1;
return 0;
}
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif
void __init acpi_boot_table_init(void)
{
/*
* If acpi_disabled, bail out
*/
if (acpi_disabled)
goto fdt_earlycon;
/*
* Initialize the ACPI boot-time table parser.
*/
if (acpi_table_init()) {
disable_acpi();
goto fdt_earlycon;
}
loongson_sysconf.boot_cpu_id = read_csr_cpuid();
/*
* Process the Multiple APIC Description Table (MADT), if present
*/
acpi_process_madt();
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
return;
fdt_earlycon:
if (earlycon_acpi_spcr_enable)
early_init_dt_scan_chosen_stdout();
}
#ifdef CONFIG_ACPI_NUMA
static __init int setup_node(int pxm)
{
return acpi_map_pxm_to_node(pxm);
}
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
unsigned int numa_distance_cnt;
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
return slit->locality_count;
}
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
from, to, distance);
return;
}
node_distances[from][to] = distance;
}
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
int pxm, node;
if (srat_disabled())
return;
if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
bad_srat();
return;
}
if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
return;
pxm = pa->proximity_domain_lo;
if (acpi_srat_revision >= 2) {
pxm |= (pa->proximity_domain_hi[0] << 8);
pxm |= (pa->proximity_domain_hi[1] << 16);
pxm |= (pa->proximity_domain_hi[2] << 24);
}
node = setup_node(pxm);
if (node < 0) {
pr_err("SRAT: Too many proximity domains %x\n", pxm);
bad_srat();
return;
}
if (pa->apic_id >= CONFIG_NR_CPUS) {
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
pxm, pa->apic_id, node);
return;
}
early_numa_add_cpu(pa->apic_id, node);
set_cpuid_to_node(pa->apic_id, node);
node_set(node, numa_nodes_parsed);
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
#endif
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
memblock_reserve(addr, size);
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
nid = acpi_get_node(handle);
if (nid != NUMA_NO_NODE) {
set_cpuid_to_node(physid, nid);
node_set(nid, numa_nodes_parsed);
set_cpu_numa_node(cpu, nid);
cpumask_set_cpu(cpu, cpumask_of_node(nid));
}
#endif
return 0;
}
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
int cpu;
cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
if (cpu < 0) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
return cpu;
}
acpi_map_cpu2node(handle, cpu, physid);
*pcpu = cpu;
return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
set_cpu_present(cpu, false);
num_processors--;
pr_info("cpu%d hot remove!\n", cpu);
return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
| linux-master | arch/loongarch/kernel/acpi.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <[email protected]>
*/
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/octeon/octeon.h>
static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
unsigned long flags;
unsigned int status;
switch (action) {
case CU2_EXCEPTION:
prefetch(&current->thread.cp2);
local_irq_save(flags);
KSTK_STATUS(current) |= ST0_CU2;
status = read_c0_status();
write_c0_status(status | ST0_CU2);
octeon_cop2_restore(&(current->thread.cp2));
write_c0_status(status & ~ST0_CU2);
local_irq_restore(flags);
return NOTIFY_BAD; /* Don't call default notifier */
}
return NOTIFY_OK; /* Let default notifier send signals */
}
static int __init cnmips_cu2_setup(void)
{
return cu2_notifier(cnmips_cu2_call, 0);
}
early_initcall(cnmips_cu2_setup);
| linux-master | arch/mips/cavium-octeon/cpu.c |
/*
* Octeon Bootbus flash setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007, 2008 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <asm/octeon/octeon.h>
static struct map_info flash_map;
static struct mtd_info *mymtd;
static const char *part_probe_types[] = {
"cmdlinepart",
#ifdef CONFIG_MTD_REDBOOT_PARTS
"RedBoot",
#endif
NULL
};
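/*
* All accesses to the bootbus flash are serialized against other
* bootbus users by taking octeon_bootbus_sem around each map operation.
*/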
static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs)
{
map_word r;
down(&octeon_bootbus_sem);
r = inline_map_read(map, ofs);
up(&octeon_bootbus_sem);
return r;
}
static void octeon_flash_map_write(struct map_info *map, const map_word datum,
unsigned long ofs)
{
down(&octeon_bootbus_sem);
inline_map_write(map, datum, ofs);
up(&octeon_bootbus_sem);
}
static void octeon_flash_map_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len)
{
down(&octeon_bootbus_sem);
inline_map_copy_from(map, to, from, len);
up(&octeon_bootbus_sem);
}
static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to,
const void *from, ssize_t len)
{
down(&octeon_bootbus_sem);
inline_map_copy_to(map, to, from, len);
up(&octeon_bootbus_sem);
}
/*
* Module/ driver initialization.
*
* Returns Zero on success
*/
static int octeon_flash_probe(struct platform_device *pdev)
{
union cvmx_mio_boot_reg_cfgx region_cfg;
u32 cs;
int r;
struct device_node *np = pdev->dev.of_node;
r = of_property_read_u32(np, "reg", &cs);
if (r)
return r;
/*
* Read the bootbus region setup for this chip select to
* determine the base address of the flash.
*/
region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
if (region_cfg.s.en) {
/*
* The bootloader always takes the flash and sets its
* address so the entire flash fits below
* 0x1fc00000. This way the flash aliases to
* 0x1fc00000 for booting. Software can access the
* full flash at the true address, while core boot can
* access 4MB.
*/
/* Use this name so old part lines work */
flash_map.name = "phys_mapped_flash";
flash_map.phys = region_cfg.s.base << 16;
flash_map.size = 0x1fc00000 - flash_map.phys;
/* 8-bit bus (0 + 1) or 16-bit bus (1 + 1) */
flash_map.bankwidth = region_cfg.s.width + 1;
flash_map.virt = ioremap(flash_map.phys, flash_map.size);
pr_notice("Bootbus flash: Setting flash for %luMB flash at "
"0x%08llx\n", flash_map.size >> 20, flash_map.phys);
WARN_ON(!map_bankwidth_supported(flash_map.bankwidth));
flash_map.read = octeon_flash_map_read;
flash_map.write = octeon_flash_map_write;
flash_map.copy_from = octeon_flash_map_copy_from;
flash_map.copy_to = octeon_flash_map_copy_to;
mymtd = do_map_probe("cfi_probe", &flash_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_parse_register(mymtd, part_probe_types,
NULL, NULL, 0);
} else {
pr_err("Failed to register MTD device for flash\n");
}
}
return 0;
}
static const struct of_device_id of_flash_match[] = {
{
.compatible = "cfi-flash",
},
{ },
};
MODULE_DEVICE_TABLE(of, of_flash_match);
static struct platform_driver of_flash_driver = {
.driver = {
.name = "octeon-of-flash",
.of_match_table = of_flash_match,
},
.probe = octeon_flash_probe,
};
static int octeon_flash_init(void)
{
return platform_driver_register(&of_flash_driver);
}
late_initcall(octeon_flash_init);
MODULE_LICENSE("GPL");
| linux-master | arch/mips/cavium-octeon/flash_setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
* Copyright (C) 2009, 2012 Cavium, Inc.
*/
#include <linux/clocksource.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
#include <asm/octeon/cvmx-fpa-defs.h>
static u64 f;
static u64 rdiv;
static u64 sdiv;
static u64 octeon_udelay_factor;
static u64 octeon_ndelay_factor;
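/*
* Pre-compute the factors used by __udelay()/__ndelay() and, on
* OCTEON II/III parts, the ratio used to scale I/O clock counts to
* core clock counts.
*/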
void __init octeon_setup_delays(void)
{
octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
/*
* For __ndelay we divide by 2^16, so the factor is multiplied
* by the same amount.
*/
octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
preset_lpj = octeon_get_clock_rate() / HZ;
if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
union cvmx_mio_rst_boot rst_boot;
rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
rdiv = rst_boot.s.c_mul; /* CPU clock */
sdiv = rst_boot.s.pnr_mul; /* I/O clock */
f = (0x8000000000000000ull / sdiv) * 2;
} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
union cvmx_rst_boot rst_boot;
rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
rdiv = rst_boot.s.c_mul; /* CPU clock */
sdiv = rst_boot.s.pnr_mul; /* I/O clock */
f = (0x8000000000000000ull / sdiv) * 2;
}
}
/*
* Set the current core's cvmcount counter to the value of the
* IPD_CLK_COUNT. We do this on all cores as they are brought
* on-line. This allows for a read from a local cpu register to
* access a synchronized counter.
*
* On CPU_CAVIUM_OCTEON2 the IPD_CLK_COUNT is scaled by rdiv/sdiv.
*/
void octeon_init_cvmcount(void)
{
u64 clk_reg;
unsigned long flags;
unsigned loops = 2;
clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
/* Clobber loops so GCC will not unroll the following while loop. */
asm("" : "+r" (loops));
local_irq_save(flags);
/*
* Loop several times so we are executing from the cache,
* which should give more deterministic timing.
*/
while (loops--) {
u64 clk_count = cvmx_read_csr(clk_reg);
if (rdiv != 0) {
clk_count *= rdiv;
if (f != 0) {
asm("dmultu\t%[cnt],%[f]\n\t"
"mfhi\t%[cnt]"
: [cnt] "+r" (clk_count)
: [f] "r" (f)
: "hi", "lo");
}
}
write_c0_cvmcount(clk_count);
}
local_irq_restore(flags);
}
static u64 octeon_cvmcount_read(struct clocksource *cs)
{
return read_c0_cvmcount();
}
static struct clocksource clocksource_mips = {
.name = "OCTEON_CVMCOUNT",
.read = octeon_cvmcount_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
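/*
* Scheduler clock based on the 64-bit cvmcount counter: computes
* (cvmcount * mult) >> shift with a 128-bit intermediate product to
* avoid overflow.
*/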
unsigned long long notrace sched_clock(void)
{
/* 64-bit arithmetic can overflow, so use 128-bit. */
u64 t1, t2, t3;
unsigned long long rv;
u64 mult = clocksource_mips.mult;
u64 shift = clocksource_mips.shift;
u64 cnt = read_c0_cvmcount();
asm (
"dmultu\t%[cnt],%[mult]\n\t"
"nor\t%[t1],$0,%[shift]\n\t"
"mfhi\t%[t2]\n\t"
"mflo\t%[t3]\n\t"
"dsll\t%[t2],%[t2],1\n\t"
"dsrlv\t%[rv],%[t3],%[shift]\n\t"
"dsllv\t%[t1],%[t2],%[t1]\n\t"
"or\t%[rv],%[t1],%[rv]\n\t"
: [rv] "=&r" (rv), [t1] "=&r" (t1), [t2] "=&r" (t2), [t3] "=&r" (t3)
: [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
: "hi", "lo");
return rv;
}
void __init plat_time_init(void)
{
clocksource_mips.rating = 300;
clocksource_register_hz(&clocksource_mips, octeon_get_clock_rate());
}
void __udelay(unsigned long us)
{
u64 cur, end, inc;
cur = read_c0_cvmcount();
inc = us * octeon_udelay_factor;
end = cur + inc;
while (end > cur)
cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long ns)
{
u64 cur, end, inc;
cur = read_c0_cvmcount();
inc = ((ns * octeon_ndelay_factor) >> 16);
end = cur + inc;
while (end > cur)
cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__ndelay);
void __delay(unsigned long loops)
{
u64 cur, end;
cur = read_c0_cvmcount();
end = cur + loops;
while (end > cur)
cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(__delay);
/**
* octeon_io_clk_delay - wait for a given number of io clock cycles to pass.
*
* We scale the wait by the clock ratio, and then wait for the
* corresponding number of core clocks.
*
* @count: The number of clocks to wait.
*/
void octeon_io_clk_delay(unsigned long count)
{
u64 cur, end;
cur = read_c0_cvmcount();
if (rdiv != 0) {
end = count * rdiv;
if (f != 0) {
asm("dmultu\t%[cnt],%[f]\n\t"
"mfhi\t%[cnt]"
: [cnt] "+r" (end)
: [f] "r" (f)
: "hi", "lo");
}
end = cur + end;
} else {
end = cur + count;
}
while (end > cur)
cur = read_c0_cvmcount();
}
EXPORT_SYMBOL(octeon_io_clk_delay);
| linux-master | arch/mips/cavium-octeon/csrc-octeon.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu-defs.h>
#include <asm/octeon/cvmx.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#define TIMER_NUM 3
static bool reset_stats;
struct latency_info {
u64 io_interval;
u64 cpu_interval;
u64 timer_start1;
u64 timer_start2;
u64 max_latency;
u64 min_latency;
u64 latency_sum;
u64 average_latency;
u64 interrupt_cnt;
};
static struct latency_info li;
static struct dentry *dir;
static int oct_ilm_show(struct seq_file *m, void *v)
{
u64 cpuclk, avg, max, min;
struct latency_info curr_li = li;
cpuclk = octeon_get_clock_rate();
max = (curr_li.max_latency * 1000000000) / cpuclk;
min = (curr_li.min_latency * 1000000000) / cpuclk;
avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
curr_li.interrupt_cnt, avg, max, min);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(oct_ilm);
static int reset_statistics(void *data, u64 value)
{
reset_stats = true;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
static void init_debugfs(void)
{
dir = debugfs_create_dir("oct_ilm", 0);
debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_fops);
debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops);
}
static void init_latency_info(struct latency_info *li, int startup)
{
/* interval in milliseconds after which the interrupt will
* be triggered
*/
int interval = 1;
if (startup) {
/* Calculate how far the I/O clock and CPU clock advance
* in 'interval' milliseconds.
*/
li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
}
li->timer_start1 = 0;
li->timer_start2 = 0;
li->max_latency = 0;
li->min_latency = (u64)-1;
li->latency_sum = 0;
li->interrupt_cnt = 0;
}
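/* Arm the one-shot CIU timer and record the cvmcount at arming time. */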
static void start_timer(int timer, u64 interval)
{
union cvmx_ciu_timx timx;
unsigned long flags;
timx.u64 = 0;
timx.s.one_shot = 1;
timx.s.len = interval;
raw_local_irq_save(flags);
li.timer_start1 = read_c0_cvmcount();
cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
/* Read it back to force wait until register is written. */
timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
li.timer_start2 = read_c0_cvmcount();
raw_local_irq_restore(flags);
}
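/*
* Timer interrupt handler: measure the delay between the expected
* expiry (timer_start1 + cpu_interval) and now, update the running
* statistics and re-arm the timer.
*/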
static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
{
u64 last_latency;
u64 last_int_cnt;
if (reset_stats) {
init_latency_info(&li, 0);
reset_stats = false;
} else {
last_int_cnt = read_c0_cvmcount();
last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
li.interrupt_cnt++;
li.latency_sum += last_latency;
if (last_latency > li.max_latency)
li.max_latency = last_latency;
if (last_latency < li.min_latency)
li.min_latency = last_latency;
}
start_timer(TIMER_NUM, li.io_interval);
return IRQ_HANDLED;
}
static void disable_timer(int timer)
{
union cvmx_ciu_timx timx;
timx.s.one_shot = 0;
timx.s.len = 0;
cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
/* Read it back to force immediate write of timer register */
timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
}
static __init int oct_ilm_module_init(void)
{
int rc;
int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
init_debugfs();
rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
"oct_ilm", 0);
if (rc) {
WARN(1, "Could not acquire IRQ %d", irq);
goto err_irq;
}
init_latency_info(&li, 1);
start_timer(TIMER_NUM, li.io_interval);
return 0;
err_irq:
debugfs_remove_recursive(dir);
return rc;
}
static __exit void oct_ilm_module_exit(void)
{
disable_timer(TIMER_NUM);
debugfs_remove_recursive(dir);
free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0);
}
module_exit(oct_ilm_module_exit);
module_init(oct_ilm_module_init);
MODULE_AUTHOR("Venkat Subbiah, Cavium");
MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
MODULE_LICENSE("GPL");
| linux-master | arch/mips/cavium-octeon/oct_ilm.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2007 Cavium Networks
* Copyright (C) 2008, 2009 Wind River Systems
* written by Ralf Baechle <[email protected]>
*/
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/string.h> /* for memset */
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/fw/fw.h>
#include <asm/setup.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-rst-defs.h>
/*
* TRUE for devices having registers with little-endian byte
* order, FALSE for registers with native-endian byte order.
* PCI mandates little-endian, USB and SATA are configurable,
* but we chose little-endian for these.
*/
const bool octeon_should_swizzle_table[256] = {
[0x00] = true, /* bootbus/CF */
[0x1b] = true, /* PCI mmio window */
[0x1c] = true, /* PCI mmio window */
[0x1d] = true, /* PCI mmio window */
[0x1e] = true, /* PCI mmio window */
[0x68] = true, /* OCTEON III USB */
[0x69] = true, /* OCTEON III USB */
[0x6c] = true, /* OCTEON III SATA */
[0x6f] = true, /* OCTEON II USB */
};
EXPORT_SYMBOL(octeon_should_swizzle_table);
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
static unsigned long long max_memory = ULLONG_MAX;
static unsigned long long reserve_low_mem;
DEFINE_SEMAPHORE(octeon_bootbus_sem, 1);
EXPORT_SYMBOL(octeon_bootbus_sem);
static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
* Wait for relocation code is prepared and send
* secondary CPUs to spin until kernel is relocated.
*/
static void octeon_kexec_smp_down(void *ignored)
{
int cpu = smp_processor_id();
local_irq_disable();
set_cpu_online(cpu, false);
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
asm volatile (
" sync \n"
" synci ($0) \n");
kexec_reboot();
}
#endif
#define OCTEON_DDR0_BASE (0x0ULL)
#define OCTEON_DDR0_SIZE (0x010000000ULL)
#define OCTEON_DDR1_BASE (0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)
#define OCTEON_DDR2_BASE (0x020000000ULL)
#define OCTEON_DDR2_SIZE (0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE (16*1024*1024*1024ULL)
static struct kimage *kimage_ptr;
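/*
* Rebuild the bootmem free list for the kexec'd kernel, freeing all
* DDR regions except the reserved low memory.
*/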
static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
{
int64_t addr;
struct cvmx_bootmem_desc *bootmem_desc;
bootmem_desc = cvmx_bootmem_get_desc();
if (mem_size > OCTEON_MAX_PHY_MEM_SIZE) {
mem_size = OCTEON_MAX_PHY_MEM_SIZE;
pr_err("Error: requested memory too large,"
"truncating to maximum size\n");
}
bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
bootmem_desc->head_addr = 0;
if (mem_size <= OCTEON_DDR0_SIZE) {
__cvmx_bootmem_phy_free(addr,
mem_size - reserve_low_mem -
low_reserved_bytes, 0);
return;
}
__cvmx_bootmem_phy_free(addr,
OCTEON_DDR0_SIZE - reserve_low_mem -
low_reserved_bytes, 0);
mem_size -= OCTEON_DDR0_SIZE;
if (mem_size > OCTEON_DDR1_SIZE) {
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
__cvmx_bootmem_phy_free(OCTEON_DDR2_BASE,
mem_size - OCTEON_DDR1_SIZE, 0);
} else
__cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);
}
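/*
* Split a "kexec" command-line segment into the argv[] array of the
* boot descriptor and remember the image for later memory setup.
*/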
static int octeon_kexec_prepare(struct kimage *image)
{
int i;
char *bootloader = "kexec";
octeon_boot_desc_ptr->argc = 0;
for (i = 0; i < image->nr_segments; i++) {
if (!strncmp(bootloader, (char *)image->segment[i].buf,
strlen(bootloader))) {
/*
* convert command line string to array
* of parameters (as bootloader does).
*/
int argc = 0, offt;
char *str = (char *)image->segment[i].buf;
char *ptr = strchr(str, ' ');
while (ptr && (OCTEON_ARGV_MAX_ARGS > argc)) {
*ptr = '\0';
if (ptr[1] != ' ') {
offt = (int)(ptr - str + 1);
octeon_boot_desc_ptr->argv[argc] =
image->segment[i].mem + offt;
argc++;
}
ptr = strchr(ptr + 1, ' ');
}
octeon_boot_desc_ptr->argc = argc;
break;
}
}
/*
* Information about segments will be needed during pre-boot memory
* initialization.
*/
kimage_ptr = image;
return 0;
}
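/*
* Common kexec shutdown path: disable the watchdogs and rebuild (or
* selectively free) the bootmem descriptor before handing the boot
* descriptor to the new kernel.
*/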
static void octeon_generic_shutdown(void)
{
int i;
#ifdef CONFIG_SMP
int cpu;
#endif
struct cvmx_bootmem_desc *bootmem_desc;
void *named_block_array_ptr;
bootmem_desc = cvmx_bootmem_get_desc();
named_block_array_ptr =
cvmx_phys_to_ptr(bootmem_desc->named_block_array_addr);
#ifdef CONFIG_SMP
/* disable watchdogs */
for_each_online_cpu(cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
if (kimage_ptr != kexec_crash_image) {
memset(named_block_array_ptr,
0x0,
CVMX_BOOTMEM_NUM_NAMED_BLOCKS *
sizeof(struct cvmx_bootmem_named_block_desc));
/*
* Mark all memory (except low 0x100000 bytes) as free.
* It is the same thing that bootloader does.
*/
kexec_bootmem_init(octeon_bootinfo->dram_size*1024ULL*1024ULL,
0x100000);
/*
* Allocate all segments to avoid their corruption during boot.
*/
for (i = 0; i < kimage_ptr->nr_segments; i++)
cvmx_bootmem_alloc_address(
kimage_ptr->segment[i].memsz + 2*PAGE_SIZE,
kimage_ptr->segment[i].mem - PAGE_SIZE,
PAGE_SIZE);
} else {
/*
* Do not mark all memory as free. Free only named sections
* leaving the rest of memory unchanged.
*/
struct cvmx_bootmem_named_block_desc *ptr =
(struct cvmx_bootmem_named_block_desc *)
named_block_array_ptr;
for (i = 0; i < bootmem_desc->named_block_num_blocks; i++)
if (ptr[i].size)
cvmx_bootmem_free_named(ptr[i].name);
}
kexec_args[2] = 1UL; /* running on octeon_main_processor */
kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#ifdef CONFIG_SMP
secondary_kexec_args[2] = 0UL; /* running on secondary cpu */
secondary_kexec_args[3] = (unsigned long)octeon_boot_desc_ptr;
#endif
}
static void octeon_shutdown(void)
{
octeon_generic_shutdown();
#ifdef CONFIG_SMP
smp_call_function(octeon_kexec_smp_down, NULL, 0);
smp_wmb();
while (num_online_cpus() > 1) {
cpu_relax();
mdelay(1);
}
#endif
}
static void octeon_crash_shutdown(struct pt_regs *regs)
{
octeon_generic_shutdown();
default_machine_crash_shutdown(regs);
}
#ifdef CONFIG_SMP
void octeon_crash_smp_send_stop(void)
{
int cpu;
/* disable watchdogs */
for_each_online_cpu(cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
}
#endif
#endif /* CONFIG_KEXEC */
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#ifdef CONFIG_KEXEC
/* crashkernel cmdline parameter is parsed _after_ memory setup
* we also parse it here (workaround for EHB5200) */
static uint64_t crashk_size, crashk_base;
#endif
static int octeon_uart;
extern asmlinkage void handle_int(void);
/**
* octeon_is_simulation - Return non-zero if we are currently running
* in the Octeon simulator
*
* Return: non-0 if running in the Octeon simulator, 0 otherwise
*/
int octeon_is_simulation(void)
{
return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
* octeon_is_pci_host - Return true if Octeon is in PCI Host mode. This means
* Linux can control the PCI bus.
*
* Return: Non-zero if Octeon is in host mode.
*/
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
return 0;
#endif
}
/**
* octeon_get_clock_rate - Get the clock rate of Octeon
*
* Return: Clock rate in HZ
*/
uint64_t octeon_get_clock_rate(void)
{
struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
return sysinfo->cpu_clock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
static u64 octeon_io_clock_rate;
u64 octeon_get_io_clock_rate(void)
{
return octeon_io_clock_rate;
}
EXPORT_SYMBOL(octeon_get_io_clock_rate);
/**
* octeon_write_lcd - Write to the LCD display connected to the bootbus.
* @s: String to write
*
* This display exists on most Cavium evaluation boards. If it doesn't exist,
* then this function doesn't do anything.
*/
static void octeon_write_lcd(const char *s)
{
if (octeon_bootinfo->led_display_base_addr) {
void __iomem *lcd_address =
ioremap(octeon_bootinfo->led_display_base_addr,
8);
int i;
for (i = 0; i < 8; i++, s++) {
if (*s)
iowrite8(*s, lcd_address + i);
else
iowrite8(' ', lcd_address + i);
}
iounmap(lcd_address);
}
}
/**
* octeon_get_boot_uart - Return the console uart passed by the bootloader
*
* Return: uart number (0 or 1)
*/
static int octeon_get_boot_uart(void)
{
return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
}
/**
* octeon_get_boot_coremask - Get the coremask Linux was booted on.
*
* Return: Core mask
*/
int octeon_get_boot_coremask(void)
{
return octeon_boot_desc_ptr->core_mask;
}
/**
* octeon_check_cpu_bist - Check the hardware BIST results for a CPU
*/
void octeon_check_cpu_bist(void)
{
const int coreid = cvmx_get_core_num();
unsigned long long mask;
unsigned long long bist_val;
/* Check BIST results for COP0 registers */
mask = 0x1f00000000ull;
bist_val = read_octeon_c0_icacheerr();
if (bist_val & mask)
pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
coreid, bist_val);
bist_val = read_octeon_c0_dcacheerr();
if (bist_val & 1)
pr_err("Core%d L1 Dcache parity error: "
"CacheErr(dcache) = 0x%llx\n",
coreid, bist_val);
mask = 0xfc00000000000000ull;
bist_val = read_c0_cvmmemctl();
if (bist_val & mask)
pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
coreid, bist_val);
write_octeon_c0_dcacheerr(0);
}
/**
* octeon_restart - Reboot Octeon
*
* @command: Command to pass to the bootloader. Currently ignored.
*/
static void octeon_restart(char *command)
{
/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
mb();
while (1)
if (OCTEON_IS_OCTEON3())
cvmx_write_csr(CVMX_RST_SOFT_RST, 1);
else
cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
* octeon_kill_core - Permanently stop a core.
*
* @arg: Ignored.
*/
static void octeon_kill_core(void *arg)
{
if (octeon_is_simulation())
/* A break instruction causes the simulator to stop a core */
asm volatile ("break" ::: "memory");
local_irq_disable();
/* Disable watchdog on this core. */
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
/* Spin in a low power mode. */
while (true)
asm volatile ("wait" ::: "memory");
}
/**
* octeon_halt - Halt the system
*/
static void octeon_halt(void)
{
smp_call_function(octeon_kill_core, NULL, 0);
switch (octeon_bootinfo->board_type) {
case CVMX_BOARD_TYPE_NAO38:
/* Driving a 1 to GPIO 12 shuts off this board */
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
break;
default:
octeon_write_lcd("PowerOff");
break;
}
octeon_kill_core(NULL);
}
static char __read_mostly octeon_system_type[80];
static void __init init_octeon_system_type(void)
{
char const *board_type;
board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
if (board_type == NULL) {
struct device_node *root;
int ret;
root = of_find_node_by_path("/");
ret = of_property_read_string(root, "model", &board_type);
of_node_put(root);
if (ret)
board_type = "Unsupported Board";
}
snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
board_type, octeon_model_get_string(read_c0_prid()));
}
/**
* octeon_board_type_string - Return a string representing the system type
*
* Return: system type string
*/
const char *octeon_board_type_string(void)
{
return octeon_system_type;
}
const char *get_system_type(void)
__attribute__ ((alias("octeon_board_type_string")));
void octeon_user_io_init(void)
{
union octeon_cvmemctl cvmmemctl;
/* Get the current settings for CP0_CVMMEMCTL_REG */
cvmmemctl.u64 = read_c0_cvmmemctl();
/* R/W If set, marked write-buffer entries time out the same
* as other entries; if clear, marked write-buffer entries
* use the maximum timeout. */
cvmmemctl.s.dismarkwblongto = 1;
/* R/W If set, a merged store does not clear the write-buffer
* entry timeout state. */
cvmmemctl.s.dismrgclrwbto = 0;
/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
* word location for an IOBDMA. The other 8 bits come from the
* SCRADDR field of the IOBDMA. */
cvmmemctl.s.iobdmascrmsb = 0;
/* R/W If set, SYNCWS and SYNCS only order marked stores; if
* clear, SYNCWS and SYNCS only order unmarked
* stores. SYNCWSMARKED has no effect when DISSYNCWS is
* set. */
cvmmemctl.s.syncwsmarked = 0;
/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
cvmmemctl.s.dissyncws = 0;
/* R/W If set, no stall happens on write buffer full. */
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
cvmmemctl.s.diswbfst = 1;
else
cvmmemctl.s.diswbfst = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with <48>==0 */
cvmmemctl.s.xkmemenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==0 */
cvmmemctl.s.xkmemenau = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenau = 0;
/* R/W If set, all stores act as SYNCW (NOMERGE must be set
* when this is set) RW, reset to 0. */
cvmmemctl.s.allsyncw = 0;
/* R/W If set, no stores merge, and all stores reach the
* coherent bus in order. */
cvmmemctl.s.nomerge = 0;
/* R/W Selects the bit in the counter used for DID time-outs:
* 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
* between 1x and 2x this interval. For example, with
* DIDTTO=3, expiration interval is between 16K and 32K. */
cvmmemctl.s.didtto = 0;
/* R/W If set, the (mem) CSR clock never turns off. */
cvmmemctl.s.csrckalwys = 0;
/* R/W If set, mclk never turns off. */
cvmmemctl.s.mclkalwys = 0;
/* R/W Selects the bit in the counter used for write buffer
* flush time-outs (WBFLT+11) is the bit position in an
* internal counter used to determine expiration. The write
* buffer expires between 1x and 2x this interval. For
* example, with WBFLT = 0, a write buffer expires between 2K
* and 4K cycles after the write buffer entry is allocated. */
cvmmemctl.s.wbfltime = 0;
/* R/W If set, do not put Istream in the L2 cache. */
cvmmemctl.s.istrnol2 = 0;
/*
* R/W The write buffer threshold. As per erratum Core-14752
* for CN63XX, a sc/scd might fail if the write buffer is
* full. Lowering WBTHRESH greatly lowers the chances of the
* write buffer ever being full and triggering the erratum.
*/
if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
cvmmemctl.s.wbthresh = 4;
else
cvmmemctl.s.wbthresh = 10;
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
cvmmemctl.s.cvmsegenak = 1;
#else
cvmmemctl.s.cvmsegenak = 0;
#endif
/* R/W If set, CVMSEG is available for loads/stores in
* supervisor mode. */
cvmmemctl.s.cvmsegenas = 0;
/* R/W If set, CVMSEG is available for loads/stores in user
* mode. */
cvmmemctl.s.cvmsegenau = 0;
write_c0_cvmmemctl(cvmmemctl.u64);
/* Setup of CVMSEG is done in kernel-entry-init.h */
if (smp_processor_id() == 0)
pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
union cvmx_iob_fau_timeout fau_timeout;
/* Set a default for the hardware timeouts */
fau_timeout.u64 = 0;
fau_timeout.s.tout_val = 0xfff;
/* Disable tagwait FAU timeout */
fau_timeout.s.tout_enb = 0;
cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
}
if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
!OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
OCTEON_IS_MODEL(OCTEON_CN70XX)) {
union cvmx_pow_nw_tim nm_tim;
nm_tim.u64 = 0;
/* 4096 cycles */
nm_tim.s.nw_tim = 3;
cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
}
write_octeon_c0_icacheerr(0);
write_c0_derraddr1(0);
}
/**
* prom_init - Early entry point for arch setup
*/
void __init prom_init(void)
{
struct cvmx_sysinfo *sysinfo;
const char *arg;
char *p;
int i;
u64 t;
int argc;
/*
* The bootloader passes a pointer to the boot descriptor in
* $a3, this is available as fw_arg3.
*/
octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
octeon_bootinfo =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
sysinfo = cvmx_sysinfo_get();
memset(sysinfo, 0, sizeof(*sysinfo));
sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
if ((octeon_bootinfo->major_version > 1) ||
(octeon_bootinfo->major_version == 1 &&
octeon_bootinfo->minor_version >= 4))
cvmx_coremask_copy(&sysinfo->core_mask,
&octeon_bootinfo->ext_core_mask);
else
cvmx_coremask_set64(&sysinfo->core_mask,
octeon_bootinfo->core_mask);
/* Some broken u-boot versions pass garbage in the upper bits, clear them out */
if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
for (i = 512; i < 1024; i++)
cvmx_coremask_clear_core(&sysinfo->core_mask, i);
sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
sysinfo->board_type = octeon_bootinfo->board_type;
sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
sizeof(sysinfo->mac_addr_base));
sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
memcpy(sysinfo->board_serial_number,
octeon_bootinfo->board_serial_number,
sizeof(sysinfo->board_serial_number));
sysinfo->compact_flash_common_base_addr =
octeon_bootinfo->compact_flash_common_base_addr;
sysinfo->compact_flash_attribute_base_addr =
octeon_bootinfo->compact_flash_attribute_base_addr;
sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
if (OCTEON_IS_OCTEON2()) {
/* I/O clock runs at a different rate than the CPU. */
union cvmx_mio_rst_boot rst_boot;
rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
} else if (OCTEON_IS_OCTEON3()) {
/* I/O clock runs at a different rate than the CPU. */
union cvmx_rst_boot rst_boot;
rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
} else {
octeon_io_clock_rate = sysinfo->cpu_clock_hz;
}
t = read_c0_cvmctl();
if ((t & (1ull << 27)) == 0) {
/*
* Setup the multiplier save/restore code if
* CvmCtl[NOMUL] is clear.
*/
void *save;
void *save_end;
void *restore;
void *restore_end;
int save_len;
int restore_len;
int save_max = (char *)octeon_mult_save_end -
(char *)octeon_mult_save;
int restore_max = (char *)octeon_mult_restore_end -
(char *)octeon_mult_restore;
if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
save = octeon_mult_save3;
save_end = octeon_mult_save3_end;
restore = octeon_mult_restore3;
restore_end = octeon_mult_restore3_end;
} else {
save = octeon_mult_save2;
save_end = octeon_mult_save2_end;
restore = octeon_mult_restore2;
restore_end = octeon_mult_restore2_end;
}
save_len = (char *)save_end - (char *)save;
restore_len = (char *)restore_end - (char *)restore;
if (!WARN_ON(save_len > save_max ||
restore_len > restore_max)) {
memcpy(octeon_mult_save, save, save_len);
memcpy(octeon_mult_restore, restore, restore_len);
}
}
/*
* Only enable the LED controller if we're running on a CN38XX, CN58XX,
* or CN56XX. The CN30XX and CN31XX don't have an LED controller.
*/
if (!octeon_is_simulation() &&
octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
cvmx_write_csr(CVMX_LED_EN, 0);
cvmx_write_csr(CVMX_LED_PRT, 0);
cvmx_write_csr(CVMX_LED_DBG, 0);
cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
cvmx_write_csr(CVMX_LED_EN, 1);
}
/*
* We need to temporarily allocate all memory in the reserve32
* region. This makes sure the kernel doesn't allocate this
* memory when it is getting memory from the
* bootloader. Later, after the memory allocations are
* complete, the reserve32 will be freed.
*
* Allocate memory for RESERVED32 aligned on 2MB boundary. This
* is in case we later use hugetlb entries with it.
*/
if (CONFIG_CAVIUM_RESERVE32) {
int64_t addr =
cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
0, 0, 2 << 20,
"CAVIUM_RESERVE32", 0);
if (addr < 0)
pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
else
octeon_reserve32_memory = addr;
}
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
pr_info("Skipping L2 locking due to reduced L2 cache size\n");
} else {
uint32_t __maybe_unused ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
/* TLB refill */
cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
/* General exception */
cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
/* Interrupt handler */
cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
}
#endif
octeon_check_cpu_bist();
octeon_uart = octeon_get_boot_uart();
#ifdef CONFIG_SMP
octeon_write_lcd("LinuxSMP");
#else
octeon_write_lcd("Linux");
#endif
octeon_setup_delays();
/*
* BIST should always be enabled when doing a soft reset. L2
* Cache locking for instance is not cleared unless BIST is
* enabled. Unfortunately, due to chip erratum G-200 on the
* CN38XX and CN31XX, BIST must be disabled on these parts.
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
OCTEON_IS_MODEL(OCTEON_CN31XX))
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
else
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
/* Default to 64MB in the simulator to speed things up */
if (octeon_is_simulation())
max_memory = 64ull << 20;
arg = strstr(arcs_cmdline, "mem=");
if (arg) {
max_memory = memparse(arg + 4, &p);
if (max_memory == 0)
max_memory = 32ull << 30;
if (*p == '@')
reserve_low_mem = memparse(p + 1, &p);
}
arcs_cmdline[0] = 0;
argc = octeon_boot_desc_ptr->argc;
for (i = 0; i < argc; i++) {
const char *arg =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if ((strncmp(arg, "MEM=", 4) == 0) ||
(strncmp(arg, "mem=", 4) == 0)) {
max_memory = memparse(arg + 4, &p);
if (max_memory == 0)
max_memory = 32ull << 30;
if (*p == '@')
reserve_low_mem = memparse(p + 1, &p);
#ifdef CONFIG_KEXEC
} else if (strncmp(arg, "crashkernel=", 12) == 0) {
crashk_size = memparse(arg+12, &p);
if (*p == '@')
crashk_base = memparse(p+1, &p);
strcat(arcs_cmdline, " ");
strcat(arcs_cmdline, arg);
/*
* To do: switch parsing to new style, something like:
* parse_crashkernel(arg, sysinfo->system_dram_size,
* &crashk_size, &crashk_base);
*/
#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
sizeof(arcs_cmdline) - 1) {
strcat(arcs_cmdline, " ");
strcat(arcs_cmdline, arg);
}
}
if (strstr(arcs_cmdline, "console=") == NULL) {
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
}
mips_hpt_frequency = octeon_get_clock_rate();
octeon_init_cvmcount();
_machine_restart = octeon_restart;
_machine_halt = octeon_halt;
#ifdef CONFIG_KEXEC
_machine_kexec_shutdown = octeon_shutdown;
_machine_crash_shutdown = octeon_crash_shutdown;
_machine_kexec_prepare = octeon_kexec_prepare;
#ifdef CONFIG_SMP
_crash_smp_send_stop = octeon_crash_smp_send_stop;
#endif
#endif
octeon_user_io_init();
octeon_setup_smp();
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
#ifndef CONFIG_CRASH_DUMP
static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
{
if (addr > *mem && addr < *mem + *size) {
u64 inc = addr - *mem;
memblock_add(*mem, inc);
*mem += inc;
*size -= inc;
}
if (addr == *mem && *size > PAGE_SIZE) {
*mem += PAGE_SIZE;
*size -= PAGE_SIZE;
}
}
#endif /* CONFIG_CRASH_DUMP */
void __init fw_init_cmdline(void)
{
int i;
octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
for (i = 0; i < octeon_boot_desc_ptr->argc; i++) {
const char *arg =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if (strlen(arcs_cmdline) + strlen(arg) + 1 <
sizeof(arcs_cmdline) - 1) {
strcat(arcs_cmdline, " ");
strcat(arcs_cmdline, arg);
}
}
}
void __init *plat_get_fdt(void)
{
octeon_bootinfo =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
return phys_to_virt(octeon_bootinfo->fdt_addr);
}
void __init plat_mem_setup(void)
{
uint64_t mem_alloc_size;
uint64_t total;
uint64_t crashk_end;
#ifndef CONFIG_CRASH_DUMP
int64_t memory;
#endif
total = 0;
crashk_end = 0;
/*
* The MIPS memory init uses the first memory location for
* some memory vectors. When SPARSEMEM is in use, it doesn't
* verify that the size is big enough for the final
* vectors. Making the smallest chunk 4MB seems to be enough
* to consistently work.
*/
mem_alloc_size = 4 << 20;
if (mem_alloc_size > max_memory)
mem_alloc_size = max_memory;
/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
memblock_add(reserve_low_mem, max_memory);
total += max_memory;
#else
#ifdef CONFIG_KEXEC
if (crashk_size > 0) {
memblock_add(crashk_base, crashk_size);
crashk_end = crashk_base + crashk_size;
}
#endif
/*
* When allocating memory, we want incrementing addresses,
* which is handled by memblock
*/
cvmx_bootmem_lock();
while (total < max_memory) {
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
__pa_symbol(&_end), -1,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (memory >= 0) {
u64 size = mem_alloc_size;
#ifdef CONFIG_KEXEC
uint64_t end;
#endif
/*
* exclude a page at the beginning and end of
* the 256MB PCIe 'hole' so the kernel will not
* try to allocate multi-page buffers that
* span the discontinuity.
*/
memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
&memory, &size);
memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
CVMX_PCIE_BAR1_PHYS_SIZE,
&memory, &size);
#ifdef CONFIG_KEXEC
end = memory + mem_alloc_size;
/*
* This function automatically merges address regions
* next to each other if they are received in
* incrementing order
*/
if (memory < crashk_base && end > crashk_end) {
/* region is fully in */
memblock_add(memory, crashk_base - memory);
total += crashk_base - memory;
memblock_add(crashk_end, end - crashk_end);
total += end - crashk_end;
continue;
}
if (memory >= crashk_base && end <= crashk_end)
/*
* Entire memory region is within the crash
* kernel's reserved memory, ignore it.
*/
continue;
if (memory > crashk_base && memory < crashk_end &&
end > crashk_end) {
/*
* The beginning of this block overlaps the end of the
* crash region; skip past the overlap.
*/
mem_alloc_size -= crashk_end - memory;
memory = crashk_end;
} else if (memory < crashk_base && end > crashk_base &&
end < crashk_end)
/*
* The end of the allocation overlaps the crashkernel
* region; chop off the end.
*/
mem_alloc_size -= end - crashk_base;
#endif
memblock_add(memory, mem_alloc_size);
total += mem_alloc_size;
/* Restore mem_alloc_size for the next allocation */
mem_alloc_size = 4 << 20;
} else {
break;
}
}
cvmx_bootmem_unlock();
#endif /* CONFIG_CRASH_DUMP */
/*
* Now that we've allocated the kernel memory it is safe to
* free the reserved region. We free it here so that builtin
* drivers can use the memory.
*/
if (octeon_reserve32_memory)
cvmx_bootmem_free_named("CAVIUM_RESERVE32");
if (total == 0)
panic("Unable to allocate memory from "
"cvmx_bootmem_phy_alloc");
}
/*
* Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
void prom_putchar(char c)
{
uint64_t lsrval;
/* Spin until there is room */
do {
lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
} while ((lsrval & 0x20) == 0);
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
}
EXPORT_SYMBOL(prom_putchar);
void __init prom_free_prom_memory(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Check for presence of Core-14449 fix. */
u32 insn;
u32 *foo;
foo = &insn;
asm volatile("# before" : : : "memory");
prefetch(foo);
asm volatile(
".set push\n\t"
".set noreorder\n\t"
"bal 1f\n\t"
"nop\n"
"1:\tlw %0,-12($31)\n\t"
".set pop\n\t"
: "=r" (insn) : : "$31", "memory");
if ((insn >> 26) != 0x33)
panic("No PREF instruction at Core-14449 probe point.");
if (((insn >> 16) & 0x1f) != 28)
panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
"Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
insn);
}
}
void __init octeon_fill_mac_addresses(void);
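/*
* Select the device tree to use: an appended DTB, one passed in by the
* bootloader, or a built-in DTB that is pruned to match the board.
*/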
void __init device_tree_init(void)
{
const void *fdt;
bool do_prune;
bool fill_mac;
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
if (!fdt_check_header(&__appended_dtb)) {
fdt = &__appended_dtb;
do_prune = false;
fill_mac = true;
pr_info("Using appended Device Tree.\n");
} else
#endif
if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
do_prune = false;
fill_mac = false;
pr_info("Using passed Device Tree.\n");
} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
fdt = &__dtb_octeon_68xx_begin;
do_prune = true;
fill_mac = true;
} else {
fdt = &__dtb_octeon_3xxx_begin;
do_prune = true;
fill_mac = true;
}
initial_boot_params = (void *)fdt;
if (do_prune) {
octeon_prune_device_tree();
pr_info("Using internal Device Tree.\n");
}
if (fill_mac)
octeon_fill_mac_addresses();
unflatten_and_copy_device_tree();
init_octeon_system_type();
}
static int __initdata disable_octeon_edac_p;
static int __init disable_octeon_edac(char *str)
{
disable_octeon_edac_p = 1;
return 0;
}
early_param("disable_octeon_edac", disable_octeon_edac);
static char *edac_device_names[] = {
"octeon_l2c_edac",
"octeon_pc_edac",
};
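/*
* Register the OCTEON EDAC platform devices (L2C, PC and one LMC device
* per memory controller) unless disabled on the command line.
*/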
static int __init edac_devinit(void)
{
struct platform_device *dev;
int i, err = 0;
int num_lmc;
char *name;
if (disable_octeon_edac_p)
return 0;
for (i = 0; i < ARRAY_SIZE(edac_device_names); i++) {
name = edac_device_names[i];
dev = platform_device_register_simple(name, -1, NULL, 0);
if (IS_ERR(dev)) {
pr_err("Registration of %s failed!\n", name);
err = PTR_ERR(dev);
}
}
num_lmc = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 4 :
(OCTEON_IS_MODEL(OCTEON_CN56XX) ? 2 : 1);
for (i = 0; i < num_lmc; i++) {
dev = platform_device_register_simple("octeon_lmc_edac",
i, NULL, 0);
if (IS_ERR(dev)) {
pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
err = PTR_ERR(dev);
}
}
return err;
}
device_initcall(edac_devinit);
static void __initdata *octeon_dummy_iospace;
static int __init octeon_no_pci_init(void)
{
/*
* Initially assume there is no PCI. The PCI/PCIe platform code will
* later re-initialize these to correct values if they are present.
*/
octeon_dummy_iospace = vzalloc(IO_SPACE_LIMIT);
set_io_port_base((unsigned long)octeon_dummy_iospace);
ioport_resource.start = RESOURCE_SIZE_MAX;
ioport_resource.end = 0;
return 0;
}
core_initcall(octeon_no_pci_init);
static int __init octeon_no_pci_release(void)
{
/*
* Release the dummy IO space allocation if a real IO space has replaced it.
*/
if ((unsigned long)octeon_dummy_iospace != mips_io_port_base)
vfree(octeon_dummy_iospace);
return 0;
}
late_initcall(octeon_no_pci_release);
| linux-master | arch/mips/cavium-octeon/setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2016 Cavium, Inc.
*/
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
#define CIU3_MBOX_PER_CORE 10
/*
* The 8 most significant bits of the intsn identify the interrupt major block.
* Each major block might use its own interrupt domain. Thus 256 domains are
* needed.
*/
#define MAX_CIU3_DOMAINS 256
typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
u64 ciu3_addr;
int node;
struct irq_domain *domain[MAX_CIU3_DOMAINS];
octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
};
/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];
struct octeon_irq_ciu_domain_data {
int num_sum; /* number of sum registers (2 or 3). */
};
/* Register offsets from ciu3_addr */
#define CIU3_CONST 0x220
#define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000)
static __read_mostly int octeon_irq_ciu_to_irq[8][64];
struct octeon_ciu_chip_data {
union {
struct { /* only used for ciu3 */
u64 ciu3_addr;
unsigned int intsn;
};
struct { /* only used for ciu/ciu2 */
u8 line;
u8 bit;
};
};
int gpio_line;
int current_cpu; /* Next CPU expected to take this irq */
int ciu_node; /* NUMA node number of the CIU */
};
struct octeon_core_chip_data {
struct mutex core_irq_mutex;
bool current_en;
bool desired_en;
u8 bit;
};
#define MIPS_CORE_IRQ_LINES 8
static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
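/*
* Allocate per-irq chip data, install the chip and flow handler, and
* record the CIU line/bit so the dispatch code can translate a summary
* bit back to a Linux irq number.
*/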
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
struct irq_chip *chip,
irq_flow_handler_t handler)
{
struct octeon_ciu_chip_data *cd;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd)
return -ENOMEM;
irq_set_chip_and_handler(irq, chip, handler);
cd->line = line;
cd->bit = bit;
cd->gpio_line = gpio_line;
irq_set_chip_data(irq, cd);
octeon_irq_ciu_to_irq[line][bit] = irq;
return 0;
}
static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
irq_set_chip_data(irq, NULL);
kfree(cd);
}
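/*
* Reserve a fixed Linux irq number and associate it with the given CIU
* line/bit, for the hard-coded OCTEON_IRQ_* numbers.
*/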
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
int irq, int line, int bit)
{
struct device_node *of_node;
int ret;
of_node = irq_domain_get_of_node(domain);
if (!of_node)
return -EINVAL;
ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
if (ret < 0)
return ret;
return irq_domain_associate(domain, irq, line << 6 | bit);
}
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
return cpu_logical_map(cpu);
#else
return cvmx_get_core_num();
#endif
}
static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
return cpu_number_map(coreid);
#else
return smp_processor_id();
#endif
}
static void octeon_irq_core_ack(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
unsigned int bit = cd->bit;
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
clear_c0_status(0x100 << bit);
/* The two user interrupts must be cleared manually. */
if (bit < 2)
clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
set_c0_status(0x100 << cd->bit);
}
static void octeon_irq_core_set_enable_local(void *arg)
{
struct irq_data *data = arg;
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
unsigned int mask = 0x100 << cd->bit;
/*
* Interrupts are already disabled, so these are atomic.
*/
if (cd->desired_en)
set_c0_status(mask);
else
clear_c0_status(mask);
}
static void octeon_irq_core_disable(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
cd->desired_en = false;
}
static void octeon_irq_core_enable(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
cd->desired_en = true;
}
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
mutex_lock(&cd->core_irq_mutex);
}
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
if (cd->desired_en != cd->current_en) {
on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
cd->current_en = cd->desired_en;
}
mutex_unlock(&cd->core_irq_mutex);
}
static struct irq_chip octeon_irq_chip_core = {
.name = "Core",
.irq_enable = octeon_irq_core_enable,
.irq_disable = octeon_irq_core_disable,
.irq_ack = octeon_irq_core_ack,
.irq_eoi = octeon_irq_core_eoi,
.irq_bus_lock = octeon_irq_core_bus_lock,
.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
.irq_cpu_online = octeon_irq_core_eoi,
.irq_cpu_offline = octeon_irq_core_ack,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static void __init octeon_irq_init_core(void)
{
int i;
int irq;
struct octeon_core_chip_data *cd;
for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
cd = &octeon_irq_core_chip_data[i];
cd->current_en = false;
cd->desired_en = false;
cd->bit = i;
mutex_init(&cd->core_irq_mutex);
irq = OCTEON_IRQ_SW0 + i;
irq_set_chip_data(irq, cd);
irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
handle_percpu_irq);
}
}
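/*
* Pick the CPU that should take this irq next: round-robin through the
* online CPUs in the affinity mask, falling back to the current CPU.
*/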
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
int cpu;
const struct cpumask *mask = irq_data_get_affinity_mask(data);
int weight = cpumask_weight(mask);
struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
if (weight > 1) {
cpu = cd->current_cpu;
for (;;) {
cpu = cpumask_next(cpu, mask);
if (cpu >= nr_cpu_ids) {
cpu = -1;
continue;
} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
break;
}
}
} else if (weight == 1) {
cpu = cpumask_first(mask);
} else {
cpu = smp_processor_id();
}
cd->current_cpu = cpu;
return cpu;
#else
return smp_processor_id();
#endif
}
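/*
* Enable the irq on the chosen CPU: update that CPU's enable-bit mirror
* under its spinlock and write the mirror back to the CIU enable register.
*/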
static void octeon_irq_ciu_enable(struct irq_data *data)
{
int cpu = next_cpu_for_irq(data);
int coreid = octeon_coreid_for_cpu(cpu);
unsigned long *pen;
unsigned long flags;
struct octeon_ciu_chip_data *cd;
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
cd = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(lock, flags);
if (cd->line == 0) {
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
__set_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
} else {
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
__set_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
}
raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
unsigned long *pen;
unsigned long flags;
struct octeon_ciu_chip_data *cd;
raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
cd = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(lock, flags);
if (cd->line == 0) {
pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
__set_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
} else {
pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
__set_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
}
raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
unsigned long *pen;
unsigned long flags;
struct octeon_ciu_chip_data *cd;
raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
cd = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(lock, flags);
if (cd->line == 0) {
pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
__clear_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* disabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
} else {
pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
__clear_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* disabling the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
}
raw_spin_unlock_irqrestore(lock, flags);
}
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
unsigned long flags;
unsigned long *pen;
int cpu;
struct octeon_ciu_chip_data *cd;
raw_spinlock_t *lock;
cd = irq_data_get_irq_chip_data(data);
for_each_online_cpu(cpu) {
int coreid = octeon_coreid_for_cpu(cpu);
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
if (cd->line == 0)
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
else
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
raw_spin_lock_irqsave(lock, flags);
__clear_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* disabling the irq.
*/
wmb();
if (cd->line == 0)
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
else
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(lock, flags);
}
}
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
unsigned long flags;
unsigned long *pen;
int cpu;
struct octeon_ciu_chip_data *cd;
raw_spinlock_t *lock;
cd = irq_data_get_irq_chip_data(data);
for_each_online_cpu(cpu) {
int coreid = octeon_coreid_for_cpu(cpu);
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
if (cd->line == 0)
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
else
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
raw_spin_lock_irqsave(lock, flags);
__set_bit(cd->bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
if (cd->line == 0)
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
else
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(lock, flags);
}
}
/*
* Enable the irq on the next core in the affinity set for chips that
* have the EN*_W1{S,C} registers.
*/
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
u64 mask;
int cpu = next_cpu_for_irq(data);
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
/*
* Called under the desc lock, so these should never get out
* of sync.
*/
if (cd->line == 0) {
int index = octeon_coreid_for_cpu(cpu) * 2;
set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
} else {
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
}
/*
* Enable the irq in the sum2 registers.
*/
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
u64 mask;
int cpu = next_cpu_for_irq(data);
int index = octeon_coreid_for_cpu(cpu);
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}
/*
* Disable the irq in the sum2 registers.
*/
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
u64 mask;
int cpu = next_cpu_for_irq(data);
int index = octeon_coreid_for_cpu(cpu);
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}
static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
u64 mask;
int cpu = next_cpu_for_irq(data);
int index = octeon_coreid_for_cpu(cpu);
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}
static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
int cpu;
struct octeon_ciu_chip_data *cd;
u64 mask;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
for_each_online_cpu(cpu) {
int coreid = octeon_coreid_for_cpu(cpu);
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
}
}
/*
* Enable the irq on the current CPU for chips that
* have the EN*_W1{S,C} registers.
*/
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) {
int index = cvmx_get_core_num() * 2;
set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
} else {
int index = cvmx_get_core_num() * 2 + 1;
set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
}
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) {
int index = cvmx_get_core_num() * 2;
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
} else {
int index = cvmx_get_core_num() * 2 + 1;
clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
}
/*
* Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
*/
static void octeon_irq_ciu_ack(struct irq_data *data)
{
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) {
int index = cvmx_get_core_num() * 2;
cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
} else {
cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
}
}
/*
* Disable the irq on all cores for chips that have the EN*_W1{S,C}
* registers.
*/
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
int cpu;
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) {
for_each_online_cpu(cpu) {
int index = octeon_coreid_for_cpu(cpu) * 2;
clear_bit(cd->bit,
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
} else {
for_each_online_cpu(cpu) {
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
clear_bit(cd->bit,
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
}
}
/*
* Enable the irq on all cores for chips that have the EN*_W1{S,C}
* registers.
*/
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
int cpu;
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
if (cd->line == 0) {
for_each_online_cpu(cpu) {
int index = octeon_coreid_for_cpu(cpu) * 2;
set_bit(cd->bit,
&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
} else {
for_each_online_cpu(cpu) {
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
set_bit(cd->bit,
&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
}
}
static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
irqd_set_trigger_type(data, t);
if (t & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(data, handle_edge_irq);
else
irq_set_handler_locked(data, handle_level_irq);
return IRQ_SET_MASK_OK;
}
static void octeon_irq_gpio_setup(struct irq_data *data)
{
union cvmx_gpio_bit_cfgx cfg;
struct octeon_ciu_chip_data *cd;
u32 t = irqd_get_trigger_type(data);
cd = irq_data_get_irq_chip_data(data);
cfg.u64 = 0;
cfg.s.int_en = 1;
cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
/* 140 ns glitch filter */
cfg.s.fil_cnt = 7;
cfg.s.fil_sel = 3;
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
octeon_irq_gpio_setup(data);
octeon_irq_ciu_enable_v2(data);
}
static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
octeon_irq_gpio_setup(data);
octeon_irq_ciu_enable(data);
}
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
irqd_set_trigger_type(data, t);
octeon_irq_gpio_setup(data);
if (t & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(data, handle_edge_irq);
else
irq_set_handler_locked(data, handle_level_irq);
return IRQ_SET_MASK_OK;
}
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
octeon_irq_ciu_disable_all_v2(data);
}
static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
octeon_irq_ciu_disable_all(data);
}
static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
struct octeon_ciu_chip_data *cd;
u64 mask;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->gpio_line);
cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
#ifdef CONFIG_SMP
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
int cpu = smp_processor_id();
cpumask_t new_affinity;
const struct cpumask *mask = irq_data_get_affinity_mask(data);
if (!cpumask_test_cpu(cpu, mask))
return;
if (cpumask_weight(mask) > 1) {
/*
* It has multiple-CPU affinity; just remove this CPU
* from the affinity set.
*/
cpumask_copy(&new_affinity, mask);
cpumask_clear_cpu(cpu, &new_affinity);
} else {
/* Otherwise, put it on lowest numbered online CPU. */
cpumask_clear(&new_affinity);
cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
}
irq_set_affinity_locked(data, &new_affinity, false);
}
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force)
{
int cpu;
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
unsigned long flags;
struct octeon_ciu_chip_data *cd;
unsigned long *pen;
raw_spinlock_t *lock;
cd = irq_data_get_irq_chip_data(data);
/*
* For non-v2 CIU, we will allow only single CPU affinity.
* This removes the need to do locking in the .ack/.eoi
* functions.
*/
if (cpumask_weight(dest) != 1)
return -EINVAL;
if (!enable_one)
return 0;
for_each_online_cpu(cpu) {
int coreid = octeon_coreid_for_cpu(cpu);
lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
raw_spin_lock_irqsave(lock, flags);
if (cd->line == 0)
pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
else
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
if (cpumask_test_cpu(cpu, dest) && enable_one) {
enable_one = false;
__set_bit(cd->bit, pen);
} else {
__clear_bit(cd->bit, pen);
}
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
* enabling the irq.
*/
wmb();
if (cd->line == 0)
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
else
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(lock, flags);
}
return 0;
}
/*
* Set affinity for the irq for chips that have the EN*_W1{S,C}
* registers.
*/
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
const struct cpumask *dest,
bool force)
{
int cpu;
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
u64 mask;
struct octeon_ciu_chip_data *cd;
if (!enable_one)
return 0;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << cd->bit;
if (cd->line == 0) {
for_each_online_cpu(cpu) {
unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
int index = octeon_coreid_for_cpu(cpu) * 2;
if (cpumask_test_cpu(cpu, dest) && enable_one) {
enable_one = false;
set_bit(cd->bit, pen);
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
} else {
clear_bit(cd->bit, pen);
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
}
} else {
for_each_online_cpu(cpu) {
unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
if (cpumask_test_cpu(cpu, dest) && enable_one) {
enable_one = false;
set_bit(cd->bit, pen);
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
} else {
clear_bit(cd->bit, pen);
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
}
}
return 0;
}
static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
const struct cpumask *dest,
bool force)
{
int cpu;
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
u64 mask;
struct octeon_ciu_chip_data *cd;
if (!enable_one)
return 0;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << cd->bit;
for_each_online_cpu(cpu) {
int index = octeon_coreid_for_cpu(cpu);
if (cpumask_test_cpu(cpu, dest) && enable_one) {
enable_one = false;
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
} else {
cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}
}
return 0;
}
#endif
static unsigned int edge_startup(struct irq_data *data)
{
/* ack any pending edge-irq at startup, so there is
* an _edge_ to fire on when the event reappears.
*/
data->chip->irq_ack(data);
data->chip->irq_enable(data);
return 0;
}
/*
* Newer octeon chips have support for lockless CIU operation.
*/
static struct irq_chip octeon_irq_chip_ciu_v2 = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable_v2,
.irq_disable = octeon_irq_ciu_disable_all_v2,
.irq_mask = octeon_irq_ciu_disable_local_v2,
.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable_v2,
.irq_disable = octeon_irq_ciu_disable_all_v2,
.irq_ack = octeon_irq_ciu_ack,
.irq_mask = octeon_irq_ciu_disable_local_v2,
.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/*
* Newer octeon chips have support for lockless CIU operation.
*/
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable_sum2,
.irq_disable = octeon_irq_ciu_disable_all_sum2,
.irq_mask = octeon_irq_ciu_disable_local_sum2,
.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable_sum2,
.irq_disable = octeon_irq_ciu_disable_all_sum2,
.irq_ack = octeon_irq_ciu_ack_sum2,
.irq_mask = octeon_irq_ciu_disable_local_sum2,
.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable,
.irq_disable = octeon_irq_ciu_disable_all,
.irq_mask = octeon_irq_ciu_disable_local,
.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu_edge = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable,
.irq_disable = octeon_irq_ciu_disable_all,
.irq_ack = octeon_irq_ciu_ack,
.irq_mask = octeon_irq_ciu_disable_local,
.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
.name = "CIU-M",
.irq_enable = octeon_irq_ciu_enable_all_v2,
.irq_disable = octeon_irq_ciu_disable_all_v2,
.irq_ack = octeon_irq_ciu_disable_local_v2,
.irq_eoi = octeon_irq_ciu_enable_local_v2,
.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu_mbox = {
.name = "CIU-M",
.irq_enable = octeon_irq_ciu_enable_all,
.irq_disable = octeon_irq_ciu_disable_all,
.irq_ack = octeon_irq_ciu_disable_local,
.irq_eoi = octeon_irq_ciu_enable_local,
.irq_cpu_online = octeon_irq_ciu_enable_local,
.irq_cpu_offline = octeon_irq_ciu_disable_local,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
.name = "CIU-GPIO",
.irq_enable = octeon_irq_ciu_enable_gpio_v2,
.irq_disable = octeon_irq_ciu_disable_gpio_v2,
.irq_ack = octeon_irq_ciu_gpio_ack,
.irq_mask = octeon_irq_ciu_disable_local_v2,
.irq_unmask = octeon_irq_ciu_enable_v2,
.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip octeon_irq_chip_ciu_gpio = {
.name = "CIU-GPIO",
.irq_enable = octeon_irq_ciu_enable_gpio,
.irq_disable = octeon_irq_ciu_disable_gpio,
.irq_mask = octeon_irq_ciu_disable_local,
.irq_unmask = octeon_irq_ciu_enable,
.irq_ack = octeon_irq_ciu_gpio_ack,
.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED,
};
/*
* Watchdog interrupts are special. They are associated with a single
* core, so we hardwire the affinity to that core.
*/
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
unsigned long flags;
unsigned long *pen;
int coreid = data->irq - OCTEON_IRQ_WDOG0; /* Bits 0-63 of EN1 */
int cpu = octeon_cpu_for_coreid(coreid);
raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
raw_spin_lock_irqsave(lock, flags);
pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
__set_bit(coreid, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
* the irq.
*/
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
raw_spin_unlock_irqrestore(lock, flags);
}
/*
* Watchdog interrupts are special. They are associated with a single
* core, so we hardwire the affinity to that core.
*/
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
int coreid = data->irq - OCTEON_IRQ_WDOG0;
int cpu = octeon_cpu_for_coreid(coreid);
set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
.name = "CIU-W",
.irq_enable = octeon_irq_ciu1_wd_enable_v2,
.irq_disable = octeon_irq_ciu_disable_all_v2,
.irq_mask = octeon_irq_ciu_disable_local_v2,
.irq_unmask = octeon_irq_ciu_enable_local_v2,
};
static struct irq_chip octeon_irq_chip_ciu_wd = {
.name = "CIU-W",
.irq_enable = octeon_irq_ciu_wd_enable,
.irq_disable = octeon_irq_ciu_disable_all,
.irq_mask = octeon_irq_ciu_disable_local,
.irq_unmask = octeon_irq_ciu_enable_local,
};
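/* Return true if the given CIU0/CIU1 summary bit is an edge-triggered source. */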
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
bool edge = false;
if (line == 0)
switch (bit) {
case 48 ... 49: /* GMX DRP */
case 50: /* IPD_DRP */
case 52 ... 55: /* Timers */
case 58: /* MPI */
edge = true;
break;
default:
break;
}
else /* line == 1 */
switch (bit) {
case 47: /* PTP */
edge = true;
break;
default:
break;
}
return edge;
}
struct octeon_irq_gpio_domain_data {
unsigned int base_hwirq;
};
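/*
* Translate a GPIO interrupt specifier (pin, trigger flags) from the
* device tree into a hwirq number and Linux trigger type.
*/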
static int octeon_irq_gpio_xlat(struct irq_domain *d,
struct device_node *node,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
unsigned int type;
unsigned int pin;
unsigned int trigger;
if (irq_domain_get_of_node(d) != node)
return -EINVAL;
if (intsize < 2)
return -EINVAL;
pin = intspec[0];
if (pin >= 16)
return -EINVAL;
trigger = intspec[1];
switch (trigger) {
case 1:
type = IRQ_TYPE_EDGE_RISING;
break;
case 2:
type = IRQ_TYPE_EDGE_FALLING;
break;
case 4:
type = IRQ_TYPE_LEVEL_HIGH;
break;
case 8:
type = IRQ_TYPE_LEVEL_LOW;
break;
default:
pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
node,
trigger);
type = IRQ_TYPE_LEVEL_LOW;
break;
}
*out_type = type;
*out_hwirq = pin;
return 0;
}
static int octeon_irq_ciu_xlat(struct irq_domain *d,
struct device_node *node,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
unsigned int ciu, bit;
struct octeon_irq_ciu_domain_data *dd = d->host_data;
ciu = intspec[0];
bit = intspec[1];
if (ciu >= dd->num_sum || bit > 63)
return -EINVAL;
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
return 0;
}
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
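/*
* Map a CIU hwirq (line << 6 | bit) to a virq, selecting the edge or
* level chip variant and, for line 2, the sum2 chips.
*/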
static int octeon_irq_ciu_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
int rv;
unsigned int line = hw >> 6;
unsigned int bit = hw & 63;
struct octeon_irq_ciu_domain_data *dd = d->host_data;
if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (line == 2) {
if (octeon_irq_ciu_is_edge(line, bit))
rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
&octeon_irq_chip_ciu_sum2_edge,
handle_edge_irq);
else
rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
&octeon_irq_chip_ciu_sum2,
handle_level_irq);
} else {
if (octeon_irq_ciu_is_edge(line, bit))
rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
octeon_irq_ciu_chip_edge,
handle_edge_irq);
else
rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
octeon_irq_ciu_chip,
handle_level_irq);
}
return rv;
}
static int octeon_irq_gpio_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
unsigned int line, bit;
int r;
line = (hw + gpiod->base_hwirq) >> 6;
bit = (hw + gpiod->base_hwirq) & 63;
if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
/*
* Default to handle_level_irq. If the DT contains a different
* trigger type, it will call the irq_set_type callback and
* the handler gets updated.
*/
r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
octeon_irq_gpio_chip, handle_level_irq);
return r;
}
static const struct irq_domain_ops octeon_irq_domain_ciu_ops = {
.map = octeon_irq_ciu_map,
.unmap = octeon_irq_free_cd,
.xlate = octeon_irq_ciu_xlat,
};
static const struct irq_domain_ops octeon_irq_domain_gpio_ops = {
.map = octeon_irq_gpio_map,
.unmap = octeon_irq_free_cd,
.xlate = octeon_irq_gpio_xlat,
};
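/*
* IP2 dispatch for CIU: read SUM0 for this core, mask it with the
* per-CPU enable mirror and handle the highest pending bit.
*/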
static void octeon_irq_ip2_ciu(void)
{
const unsigned long core_id = cvmx_get_core_num();
u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
if (likely(ciu_sum)) {
int bit = fls64(ciu_sum) - 1;
int irq = octeon_irq_ciu_to_irq[0][bit];
if (likely(irq))
do_IRQ(irq);
else
spurious_interrupt();
} else {
spurious_interrupt();
}
}
static void octeon_irq_ip3_ciu(void)
{
u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
if (likely(ciu_sum)) {
int bit = fls64(ciu_sum) - 1;
int irq = octeon_irq_ciu_to_irq[1][bit];
if (likely(irq))
do_IRQ(irq);
else
spurious_interrupt();
} else {
spurious_interrupt();
}
}
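/* IP4 dispatch: handle the highest bit pending in SUM2 and enabled in EN2. */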
static void octeon_irq_ip4_ciu(void)
{
int coreid = cvmx_get_core_num();
u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
ciu_sum &= ciu_en;
if (likely(ciu_sum)) {
int bit = fls64(ciu_sum) - 1;
int irq = octeon_irq_ciu_to_irq[2][bit];
if (likely(irq))
do_IRQ(irq);
else
spurious_interrupt();
} else {
spurious_interrupt();
}
}
static bool octeon_irq_use_ip4;
static void octeon_irq_local_enable_ip4(void *arg)
{
set_c0_status(STATUSF_IP4);
}
static void octeon_irq_ip4_mask(void)
{
clear_c0_status(STATUSF_IP4);
spurious_interrupt();
}
static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);
void (*octeon_irq_setup_secondary)(void);
void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
octeon_irq_ip4 = h;
octeon_irq_use_ip4 = true;
on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}
static void octeon_irq_percpu_enable(void)
{
irq_cpu_online();
}
static void octeon_irq_init_ciu_percpu(void)
{
int coreid = cvmx_get_core_num();
__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
wmb();
raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
/*
* Disable All CIU Interrupts. The ones we need will be
* enabled later. Read the SUM register so we know the write
* completed.
*/
cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
static void octeon_irq_init_ciu2_percpu(void)
{
u64 regx, ipx;
int coreid = cvmx_get_core_num();
u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
/*
* Disable All CIU2 Interrupts. The ones we need will be
* enabled later. Read the SUM register so we know the write
* completed.
*
* There are 9 registers and 3 IPX levels with strides 0x1000
* and 0x200 respectively. Use loops to clear them.
*/
for (regx = 0; regx <= 0x8000; regx += 0x1000) {
for (ipx = 0; ipx <= 0x400; ipx += 0x200)
cvmx_write_csr(base + regx + ipx, 0);
}
cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}
static void octeon_irq_setup_secondary_ciu(void)
{
octeon_irq_init_ciu_percpu();
octeon_irq_percpu_enable();
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
set_c0_status(STATUSF_IP4);
else
clear_c0_status(STATUSF_IP4);
}
static void octeon_irq_setup_secondary_ciu2(void)
{
octeon_irq_init_ciu2_percpu();
octeon_irq_percpu_enable();
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
set_c0_status(STATUSF_IP4);
else
clear_c0_status(STATUSF_IP4);
}
static int __init octeon_irq_init_ciu(
struct device_node *ciu_node, struct device_node *parent)
{
int i, r;
struct irq_chip *chip;
struct irq_chip *chip_edge;
struct irq_chip *chip_mbox;
struct irq_chip *chip_wd;
struct irq_domain *ciu_domain = NULL;
struct octeon_irq_ciu_domain_data *dd;
dd = kzalloc(sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
octeon_irq_init_ciu_percpu();
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
octeon_irq_ip2 = octeon_irq_ip2_ciu;
octeon_irq_ip3 = octeon_irq_ip3_ciu;
if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
octeon_irq_ip4 = octeon_irq_ip4_ciu;
dd->num_sum = 3;
octeon_irq_use_ip4 = true;
} else {
octeon_irq_ip4 = octeon_irq_ip4_mask;
dd->num_sum = 2;
octeon_irq_use_ip4 = false;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
chip = &octeon_irq_chip_ciu_v2;
chip_edge = &octeon_irq_chip_ciu_v2_edge;
chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
chip_wd = &octeon_irq_chip_ciu_wd_v2;
octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
} else {
chip = &octeon_irq_chip_ciu;
chip_edge = &octeon_irq_chip_ciu_edge;
chip_mbox = &octeon_irq_chip_ciu_mbox;
chip_wd = &octeon_irq_chip_ciu_wd;
octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
}
octeon_irq_ciu_chip = chip;
octeon_irq_ciu_chip_edge = chip_edge;
/* MIPS internal */
octeon_irq_init_core();
ciu_domain = irq_domain_add_tree(
ciu_node, &octeon_irq_domain_ciu_ops, dd);
irq_set_default_host(ciu_domain);
/* CIU_0 */
for (i = 0; i < 16; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
if (r)
goto err;
}
r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1);
if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0");
goto err;
}
r = octeon_irq_set_ciu_mapping(
OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
if (r)
goto err;
r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1);
if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1");
goto err;
}
r = octeon_irq_set_ciu_mapping(
OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
if (r)
goto err;
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
if (r)
goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
if (r)
goto err;
}
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
if (r)
goto err;
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
if (r)
goto err;
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
if (r)
goto err;
}
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
if (r)
goto err;
r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1);
if (r < 0) {
pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx");
goto err;
}
/* CIU_1 */
for (i = 0; i < 16; i++) {
r = octeon_irq_set_ciu_mapping(
i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
handle_level_irq);
if (r)
goto err;
}
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
set_c0_status(STATUSF_IP4);
else
clear_c0_status(STATUSF_IP4);
return 0;
err:
return r;
}
static int __init octeon_irq_init_gpio(
struct device_node *gpio_node, struct device_node *parent)
{
struct octeon_irq_gpio_domain_data *gpiod;
u32 interrupt_cells;
unsigned int base_hwirq;
int r;
r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
if (r)
return r;
if (interrupt_cells == 1) {
u32 v;
r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
if (r) {
pr_warn("No \"interrupts\" property.\n");
return r;
}
base_hwirq = v;
} else if (interrupt_cells == 2) {
u32 v0, v1;
r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
if (r) {
pr_warn("No \"interrupts\" property.\n");
return r;
}
r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
if (r) {
pr_warn("No \"interrupts\" property.\n");
return r;
}
base_hwirq = (v0 << 6) | v1;
} else {
pr_warn("Bad \"#interrupt-cells\" property: %u\n",
interrupt_cells);
return -EINVAL;
}
gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
if (gpiod) {
/* gpio domain host_data is the base hwirq number. */
gpiod->base_hwirq = base_hwirq;
irq_domain_add_linear(
gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
} else {
pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
return -ENOMEM;
}
/*
* Clear the OF_POPULATED flag that was set by of_irq_init()
* so that all GPIO devices will be probed.
*/
of_node_clear_flag(gpio_node, OF_POPULATED);
return 0;
}
/*
* Watchdog interrupts are special. They are associated with a single
* core, so we hardwire the affinity to that core.
*/
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = data->irq - OCTEON_IRQ_WDOG0;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
(0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int cpu = next_cpu_for_irq(data);
int coreid = octeon_coreid_for_cpu(cpu);
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
(0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = cvmx_get_core_num();
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
(0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = cvmx_get_core_num();
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
(0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_ack(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = cvmx_get_core_num();
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
int cpu;
u64 mask;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << (cd->bit);
for_each_online_cpu(cpu) {
u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
cvmx_write_csr(en_addr, mask);
}
}
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
int cpu;
u64 mask;
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
for_each_online_cpu(cpu) {
u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
octeon_coreid_for_cpu(cpu));
cvmx_write_csr(en_addr, mask);
}
}
static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
int cpu;
u64 mask;
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
for_each_online_cpu(cpu) {
u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
octeon_coreid_for_cpu(cpu));
cvmx_write_csr(en_addr, mask);
}
}
static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = cvmx_get_core_num();
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
u64 mask;
u64 en_addr;
int coreid = cvmx_get_core_num();
mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
cvmx_write_csr(en_addr, mask);
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force)
{
int cpu;
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
u64 mask;
struct octeon_ciu_chip_data *cd;
if (!enable_one)
return 0;
cd = irq_data_get_irq_chip_data(data);
mask = 1ull << cd->bit;
for_each_online_cpu(cpu) {
u64 en_addr;
if (cpumask_test_cpu(cpu, dest) && enable_one) {
enable_one = false;
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
octeon_coreid_for_cpu(cpu)) +
(0x1000ull * cd->line);
} else {
en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
octeon_coreid_for_cpu(cpu)) +
(0x1000ull * cd->line);
}
cvmx_write_csr(en_addr, mask);
}
return 0;
}
#endif
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
octeon_irq_gpio_setup(data);
octeon_irq_ciu2_enable(data);
}
static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
octeon_irq_ciu2_disable_all(data);
}
static struct irq_chip octeon_irq_chip_ciu2 = {
.name = "CIU2-E",
.irq_enable = octeon_irq_ciu2_enable,
.irq_disable = octeon_irq_ciu2_disable_all,
.irq_mask = octeon_irq_ciu2_disable_local,
.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu2_edge = {
.name = "CIU2-E",
.irq_enable = octeon_irq_ciu2_enable,
.irq_disable = octeon_irq_ciu2_disable_all,
.irq_ack = octeon_irq_ciu2_ack,
.irq_mask = octeon_irq_ciu2_disable_local,
.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
static struct irq_chip octeon_irq_chip_ciu2_mbox = {
.name = "CIU2-M",
.irq_enable = octeon_irq_ciu2_mbox_enable_all,
.irq_disable = octeon_irq_ciu2_mbox_disable_all,
.irq_ack = octeon_irq_ciu2_mbox_disable_local,
.irq_eoi = octeon_irq_ciu2_mbox_enable_local,
.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static struct irq_chip octeon_irq_chip_ciu2_wd = {
.name = "CIU2-W",
.irq_enable = octeon_irq_ciu2_wd_enable,
.irq_disable = octeon_irq_ciu2_disable_all,
.irq_mask = octeon_irq_ciu2_disable_local,
.irq_unmask = octeon_irq_ciu2_enable_local,
};
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
.name = "CIU-GPIO",
.irq_enable = octeon_irq_ciu2_enable_gpio,
.irq_disable = octeon_irq_ciu2_disable_gpio,
.irq_ack = octeon_irq_ciu_gpio_ack,
.irq_mask = octeon_irq_ciu2_disable_local,
.irq_unmask = octeon_irq_ciu2_enable,
.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu2_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
struct device_node *node,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
unsigned int ciu, bit;
ciu = intspec[0];
bit = intspec[1];
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
return 0;
}
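/* Return true if the given CIU2 line/bit is an edge-triggered source. */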
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
bool edge = false;
if (line == 3) /* MIO */
switch (bit) {
case 2: /* IPD_DRP */
case 8 ... 11: /* Timers */
case 48: /* PTP */
edge = true;
break;
default:
break;
}
else if (line == 6) /* PKT */
switch (bit) {
case 52 ... 53: /* ILK_DRP */
case 8 ... 12: /* GMX_DRP */
edge = true;
break;
default:
break;
}
return edge;
}
static int octeon_irq_ciu2_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
unsigned int line = hw >> 6;
unsigned int bit = hw & 63;
/*
* Don't map irq if it is reserved for GPIO.
* (Line 7 holds the GPIO lines.)
*/
if (line == 7)
return 0;
if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (octeon_irq_ciu2_is_edge(line, bit))
octeon_irq_set_ciu_mapping(virq, line, bit, 0,
&octeon_irq_chip_ciu2_edge,
handle_edge_irq);
else
octeon_irq_set_ciu_mapping(virq, line, bit, 0,
&octeon_irq_chip_ciu2,
handle_level_irq);
return 0;
}
static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
.map = octeon_irq_ciu2_map,
.unmap = octeon_irq_free_cd,
.xlate = octeon_irq_ciu2_xlat,
};
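/*
* IP2 dispatch for CIU2: find the highest pending summary line, then
* read that line's source register to locate the pending bit.
*/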
static void octeon_irq_ciu2(void)
{
int line;
int bit;
int irq;
u64 src_reg, src, sum;
const unsigned long core_id = cvmx_get_core_num();
sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
if (unlikely(!sum))
goto spurious;
line = fls64(sum) - 1;
src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
src = cvmx_read_csr(src_reg);
if (unlikely(!src))
goto spurious;
bit = fls64(src) - 1;
irq = octeon_irq_ciu_to_irq[line][bit];
if (unlikely(!irq))
goto spurious;
do_IRQ(irq);
goto out;
spurious:
spurious_interrupt();
out:
/*
* CN68XX pass 1.x has an erratum whereby accessing the ACK registers
* can stop interrupts from propagating.
*/
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
else
cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
return;
}
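/* IP3 dispatch for CIU2: the top four summary bits are the mailbox irqs. */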
static void octeon_irq_ciu2_mbox(void)
{
int line;
const unsigned long core_id = cvmx_get_core_num();
u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
if (unlikely(!sum))
goto spurious;
line = fls64(sum) - 1;
do_IRQ(OCTEON_IRQ_MBOX0 + line);
goto out;
spurious:
spurious_interrupt();
out:
/*
* CN68XX pass 1.x has an erratum whereby accessing the ACK registers
* can stop interrupts from propagating.
*/
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
else
cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
return;
}
static int __init octeon_irq_init_ciu2(
struct device_node *ciu_node, struct device_node *parent)
{
unsigned int i, r;
struct irq_domain *ciu_domain = NULL;
octeon_irq_init_ciu2_percpu();
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
octeon_irq_ip2 = octeon_irq_ciu2;
octeon_irq_ip3 = octeon_irq_ciu2_mbox;
octeon_irq_ip4 = octeon_irq_ip4_mask;
/* MIPS internal */
octeon_irq_init_core();
ciu_domain = irq_domain_add_tree(
ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
irq_set_default_host(ciu_domain);
/* CIU2 */
for (i = 0; i < 64; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
if (r)
goto err;
}
for (i = 0; i < 32; i++) {
r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
&octeon_irq_chip_ciu2_wd, handle_level_irq);
if (r)
goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
if (r)
goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
if (r)
goto err;
}
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
if (r)
goto err;
}
irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
clear_c0_status(STATUSF_IP4);
return 0;
err:
return r;
}
struct octeon_irq_cib_host_data {
raw_spinlock_t lock;
u64 raw_reg;
u64 en_reg;
int max_bits;
};
struct octeon_irq_cib_chip_data {
struct octeon_irq_cib_host_data *host_data;
int bit;
};
static void octeon_irq_cib_enable(struct irq_data *data)
{
unsigned long flags;
u64 en;
struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
struct octeon_irq_cib_host_data *host_data = cd->host_data;
raw_spin_lock_irqsave(&host_data->lock, flags);
en = cvmx_read_csr(host_data->en_reg);
en |= 1ull << cd->bit;
cvmx_write_csr(host_data->en_reg, en);
raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static void octeon_irq_cib_disable(struct irq_data *data)
{
unsigned long flags;
u64 en;
struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
struct octeon_irq_cib_host_data *host_data = cd->host_data;
raw_spin_lock_irqsave(&host_data->lock, flags);
en = cvmx_read_csr(host_data->en_reg);
en &= ~(1ull << cd->bit);
cvmx_write_csr(host_data->en_reg, en);
raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
irqd_set_trigger_type(data, t);
return IRQ_SET_MASK_OK;
}
static struct irq_chip octeon_irq_chip_cib = {
.name = "CIB",
.irq_enable = octeon_irq_cib_enable,
.irq_disable = octeon_irq_cib_disable,
.irq_mask = octeon_irq_cib_disable,
.irq_unmask = octeon_irq_cib_enable,
.irq_set_type = octeon_irq_cib_set_type,
};
static int octeon_irq_cib_xlat(struct irq_domain *d,
struct device_node *node,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
unsigned int type = 0;
if (intsize == 2)
type = intspec[1];
switch (type) {
case 0: /* unofficial value, but we might as well let it work. */
case 4: /* official value for level triggering. */
*out_type = IRQ_TYPE_LEVEL_HIGH;
break;
case 1: /* official value for edge triggering. */
*out_type = IRQ_TYPE_EDGE_RISING;
break;
default: /* Nothing else is acceptable. */
return -EINVAL;
}
*out_hwirq = intspec[0];
return 0;
}
static int octeon_irq_cib_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
struct octeon_irq_cib_host_data *host_data = d->host_data;
struct octeon_irq_cib_chip_data *cd;
if (hw >= host_data->max_bits) {
pr_err("ERROR: %s mapping %u is too big!\n",
irq_domain_get_of_node(d)->name, (unsigned)hw);
return -EINVAL;
}
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
if (!cd)
return -ENOMEM;
cd->host_data = host_data;
cd->bit = hw;
irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
handle_simple_irq);
irq_set_chip_data(virq, cd);
return 0;
}
static const struct irq_domain_ops octeon_irq_domain_cib_ops = {
.map = octeon_irq_cib_map,
.unmap = octeon_irq_free_cd,
.xlate = octeon_irq_cib_xlat,
};
/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
u64 en;
u64 raw;
u64 bits;
int i;
int irq;
struct irq_domain *cib_domain = data;
struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
en = cvmx_read_csr(host_data->en_reg);
raw = cvmx_read_csr(host_data->raw_reg);
bits = en & raw;
for (i = 0; i < host_data->max_bits; i++) {
if ((bits & 1ull << i) == 0)
continue;
irq = irq_find_mapping(cib_domain, i);
if (!irq) {
unsigned long flags;
pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
i, host_data->raw_reg);
raw_spin_lock_irqsave(&host_data->lock, flags);
en = cvmx_read_csr(host_data->en_reg);
en &= ~(1ull << i);
cvmx_write_csr(host_data->en_reg, en);
cvmx_write_csr(host_data->raw_reg, 1ull << i);
raw_spin_unlock_irqrestore(&host_data->lock, flags);
} else {
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *irq_data = irq_desc_get_irq_data(desc);
/* If edge, acknowledge the bit we will be sending. */
if (irqd_get_trigger_type(irq_data) &
IRQ_TYPE_EDGE_BOTH)
cvmx_write_csr(host_data->raw_reg, 1ull << i);
generic_handle_irq_desc(desc);
}
}
return IRQ_HANDLED;
}
static int __init octeon_irq_init_cib(struct device_node *ciu_node,
struct device_node *parent)
{
struct resource res;
u32 val;
struct octeon_irq_cib_host_data *host_data;
int parent_irq;
int r;
struct irq_domain *cib_domain;
parent_irq = irq_of_parse_and_map(ciu_node, 0);
if (!parent_irq) {
pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
ciu_node);
return -EINVAL;
}
host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
if (!host_data)
return -ENOMEM;
raw_spin_lock_init(&host_data->lock);
r = of_address_to_resource(ciu_node, 0, &res);
if (r) {
pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
return r;
}
host_data->raw_reg = (u64)phys_to_virt(res.start);
r = of_address_to_resource(ciu_node, 1, &res);
if (r) {
pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
return r;
}
host_data->en_reg = (u64)phys_to_virt(res.start);
r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
if (r) {
pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
ciu_node);
return r;
}
host_data->max_bits = val;
cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
&octeon_irq_domain_cib_ops,
host_data);
if (!cib_domain) {
pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
return -ENOMEM;
}
cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
r = request_irq(parent_irq, octeon_irq_cib_handler,
IRQF_NO_THREAD, "cib", cib_domain);
if (r) {
pr_err("request_irq cib failed %d\n", r);
return r;
}
pr_info("CIB interrupt controller probed: %llx %d\n",
host_data->raw_reg, host_data->max_bits);
return 0;
}
int octeon_irq_ciu3_xlat(struct irq_domain *d,
struct device_node *node,
const u32 *intspec,
unsigned int intsize,
unsigned long *out_hwirq,
unsigned int *out_type)
{
struct octeon_ciu3_info *ciu3_info = d->host_data;
unsigned int hwirq, type, intsn_major;
union cvmx_ciu3_iscx_ctl isc;
if (intsize < 2)
return -EINVAL;
hwirq = intspec[0];
type = intspec[1];
if (hwirq >= (1 << 20))
return -EINVAL;
intsn_major = hwirq >> 12;
switch (intsn_major) {
case 0x04: /* Software handled separately. */
return -EINVAL;
default:
break;
}
isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
if (!isc.s.imp)
return -EINVAL;
switch (type) {
case 4: /* official value for level triggering. */
*out_type = IRQ_TYPE_LEVEL_HIGH;
break;
case 0: /* unofficial value, but we might as well let it work. */
case 1: /* official value for edge triggering. */
*out_type = IRQ_TYPE_EDGE_RISING;
break;
default: /* Nothing else is acceptable. */
return -EINVAL;
}
*out_hwirq = hwirq;
return 0;
}
void octeon_irq_ciu3_enable(struct irq_data *data)
{
int cpu;
union cvmx_ciu3_iscx_ctl isc_ctl;
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_ctl_addr;
struct octeon_ciu_chip_data *cd;
cpu = next_cpu_for_irq(data);
cd = irq_data_get_irq_chip_data(data);
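	/*
	 * First clear the enable bit via the write-1-to-clear register,
	 * then program ISC_CTL with the enable bit set and the IDT entry
	 * of the CPU that should receive this interrupt.
	 */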
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
isc_ctl.u64 = 0;
isc_ctl.s.en = 1;
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
cvmx_read_csr(isc_ctl_addr);
}
void octeon_irq_ciu3_disable(struct irq_data *data)
{
u64 isc_ctl_addr;
union cvmx_ciu3_iscx_w1c isc_w1c;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
cvmx_write_csr(isc_ctl_addr, 0);
cvmx_read_csr(isc_ctl_addr);
}
void octeon_irq_ciu3_ack(struct irq_data *data)
{
u64 isc_w1c_addr;
union cvmx_ciu3_iscx_w1c isc_w1c;
struct octeon_ciu_chip_data *cd;
u32 trigger_type = irqd_get_trigger_type(data);
/*
* We use a single irq_chip, so we have to do nothing to ack a
* level interrupt.
*/
if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
return;
cd = irq_data_get_irq_chip_data(data);
isc_w1c.u64 = 0;
isc_w1c.s.raw = 1;
isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
}
void octeon_irq_ciu3_mask(struct irq_data *data)
{
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_w1c_addr;
struct octeon_ciu_chip_data *cd;
cd = irq_data_get_irq_chip_data(data);
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
}
void octeon_irq_ciu3_mask_ack(struct irq_data *data)
{
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_w1c_addr;
struct octeon_ciu_chip_data *cd;
u32 trigger_type = irqd_get_trigger_type(data);
cd = irq_data_get_irq_chip_data(data);
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
/*
* We use a single irq_chip, so only ack an edge (!level)
* interrupt.
*/
if (trigger_type & IRQ_TYPE_EDGE_BOTH)
isc_w1c.s.raw = 1;
isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force)
{
union cvmx_ciu3_iscx_ctl isc_ctl;
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_ctl_addr;
int cpu;
bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
return -EINVAL;
if (!enable_one)
return IRQ_SET_MASK_OK;
cd = irq_data_get_irq_chip_data(data);
cpu = cpumask_first(dest);
if (cpu >= nr_cpu_ids)
cpu = smp_processor_id();
cd->current_cpu = cpu;
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
isc_ctl.u64 = 0;
isc_ctl.s.en = 1;
isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
cvmx_read_csr(isc_ctl_addr);
return IRQ_SET_MASK_OK;
}
#endif
static struct irq_chip octeon_irq_chip_ciu3 = {
.name = "CIU3",
.irq_startup = edge_startup,
.irq_enable = octeon_irq_ciu3_enable,
.irq_disable = octeon_irq_ciu3_disable,
.irq_ack = octeon_irq_ciu3_ack,
.irq_mask = octeon_irq_ciu3_mask,
.irq_mask_ack = octeon_irq_ciu3_mask_ack,
.irq_unmask = octeon_irq_ciu3_enable,
.irq_set_type = octeon_irq_ciu_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu3_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw, struct irq_chip *chip)
{
struct octeon_ciu3_info *ciu3_info = d->host_data;
struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
ciu3_info->node);
if (!cd)
return -ENOMEM;
cd->intsn = hw;
cd->current_cpu = -1;
cd->ciu3_addr = ciu3_info->ciu3_addr;
cd->ciu_node = ciu3_info->node;
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
irq_set_chip_data(virq, cd);
return 0;
}
static int octeon_irq_ciu3_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
}
static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
.map = octeon_irq_ciu3_map,
.unmap = octeon_irq_free_cd,
.xlate = octeon_irq_ciu3_xlat,
};
static void octeon_irq_ciu3_ip2(void)
{
union cvmx_ciu3_destx_pp_int dest_pp_int;
struct octeon_ciu3_info *ciu3_info;
u64 ciu3_addr;
ciu3_info = __this_cpu_read(octeon_ciu3_info);
ciu3_addr = ciu3_info->ciu3_addr;
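	/*
	 * The CIU3 provides three interrupt destinations per core; entry
	 * 3 * core is used for IP2 here, and 1 + 3 * core for the IP3
	 * mailbox handler below.
	 */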
dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
if (likely(dest_pp_int.s.intr)) {
irq_hw_number_t intsn = dest_pp_int.s.intsn;
irq_hw_number_t hw;
struct irq_domain *domain;
/* Get the domain to use from the major block */
int block = intsn >> 12;
int ret;
domain = ciu3_info->domain[block];
if (ciu3_info->intsn2hw[block])
hw = ciu3_info->intsn2hw[block](domain, intsn);
else
hw = intsn;
irq_enter();
ret = generic_handle_domain_irq(domain, hw);
irq_exit();
if (ret < 0) {
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
spurious_interrupt();
}
} else {
spurious_interrupt();
}
}
/*
* 10 mbox per core starting from zero.
* Base mbox is core * 10
*/
static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
{
/* SW (mbox) are 0x04 in bits 12..19 */
return 0x04000 + CIU3_MBOX_PER_CORE * core;
}
static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
{
return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
}
static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
{
int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
}
static void octeon_irq_ciu3_mbox(void)
{
union cvmx_ciu3_destx_pp_int dest_pp_int;
struct octeon_ciu3_info *ciu3_info;
u64 ciu3_addr;
int core = cvmx_get_local_core_num();
ciu3_info = __this_cpu_read(octeon_ciu3_info);
ciu3_addr = ciu3_info->ciu3_addr;
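	/* The IP3 (mailbox) destination for this core is entry 1 + 3 * core. */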
dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
if (likely(dest_pp_int.s.intr)) {
irq_hw_number_t intsn = dest_pp_int.s.intsn;
int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
do_IRQ(mbox + OCTEON_IRQ_MBOX0);
} else {
union cvmx_ciu3_iscx_w1c isc_w1c;
u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
isc_w1c.u64 = 0;
isc_w1c.s.en = 1;
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
spurious_interrupt();
}
} else {
spurious_interrupt();
}
}
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
{
struct octeon_ciu3_info *ciu3_info;
unsigned int intsn;
union cvmx_ciu3_iscx_w1s isc_w1s;
u64 isc_w1s_addr;
if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
return;
intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
isc_w1s.u64 = 0;
isc_w1s.s.raw = 1;
cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
cvmx_read_csr(isc_w1s_addr);
}
static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
{
struct octeon_ciu3_info *ciu3_info;
unsigned int intsn;
u64 isc_ctl_addr, isc_w1c_addr;
union cvmx_ciu3_iscx_ctl isc_ctl;
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
ciu3_info = per_cpu(octeon_ciu3_info, cpu);
isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
isc_ctl.u64 = 0;
isc_ctl.s.en = 1;
cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
cvmx_write_csr(isc_ctl_addr, 0);
if (en) {
unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
isc_ctl.u64 = 0;
isc_ctl.s.en = 1;
isc_ctl.s.idt = idt;
cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
}
cvmx_read_csr(isc_ctl_addr);
}
static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
{
int cpu;
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
for_each_online_cpu(cpu)
octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
}
static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
{
int cpu;
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
for_each_online_cpu(cpu)
octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
}
static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
{
struct octeon_ciu3_info *ciu3_info;
unsigned int intsn;
u64 isc_w1c_addr;
union cvmx_ciu3_iscx_w1c isc_w1c;
unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
isc_w1c.u64 = 0;
isc_w1c.s.raw = 1;
ciu3_info = __this_cpu_read(octeon_ciu3_info);
isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
cvmx_read_csr(isc_w1c_addr);
}
static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
{
octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
}
static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
{
octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
}
static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
{
u64 b = ciu3_info->ciu3_addr;
int idt_ip2, idt_ip3, idt_ip4;
int unused_idt2;
int core = cvmx_get_local_core_num();
int i;
__this_cpu_write(octeon_ciu3_info, ciu3_info);
/*
* 4 idt per core starting from 1 because zero is reserved.
* Base idt per core is 4 * core + 1
*/
idt_ip2 = core * 4 + 1;
idt_ip3 = core * 4 + 2;
idt_ip4 = core * 4 + 3;
unused_idt2 = core * 4 + 4;
__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
/* ip2 interrupts for this CPU */
cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
/* ip3 interrupts for this CPU */
cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
/* ip4 interrupts for this CPU */
cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
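		/*
		 * Start with this core's mailbox sources disabled and in
		 * a known state.
		 */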
cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
}
return 0;
}
static void octeon_irq_setup_secondary_ciu3(void)
{
struct octeon_ciu3_info *ciu3_info;
ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
octeon_irq_ciu3_alloc_resources(ciu3_info);
irq_cpu_online();
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
set_c0_status(STATUSF_IP4);
else
clear_c0_status(STATUSF_IP4);
}
static struct irq_chip octeon_irq_chip_ciu3_mbox = {
.name = "CIU3-M",
.irq_enable = octeon_irq_ciu3_mbox_enable,
.irq_disable = octeon_irq_ciu3_mbox_disable,
.irq_ack = octeon_irq_ciu3_mbox_ack,
.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
struct device_node *parent)
{
int i, ret;
int node;
struct irq_domain *domain;
struct octeon_ciu3_info *ciu3_info;
struct resource res;
u64 base_addr;
union cvmx_ciu3_const consts;
node = 0; /* of_node_to_nid(ciu_node); */
ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
if (!ciu3_info)
return -ENOMEM;
ret = of_address_to_resource(ciu_node, 0, &res);
if (WARN_ON(ret))
return ret;
ciu3_info->ciu3_addr = base_addr = (u64)phys_to_virt(res.start);
ciu3_info->node = node;
consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
octeon_irq_ip2 = octeon_irq_ciu3_ip2;
octeon_irq_ip3 = octeon_irq_ciu3_mbox;
octeon_irq_ip4 = octeon_irq_ip4_mask;
if (node == cvmx_get_node_num()) {
/* Mips internal */
octeon_irq_init_core();
/* Only do per CPU things if it is the CIU of the boot node. */
i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
WARN_ON(i < 0);
for (i = 0; i < 8; i++)
irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
&octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
}
/*
* Initialize all domains to use the default domain. Specific major
* blocks will overwrite the default domain as needed.
*/
domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
ciu3_info);
for (i = 0; i < MAX_CIU3_DOMAINS; i++)
ciu3_info->domain[i] = domain;
octeon_ciu3_info_per_node[node] = ciu3_info;
if (node == cvmx_get_node_num()) {
/* Only do per CPU things if it is the CIU of the boot node. */
octeon_irq_ciu3_alloc_resources(ciu3_info);
if (node == 0)
irq_set_default_host(domain);
octeon_irq_use_ip4 = false;
/* Enable the CIU lines */
set_c0_status(STATUSF_IP2 | STATUSF_IP3);
clear_c0_status(STATUSF_IP4);
}
return 0;
}
static struct of_device_id ciu_types[] __initdata = {
{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
{}
};
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
/* Set the default affinity to the boot cpu. */
cpumask_clear(irq_default_affinity);
cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
of_irq_init(ciu_types);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned long cop0_cause;
unsigned long cop0_status;
while (1) {
cop0_cause = read_c0_cause();
cop0_status = read_c0_status();
cop0_cause &= cop0_status;
cop0_cause &= ST0_IM;
if (cop0_cause & STATUSF_IP2)
octeon_irq_ip2();
else if (cop0_cause & STATUSF_IP3)
octeon_irq_ip3();
else if (cop0_cause & STATUSF_IP4)
octeon_irq_ip4();
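		/*
		 * Fall back to the plain CP0 interrupt lines; the IP bits
		 * start at bit 8 and fls() is 1-based, hence the "- 9".
		 */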
else if (cop0_cause)
do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
else
break;
}
}
#ifdef CONFIG_HOTPLUG_CPU
void octeon_fixup_irqs(void)
{
irq_cpu_offline();
}
#endif /* CONFIG_HOTPLUG_CPU */
struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
{
struct octeon_ciu3_info *ciu3_info;
ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
return ciu3_info->domain[block];
}
EXPORT_SYMBOL(octeon_irq_get_block_domain);
| linux-master | arch/mips/cavium-octeon/octeon-irq.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Ani Joshi <[email protected]>
* Copyright (C) 2000, 2001 Ralf Baechle <[email protected]>
* Copyright (C) 2005 Ilya A. Volynets-Evenbakh <[email protected]>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
* IP32 changes by Ilya.
* Copyright (C) 2010 Cavium Networks, Inc.
*/
#include <linux/dma-direct.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/bootinfo.h>
#include <asm/octeon/octeon.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
struct octeon_dma_map_ops {
dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};
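/*
 * Helpers that translate physical addresses inside the PCIe BAR1 window
 * (CVMX_PCIE_BAR1_PHYS_BASE) to the corresponding addresses at
 * CVMX_PCIE_BAR1_RC_BASE and back; anything outside the window passes
 * through unchanged.
 */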
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
else
return paddr;
}
static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
else
return daddr;
}
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
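	/*
	 * Memory at 0x410000000-0x420000000 maps into the low
	 * 0x10000000-0x20000000 window for DMA (presumably RAM relocated
	 * out of that hole); fold it down here and undo it in
	 * octeon_gen1_dma_to_phys() below.
	 */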
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
paddr -= 0x400000000ull;
return octeon_hole_phys_to_dma(paddr);
}
static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
daddr = octeon_hole_dma_to_phys(daddr);
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
daddr += 0x400000000ull;
return daddr;
}
static const struct octeon_dma_map_ops octeon_gen1_ops = {
.phys_to_dma = octeon_gen1_phys_to_dma,
.dma_to_phys = octeon_gen1_dma_to_phys,
};
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return octeon_hole_phys_to_dma(paddr);
}
static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
return octeon_hole_dma_to_phys(daddr);
}
static const struct octeon_dma_map_ops octeon_gen2_ops = {
.phys_to_dma = octeon_gen2_phys_to_dma,
.dma_to_phys = octeon_gen2_dma_to_phys,
};
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
paddr -= 0x400000000ull;
/* Anything in the BAR1 hole or above goes via BAR2 */
if (paddr >= 0xf0000000ull)
paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
return paddr;
}
static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
daddr -= OCTEON_BAR2_PCI_ADDRESS;
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
daddr += 0x400000000ull;
return daddr;
}
static const struct octeon_dma_map_ops octeon_big_ops = {
.phys_to_dma = octeon_big_phys_to_dma,
.dma_to_phys = octeon_big_dma_to_phys,
};
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
phys_addr_t paddr)
{
if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
paddr -= 0x400000000ull;
/* Anything not in the BAR1 range goes via BAR2 */
if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
paddr = paddr - octeon_bar1_pci_phys;
else
paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
return paddr;
}
static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
dma_addr_t daddr)
{
if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
daddr -= OCTEON_BAR2_PCI_ADDRESS;
else
daddr += octeon_bar1_pci_phys;
if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
daddr += 0x400000000ull;
return daddr;
}
static const struct octeon_dma_map_ops octeon_small_ops = {
.phys_to_dma = octeon_small_phys_to_dma,
.dma_to_phys = octeon_small_dma_to_phys,
};
static const struct octeon_dma_map_ops *octeon_pci_dma_ops;
void __init octeon_pci_dma_init(void)
{
switch (octeon_dma_bar_type) {
case OCTEON_DMA_BAR_TYPE_PCIE:
octeon_pci_dma_ops = &octeon_gen1_ops;
break;
case OCTEON_DMA_BAR_TYPE_PCIE2:
octeon_pci_dma_ops = &octeon_gen2_ops;
break;
case OCTEON_DMA_BAR_TYPE_BIG:
octeon_pci_dma_ops = &octeon_big_ops;
break;
case OCTEON_DMA_BAR_TYPE_SMALL:
octeon_pci_dma_ops = &octeon_small_ops;
break;
default:
BUG();
}
}
#endif /* CONFIG_PCI */
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
#ifdef CONFIG_PCI
if (dev && dev_is_pci(dev))
return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
#endif
return paddr;
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
#ifdef CONFIG_PCI
if (dev && dev_is_pci(dev))
return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
#endif
return daddr;
}
void __init plat_swiotlb_setup(void)
{
phys_addr_t start, end;
phys_addr_t max_addr;
phys_addr_t addr_size;
size_t swiotlbsize;
u64 i;
max_addr = 0;
addr_size = 0;
for_each_mem_range(i, &start, &end) {
/* These addresses map low for PCI. */
if (start > 0x410000000ull && !OCTEON_IS_OCTEON2())
continue;
addr_size += (end - start);
if (max_addr < end)
max_addr = end;
}
swiotlbsize = PAGE_SIZE;
#ifdef CONFIG_PCI
/*
* For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
* size to a maximum of 64MB
*/
if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
swiotlbsize = addr_size / 4;
if (swiotlbsize > 64 * (1<<20))
swiotlbsize = 64 * (1<<20);
} else if (max_addr > 0xf0000000ul) {
/*
* Otherwise only allocate a big iotlb if there is
* memory past the BAR1 hole.
*/
swiotlbsize = 64 * (1<<20);
}
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
/* OCTEON II ohci is only 32-bit. */
if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
swiotlbsize = 64 * (1<<20);
#endif
swiotlb_adjust_size(swiotlbsize);
swiotlb_init(true, SWIOTLB_VERBOSE);
}
| linux-master | arch/mips/cavium-octeon/dma-octeon.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2017 Cavium, Inc.
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/platform_device.h>
#include <linux/libfdt.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-board.h>
#ifdef CONFIG_USB
#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
static int __init octeon2_usb_reset(void)
{
union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
u32 ucmd;
if (!OCTEON_IS_OCTEON2())
return 0;
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
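	/*
	 * If the bootloader already brought the controller out of reset,
	 * halt and reset EHCI and set the OHCI run bit so the kernel
	 * drivers start from a known state.
	 */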
if (clk_rst_ctl.s.hrst) {
ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
ucmd &= ~CMD_RUN;
cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
mdelay(2);
ucmd |= CMD_RESET;
cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
ucmd |= CMD_RUN;
cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
}
return 0;
}
arch_initcall(octeon2_usb_reset);
static void octeon2_usb_clocks_start(struct device *dev)
{
u64 div;
union cvmx_uctlx_if_ena if_ena;
union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
int i;
unsigned long io_clk_64_to_ns;
u32 clock_rate = 12000000;
bool is_crystal_clock = false;
mutex_lock(&octeon2_usb_clocks_mutex);
octeon2_usb_clock_start_cnt++;
if (octeon2_usb_clock_start_cnt != 1)
goto exit;
io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();
if (dev->of_node) {
struct device_node *uctl_node;
const char *clock_type;
uctl_node = of_get_parent(dev->of_node);
if (!uctl_node) {
dev_err(dev, "No UCTL device node\n");
goto exit;
}
i = of_property_read_u32(uctl_node,
"refclk-frequency", &clock_rate);
if (i) {
dev_err(dev, "No UCTL \"refclk-frequency\"\n");
of_node_put(uctl_node);
goto exit;
}
i = of_property_read_string(uctl_node,
"refclk-type", &clock_type);
of_node_put(uctl_node);
if (!i && strcmp("crystal", clock_type) == 0)
is_crystal_clock = true;
}
/*
* Step 1: Wait for voltages stable. That surely happened
* before starting the kernel.
*
* Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
*/
if_ena.u64 = 0;
if_ena.s.en = 1;
cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
for (i = 0; i <= 1; i++) {
port_ctl_status.u64 =
cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
/* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
port_ctl_status.s.txvreftune = 15;
port_ctl_status.s.txrisetune = 1;
port_ctl_status.s.txpreemphasistune = 1;
cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
port_ctl_status.u64);
}
/* Step 3: Configure the reference clock, PHY, and HCLK */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
/*
* If the UCTL looks like it has already been started, skip
* the initialization, otherwise bus errors are obtained.
*/
if (clk_rst_ctl.s.hrst)
goto end_clock;
/* 3a */
clk_rst_ctl.s.p_por = 1;
clk_rst_ctl.s.hrst = 0;
clk_rst_ctl.s.p_prst = 0;
clk_rst_ctl.s.h_clkdiv_rst = 0;
clk_rst_ctl.s.o_clkdiv_rst = 0;
clk_rst_ctl.s.h_clkdiv_en = 0;
clk_rst_ctl.s.o_clkdiv_en = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3b */
clk_rst_ctl.s.p_refclk_sel = is_crystal_clock ? 0 : 1;
switch (clock_rate) {
default:
pr_err("Invalid UCTL clock rate of %u, using 12000000 instead\n",
clock_rate);
fallthrough;
case 12000000:
clk_rst_ctl.s.p_refclk_div = 0;
break;
case 24000000:
clk_rst_ctl.s.p_refclk_div = 1;
break;
case 48000000:
clk_rst_ctl.s.p_refclk_div = 2;
break;
}
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3c */
div = octeon_get_io_clock_rate() / 130000000ull;
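	/* Map the computed ratio onto a divider the UCTL accepts (1-4, 6, 8 or 12). */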
switch (div) {
case 0:
div = 1;
break;
case 1:
case 2:
case 3:
case 4:
break;
case 5:
div = 4;
break;
case 6:
case 7:
div = 6;
break;
case 8:
case 9:
case 10:
case 11:
div = 8;
break;
default:
div = 12;
break;
}
clk_rst_ctl.s.h_div = div;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* Read it back, */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
clk_rst_ctl.s.h_clkdiv_en = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3d */
clk_rst_ctl.s.h_clkdiv_rst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 3e: delay 64 io clocks */
ndelay(io_clk_64_to_ns);
/*
* Step 4: Program the power-on reset field in the UCTL
* clock-reset-control register.
*/
clk_rst_ctl.s.p_por = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* Step 5: Wait 3 ms for the PHY clock to start. */
mdelay(3);
/* Steps 6..9 for ATE only, are skipped. */
/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
/* 10a */
clk_rst_ctl.s.o_clkdiv_rst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 10b */
clk_rst_ctl.s.o_clkdiv_en = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* 10c */
ndelay(io_clk_64_to_ns);
/*
* Step 11: Program the PHY reset field:
* UCTL0_CLK_RST_CTL[P_PRST] = 1
*/
clk_rst_ctl.s.p_prst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* Step 11b */
udelay(1);
/* Step 11c */
clk_rst_ctl.s.p_prst = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* Step 11d */
mdelay(1);
/* Step 11e */
clk_rst_ctl.s.p_prst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
/* Step 12: Wait 1 uS. */
udelay(1);
/* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
clk_rst_ctl.s.hrst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
end_clock:
/* Set uSOF cycle period to 60,000 bits. */
cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
exit:
mutex_unlock(&octeon2_usb_clocks_mutex);
}
static void octeon2_usb_clocks_stop(void)
{
mutex_lock(&octeon2_usb_clocks_mutex);
octeon2_usb_clock_start_cnt--;
mutex_unlock(&octeon2_usb_clocks_mutex);
}
static int octeon_ehci_power_on(struct platform_device *pdev)
{
octeon2_usb_clocks_start(&pdev->dev);
return 0;
}
static void octeon_ehci_power_off(struct platform_device *pdev)
{
octeon2_usb_clocks_stop();
}
static struct usb_ehci_pdata octeon_ehci_pdata = {
/* Octeon EHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
/*
* We can DMA from anywhere. But the descriptors must be in
* the lower 4GB.
*/
.dma_mask_64 = 0,
.power_on = octeon_ehci_power_on,
.power_off = octeon_ehci_power_off,
};
static void __init octeon_ehci_hw_start(struct device *dev)
{
union cvmx_uctlx_ehci_ctl ehci_ctl;
octeon2_usb_clocks_start(dev);
ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
/* Use 64-bit addressing. */
ehci_ctl.s.ehci_64b_addr_en = 1;
ehci_ctl.s.l2c_addr_msb = 0;
#ifdef __BIG_ENDIAN
ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
#else
ehci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
ehci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
ehci_ctl.s.inv_reg_a2 = 1;
#endif
cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);
octeon2_usb_clocks_stop();
}
static int __init octeon_ehci_device_init(void)
{
struct platform_device *pd;
struct device_node *ehci_node;
int ret = 0;
ehci_node = of_find_node_by_name(NULL, "ehci");
if (!ehci_node)
return 0;
pd = of_find_device_by_node(ehci_node);
of_node_put(ehci_node);
if (!pd)
return 0;
pd->dev.platform_data = &octeon_ehci_pdata;
octeon_ehci_hw_start(&pd->dev);
put_device(&pd->dev);
return ret;
}
device_initcall(octeon_ehci_device_init);
static int octeon_ohci_power_on(struct platform_device *pdev)
{
octeon2_usb_clocks_start(&pdev->dev);
return 0;
}
static void octeon_ohci_power_off(struct platform_device *pdev)
{
octeon2_usb_clocks_stop();
}
static struct usb_ohci_pdata octeon_ohci_pdata = {
/* Octeon OHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
.power_on = octeon_ohci_power_on,
.power_off = octeon_ohci_power_off,
};
static void __init octeon_ohci_hw_start(struct device *dev)
{
union cvmx_uctlx_ohci_ctl ohci_ctl;
octeon2_usb_clocks_start(dev);
ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
ohci_ctl.s.l2c_addr_msb = 0;
#ifdef __BIG_ENDIAN
ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
#else
ohci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
ohci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
ohci_ctl.s.inv_reg_a2 = 1;
#endif
cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);
octeon2_usb_clocks_stop();
}
static int __init octeon_ohci_device_init(void)
{
struct platform_device *pd;
struct device_node *ohci_node;
int ret = 0;
ohci_node = of_find_node_by_name(NULL, "ohci");
if (!ohci_node)
return 0;
pd = of_find_device_by_node(ohci_node);
of_node_put(ohci_node);
if (!pd)
return 0;
pd->dev.platform_data = &octeon_ohci_pdata;
octeon_ohci_hw_start(&pd->dev);
put_device(&pd->dev);
return ret;
}
device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
struct platform_device *pd;
int ret = 0;
struct resource rng_resources[] = {
{
.flags = IORESOURCE_MEM,
.start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
.end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
}, {
.flags = IORESOURCE_MEM,
.start = cvmx_build_io_address(8, 0),
.end = cvmx_build_io_address(8, 0) + 0x7
}
};
pd = platform_device_alloc("octeon_rng", -1);
if (!pd) {
ret = -ENOMEM;
goto out;
}
ret = platform_device_add_resources(pd, rng_resources,
ARRAY_SIZE(rng_resources));
if (ret)
goto fail;
ret = platform_device_add(pd);
if (ret)
goto fail;
return ret;
fail:
platform_device_put(pd);
out:
return ret;
}
device_initcall(octeon_rng_device_init);
static const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
{ .compatible = "cavium,octeon-5750-usbn", },
{ .compatible = "cavium,octeon-3860-bootbus", },
{ .compatible = "cavium,mdio-mux", },
{ .compatible = "gpio-leds", },
{},
};
static bool __init octeon_has_88e1145(void)
{
return !OCTEON_IS_MODEL(OCTEON_CN52XX) &&
!OCTEON_IS_MODEL(OCTEON_CN6XXX) &&
!OCTEON_IS_MODEL(OCTEON_CN56XX);
}
static bool __init octeon_has_fixed_link(int ipd_port)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
case CVMX_BOARD_TYPE_CUST_NB5:
case CVMX_BOARD_TYPE_EBH3100:
/* Port 1 on these boards is always gigabit. */
return ipd_port == 1;
case CVMX_BOARD_TYPE_BBGW_REF:
/* Ports 0 and 1 connect to the switch. */
return ipd_port == 0 || ipd_port == 1;
}
return false;
}
static void __init octeon_fdt_set_phy(int eth, int phy_addr)
{
const __be32 *phy_handle;
const __be32 *alt_phy_handle;
const __be32 *reg;
u32 phandle;
int phy;
int alt_phy;
const char *p;
int current_len;
char new_name[20];
phy_handle = fdt_getprop(initial_boot_params, eth, "phy-handle", NULL);
if (!phy_handle)
return;
phandle = be32_to_cpup(phy_handle);
phy = fdt_node_offset_by_phandle(initial_boot_params, phandle);
alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
if (alt_phy_handle) {
u32 alt_phandle = be32_to_cpup(alt_phy_handle);
alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
} else {
alt_phy = -1;
}
if (phy_addr < 0 || phy < 0) {
/* Delete the PHY things */
fdt_nop_property(initial_boot_params, eth, "phy-handle");
/* This one may fail */
fdt_nop_property(initial_boot_params, eth, "cavium,alt-phy-handle");
if (phy >= 0)
fdt_nop_node(initial_boot_params, phy);
if (alt_phy >= 0)
fdt_nop_node(initial_boot_params, alt_phy);
return;
}
if (phy_addr >= 256 && alt_phy > 0) {
const struct fdt_property *phy_prop;
struct fdt_property *alt_prop;
fdt32_t phy_handle_name;
/* Use the alt phy node instead.*/
phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
phy_handle_name = phy_prop->nameoff;
fdt_nop_node(initial_boot_params, phy);
fdt_nop_property(initial_boot_params, eth, "phy-handle");
alt_prop = fdt_get_property_w(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
alt_prop->nameoff = phy_handle_name;
phy = alt_phy;
}
phy_addr &= 0xff;
if (octeon_has_88e1145()) {
fdt_nop_property(initial_boot_params, phy, "marvell,reg-init");
memset(new_name, 0, sizeof(new_name));
strcpy(new_name, "marvell,88e1145");
p = fdt_getprop(initial_boot_params, phy, "compatible",
				&current_len);
if (p && current_len >= strlen(new_name))
fdt_setprop_inplace(initial_boot_params, phy,
"compatible", new_name, current_len);
}
reg = fdt_getprop(initial_boot_params, phy, "reg", NULL);
if (phy_addr == be32_to_cpup(reg))
return;
fdt_setprop_inplace_cell(initial_boot_params, phy, "reg", phy_addr);
snprintf(new_name, sizeof(new_name), "ethernet-phy@%x", phy_addr);
	p = fdt_get_name(initial_boot_params, phy, &current_len);
if (p && current_len == strlen(new_name))
fdt_set_name(initial_boot_params, phy, new_name);
else
pr_err("Error: could not rename ethernet phy: <%s>", p);
}
static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
{
const u8 *old_mac;
int old_len;
u8 new_mac[6];
u64 mac = *pmac;
int r;
old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
&old_len);
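	/* Only set a MAC if the property exists and is not already valid. */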
if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
return;
new_mac[0] = (mac >> 40) & 0xff;
new_mac[1] = (mac >> 32) & 0xff;
new_mac[2] = (mac >> 24) & 0xff;
new_mac[3] = (mac >> 16) & 0xff;
new_mac[4] = (mac >> 8) & 0xff;
new_mac[5] = mac & 0xff;
r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address",
new_mac, sizeof(new_mac));
if (r) {
pr_err("Setting \"local-mac-address\" failed %d", r);
return;
}
*pmac = mac + 1;
}
static void __init octeon_fdt_rm_ethernet(int node)
{
const __be32 *phy_handle;
phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
if (phy_handle) {
u32 ph = be32_to_cpup(phy_handle);
int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
if (p >= 0)
fdt_nop_node(initial_boot_params, p);
}
fdt_nop_node(initial_boot_params, node);
}
static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay)
{
fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay",
rx_delay);
fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay",
tx_delay);
}
static void __init octeon_rx_tx_delay(int eth, int iface, int port)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
if (iface == 0) {
if (port == 0) {
/*
* Boards with gigabit WAN ports need a
* different setting that is compatible with
* 100 Mbit settings
*/
_octeon_rx_tx_delay(eth, 0xc, 0x0c);
return;
} else if (port == 1) {
/* Different config for switch port. */
_octeon_rx_tx_delay(eth, 0x0, 0x0);
return;
}
}
break;
case CVMX_BOARD_TYPE_UBNT_E100:
if (iface == 0 && port <= 2) {
_octeon_rx_tx_delay(eth, 0x0, 0x10);
return;
}
break;
}
fdt_nop_property(initial_boot_params, eth, "rx-delay");
fdt_nop_property(initial_boot_params, eth, "tx-delay");
}
static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
{
char name_buffer[20];
int eth;
int phy_addr;
int ipd_port;
int fixed_link;
snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
if (eth < 0)
return;
if (p > max) {
pr_debug("Deleting port %x:%x\n", i, p);
octeon_fdt_rm_ethernet(eth);
return;
}
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
else
ipd_port = 16 * i + p;
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
octeon_fdt_set_phy(eth, phy_addr);
fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link");
if (fixed_link < 0)
WARN_ON(octeon_has_fixed_link(ipd_port));
else if (!octeon_has_fixed_link(ipd_port))
fdt_nop_node(initial_boot_params, fixed_link);
octeon_rx_tx_delay(eth, i, p);
}
static void __init octeon_fdt_pip_iface(int pip, int idx)
{
char name_buffer[20];
int iface;
int p;
int count = 0;
snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
if (iface < 0)
return;
if (cvmx_helper_interface_enumerate(idx) == 0)
count = cvmx_helper_ports_on_interface(idx);
for (p = 0; p < 16; p++)
octeon_fdt_pip_port(iface, idx, p, count - 1);
}
void __init octeon_fill_mac_addresses(void)
{
const char *alias_prop;
char name_buffer[20];
u64 mac_addr_base;
int aliases;
int pip;
int i;
aliases = fdt_path_offset(initial_boot_params, "/aliases");
if (aliases < 0)
return;
mac_addr_base =
((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
(octeon_bootinfo->mac_addr_base[5] & 0xffull);
for (i = 0; i < 2; i++) {
int mgmt;
snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (!alias_prop)
continue;
mgmt = fdt_path_offset(initial_boot_params, alias_prop);
if (mgmt < 0)
continue;
octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
}
alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (!alias_prop)
return;
pip = fdt_path_offset(initial_boot_params, alias_prop);
if (pip < 0)
return;
for (i = 0; i <= 4; i++) {
int iface;
int p;
snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
iface = fdt_subnode_offset(initial_boot_params, pip,
name_buffer);
if (iface < 0)
continue;
for (p = 0; p < 16; p++) {
int eth;
snprintf(name_buffer, sizeof(name_buffer),
"ethernet@%x", p);
eth = fdt_subnode_offset(initial_boot_params, iface,
name_buffer);
if (eth < 0)
continue;
octeon_fdt_set_mac_addr(eth, &mac_addr_base);
}
}
}
int __init octeon_prune_device_tree(void)
{
int i, max_port, uart_mask;
const char *pip_path;
const char *alias_prop;
char name_buffer[20];
int aliases;
if (fdt_check_header(initial_boot_params))
panic("Corrupt Device Tree.");
WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
"Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.",
cvmx_board_type_to_string(octeon_bootinfo->board_type));
aliases = fdt_path_offset(initial_boot_params, "/aliases");
if (aliases < 0) {
pr_err("Error: No /aliases node in device tree.");
return -EINVAL;
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
max_port = 2;
else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
max_port = 1;
else
max_port = 0;
if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E)
max_port = 0;
for (i = 0; i < 2; i++) {
int mgmt;
snprintf(name_buffer, sizeof(name_buffer),
"mix%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (alias_prop) {
mgmt = fdt_path_offset(initial_boot_params, alias_prop);
if (mgmt < 0)
continue;
if (i >= max_port) {
pr_debug("Deleting mix%d\n", i);
octeon_fdt_rm_ethernet(mgmt);
fdt_nop_property(initial_boot_params, aliases,
name_buffer);
} else {
int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
octeon_fdt_set_phy(mgmt, phy_addr);
}
}
}
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i);
}
/* I2C */
if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN63XX) ||
OCTEON_IS_MODEL(OCTEON_CN68XX) ||
OCTEON_IS_MODEL(OCTEON_CN56XX))
max_port = 2;
else
max_port = 1;
for (i = 0; i < 2; i++) {
int i2c;
snprintf(name_buffer, sizeof(name_buffer),
"twsi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (alias_prop) {
i2c = fdt_path_offset(initial_boot_params, alias_prop);
if (i2c < 0)
continue;
if (i >= max_port) {
pr_debug("Deleting twsi%d\n", i);
fdt_nop_node(initial_boot_params, i2c);
fdt_nop_property(initial_boot_params, aliases,
name_buffer);
}
}
}
/* SMI/MDIO */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
max_port = 4;
else if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN63XX) ||
OCTEON_IS_MODEL(OCTEON_CN56XX))
max_port = 2;
else
max_port = 1;
for (i = 0; i < 2; i++) {
int i2c;
snprintf(name_buffer, sizeof(name_buffer),
"smi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (alias_prop) {
i2c = fdt_path_offset(initial_boot_params, alias_prop);
if (i2c < 0)
continue;
if (i >= max_port) {
pr_debug("Deleting smi%d\n", i);
fdt_nop_node(initial_boot_params, i2c);
fdt_nop_property(initial_boot_params, aliases,
name_buffer);
}
}
}
/* Serial */
uart_mask = 3;
/* Right now CN52XX is the only chip with a third uart */
if (OCTEON_IS_MODEL(OCTEON_CN52XX))
uart_mask |= 4; /* uart2 */
for (i = 0; i < 3; i++) {
int uart;
snprintf(name_buffer, sizeof(name_buffer),
"uart%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
if (alias_prop) {
uart = fdt_path_offset(initial_boot_params, alias_prop);
if (uart_mask & (1 << i)) {
__be32 f;
f = cpu_to_be32(octeon_get_io_clock_rate());
fdt_setprop_inplace(initial_boot_params,
uart, "clock-frequency",
&f, sizeof(f));
continue;
}
pr_debug("Deleting uart%d\n", i);
fdt_nop_node(initial_boot_params, uart);
fdt_nop_property(initial_boot_params, aliases,
name_buffer);
}
}
/* Compact Flash */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"cf0", NULL);
if (alias_prop) {
union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
unsigned long base_ptr, region_base, region_size;
unsigned long region1_base = 0;
unsigned long region1_size = 0;
int cs, bootbus;
bool is_16bit = false;
bool is_true_ide = false;
__be32 new_reg[6];
__be32 *ranges;
int len;
int cf = fdt_path_offset(initial_boot_params, alias_prop);
base_ptr = 0;
if (octeon_bootinfo->major_version == 1
&& octeon_bootinfo->minor_version >= 1) {
if (octeon_bootinfo->compact_flash_common_base_addr)
base_ptr = octeon_bootinfo->compact_flash_common_base_addr;
} else {
base_ptr = 0x1d000800;
}
if (!base_ptr)
goto no_cf;
/* Find CS0 region. */
for (cs = 0; cs < 8; cs++) {
mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
region_base = mio_boot_reg_cfg.s.base << 16;
region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
&& base_ptr < region_base + region_size) {
is_16bit = mio_boot_reg_cfg.s.width;
break;
}
}
if (cs >= 7) {
/* cs and cs + 1 are CS0 and CS1, both must be less than 8. */
goto no_cf;
}
if (!(base_ptr & 0xfffful)) {
/*
* Boot loader signals availability of DMA (true_ide
* mode) by setting low order bits of base_ptr to
* zero.
*/
			/* Assume that CS1 immediately follows. */
mio_boot_reg_cfg.u64 =
cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
region1_base = mio_boot_reg_cfg.s.base << 16;
region1_size = (mio_boot_reg_cfg.s.size + 1) << 16;
if (!mio_boot_reg_cfg.s.en)
goto no_cf;
is_true_ide = true;
} else {
fdt_nop_property(initial_boot_params, cf, "cavium,true-ide");
fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
if (!is_16bit) {
__be32 width = cpu_to_be32(8);
fdt_setprop_inplace(initial_boot_params, cf,
"cavium,bus-width", &width, sizeof(width));
}
}
new_reg[0] = cpu_to_be32(cs);
new_reg[1] = cpu_to_be32(0);
new_reg[2] = cpu_to_be32(0x10000);
new_reg[3] = cpu_to_be32(cs + 1);
new_reg[4] = cpu_to_be32(0);
new_reg[5] = cpu_to_be32(0x10000);
fdt_setprop_inplace(initial_boot_params, cf,
"reg", new_reg, sizeof(new_reg));
bootbus = fdt_parent_offset(initial_boot_params, cf);
if (bootbus < 0)
goto no_cf;
ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
if (!ranges || len < (5 * 8 * sizeof(__be32)))
goto no_cf;
ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
if (is_true_ide) {
cs++;
ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32);
ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff);
ranges[(cs * 5) + 4] = cpu_to_be32(region1_size);
}
goto end_cf;
no_cf:
fdt_nop_node(initial_boot_params, cf);
end_cf:
;
}
/* 8 char LED */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"led0", NULL);
if (alias_prop) {
union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
unsigned long base_ptr, region_base, region_size;
int cs, bootbus;
__be32 new_reg[6];
__be32 *ranges;
int len;
int led = fdt_path_offset(initial_boot_params, alias_prop);
base_ptr = octeon_bootinfo->led_display_base_addr;
if (base_ptr == 0)
goto no_led;
/* Find CS0 region. */
for (cs = 0; cs < 8; cs++) {
mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
region_base = mio_boot_reg_cfg.s.base << 16;
region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
&& base_ptr < region_base + region_size)
break;
}
if (cs > 7)
goto no_led;
new_reg[0] = cpu_to_be32(cs);
new_reg[1] = cpu_to_be32(0x20);
new_reg[2] = cpu_to_be32(0x20);
new_reg[3] = cpu_to_be32(cs);
new_reg[4] = cpu_to_be32(0);
new_reg[5] = cpu_to_be32(0x20);
fdt_setprop_inplace(initial_boot_params, led,
"reg", new_reg, sizeof(new_reg));
bootbus = fdt_parent_offset(initial_boot_params, led);
if (bootbus < 0)
goto no_led;
ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
if (!ranges || len < (5 * 8 * sizeof(__be32)))
goto no_led;
ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
goto end_led;
no_led:
fdt_nop_node(initial_boot_params, led);
end_led:
;
}
#ifdef CONFIG_USB
/* OHCI/UHCI USB */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"uctl", NULL);
if (alias_prop) {
int uctl = fdt_path_offset(initial_boot_params, alias_prop);
if (uctl >= 0 && (!OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC2E)) {
pr_debug("Deleting uctl\n");
fdt_nop_node(initial_boot_params, uctl);
fdt_nop_property(initial_boot_params, aliases, "uctl");
} else if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E ||
octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC4E) {
/* Missing "refclk-type" defaults to crystal. */
fdt_nop_property(initial_boot_params, uctl, "refclk-type");
}
}
/* DWC2 USB */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"usbn", NULL);
if (alias_prop) {
int usbn = fdt_path_offset(initial_boot_params, alias_prop);
if (usbn >= 0 && (current_cpu_type() == CPU_CAVIUM_OCTEON2 ||
!octeon_has_feature(OCTEON_FEATURE_USB))) {
pr_debug("Deleting usbn\n");
fdt_nop_node(initial_boot_params, usbn);
fdt_nop_property(initial_boot_params, aliases, "usbn");
} else {
__be32 new_f[1];
enum cvmx_helper_board_usb_clock_types c;
c = __cvmx_helper_board_usb_get_clock_type();
switch (c) {
case USB_CLOCK_TYPE_REF_48:
new_f[0] = cpu_to_be32(48000000);
fdt_setprop_inplace(initial_boot_params, usbn,
"refclk-frequency", new_f, sizeof(new_f));
fallthrough;
case USB_CLOCK_TYPE_REF_12:
/* Missing "refclk-type" defaults to external. */
fdt_nop_property(initial_boot_params, usbn, "refclk-type");
break;
default:
break;
}
}
}
#endif
return 0;
}
static int __init octeon_publish_devices(void)
{
return of_platform_populate(NULL, octeon_ids, NULL, NULL);
}
arch_initcall(octeon_publish_devices);
| linux-master | arch/mips/cavium-octeon/octeon-platform.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/octeon/octeon.h>
#include "octeon_boot.h"
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
#ifdef CONFIG_RELOCATABLE
volatile unsigned long octeon_processor_relocated_kernel_entry;
#endif /* CONFIG_RELOCATABLE */
#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif
extern void kernel_entry(unsigned long arg1, ...);
static void octeon_icache_flush(void)
{
asm volatile ("synci 0($0)\n");
}
static void (*octeon_message_functions[8])(void) = {
scheduler_ipi,
generic_smp_call_function_interrupt,
octeon_icache_flush,
};
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
u64 action;
int i;
/*
* Make sure the function array initialization remains
* correct.
*/
BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));
/*
* Load the mailbox register to figure out what we're supposed
* to do.
*/
action = cvmx_read_csr(mbox_clrx);
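	/* Only the low 8 (CN68XX) or 16 mailbox bits carry IPI actions. */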
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
action &= 0xff;
else
action &= 0xffff;
/* Clear the mailbox to clear the interrupt */
cvmx_write_csr(mbox_clrx, action);
for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
if (action & 1) {
void (*fn)(void) = octeon_message_functions[i];
if (fn)
fn();
}
action >>= 1;
i++;
}
return IRQ_HANDLED;
}
/*
 * Send an IPI to the given cpu by setting the requested action bits in
 * that core's CIU mailbox register.
*/
void octeon_send_ipi_single(int cpu, unsigned int action)
{
int coreid = cpu_logical_map(cpu);
/*
pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
coreid, action);
*/
cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
octeon_send_ipi_single(i, action);
}
/*
 * Record the bootloader's CPU hotplug entry point, if supported
*/
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
struct linux_app_boot_info *labi;
if (!setup_max_cpus)
return;
labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.\n");
return;
}
octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}
static void __init octeon_smp_setup(void)
{
const int coreid = cvmx_get_core_num();
int cpus;
int id;
struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
#ifdef CONFIG_HOTPLUG_CPU
int core_mask = octeon_get_boot_coremask();
unsigned int num_cores = cvmx_octeon_num_cores();
#endif
/* The present CPUs are initially just the boot cpu (CPU 0). */
for (id = 0; id < NR_CPUS; id++) {
set_cpu_possible(id, id == 0);
set_cpu_present(id, id == 0);
}
__cpu_number_map[coreid] = 0;
__cpu_logical_map[0] = coreid;
/* The present CPUs get the lowest CPU numbers. */
cpus = 1;
for (id = 0; id < NR_CPUS; id++) {
if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
set_cpu_possible(cpus, true);
set_cpu_present(cpus, true);
__cpu_number_map[id] = cpus;
__cpu_logical_map[cpus] = id;
cpus++;
}
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* The possible CPUs are all those present on the chip. We
* will assign CPU numbers for possible cores as well. Cores
	 * are always consecutively numbered from 0.
*/
for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
id < num_cores && id < NR_CPUS; id++) {
if (!(core_mask & (1 << id))) {
set_cpu_possible(cpus, true);
__cpu_number_map[id] = cpus;
__cpu_logical_map[cpus] = id;
cpus++;
}
}
#endif
octeon_smp_hotplug_setup();
}
#ifdef CONFIG_RELOCATABLE
int plat_post_relocation(long offset)
{
unsigned long entry = (unsigned long)kernel_entry;
/* Send secondaries into relocated kernel */
octeon_processor_relocated_kernel_entry = entry + offset;
return 0;
}
#endif /* CONFIG_RELOCATABLE */
/*
* Firmware CPU startup hook
*/
static int octeon_boot_secondary(int cpu, struct task_struct *idle)
{
int count;
pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
cpu_logical_map(cpu));
octeon_processor_sp = __KSTK_TOS(idle);
octeon_processor_gp = (unsigned long)(task_thread_info(idle));
octeon_processor_boot = cpu_logical_map(cpu);
mb();
count = 10000;
while (octeon_processor_sp && count) {
/* Waiting for processor to get the SP and GP */
udelay(1);
count--;
}
if (count == 0) {
pr_err("Secondary boot timeout\n");
return -ETIMEDOUT;
}
return 0;
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
static void octeon_init_secondary(void)
{
unsigned int sr;
sr = set_c0_status(ST0_BEV);
write_c0_ebase((u32)ebase);
write_c0_status(sr);
octeon_check_cpu_bist();
octeon_init_cvmcount();
octeon_irq_setup_secondary();
}
/*
* Callout to firmware before smp_init
*/
static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
/*
* Only the low order mailbox bits are used for IPIs, leave
* the other bits alone.
*/
cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
mailbox_interrupt)) {
panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
}
}
/*
* Last chance for the board code to finish SMP initialization before
* the CPU is "online".
*/
static void octeon_smp_finish(void)
{
octeon_user_io_init();
/* to generate the first CPU timer interrupt */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU. */
static DEFINE_PER_CPU(int, cpu_state);
static int octeon_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (!octeon_bootloader_entry_addr)
return -ENOTSUPP;
set_cpu_online(cpu, false);
calculate_cpu_foreign_map();
octeon_fixup_irqs();
__flush_cache_all();
local_flush_tlb_all();
return 0;
}
static void octeon_cpu_die(unsigned int cpu)
{
int coreid = cpu_logical_map(cpu);
uint32_t mask, new_mask;
const struct cvmx_bootmem_named_block_desc *block_desc;
while (per_cpu(cpu_state, cpu) != CPU_DEAD)
cpu_relax();
/*
 * This is a somewhat complicated strategy for getting/setting the
 * available cores mask, copied from the bootloader
*/
mask = 1 << coreid;
/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
if (!block_desc) {
struct linux_app_boot_info *labi;
labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
labi->avail_coremask |= mask;
new_mask = labi->avail_coremask;
} else { /* alternative, already initialized */
uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
*p |= mask;
new_mask = *p;
}
pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
mb();
cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
void play_dead(void)
{
int cpu = cpu_number_map(cvmx_get_core_num());
idle_task_exit();
cpuhp_ap_report_dead();
octeon_processor_boot = 0xff;
per_cpu(cpu_state, cpu) = CPU_DEAD;
mb();
while (1) /* core will be reset here */
;
}
static void start_after_reset(void)
{
kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
}
static int octeon_update_boot_vector(unsigned int cpu)
{
int coreid = cpu_logical_map(cpu);
uint32_t avail_coremask;
const struct cvmx_bootmem_named_block_desc *block_desc;
struct boot_init_vector *boot_vect =
(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
if (!block_desc) {
struct linux_app_boot_info *labi;
labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
avail_coremask = labi->avail_coremask;
labi->avail_coremask &= ~(1 << coreid);
} else { /* alternative, already initialized */
avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
}
if (!(avail_coremask & (1 << coreid))) {
/* Core not available; assume it was caught by the simple executive */
cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
boot_vect[coreid].app_start_func_addr =
(uint32_t) (unsigned long) start_after_reset;
boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
mb();
cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
return 0;
}
static int register_cavium_notifier(void)
{
return cpuhp_setup_state_nocalls(CPUHP_MIPS_SOC_PREPARE,
"mips/cavium:prepare",
octeon_update_boot_vector, NULL);
}
late_initcall(register_cavium_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
static const struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
.send_ipi_mask = octeon_send_ipi_mask,
.init_secondary = octeon_init_secondary,
.smp_finish = octeon_smp_finish,
.boot_secondary = octeon_boot_secondary,
.smp_setup = octeon_smp_setup,
.prepare_cpus = octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
octeon_icache_flush();
return IRQ_HANDLED;
}
/*
* Callout to firmware before smp_init
*/
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(OCTEON_IRQ_MBOX0 + 0,
octeon_78xx_reched_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
octeon_78xx_reched_interrupt)) {
panic("Cannot request_irq for SchedulerIPI");
}
if (request_irq(OCTEON_IRQ_MBOX0 + 1,
octeon_78xx_call_function_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
octeon_78xx_call_function_interrupt)) {
panic("Cannot request_irq for SMP-Call");
}
if (request_irq(OCTEON_IRQ_MBOX0 + 2,
octeon_78xx_icache_flush_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
octeon_78xx_icache_flush_interrupt)) {
panic("Cannot request_irq for ICache-Flush");
}
}
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
int i;
for (i = 0; i < 8; i++) {
if (action & 1)
octeon_ciu3_mbox_send(cpu, i);
action >>= 1;
}
}
static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int cpu;
for_each_cpu(cpu, mask)
octeon_78xx_send_ipi_single(cpu, action);
}
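/*
 * Illustrative note (editorial, not from the original source): on CIU3
 * parts each action bit becomes its own mailbox interrupt. For example,
 *
 *	octeon_78xx_send_ipi_single(cpu, SMP_CALL_FUNCTION);
 *
 * rings mailbox 1 on the target CPU, which octeon_78xx_prepare_cpus()
 * wired to octeon_78xx_call_function_interrupt().
 */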
static const struct plat_smp_ops octeon_78xx_smp_ops = {
.send_ipi_single = octeon_78xx_send_ipi_single,
.send_ipi_mask = octeon_78xx_send_ipi_mask,
.init_secondary = octeon_init_secondary,
.smp_finish = octeon_smp_finish,
.boot_secondary = octeon_boot_secondary,
.smp_setup = octeon_smp_setup,
.prepare_cpus = octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
#ifdef CONFIG_KEXEC
.kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
#endif
};
void __init octeon_setup_smp(void)
{
const struct plat_smp_ops *ops;
if (octeon_has_feature(OCTEON_FEATURE_CIU3))
ops = &octeon_78xx_smp_ops;
else
ops = &octeon_smp_ops;
register_smp_ops(ops);
}
| linux-master | arch/mips/cavium-octeon/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SHA1 Secure Hash Algorithm.
*
* Adapted for OCTEON by Aaro Koskinen <[email protected]>.
*
* Based on crypto/sha1_generic.c, which is:
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) Jean-Francois Dive <[email protected]>
*/
#include <linux/mm.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void octeon_sha1_store_hash(struct sha1_state *sctx)
{
u64 *hash = (u64 *)sctx->state;
union {
u32 word[2];
u64 dword;
} hash_tail = { { sctx->state[4], } };
write_octeon_64bit_hash_dword(hash[0], 0);
write_octeon_64bit_hash_dword(hash[1], 1);
write_octeon_64bit_hash_dword(hash_tail.dword, 2);
memzero_explicit(&hash_tail.word[0], sizeof(hash_tail.word[0]));
}
static void octeon_sha1_read_hash(struct sha1_state *sctx)
{
u64 *hash = (u64 *)sctx->state;
union {
u32 word[2];
u64 dword;
} hash_tail;
hash[0] = read_octeon_64bit_hash_dword(0);
hash[1] = read_octeon_64bit_hash_dword(1);
hash_tail.dword = read_octeon_64bit_hash_dword(2);
sctx->state[4] = hash_tail.word[0];
memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword));
}
static void octeon_sha1_transform(const void *_block)
{
const u64 *block = _block;
write_octeon_64bit_block_dword(block[0], 0);
write_octeon_64bit_block_dword(block[1], 1);
write_octeon_64bit_block_dword(block[2], 2);
write_octeon_64bit_block_dword(block[3], 3);
write_octeon_64bit_block_dword(block[4], 4);
write_octeon_64bit_block_dword(block[5], 5);
write_octeon_64bit_block_dword(block[6], 6);
octeon_sha1_start(block[7]);
}
static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
unsigned int len)
{
unsigned int partial;
unsigned int done;
const u8 *src;
partial = sctx->count % SHA1_BLOCK_SIZE;
sctx->count += len;
done = 0;
src = data;
if ((partial + len) >= SHA1_BLOCK_SIZE) {
if (partial) {
done = -partial;
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
}
do {
octeon_sha1_transform(src);
done += SHA1_BLOCK_SIZE;
src = data + done;
} while (done + SHA1_BLOCK_SIZE <= len);
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
}
static int octeon_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
/*
* Small updates never reach the crypto engine, so the generic sha1 is
* faster because of the heavyweight octeon_crypto_enable() /
* octeon_crypto_disable().
*/
if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);
flags = octeon_crypto_enable(&state);
octeon_sha1_store_hash(sctx);
__octeon_sha1_update(sctx, data, len);
octeon_sha1_read_hash(sctx);
octeon_crypto_disable(&state, flags);
return 0;
}
static int octeon_sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
static const u8 padding[64] = { 0x80, };
struct octeon_cop2_state state;
__be32 *dst = (__be32 *)out;
unsigned int pad_len;
unsigned long flags;
unsigned int index;
__be64 bits;
int i;
/* Save number of bits. */
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64. */
index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
flags = octeon_crypto_enable(&state);
octeon_sha1_store_hash(sctx);
__octeon_sha1_update(sctx, padding, pad_len);
/* Append length (before padding). */
__octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
octeon_sha1_read_hash(sctx);
octeon_crypto_disable(&state, flags);
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
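/*
 * Worked example for the padding arithmetic above (editorial note, not
 * from the original source): if sctx->count == 70 then index == 6 and
 * pad_len == 50, so the engine is fed 70 + 50 + 8 == 128 bytes, i.e.
 * exactly two 64-byte SHA-1 blocks ending with the 0x80 terminator,
 * the zero padding and the 64-bit bit count.
 */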
static int octeon_sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int octeon_sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg octeon_sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = octeon_sha1_update,
.final = octeon_sha1_final,
.export = octeon_sha1_export,
.import = octeon_sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "octeon-sha1",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init octeon_sha1_mod_init(void)
{
if (!octeon_has_crypto())
return -ENOTSUPP;
return crypto_register_shash(&octeon_sha1_alg);
}
static void __exit octeon_sha1_mod_fini(void)
{
crypto_unregister_shash(&octeon_sha1_alg);
}
module_init(octeon_sha1_mod_init);
module_exit(octeon_sha1_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | arch/mips/cavium-octeon/crypto/octeon-sha1.c |
/*
* Cryptographic API.
*
* MD5 Message Digest Algorithm (RFC1321).
*
* Adapted for OCTEON by Aaro Koskinen <[email protected]>.
*
* Based on crypto/md5.c, which is:
*
* Derived from cryptoapi implementation, originally based on the
* public domain implementation written by Colin Plumb in 1993.
*
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 James Morris <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <crypto/md5.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void octeon_md5_store_hash(struct md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
write_octeon_64bit_hash_dword(hash[0], 0);
write_octeon_64bit_hash_dword(hash[1], 1);
}
static void octeon_md5_read_hash(struct md5_state *ctx)
{
u64 *hash = (u64 *)ctx->hash;
hash[0] = read_octeon_64bit_hash_dword(0);
hash[1] = read_octeon_64bit_hash_dword(1);
}
static void octeon_md5_transform(const void *_block)
{
const u64 *block = _block;
write_octeon_64bit_block_dword(block[0], 0);
write_octeon_64bit_block_dword(block[1], 1);
write_octeon_64bit_block_dword(block[2], 2);
write_octeon_64bit_block_dword(block[3], 3);
write_octeon_64bit_block_dword(block[4], 4);
write_octeon_64bit_block_dword(block[5], 5);
write_octeon_64bit_block_dword(block[6], 6);
octeon_md5_start(block[7]);
}
static int octeon_md5_init(struct shash_desc *desc)
{
struct md5_state *mctx = shash_desc_ctx(desc);
mctx->hash[0] = MD5_H0;
mctx->hash[1] = MD5_H1;
mctx->hash[2] = MD5_H2;
mctx->hash[3] = MD5_H3;
cpu_to_le32_array(mctx->hash, 4);
mctx->byte_count = 0;
return 0;
}
static int octeon_md5_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
struct octeon_cop2_state state;
unsigned long flags;
mctx->byte_count += len;
if (avail > len) {
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, len);
return 0;
}
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data,
avail);
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
octeon_md5_transform(mctx->block);
data += avail;
len -= avail;
while (len >= sizeof(mctx->block)) {
octeon_md5_transform(data);
data += sizeof(mctx->block);
len -= sizeof(mctx->block);
}
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
memcpy(mctx->block, data, len);
return 0;
}
static int octeon_md5_final(struct shash_desc *desc, u8 *out)
{
struct md5_state *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
struct octeon_cop2_state state;
unsigned long flags;
*p++ = 0x80;
flags = octeon_crypto_enable(&state);
octeon_md5_store_hash(mctx);
if (padding < 0) {
memset(p, 0x00, padding + sizeof(u64));
octeon_md5_transform(mctx->block);
p = (char *)mctx->block;
padding = 56;
}
memset(p, 0, padding);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
cpu_to_le32_array(mctx->block + 14, 2);
octeon_md5_transform(mctx->block);
octeon_md5_read_hash(mctx);
octeon_crypto_disable(&state, flags);
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx));
return 0;
}
static int octeon_md5_export(struct shash_desc *desc, void *out)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(out, ctx, sizeof(*ctx));
return 0;
}
static int octeon_md5_import(struct shash_desc *desc, const void *in)
{
struct md5_state *ctx = shash_desc_ctx(desc);
memcpy(ctx, in, sizeof(*ctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = octeon_md5_init,
.update = octeon_md5_update,
.final = octeon_md5_final,
.export = octeon_md5_export,
.import = octeon_md5_import,
.descsize = sizeof(struct md5_state),
.statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name= "octeon-md5",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init md5_mod_init(void)
{
if (!octeon_has_crypto())
return -ENOTSUPP;
return crypto_register_shash(&alg);
}
static void __exit md5_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD5 Message Digest Algorithm (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | arch/mips/cavium-octeon/crypto/octeon-md5.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SHA-224 and SHA-256 Secure Hash Algorithm.
*
* Adapted for OCTEON by Aaro Koskinen <[email protected]>.
*
* Based on crypto/sha256_generic.c, which is:
*
* Copyright (c) Jean-Luc Cooke <[email protected]>
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) 2002 James Morris <[email protected]>
* SHA224 Support Copyright 2007 Intel Corporation <[email protected]>
*/
#include <linux/mm.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void octeon_sha256_store_hash(struct sha256_state *sctx)
{
u64 *hash = (u64 *)sctx->state;
write_octeon_64bit_hash_dword(hash[0], 0);
write_octeon_64bit_hash_dword(hash[1], 1);
write_octeon_64bit_hash_dword(hash[2], 2);
write_octeon_64bit_hash_dword(hash[3], 3);
}
static void octeon_sha256_read_hash(struct sha256_state *sctx)
{
u64 *hash = (u64 *)sctx->state;
hash[0] = read_octeon_64bit_hash_dword(0);
hash[1] = read_octeon_64bit_hash_dword(1);
hash[2] = read_octeon_64bit_hash_dword(2);
hash[3] = read_octeon_64bit_hash_dword(3);
}
static void octeon_sha256_transform(const void *_block)
{
const u64 *block = _block;
write_octeon_64bit_block_dword(block[0], 0);
write_octeon_64bit_block_dword(block[1], 1);
write_octeon_64bit_block_dword(block[2], 2);
write_octeon_64bit_block_dword(block[3], 3);
write_octeon_64bit_block_dword(block[4], 4);
write_octeon_64bit_block_dword(block[5], 5);
write_octeon_64bit_block_dword(block[6], 6);
octeon_sha256_start(block[7]);
}
static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
unsigned int len)
{
unsigned int partial;
unsigned int done;
const u8 *src;
partial = sctx->count % SHA256_BLOCK_SIZE;
sctx->count += len;
done = 0;
src = data;
if ((partial + len) >= SHA256_BLOCK_SIZE) {
if (partial) {
done = -partial;
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
}
do {
octeon_sha256_transform(src);
done += SHA256_BLOCK_SIZE;
src = data + done;
} while (done + SHA256_BLOCK_SIZE <= len);
partial = 0;
}
memcpy(sctx->buf + partial, src, len - done);
}
static int octeon_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
/*
* Small updates never reach the crypto engine, so the generic sha256 is
* faster because of the heavyweight octeon_crypto_enable() /
* octeon_crypto_disable().
*/
if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len);
flags = octeon_crypto_enable(&state);
octeon_sha256_store_hash(sctx);
__octeon_sha256_update(sctx, data, len);
octeon_sha256_read_hash(sctx);
octeon_crypto_disable(&state, flags);
return 0;
}
static int octeon_sha256_final(struct shash_desc *desc, u8 *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
static const u8 padding[64] = { 0x80, };
struct octeon_cop2_state state;
__be32 *dst = (__be32 *)out;
unsigned int pad_len;
unsigned long flags;
unsigned int index;
__be64 bits;
int i;
/* Save number of bits. */
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64. */
index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
flags = octeon_crypto_enable(&state);
octeon_sha256_store_hash(sctx);
__octeon_sha256_update(sctx, padding, pad_len);
/* Append length (before padding). */
__octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
octeon_sha256_read_hash(sctx);
octeon_crypto_disable(&state, flags);
/* Store state in digest */
for (i = 0; i < 8; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(*sctx));
return 0;
}
static int octeon_sha224_final(struct shash_desc *desc, u8 *hash)
{
u8 D[SHA256_DIGEST_SIZE];
octeon_sha256_final(desc, D);
memcpy(hash, D, SHA224_DIGEST_SIZE);
memzero_explicit(D, SHA256_DIGEST_SIZE);
return 0;
}
static int octeon_sha256_export(struct shash_desc *desc, void *out)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int octeon_sha256_import(struct shash_desc *desc, const void *in)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg octeon_sha256_algs[2] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = octeon_sha256_update,
.final = octeon_sha256_final,
.export = octeon_sha256_export,
.import = octeon_sha256_import,
.descsize = sizeof(struct sha256_state),
.statesize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "octeon-sha256",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = octeon_sha256_update,
.final = octeon_sha224_final,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "octeon-sha224",
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int __init octeon_sha256_mod_init(void)
{
if (!octeon_has_crypto())
return -ENOTSUPP;
return crypto_register_shashes(octeon_sha256_algs,
ARRAY_SIZE(octeon_sha256_algs));
}
static void __exit octeon_sha256_mod_fini(void)
{
crypto_unregister_shashes(octeon_sha256_algs,
ARRAY_SIZE(octeon_sha256_algs));
}
module_init(octeon_sha256_mod_init);
module_exit(octeon_sha256_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | arch/mips/cavium-octeon/crypto/octeon-sha256.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SHA-512 and SHA-384 Secure Hash Algorithm.
*
* Adapted for OCTEON by Aaro Koskinen <[email protected]>.
*
* Based on crypto/sha512_generic.c, which is:
*
* Copyright (c) Jean-Luc Cooke <[email protected]>
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) 2003 Kyle McMartin <[email protected]>
*/
#include <linux/mm.h>
#include <crypto/sha2.h>
#include <crypto/sha512_base.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
#include <crypto/internal/hash.h>
#include "octeon-crypto.h"
/*
* We pass everything as 64-bit. OCTEON can handle misaligned data.
*/
static void octeon_sha512_store_hash(struct sha512_state *sctx)
{
write_octeon_64bit_hash_sha512(sctx->state[0], 0);
write_octeon_64bit_hash_sha512(sctx->state[1], 1);
write_octeon_64bit_hash_sha512(sctx->state[2], 2);
write_octeon_64bit_hash_sha512(sctx->state[3], 3);
write_octeon_64bit_hash_sha512(sctx->state[4], 4);
write_octeon_64bit_hash_sha512(sctx->state[5], 5);
write_octeon_64bit_hash_sha512(sctx->state[6], 6);
write_octeon_64bit_hash_sha512(sctx->state[7], 7);
}
static void octeon_sha512_read_hash(struct sha512_state *sctx)
{
sctx->state[0] = read_octeon_64bit_hash_sha512(0);
sctx->state[1] = read_octeon_64bit_hash_sha512(1);
sctx->state[2] = read_octeon_64bit_hash_sha512(2);
sctx->state[3] = read_octeon_64bit_hash_sha512(3);
sctx->state[4] = read_octeon_64bit_hash_sha512(4);
sctx->state[5] = read_octeon_64bit_hash_sha512(5);
sctx->state[6] = read_octeon_64bit_hash_sha512(6);
sctx->state[7] = read_octeon_64bit_hash_sha512(7);
}
static void octeon_sha512_transform(const void *_block)
{
const u64 *block = _block;
write_octeon_64bit_block_sha512(block[0], 0);
write_octeon_64bit_block_sha512(block[1], 1);
write_octeon_64bit_block_sha512(block[2], 2);
write_octeon_64bit_block_sha512(block[3], 3);
write_octeon_64bit_block_sha512(block[4], 4);
write_octeon_64bit_block_sha512(block[5], 5);
write_octeon_64bit_block_sha512(block[6], 6);
write_octeon_64bit_block_sha512(block[7], 7);
write_octeon_64bit_block_sha512(block[8], 8);
write_octeon_64bit_block_sha512(block[9], 9);
write_octeon_64bit_block_sha512(block[10], 10);
write_octeon_64bit_block_sha512(block[11], 11);
write_octeon_64bit_block_sha512(block[12], 12);
write_octeon_64bit_block_sha512(block[13], 13);
write_octeon_64bit_block_sha512(block[14], 14);
octeon_sha512_start(block[15]);
}
static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
unsigned int len)
{
unsigned int part_len;
unsigned int index;
unsigned int i;
/* Compute number of bytes mod 128. */
index = sctx->count[0] % SHA512_BLOCK_SIZE;
/* Update number of bytes. */
if ((sctx->count[0] += len) < len)
sctx->count[1]++;
part_len = SHA512_BLOCK_SIZE - index;
/* Transform as many times as possible. */
if (len >= part_len) {
memcpy(&sctx->buf[index], data, part_len);
octeon_sha512_transform(sctx->buf);
for (i = part_len; i + SHA512_BLOCK_SIZE <= len;
i += SHA512_BLOCK_SIZE)
octeon_sha512_transform(&data[i]);
index = 0;
} else {
i = 0;
}
/* Buffer remaining input. */
memcpy(&sctx->buf[index], &data[i], len - i);
}
static int octeon_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
struct octeon_cop2_state state;
unsigned long flags;
/*
* Small updates never reach the crypto engine, so the generic sha512 is
* faster because of the heavyweight octeon_crypto_enable() /
* octeon_crypto_disable().
*/
if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len);
flags = octeon_crypto_enable(&state);
octeon_sha512_store_hash(sctx);
__octeon_sha512_update(sctx, data, len);
octeon_sha512_read_hash(sctx);
octeon_crypto_disable(&state, flags);
return 0;
}
static int octeon_sha512_final(struct shash_desc *desc, u8 *hash)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
static u8 padding[128] = { 0x80, };
struct octeon_cop2_state state;
__be64 *dst = (__be64 *)hash;
unsigned int pad_len;
unsigned long flags;
unsigned int index;
__be64 bits[2];
int i;
/* Save number of bits. */
bits[1] = cpu_to_be64(sctx->count[0] << 3);
bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
/* Pad out to 112 mod 128. */
index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
flags = octeon_crypto_enable(&state);
octeon_sha512_store_hash(sctx);
__octeon_sha512_update(sctx, padding, pad_len);
/* Append length (before padding). */
__octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
octeon_sha512_read_hash(sctx);
octeon_crypto_disable(&state, flags);
/* Store state in digest. */
for (i = 0; i < 8; i++)
dst[i] = cpu_to_be64(sctx->state[i]);
/* Zeroize sensitive information. */
memset(sctx, 0, sizeof(struct sha512_state));
return 0;
}
static int octeon_sha384_final(struct shash_desc *desc, u8 *hash)
{
u8 D[64];
octeon_sha512_final(desc, D);
memcpy(hash, D, 48);
memzero_explicit(D, 64);
return 0;
}
static struct shash_alg octeon_sha512_algs[2] = { {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_base_init,
.update = octeon_sha512_update,
.final = octeon_sha512_final,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name= "octeon-sha512",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_base_init,
.update = octeon_sha512_update,
.final = octeon_sha384_final,
.descsize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name= "octeon-sha384",
.cra_priority = OCTEON_CR_OPCODE_PRIORITY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
static int __init octeon_sha512_mod_init(void)
{
if (!octeon_has_crypto())
return -ENOTSUPP;
return crypto_register_shashes(octeon_sha512_algs,
ARRAY_SIZE(octeon_sha512_algs));
}
static void __exit octeon_sha512_mod_fini(void)
{
crypto_unregister_shashes(octeon_sha512_algs,
ARRAY_SIZE(octeon_sha512_algs));
}
module_init(octeon_sha512_mod_init);
module_exit(octeon_sha512_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms (OCTEON)");
MODULE_AUTHOR("Aaro Koskinen <[email protected]>");
| linux-master | arch/mips/cavium-octeon/crypto/octeon-sha512.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2012 Cavium Networks
*/
#include <asm/cop2.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/sched/task_stack.h>
#include "octeon-crypto.h"
/**
* Enable access to Octeon's COP2 crypto hardware for kernel use. Wrap any
* crypto operations in calls to octeon_crypto_enable/disable in order to make
* sure the state of COP2 isn't corrupted if userspace is also performing
* hardware crypto operations. Allocate the state parameter on the stack.
* Returns with preemption disabled.
*
* @state: Pointer to state structure to store current COP2 state in.
*
* Returns: Flags to be passed to octeon_crypto_disable()
*/
unsigned long octeon_crypto_enable(struct octeon_cop2_state *state)
{
int status;
unsigned long flags;
preempt_disable();
local_irq_save(flags);
status = read_c0_status();
write_c0_status(status | ST0_CU2);
if (KSTK_STATUS(current) & ST0_CU2) {
octeon_cop2_save(&(current->thread.cp2));
KSTK_STATUS(current) &= ~ST0_CU2;
status &= ~ST0_CU2;
} else if (status & ST0_CU2) {
octeon_cop2_save(state);
}
local_irq_restore(flags);
return status & ST0_CU2;
}
EXPORT_SYMBOL_GPL(octeon_crypto_enable);
/**
* Disable access to Octeon's COP2 crypto hardware in the kernel. This must be
 * called after octeon_crypto_enable() and before any context switch or return to
* userspace.
*
* @state: Pointer to COP2 state to restore
* @flags: Return value from octeon_crypto_enable()
*/
void octeon_crypto_disable(struct octeon_cop2_state *state,
unsigned long crypto_flags)
{
unsigned long flags;
local_irq_save(flags);
if (crypto_flags & ST0_CU2)
octeon_cop2_restore(state);
else
write_c0_status(read_c0_status() & ~ST0_CU2);
local_irq_restore(flags);
preempt_enable();
}
EXPORT_SYMBOL_GPL(octeon_crypto_disable);
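/*
 * Minimal usage sketch (illustrative only; it mirrors how the hash
 * drivers in this directory use the API):
 *
 *	struct octeon_cop2_state state;
 *	unsigned long flags;
 *
 *	flags = octeon_crypto_enable(&state);
 *	...issue COP2 hash/crypto operations...
 *	octeon_crypto_disable(&state, flags);
 *
 * The state structure lives on the caller's stack, and the flags value
 * returned by octeon_crypto_enable() must be passed back unchanged.
 */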
| linux-master | arch/mips/cavium-octeon/crypto/octeon-crypto.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Small helper utilities.
*/
#include <linux/kernel.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-ipd-defs.h>
/**
 * Convert an interface mode into a human-readable string
*
* @mode: Mode to convert
*
* Returns String
*/
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
mode)
{
switch (mode) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
return "DISABLED";
case CVMX_HELPER_INTERFACE_MODE_RGMII:
return "RGMII";
case CVMX_HELPER_INTERFACE_MODE_GMII:
return "GMII";
case CVMX_HELPER_INTERFACE_MODE_SPI:
return "SPI";
case CVMX_HELPER_INTERFACE_MODE_PCIE:
return "PCIE";
case CVMX_HELPER_INTERFACE_MODE_XAUI:
return "XAUI";
case CVMX_HELPER_INTERFACE_MODE_SGMII:
return "SGMII";
case CVMX_HELPER_INTERFACE_MODE_PICMG:
return "PICMG";
case CVMX_HELPER_INTERFACE_MODE_NPI:
return "NPI";
case CVMX_HELPER_INTERFACE_MODE_LOOP:
return "LOOP";
}
return "UNKNOWN";
}
/**
* Setup Random Early Drop on a specific input queue
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @drop_thresh:
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
static int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
int drop_thresh)
{
union cvmx_ipd_qosx_red_marks red_marks;
union cvmx_ipd_red_quex_param red_param;
/* Set RED to begin dropping packets when there are pass_thresh buffers
left. It will linearly drop more packets until reaching drop_thresh
buffers */
red_marks.u64 = 0;
red_marks.s.drop = drop_thresh;
red_marks.s.pass = pass_thresh;
cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
/* Use the actual queue 0 counter, not the average */
red_param.u64 = 0;
red_param.s.prb_con =
(255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
red_param.s.avg_con = 1;
red_param.s.new_con = 255;
red_param.s.use_pcnt = 1;
cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
return 0;
}
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @drop_thresh:
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
union cvmx_ipd_portx_bp_page_cnt page_cnt;
union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
union cvmx_ipd_red_port_enable red_port_enable;
int queue;
int interface;
int port;
/* Disable backpressure based on queued buffers. It needs SW support */
page_cnt.u64 = 0;
page_cnt.s.bp_enb = 0;
page_cnt.s.page_cnt = 100;
for (interface = 0; interface < 2; interface++) {
for (port = cvmx_helper_get_first_ipd_port(interface);
port < cvmx_helper_get_last_ipd_port(interface); port++)
cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
page_cnt.u64);
}
for (queue = 0; queue < 8; queue++)
cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
/* Shut off the dropping based on the per-port page count. SW isn't
decrementing it right now */
ipd_bp_prt_red_end.u64 = 0;
ipd_bp_prt_red_end.s.prt_enb = 0;
cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
red_port_enable.u64 = 0;
red_port_enable.s.prt_enb = 0xfffffffffull;
red_port_enable.s.avg_dly = 10000;
red_port_enable.s.prb_dly = 10000;
cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
return 0;
}
EXPORT_SYMBOL_GPL(cvmx_helper_setup_red);
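/*
 * Usage sketch (illustrative; the threshold values below are made up and
 * not taken from the original source):
 *
 *	cvmx_helper_setup_red(256, 128);
 *
 * would start RED dropping packets once fewer than 256 FPA pool-0
 * buffers remain free, and drop all incoming packets once fewer than
 * 128 remain.
 */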
/**
* Setup the common GMX settings that determine the number of
* ports. These setting apply to almost all configurations of all
* chips.
*
* @interface: Interface to configure
* @num_ports: Number of ports on the interface
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_setup_gmx(int interface, int num_ports)
{
union cvmx_gmxx_tx_prts gmx_tx_prts;
union cvmx_gmxx_rx_prts gmx_rx_prts;
union cvmx_pko_reg_gmx_port_mode pko_mode;
union cvmx_gmxx_txx_thresh gmx_tx_thresh;
int index;
/* Tell GMX the number of TX ports on this interface */
gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
gmx_tx_prts.s.prts = num_ports;
cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
/* Tell GMX the number of RX ports on this interface. This only
** applies to *GMII and XAUI ports */
if (cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_RGMII
|| cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_SGMII
|| cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_GMII
|| cvmx_helper_interface_get_mode(interface) ==
CVMX_HELPER_INTERFACE_MODE_XAUI) {
if (num_ports > 4) {
cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
"num_ports\n");
return -1;
}
gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
gmx_rx_prts.s.prts = num_ports;
cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
}
/* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
&& !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/* Tell PKO the number of ports on this interface */
pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
if (interface == 0) {
if (num_ports == 1)
pko_mode.s.mode0 = 4;
else if (num_ports == 2)
pko_mode.s.mode0 = 3;
else if (num_ports <= 4)
pko_mode.s.mode0 = 2;
else if (num_ports <= 8)
pko_mode.s.mode0 = 1;
else
pko_mode.s.mode0 = 0;
} else {
if (num_ports == 1)
pko_mode.s.mode1 = 4;
else if (num_ports == 2)
pko_mode.s.mode1 = 3;
else if (num_ports <= 4)
pko_mode.s.mode1 = 2;
else if (num_ports <= 8)
pko_mode.s.mode1 = 1;
else
pko_mode.s.mode1 = 0;
}
cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
}
/*
* Set GMX to buffer as much data as possible before starting
 * transmit. This reduces the chances that we have a TX underrun
 * due to memory contention. Any packet that fits entirely
 * in the GMX FIFO can never have an underrun regardless of
* memory load.
*/
gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/* These chips have a fixed max threshold of 0x40 */
gmx_tx_thresh.s.cnt = 0x40;
} else {
/* Choose the max value for the number of ports */
if (num_ports <= 1)
gmx_tx_thresh.s.cnt = 0x100 / 1;
else if (num_ports == 2)
gmx_tx_thresh.s.cnt = 0x100 / 2;
else
gmx_tx_thresh.s.cnt = 0x100 / 4;
}
/*
* SPI and XAUI can have lots of ports but the GMX hardware
* only ever has a max of 4.
*/
if (num_ports > 4)
num_ports = 4;
for (index = 0; index < num_ports; index++)
cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
gmx_tx_thresh.u64);
return 0;
}
/**
* Returns the IPD/PKO port number for a port on the given
* interface.
*
* @interface: Interface to use
* @port: Port on the interface
*
* Returns IPD/PKO port number
*/
int cvmx_helper_get_ipd_port(int interface, int port)
{
switch (interface) {
case 0:
return port;
case 1:
return port + 16;
case 2:
return port + 32;
case 3:
return port + 36;
case 4:
return port + 40;
case 5:
return port + 44;
}
return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_ipd_port);
/**
* Returns the interface number for an IPD/PKO port number.
*
* @ipd_port: IPD/PKO port number
*
* Returns Interface number
*/
int cvmx_helper_get_interface_num(int ipd_port)
{
if (ipd_port < 16)
return 0;
else if (ipd_port < 32)
return 1;
else if (ipd_port < 36)
return 2;
else if (ipd_port < 40)
return 3;
else if (ipd_port < 44)
return 4;
else if (ipd_port < 48)
return 5;
else
cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
"port number\n");
return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_num);
/**
* Returns the interface index number for an IPD/PKO port
* number.
*
* @ipd_port: IPD/PKO port number
*
* Returns Interface index number
*/
int cvmx_helper_get_interface_index_num(int ipd_port)
{
if (ipd_port < 32)
return ipd_port & 15;
else if (ipd_port < 36)
return ipd_port & 3;
else if (ipd_port < 40)
return ipd_port & 3;
else if (ipd_port < 44)
return ipd_port & 3;
else if (ipd_port < 48)
return ipd_port & 3;
else
cvmx_dprintf("cvmx_helper_get_interface_index_num: "
"Illegal IPD port number\n");
return -1;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_interface_index_num);
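/*
 * Worked example (editorial note): with the mappings above,
 * cvmx_helper_get_ipd_port(1, 2) returns IPD port 18, and the reverse
 * lookups give cvmx_helper_get_interface_num(18) == 1 and
 * cvmx_helper_get_interface_index_num(18) == 2.
 */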
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-util.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for LOOP initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pip-defs.h>
/**
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_loop_probe(int interface)
{
union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
int num_ports = 4;
int port;
/* We need to disable length checking so packets < 64 bytes and jumbo
frames don't get errors */
for (port = 0; port < num_ports; port++) {
union cvmx_pip_prt_cfgx port_cfg;
int ipd_port = cvmx_helper_get_ipd_port(interface, port);
port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
port_cfg.s.maxerr_en = 0;
port_cfg.s.minerr_en = 0;
cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
}
/* Disable FCS stripping for loopback ports */
ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
ipd_sub_port_fcs.s.port_bit2 = 0;
cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
return num_ports;
}
/**
* Bringup and enable a LOOP interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_loop_enable(int interface)
{
/* Do nothing. */
return 0;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-loop.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Fixes and workaround for Octeon chip errata. This file
* contains functions called by cvmx-helper to workaround known
* chip errata. For the most part, code doesn't need to call
* these functions directly.
*
*/
#include <linux/export.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-jtag.h>
/**
* Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
* 1 doesn't work properly. The following code disables 2nd order
* CDR for the specified QLM.
*
* @qlm: QLM to disable 2nd order CDR for.
*/
void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
{
int lane;
cvmx_helper_qlm_jtag_init();
/* We need to load all four lanes of the QLM, a total of 1072 bits */
for (lane = 0; lane < 4; lane++) {
/*
* Each lane has 268 bits. We need to set
* cfg_cdr_incx<67:64> = 3 and cfg_cdr_secord<77> =
* 1. All other bits are zero. Bits go in LSB first,
* so start off with the zeros for bits <63:0>.
*/
cvmx_helper_qlm_jtag_shift_zeros(qlm, 63 - 0 + 1);
/* cfg_cdr_incx<67:64>=3 */
cvmx_helper_qlm_jtag_shift(qlm, 67 - 64 + 1, 3);
/* Zeros for bits <76:68> */
cvmx_helper_qlm_jtag_shift_zeros(qlm, 76 - 68 + 1);
/* cfg_cdr_secord<77>=1 */
cvmx_helper_qlm_jtag_shift(qlm, 77 - 77 + 1, 1);
/* Zeros for bits <267:78> */
cvmx_helper_qlm_jtag_shift_zeros(qlm, 267 - 78 + 1);
}
cvmx_helper_qlm_jtag_update(qlm);
}
EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-errata.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Helper utilities for qlm_jtag.
*
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-jtag.h>
/**
* Initialize the internal QLM JTAG logic to allow programming
* of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
* These functions should only be used at the direction of Cavium
* Networks. Programming incorrect values into the JTAG chain
* can cause chip damage.
*/
void cvmx_helper_qlm_jtag_init(void)
{
union cvmx_ciu_qlm_jtgc jtgc;
uint32_t clock_div = 0;
uint32_t divisor = cvmx_sysinfo_get()->cpu_clock_hz / (25 * 1000000);
divisor = (divisor - 1) >> 2;
/* Convert the divisor into a power of 2 shift */
while (divisor) {
clock_div++;
divisor = divisor >> 1;
}
/*
* Clock divider for QLM JTAG operations. eclk is divided by
* 2^(CLK_DIV + 2)
*/
jtgc.u64 = 0;
jtgc.s.clk_div = clock_div;
jtgc.s.mux_sel = 0;
if (OCTEON_IS_MODEL(OCTEON_CN52XX))
jtgc.s.bypass = 0x3;
else
jtgc.s.bypass = 0xf;
cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
cvmx_read_csr(CVMX_CIU_QLM_JTGC);
}
/**
* Write up to 32bits into the QLM jtag chain. Bits are shifted
* into the MSB and out the LSB, so you should shift in the low
* order bits followed by the high order bits. The JTAG chain is
* 4 * 268 bits long, or 1072.
*
* @qlm: QLM to shift value into
* @bits: Number of bits to shift in (1-32).
* @data: Data to shift in. Bit 0 enters the chain first, followed by
* bit 1, etc.
*
 * Returns The low-order bits that were shifted out of the JTAG chain.
*/
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
union cvmx_ciu_qlm_jtgd jtgd;
jtgd.u64 = 0;
jtgd.s.shift = 1;
jtgd.s.shft_cnt = bits - 1;
jtgd.s.shft_reg = data;
if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
jtgd.s.select = 1 << qlm;
cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do {
jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.shift);
return jtgd.s.shft_reg >> (32 - bits);
}
/**
* Shift long sequences of zeros into the QLM JTAG chain. It is
* common to need to shift more than 32 bits of zeros into the
 * chain. This function is a convenience wrapper around
* cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
* zeros at a time.
*
* @qlm: QLM to shift zeros into
 * @bits: Number of zero bits to shift into the chain
*/
void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
{
while (bits > 0) {
int n = bits;
if (n > 32)
n = 32;
cvmx_helper_qlm_jtag_shift(qlm, n, 0);
bits -= n;
}
}
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in 268*4, or 1072 bits into the JTAG
* chain. Updating invalid values can possibly cause chip damage.
*
* @qlm: QLM to program
*/
void cvmx_helper_qlm_jtag_update(int qlm)
{
union cvmx_ciu_qlm_jtgd jtgd;
/* Update the new data */
jtgd.u64 = 0;
jtgd.s.update = 1;
if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
jtgd.s.select = 1 << qlm;
cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
do {
jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
} while (jtgd.s.update);
}
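/*
 * Illustrative programming sequence (editorial note): callers are expected
 * to pair these helpers as __cvmx_helper_errata_qlm_disable_2nd_order_cdr()
 * in cvmx-helper-errata.c does, i.e.
 *
 *	cvmx_helper_qlm_jtag_init();
 *	shift 268 bits per lane with cvmx_helper_qlm_jtag_shift() and
 *	cvmx_helper_qlm_jtag_shift_zeros();
 *	cvmx_helper_qlm_jtag_update(qlm);
 *
 * so that exactly 4 * 268 == 1072 bits are in the chain before the update
 * is applied.
 */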
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
/*
* Probe RGMII ports and determine the number present
*
* @interface: Interface to probe
*
* Returns Number of RGMII/GMII/MII ports (0-4).
*/
int __cvmx_helper_rgmii_probe(int interface)
{
int num_ports = 0;
union cvmx_gmxx_inf_mode mode;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (mode.s.type) {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)) {
cvmx_dprintf("ERROR: RGMII initialize called in "
"SPI interface\n");
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/*
* On these chips "type" says we're in
* GMII/MII mode. This limits us to 2 ports
*/
num_ports = 2;
} else {
cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
__func__);
}
} else {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)) {
num_ports = 4;
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)) {
num_ports = 3;
} else {
cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
__func__);
}
}
return num_ports;
}
/*
* Put an RGMII interface in loopback mode. Internal packets sent
* out will be received back again on the same port. Externally
* received packets will echo back out.
*
* @port: IPD port number to loop.
*/
void cvmx_helper_rgmii_internal_loopback(int port)
{
int interface = (port >> 4) & 1;
int index = port & 0xf;
uint64_t tmp;
union cvmx_gmxx_prtx_cfg gmx_cfg;
gmx_cfg.u64 = 0;
gmx_cfg.s.duplex = 1;
gmx_cfg.s.slottime = 1;
gmx_cfg.s.speed = 1;
cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}
/*
* Workaround ASX setup errata with CN38XX pass1
*
* @interface: Interface to setup
* @port: Port to setup (0..3)
* @cpu_clock_hz:
* Chip frequency in Hertz
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_errata_asx_pass1(int interface, int port,
int cpu_clock_hz)
{
/* Set hi water mark as per errata GMX-4 */
if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
else
cvmx_dprintf("Illegal clock frequency (%d). "
"CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
return 0;
}
/*
* Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @interface: PKO Interface to configure (0 or 1)
*
* Returns Zero on success
*/
int __cvmx_helper_rgmii_enable(int interface)
{
int num_ports = cvmx_helper_ports_on_interface(interface);
int port;
struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
union cvmx_gmxx_inf_mode mode;
union cvmx_asxx_tx_prt_en asx_tx;
union cvmx_asxx_rx_prt_en asx_rx;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (mode.s.en == 0)
return -1;
if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
/* Ignore SPI interfaces */
return -1;
/* Configure the ASX registers needed to use the RGMII ports */
asx_tx.u64 = 0;
asx_tx.s.prt_en = cvmx_build_mask(num_ports);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
asx_rx.u64 = 0;
asx_rx.s.prt_en = cvmx_build_mask(num_ports);
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
/* Configure the GMX registers needed to use the RGMII ports */
for (port = 0; port < num_ports; port++) {
		/*
		 * Setting of CVMX_GMXX_TXX_THRESH has been moved to
		 * __cvmx_helper_setup_gmx().
		 */
if (cvmx_octeon_is_pass1())
__cvmx_helper_errata_asx_pass1(interface, port,
sys_info_ptr->
cpu_clock_hz);
else {
/*
* Configure more flexible RGMII preamble
* checking. Pass 1 doesn't support this
* feature.
*/
union cvmx_gmxx_rxx_frm_ctl frm_ctl;
frm_ctl.u64 =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
(port, interface));
/* New field, so must be compile time */
frm_ctl.s.pre_free = 1;
cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
frm_ctl.u64);
}
/*
* Each pause frame transmitted will ask for about 10M
* bit times before resume. If buffer space comes
* available before that time has expired, an XON
* pause frame (0 time) will be transmitted to restart
* the flow.
*/
cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
20000);
cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
(port, interface), 19000);
if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
16);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
16);
} else {
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
24);
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
24);
}
}
__cvmx_helper_setup_gmx(interface, num_ports);
/* enable the ports now */
for (port = 0; port < num_ports; port++) {
union cvmx_gmxx_prtx_cfg gmx_cfg;
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
gmx_cfg.u64);
}
__cvmx_interrupt_asxx_enable(interface);
__cvmx_interrupt_gmxx_enable(interface);
return 0;
}
/*
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
union cvmx_helper_link_info __cvmx_helper_rgmii_link_get(int ipd_port)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_asxx_prt_loop asxx_prt_loop;
asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
if (asxx_prt_loop.s.int_loop & (1 << index)) {
/* Force 1Gbps full duplex on internal loopback */
union cvmx_helper_link_info result;
result.u64 = 0;
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
return result;
} else
return __cvmx_helper_board_link_get(ipd_port);
}
/*
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_rgmii_link_set(int ipd_port,
union cvmx_helper_link_info link_info)
{
int result = 0;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_gmxx_prtx_cfg original_gmx_cfg;
union cvmx_gmxx_prtx_cfg new_gmx_cfg;
union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
int i;
/* Ignore speed sets in the simulator */
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
return 0;
/* Read the current settings so we know the current enable state */
original_gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
new_gmx_cfg = original_gmx_cfg;
/* Disable the lowest level RX */
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
~(1 << index));
memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
/* Disable all queues so that TX should become idle */
for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
int queue = cvmx_pko_get_base_queue(ipd_port) + i;
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
pko_mem_queue_qos.s.pid = ipd_port;
pko_mem_queue_qos.s.qid = queue;
pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
pko_mem_queue_qos.s.qos_mask = 0;
cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
}
/* Disable backpressure */
gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
gmx_tx_ovr_bp.s.bp &= ~(1 << index);
gmx_tx_ovr_bp.s.en |= 1 << index;
cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
/*
* Poll the GMX state machine waiting for it to become
* idle. Preferably we should only change speed when it is
* idle. If it doesn't become idle we will still do the speed
* change, but there is a slight chance that GMX will
* lockup.
*/
cvmx_write_csr(CVMX_NPI_DBG_SELECT,
interface * 0x800 + index * 0x100 + 0x880);
CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
==, 0, 10000);
CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
==, 0, 10000);
/* Disable the port before we make any changes */
new_gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
/* Set full/half duplex */
if (cvmx_octeon_is_pass1())
/* Half duplex is broken for 38XX Pass 1 */
new_gmx_cfg.s.duplex = 1;
else if (!link_info.s.link_up)
/* Force full duplex on down links */
new_gmx_cfg.s.duplex = 1;
else
new_gmx_cfg.s.duplex = link_info.s.full_duplex;
/* Set the link speed. Anything unknown is set to 1Gbps */
if (link_info.s.speed == 10) {
new_gmx_cfg.s.slottime = 0;
new_gmx_cfg.s.speed = 0;
} else if (link_info.s.speed == 100) {
new_gmx_cfg.s.slottime = 0;
new_gmx_cfg.s.speed = 0;
} else {
new_gmx_cfg.s.slottime = 1;
new_gmx_cfg.s.speed = 1;
}
/* Adjust the clocks */
if (link_info.s.speed == 10) {
cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
} else if (link_info.s.speed == 100) {
cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
} else {
cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
}
if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
union cvmx_gmxx_inf_mode mode;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
/*
* Port .en .type .p0mii Configuration
* ---- --- ----- ------ -----------------------------------------
* X 0 X X All links are disabled.
* 0 1 X 0 Port 0 is RGMII
* 0 1 X 1 Port 0 is MII
* 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
* 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
* MII port is selected by GMX_PRT1_CFG[SPEED].
*/
/* In MII mode, CLK_CNT = 1. */
if (((index == 0) && (mode.s.p0mii == 1))
|| ((index != 0) && (mode.s.type == 1))) {
cvmx_write_csr(CVMX_GMXX_TXX_CLK
(index, interface), 1);
}
}
}
/* Do a read to make sure all setup stuff is complete */
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
/* Save the new GMX setting without enabling the port */
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
/* Enable the lowest level RX */
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
index));
/* Re-enable the TX path */
for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
int queue = cvmx_pko_get_base_queue(ipd_port) + i;
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
pko_mem_queue_qos_save[i].u64);
}
/* Restore backpressure */
cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
/* Restore the GMX enable state. Port config is complete */
new_gmx_cfg.s.en = original_gmx_cfg.s.en;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
return result;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for SPI initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-spxx-defs.h>
#include <asm/octeon/cvmx-stxx-defs.h>
/*
* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
* initialization routines wait for SPI training. You can override the
* value using executive-config.h if necessary.
*/
#ifndef CVMX_HELPER_SPI_TIMEOUT
#define CVMX_HELPER_SPI_TIMEOUT 10
#endif
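/*
 * Illustrative note (editor's addition): because of the #ifndef guard
 * above, a build can override the training timeout before this point,
 * for example from executive-config.h:
 *
 *	#define CVMX_HELPER_SPI_TIMEOUT 20
 *
 * The value 20 is purely a hypothetical example; the default used by
 * this file is 10.
 */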
int __cvmx_helper_spi_enumerate(int interface)
{
if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
cvmx_spi4000_is_present(interface)) {
return 10;
} else {
return 16;
}
}
/**
* Probe a SPI interface and determine the number of ports
* connected to it. The SPI interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_spi_probe(int interface)
{
int num_ports = 0;
if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
cvmx_spi4000_is_present(interface)) {
num_ports = 10;
} else {
union cvmx_pko_reg_crc_enable enable;
num_ports = 16;
/*
* Unlike the SPI4000, most SPI devices don't
		 * automatically append the L2 CRC. For everything
		 * except the SPI4000, have PKO append the L2 CRC
* to the packet.
*/
enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
enable.s.enable |= 0xffff << (interface * 16);
cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
}
__cvmx_helper_setup_gmx(interface, num_ports);
return num_ports;
}
/**
* Bringup and enable a SPI interface. After this call packet I/O
* should be fully functional. This is called with IPD enabled but
* PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_spi_enable(int interface)
{
/*
* Normally the ethernet L2 CRC is checked and stripped in the
	 * GMX block. When you are using SPI, this isn't the case and
* IPD needs to check the L2 CRC.
*/
int num_ports = cvmx_helper_ports_on_interface(interface);
int ipd_port;
for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
ipd_port++) {
union cvmx_pip_prt_cfgx port_config;
port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
port_config.s.crc_en = 1;
cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
}
if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
CVMX_HELPER_SPI_TIMEOUT, num_ports);
if (cvmx_spi4000_is_present(interface))
cvmx_spi4000_initialize(interface);
}
__cvmx_interrupt_spxx_int_msk_enable(interface);
__cvmx_interrupt_stxx_int_msk_enable(interface);
__cvmx_interrupt_gmxx_enable(interface);
return 0;
}
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
union cvmx_helper_link_info __cvmx_helper_spi_link_get(int ipd_port)
{
union cvmx_helper_link_info result;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
result.u64 = 0;
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
/* The simulator gives you a simulated full duplex link */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 10000;
} else if (cvmx_spi4000_is_present(interface)) {
union cvmx_gmxx_rxx_rx_inbnd inband =
cvmx_spi4000_check_speed(interface, index);
result.s.link_up = inband.s.status;
result.s.full_duplex = inband.s.duplex;
switch (inband.s.speed) {
case 0: /* 10 Mbps */
result.s.speed = 10;
break;
case 1: /* 100 Mbps */
result.s.speed = 100;
break;
case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
case 3: /* Illegal */
result.s.speed = 0;
result.s.link_up = 0;
break;
}
} else {
		/*
		 * For generic SPI we can't determine the link, just
		 * return some sane results.
		 */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 10000;
}
return result;
}
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_spi_link_set(int ipd_port, union cvmx_helper_link_info link_info)
{
	/*
	 * Nothing to do. If we have a SPI4000 then the setup was already
	 * performed by cvmx_spi4000_check_speed(). If not then there
	 * isn't any link info.
	 */
return 0;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-spi.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Helper functions for common, but complicated tasks.
*
*/
#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
/* Port count per interface */
static int interface_port_count[9];
/**
* Return the number of interfaces the chip has. Each interface
* may have multiple ports. Most chips support two interfaces,
* but the CNX0XX and CNX1XX are exceptions. These only support
* one interface.
*
* Returns Number of interfaces on chip
*/
int cvmx_helper_get_number_of_interfaces(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return 9;
if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
return 7;
else
return 8;
}
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
return 4;
if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
return 5;
else
return 3;
}
EXPORT_SYMBOL_GPL(cvmx_helper_get_number_of_interfaces);
/**
* Return the number of ports on an interface. Depending on the
* chip and configuration, this can be 1-16. A value of 0
* specifies that the interface doesn't exist or isn't usable.
*
* @interface: Interface to get the port count for
*
* Returns Number of ports on interface. Can be Zero.
*/
int cvmx_helper_ports_on_interface(int interface)
{
return interface_port_count[interface];
}
EXPORT_SYMBOL_GPL(cvmx_helper_ports_on_interface);
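/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller could walk every enumerated port using only the accessors
 * defined or used in this file. The function name and printout format are
 * assumptions made for the example; the block is guarded out so it is
 * never compiled.
 */
#if 0
static void example_dump_ports(void)
{
	int num_interfaces = cvmx_helper_get_number_of_interfaces();
	int interface;

	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			int ipd_port =
				cvmx_helper_get_ipd_port(interface, port);

			cvmx_dprintf("interface %d port %d -> IPD port %d\n",
				     interface, port, ipd_port);
		}
	}
}
#endif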
/**
* @INTERNAL
* Return interface mode for CN68xx.
*/
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
{
union cvmx_mio_qlmx_cfg qlm_cfg;
switch (interface) {
case 0:
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
/* QLM is disabled when QLM SPD is 15. */
if (qlm_cfg.s.qlm_spd == 15)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (qlm_cfg.s.qlm_cfg == 2)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
else if (qlm_cfg.s.qlm_cfg == 3)
return CVMX_HELPER_INTERFACE_MODE_XAUI;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 2:
case 3:
case 4:
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(interface));
/* QLM is disabled when QLM SPD is 15. */
if (qlm_cfg.s.qlm_spd == 15)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (qlm_cfg.s.qlm_cfg == 2)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
else if (qlm_cfg.s.qlm_cfg == 3)
return CVMX_HELPER_INTERFACE_MODE_XAUI;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 7:
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(3));
/* QLM is disabled when QLM SPD is 15. */
if (qlm_cfg.s.qlm_spd == 15) {
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
} else if (qlm_cfg.s.qlm_cfg != 0) {
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
if (qlm_cfg.s.qlm_cfg != 0)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
return CVMX_HELPER_INTERFACE_MODE_NPI;
case 8:
return CVMX_HELPER_INTERFACE_MODE_LOOP;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
}
/**
* @INTERNAL
* Return interface mode for an Octeon II
*/
static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
{
union cvmx_gmxx_inf_mode mode;
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return __cvmx_get_mode_cn68xx(interface);
if (interface == 2)
return CVMX_HELPER_INTERFACE_MODE_NPI;
if (interface == 3)
return CVMX_HELPER_INTERFACE_MODE_LOOP;
/* Only present in CN63XX & CN66XX Octeon model */
if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
(interface == 4 || interface == 5)) ||
(OCTEON_IS_MODEL(OCTEON_CN66XX) &&
interface >= 4 && interface <= 7)) {
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
union cvmx_mio_qlmx_cfg mio_qlm_cfg;
/* QLM2 is SGMII0 and QLM1 is SGMII1 */
if (interface == 0)
mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
else if (interface == 1)
mio_qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (mio_qlm_cfg.s.qlm_spd == 15)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (mio_qlm_cfg.s.qlm_cfg == 9)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
else if (mio_qlm_cfg.s.qlm_cfg == 11)
return CVMX_HELPER_INTERFACE_MODE_XAUI;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
union cvmx_mio_qlmx_cfg qlm_cfg;
if (interface == 0) {
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(2));
if (qlm_cfg.s.qlm_cfg == 2)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
else if (qlm_cfg.s.qlm_cfg == 3)
return CVMX_HELPER_INTERFACE_MODE_XAUI;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
} else if (interface == 1) {
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
if (qlm_cfg.s.qlm_cfg == 2)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
else if (qlm_cfg.s.qlm_cfg == 3)
return CVMX_HELPER_INTERFACE_MODE_XAUI;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
if (interface == 0) {
union cvmx_mio_qlmx_cfg qlm_cfg;
qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(0));
if (qlm_cfg.s.qlm_cfg == 2)
return CVMX_HELPER_INTERFACE_MODE_SGMII;
}
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX))
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
switch (mode.cn61xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
case 1:
return CVMX_HELPER_INTERFACE_MODE_XAUI;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
} else {
if (!mode.s.en)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (mode.s.type)
return CVMX_HELPER_INTERFACE_MODE_GMII;
else
return CVMX_HELPER_INTERFACE_MODE_RGMII;
}
}
/**
* @INTERNAL
* Return interface mode for CN7XXX.
*/
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
{
union cvmx_gmxx_inf_mode mode;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
switch (interface) {
case 0:
case 1:
switch (mode.cn68xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 1:
case 2:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
case 3:
return CVMX_HELPER_INTERFACE_MODE_XAUI;
default:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
}
case 2:
return CVMX_HELPER_INTERFACE_MODE_NPI;
case 3:
return CVMX_HELPER_INTERFACE_MODE_LOOP;
case 4:
/* TODO: Implement support for AGL (RGMII). */
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
}
/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
*
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
* DISABLED.
*/
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
union cvmx_gmxx_inf_mode mode;
if (interface < 0 ||
interface >= cvmx_helper_get_number_of_interfaces())
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/*
* OCTEON III models
*/
if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
return __cvmx_get_mode_cn7xxx(interface);
/*
* Octeon II models
*/
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
return __cvmx_get_mode_octeon2(interface);
/*
* Octeon and Octeon Plus models
*/
if (interface == 2)
return CVMX_HELPER_INTERFACE_MODE_NPI;
if (interface == 3) {
if (OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX))
return CVMX_HELPER_INTERFACE_MODE_LOOP;
else
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
/* Interface 1 is always disabled on CN31XX and CN30XX */
if ((interface == 1)
&& (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)))
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
switch (mode.cn52xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 1:
return CVMX_HELPER_INTERFACE_MODE_XAUI;
case 2:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
case 3:
return CVMX_HELPER_INTERFACE_MODE_PICMG;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
} else {
if (!mode.s.en)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
if (mode.s.type) {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX))
return CVMX_HELPER_INTERFACE_MODE_SPI;
else
return CVMX_HELPER_INTERFACE_MODE_GMII;
} else
return CVMX_HELPER_INTERFACE_MODE_RGMII;
}
}
EXPORT_SYMBOL_GPL(cvmx_helper_interface_get_mode);
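/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * report the mode of every interface using cvmx_helper_interface_get_mode()
 * and cvmx_helper_interface_mode_to_string(), both of which this file
 * already relies on. The function name is an assumption for the example;
 * the block is guarded out so it is never compiled.
 */
#if 0
static void example_dump_interface_modes(void)
{
	int interface;

	for (interface = 0;
	     interface < cvmx_helper_get_number_of_interfaces();
	     interface++) {
		cvmx_helper_interface_mode_t mode =
			cvmx_helper_interface_get_mode(interface);

		cvmx_dprintf("interface %d mode: %s\n", interface,
			     cvmx_helper_interface_mode_to_string(mode));
	}
}
#endif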
/**
* Configure the IPD/PIP tagging and QoS options for a specific
* port. This function determines the POW work queue entry
* contents for a port. The setup performed here is controlled by
* the defines in executive-config.h.
*
* @ipd_port: Port to configure. This follows the IPD numbering, not the
* per interface numbering
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_port_setup_ipd(int ipd_port)
{
union cvmx_pip_prt_cfgx port_config;
union cvmx_pip_prt_tagx tag_config;
port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
/* Have each port go to a different POW queue */
port_config.s.qos = ipd_port & 0x7;
/* Process the headers and place the IP header in the work queue */
port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
/* Put all packets in group 0. Other groups can be used by the app */
tag_config.s.grp = 0;
cvmx_pip_config_port(ipd_port, port_config, tag_config);
return 0;
}
/**
* This function sets the interface_port_count[interface] correctly,
* without modifying any hardware configuration. Hardware setup of
* the ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_interface_enumerate(int interface)
{
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
interface_port_count[interface] = 0;
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
interface_port_count[interface] =
__cvmx_helper_xaui_enumerate(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
interface_port_count[interface] =
__cvmx_helper_rgmii_enumerate(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end.
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
interface_port_count[interface] =
__cvmx_helper_spi_enumerate(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up.
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
interface_port_count[interface] =
__cvmx_helper_sgmii_enumerate(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
interface_port_count[interface] =
__cvmx_helper_npi_enumerate(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode.
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
interface_port_count[interface] =
__cvmx_helper_loop_enumerate(interface);
break;
}
interface_port_count[interface] =
__cvmx_helper_board_interface_probe(interface,
interface_port_count
[interface]);
/* Make sure all global variables propagate to other cores */
CVMX_SYNCWS;
return 0;
}
/**
* This function probes an interface to determine the actual
* number of hardware ports connected to it. It doesn't setup the
* ports or enable them. The main goal here is to set the global
* interface_port_count[interface] correctly. Hardware setup of the
* ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_interface_probe(int interface)
{
cvmx_helper_interface_enumerate(interface);
	/*
	 * At this stage in the game we don't want packets to be moving
	 * yet. The following probe calls should perform hardware setup
	 * needed to determine port counts. Receive must still be disabled.
	 */
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
__cvmx_helper_xaui_probe(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
__cvmx_helper_rgmii_probe(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end.
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
__cvmx_helper_spi_probe(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up.
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
__cvmx_helper_sgmii_probe(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
__cvmx_helper_npi_probe(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode.
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
__cvmx_helper_loop_probe(interface);
break;
}
/* Make sure all global variables propagate to other cores */
CVMX_SYNCWS;
return 0;
}
/**
* Setup the IPD/PIP for the ports on an interface. Packet
* classification and tagging are set for every port on the
* interface. The number of ports on the interface must already
* have been probed.
*
* @interface: Interface to setup IPD/PIP for
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_interface_setup_ipd(int interface)
{
int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
int num_ports = interface_port_count[interface];
while (num_ports--) {
__cvmx_helper_port_setup_ipd(ipd_port);
ipd_port++;
}
return 0;
}
/**
* Setup global setting for IPD/PIP not related to a specific
* interface or port. This must be called before IPD is enabled.
*
* Returns Zero on success, negative on failure.
*/
static int __cvmx_helper_global_setup_ipd(void)
{
/* Setup the global packet input options */
cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
/* The +8 is to account for the next ptr */
(CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
/* The +8 is to account for the next ptr */
(CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
CVMX_FPA_WQE_POOL,
CVMX_IPD_OPC_MODE_STT,
CVMX_HELPER_ENABLE_BACK_PRESSURE);
return 0;
}
/**
* Setup the PKO for the ports on an interface. The number of
* queues per port and the priority of each PKO output queue
* is set here. PKO must be disabled when this function is called.
*
* @interface: Interface to setup PKO for
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_interface_setup_pko(int interface)
{
/*
* Each packet output queue has an associated priority. The
* higher the priority, the more often it can send a packet. A
* priority of 8 means it can send in all 8 rounds of
* contention. We're going to make each queue one less than
* the last. The vector of priorities has been extended to
* support CN5xxx CPUs, where up to 16 queues can be
* associated to a port. To keep backward compatibility we
* don't change the initial 8 priorities and replicate them in
* the second half. With per-core PKO queues (PKO lockless
* operation) all queues have the same priority.
*/
uint64_t priorities[16] =
{ 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
/*
* Setup the IPD/PIP and PKO for the ports discovered
* above. Here packet classification, tagging and output
* priorities are set.
*/
int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
int num_ports = interface_port_count[interface];
while (num_ports--) {
cvmx_pko_config_port(ipd_port,
cvmx_pko_get_base_queue_per_core(ipd_port,
0),
cvmx_pko_get_num_queues(ipd_port),
priorities);
ipd_port++;
}
return 0;
}
/**
* Setup global setting for PKO not related to a specific
* interface or port. This must be called before PKO is enabled.
*
* Returns Zero on success, negative on failure.
*/
static int __cvmx_helper_global_setup_pko(void)
{
/*
* Disable tagwait FAU timeout. This needs to be done before
* anyone might start packet output using tags.
*/
union cvmx_iob_fau_timeout fau_to;
fau_to.u64 = 0;
fau_to.s.tout_val = 0xfff;
fau_to.s.tout_enb = 0;
cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
union cvmx_pko_reg_min_pkt min_pkt;
min_pkt.u64 = 0;
min_pkt.s.size1 = 59;
min_pkt.s.size2 = 59;
min_pkt.s.size3 = 59;
min_pkt.s.size4 = 59;
min_pkt.s.size5 = 59;
min_pkt.s.size6 = 59;
min_pkt.s.size7 = 59;
cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
}
return 0;
}
/**
* Setup global backpressure setting.
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_global_setup_backpressure(void)
{
#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
/* Disable backpressure if configured to do so */
/* Disable backpressure (pause frame) generation */
int num_interfaces = cvmx_helper_get_number_of_interfaces();
int interface;
for (interface = 0; interface < num_interfaces; interface++) {
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
case CVMX_HELPER_INTERFACE_MODE_XAUI:
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
case CVMX_HELPER_INTERFACE_MODE_SPI:
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
cvmx_gmx_set_backpressure_override(interface, 0xf);
break;
}
}
#endif
return 0;
}
/**
* Enable packet input/output from the hardware. This function is
* called after all internal setup is complete and IPD is enabled.
* After this function completes, packets will be accepted from the
* hardware ports. PKO should still be disabled to make sure packets
* aren't sent out partially setup hardware.
*
* @interface: Interface to enable
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_packet_hardware_enable(int interface)
{
int result = 0;
switch (cvmx_helper_interface_get_mode(interface)) {
/* These types don't support ports to IPD/PKO */
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Nothing to do */
break;
/* XAUI is a single high speed port */
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_enable(interface);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_enable(interface);
break;
/*
* SPI4 can have 1-16 ports depending on the device at
* the other end
*/
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_enable(interface);
break;
/*
* SGMII can have 1-4 ports depending on how many are
* hooked up
*/
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_enable(interface);
break;
/* PCI target Network Packet Interface */
case CVMX_HELPER_INTERFACE_MODE_NPI:
result = __cvmx_helper_npi_enable(interface);
break;
/*
* Special loopback only ports. These are not the same
* as other ports in loopback mode
*/
case CVMX_HELPER_INTERFACE_MODE_LOOP:
result = __cvmx_helper_loop_enable(interface);
break;
}
return result;
}
/**
* Function to adjust internal IPD pointer alignments
*
* Returns 0 on success
* !0 on failure
*/
static int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
#define FIX_IPD_OUTPORT 0
/* Ports 0-15 are interface 0, 16-31 are interface 1 */
#define INTERFACE(port) (port >> 4)
#define INDEX(port) (port & 0xf)
uint64_t *p64;
union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr g_buffer, pkt_buffer;
struct cvmx_wqe *work;
int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
union cvmx_gmxx_prtx_cfg gmx_cfg;
int retry_cnt;
int retry_loop_cnt;
int i;
/* Save values for restore at end */
uint64_t prtx_cfg =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t tx_ptr_en =
cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rx_ptr_en =
cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rxx_jabber =
cvmx_read_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t frame_max =
cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
/* Configure port to gig FDX as required for loopback mode */
cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
/*
* Disable reception on all ports so if traffic is present it
* will not interfere.
*/
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
__delay(100000000ull);
for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
retry_cnt = 100000;
wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
wqe_pcnt &= 0x7f;
num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
if (num_segs == 0)
goto fix_ipd_exit;
num_segs += 1;
size =
FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
(FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
CVMX_SYNC;
g_buffer.u64 = 0;
g_buffer.s.addr =
cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
if (g_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"buffer allocation failure.\n");
goto fix_ipd_exit;
}
g_buffer.s.pool = CVMX_FPA_WQE_POOL;
g_buffer.s.size = num_segs;
pkt_buffer.u64 = 0;
pkt_buffer.s.addr =
cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
if (pkt_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"buffer allocation failure.\n");
goto fix_ipd_exit;
}
pkt_buffer.s.i = 1;
pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
p64[0] = 0xffffffffffff0000ull;
p64[1] = 0x08004510ull;
p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
p64[3] = 0x3a5fc0a81073c0a8ull;
for (i = 0; i < num_segs; i++) {
if (i > 0)
pkt_buffer.s.size =
FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
if (i == (num_segs - 1))
pkt_buffer.s.i = 0;
*(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
8 * i) = pkt_buffer.u64;
}
/* Build the PKO command */
pko_command.u64 = 0;
pko_command.s.segs = num_segs;
pko_command.s.total_bytes = size;
pko_command.s.dontfree = 0;
pko_command.s.gather = 1;
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT),
INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
cvmx_pko_get_base_queue
(FIX_IPD_OUTPORT),
CVMX_PKO_LOCK_CMD_QUEUE);
cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
cvmx_pko_get_base_queue
(FIX_IPD_OUTPORT), pko_command,
g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
CVMX_SYNC;
do {
work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
retry_cnt--;
} while ((work == NULL) && (retry_cnt > 0));
if (!retry_cnt)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
"get_work() timeout occurred.\n");
/* Free packet */
if (work)
cvmx_helper_free_packet_data(work);
}
fix_ipd_exit:
/* Return CSR configs to saved values */
cvmx_write_csr(CVMX_GMXX_PRTX_CFG
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
prtx_cfg);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
tx_ptr_en);
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
rx_ptr_en);
cvmx_write_csr(CVMX_GMXX_RXX_JABBER
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
rxx_jabber);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
frame_max);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
CVMX_SYNC;
if (num_segs)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
return !!num_segs;
}
/**
* Called after all internal packet IO paths are setup. This
* function enables IPD/PIP and begins packet input and output.
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_ipd_and_packet_input_enable(void)
{
int num_interfaces;
int interface;
/* Enable IPD */
cvmx_ipd_enable();
/*
* Time to enable hardware ports packet input and output. Note
* that at this point IPD/PIP must be fully functional and PKO
* must be disabled
*/
num_interfaces = cvmx_helper_get_number_of_interfaces();
for (interface = 0; interface < num_interfaces; interface++) {
if (cvmx_helper_ports_on_interface(interface) > 0)
__cvmx_helper_packet_hardware_enable(interface);
}
/* Finally enable PKO now that the entire path is up and running */
cvmx_pko_enable();
if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
|| OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
&& (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
__cvmx_helper_errata_fix_ipd_ptr_alignment();
return 0;
}
EXPORT_SYMBOL_GPL(cvmx_helper_ipd_and_packet_input_enable);
/**
* Initialize the PIP, IPD, and PKO hardware to support
* simple priority based queues for the ethernet ports. Each
* port is configured with a number of priority queues based
* on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
* priority than the previous.
*
* Returns Zero on success, non-zero on failure
*/
int cvmx_helper_initialize_packet_io_global(void)
{
int result = 0;
int interface;
union cvmx_l2c_cfg l2c_cfg;
const int num_interfaces = cvmx_helper_get_number_of_interfaces();
/*
* CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
* be disabled.
*/
if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
__cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
/*
* Tell L2 to give the IOB statically higher priority compared
* to the cores. This avoids conditions where IO blocks might
* be starved under very high L2 loads.
*/
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
l2c_cfg.s.lrf_arb_mode = 0;
l2c_cfg.s.rfb_arb_mode = 0;
cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
cvmx_pko_initialize_global();
for (interface = 0; interface < num_interfaces; interface++) {
result |= cvmx_helper_interface_probe(interface);
if (cvmx_helper_ports_on_interface(interface) > 0)
cvmx_dprintf("Interface %d has %d ports (%s)\n",
interface,
cvmx_helper_ports_on_interface(interface),
cvmx_helper_interface_mode_to_string
(cvmx_helper_interface_get_mode
(interface)));
result |= __cvmx_helper_interface_setup_ipd(interface);
result |= __cvmx_helper_interface_setup_pko(interface);
}
result |= __cvmx_helper_global_setup_ipd();
result |= __cvmx_helper_global_setup_pko();
/* Enable any flow control and backpressure */
result |= __cvmx_helper_global_setup_backpressure();
#if CVMX_HELPER_ENABLE_IPD
result |= cvmx_helper_ipd_and_packet_input_enable();
#endif
return result;
}
EXPORT_SYMBOL_GPL(cvmx_helper_initialize_packet_io_global);
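/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the minimal global bring-up a consumer of these helpers might perform,
 * checking the accumulated error status. The function name is an
 * assumption for the example; the block is guarded out so it is never
 * compiled.
 */
#if 0
static int example_packet_io_bringup(void)
{
	int result = cvmx_helper_initialize_packet_io_global();

	if (result)
		cvmx_dprintf("packet I/O global init reported errors (%d)\n",
			     result);
	/*
	 * When CVMX_HELPER_ENABLE_IPD is not set, the caller would also
	 * invoke cvmx_helper_ipd_and_packet_input_enable() itself, as the
	 * conditional block in cvmx_helper_initialize_packet_io_global()
	 * above shows.
	 */
	return result;
}
#endif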
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
union cvmx_helper_link_info cvmx_helper_link_get(int ipd_port)
{
union cvmx_helper_link_info result;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
	/*
	 * The default result will be a down link unless the code below
	 * changes it.
	 */
result.u64 = 0;
if (index >= cvmx_helper_ports_on_interface(interface))
return result;
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
/* Network links are not supported */
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_GMII:
if (index == 0)
result = __cvmx_helper_rgmii_link_get(ipd_port);
else {
WARN_ONCE(1, "Using deprecated link status - please update your DT");
result.s.full_duplex = 1;
result.s.link_up = 1;
result.s.speed = 1000;
}
break;
case CVMX_HELPER_INTERFACE_MODE_RGMII:
result = __cvmx_helper_rgmii_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_get(ipd_port);
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
/* Network links are not supported */
break;
}
return result;
}
EXPORT_SYMBOL_GPL(cvmx_helper_link_get);
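/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the poll-and-apply pattern implied by the comments above, where the
 * state passed to cvmx_helper_link_set() is always the state most recently
 * returned by cvmx_helper_link_get(). The function name is an assumption
 * for the example; the block is guarded out so it is never compiled.
 */
#if 0
static void example_poll_link(int ipd_port)
{
	union cvmx_helper_link_info link = cvmx_helper_link_get(ipd_port);

	/* Apply the negotiated state to the port configuration */
	if (cvmx_helper_link_set(ipd_port, link))
		cvmx_dprintf("link set failed for IPD port %d\n", ipd_port);
}
#endif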
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_link_set(int ipd_port, union cvmx_helper_link_info link_info)
{
int result = -1;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
if (index >= cvmx_helper_ports_on_interface(interface))
return -1;
switch (cvmx_helper_interface_get_mode(interface)) {
case CVMX_HELPER_INTERFACE_MODE_DISABLED:
case CVMX_HELPER_INTERFACE_MODE_PCIE:
break;
case CVMX_HELPER_INTERFACE_MODE_XAUI:
result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
break;
/*
* RGMII/GMII/MII are all treated about the same. Most
* functions refer to these ports as RGMII.
*/
case CVMX_HELPER_INTERFACE_MODE_RGMII:
case CVMX_HELPER_INTERFACE_MODE_GMII:
result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_SPI:
result = __cvmx_helper_spi_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_SGMII:
case CVMX_HELPER_INTERFACE_MODE_PICMG:
result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
break;
case CVMX_HELPER_INTERFACE_MODE_NPI:
case CVMX_HELPER_INTERFACE_MODE_LOOP:
break;
}
return result;
}
EXPORT_SYMBOL_GPL(cvmx_helper_link_set);
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Helper functions to abstract board specific data about
* network ports from the rest of the cvmx-helper files.
*/
#include <linux/bug.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-bootinfo.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
/*
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
* busses the bus number is encoded in bits <15:8>.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @ipd_port: Octeon IPD port to get the MII address for.
*
* Returns MII PHY address and bus number or -1.
*/
int cvmx_helper_board_get_mii_address(int ipd_port)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_SIM:
/* Simulator doesn't have MII */
return -1;
case CVMX_BOARD_TYPE_EBT3000:
case CVMX_BOARD_TYPE_EBT5800:
case CVMX_BOARD_TYPE_THUNDER:
case CVMX_BOARD_TYPE_NICPRO2:
/* Interface 0 is SPI4, interface 1 is RGMII */
if ((ipd_port >= 16) && (ipd_port < 20))
return ipd_port - 16;
else
return -1;
case CVMX_BOARD_TYPE_KODAMA:
case CVMX_BOARD_TYPE_EBH3100:
case CVMX_BOARD_TYPE_HIKARI:
case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
/*
* Port 0 is WAN connected to a PHY, Port 1 is GMII
* connected to a switch
*/
if (ipd_port == 0)
return 4;
else if (ipd_port == 1)
return 9;
else
return -1;
case CVMX_BOARD_TYPE_NAC38:
/* Board has 8 RGMII ports PHYs are 0-7 */
if ((ipd_port >= 0) && (ipd_port < 4))
return ipd_port;
else if ((ipd_port >= 16) && (ipd_port < 20))
return ipd_port - 16 + 4;
else
return -1;
case CVMX_BOARD_TYPE_EBH3000:
/* Board has dual SPI4 and no PHYs */
return -1;
case CVMX_BOARD_TYPE_EBH5200:
case CVMX_BOARD_TYPE_EBH5201:
case CVMX_BOARD_TYPE_EBT5200:
/* Board has 2 management ports */
if ((ipd_port >= CVMX_HELPER_BOARD_MGMT_IPD_PORT) &&
(ipd_port < (CVMX_HELPER_BOARD_MGMT_IPD_PORT + 2)))
return ipd_port - CVMX_HELPER_BOARD_MGMT_IPD_PORT;
/*
* Board has 4 SGMII ports. The PHYs start right after the MII
		 * ports: MII0 = 0, MII1 = 1, SGMII = 2-5.
*/
if ((ipd_port >= 0) && (ipd_port < 4))
return ipd_port + 2;
else
return -1;
case CVMX_BOARD_TYPE_EBH5600:
case CVMX_BOARD_TYPE_EBH5601:
case CVMX_BOARD_TYPE_EBH5610:
/* Board has 1 management port */
if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
return 0;
/*
* Board has 8 SGMII ports. 4 connect out, two connect
* to a switch, and 2 loop to each other
*/
if ((ipd_port >= 0) && (ipd_port < 4))
return ipd_port + 1;
else
return -1;
case CVMX_BOARD_TYPE_CUST_NB5:
if (ipd_port == 2)
return 4;
else
return -1;
case CVMX_BOARD_TYPE_NIC_XLE_4G:
/* Board has 4 SGMII ports. connected QLM3(interface 1) */
if ((ipd_port >= 16) && (ipd_port < 20))
return ipd_port - 16 + 1;
else
return -1;
case CVMX_BOARD_TYPE_NIC_XLE_10G:
case CVMX_BOARD_TYPE_NIC10E:
return -1;
case CVMX_BOARD_TYPE_NIC4E:
if (ipd_port >= 0 && ipd_port <= 3)
return (ipd_port + 0x1f) & 0x1f;
else
return -1;
case CVMX_BOARD_TYPE_NIC2E:
if (ipd_port >= 0 && ipd_port <= 1)
return ipd_port + 1;
else
return -1;
case CVMX_BOARD_TYPE_BBGW_REF:
/*
* No PHYs are connected to Octeon, everything is
* through switch.
*/
return -1;
case CVMX_BOARD_TYPE_CUST_WSX16:
if (ipd_port >= 0 && ipd_port <= 3)
return ipd_port;
else if (ipd_port >= 16 && ipd_port <= 19)
return ipd_port - 16 + 4;
else
return -1;
case CVMX_BOARD_TYPE_UBNT_E100:
if (ipd_port >= 0 && ipd_port <= 2)
return 7 - ipd_port;
else
return -1;
case CVMX_BOARD_TYPE_KONTRON_S1901:
if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
return 1;
else
return -1;
}
/* Some unknown board. Somebody forgot to update this function... */
cvmx_dprintf
("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
cvmx_sysinfo_get()->board_type);
return -1;
}
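/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * splitting the return value into bus and PHY address per the "bus number
 * is encoded in bits <15:8>" note above. The function name and printout
 * are assumptions for the example; the block is guarded out so it is
 * never compiled.
 */
#if 0
static void example_decode_mii_address(int ipd_port)
{
	int addr = cvmx_helper_board_get_mii_address(ipd_port);

	if (addr < 0) {
		cvmx_dprintf("IPD port %d: no MII-capable PHY\n", ipd_port);
		return;
	}
	cvmx_dprintf("IPD port %d: MII bus %d, PHY %d\n", ipd_port,
		     (addr >> 8) & 0xff, addr & 0xff);
}
#endif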
/*
* This function is the board specific method of determining an
* ethernet ports link speed. Most Octeon boards have Marvell PHYs
* and are handled by the fall through case. This function must be
* updated for boards that don't have the normal Marvell PHYs.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
* status for.
*
* Returns The ports link status. If the link isn't fully resolved, this must
* return zero.
*/
union cvmx_helper_link_info __cvmx_helper_board_link_get(int ipd_port)
{
union cvmx_helper_link_info result;
WARN_ONCE(!octeon_is_simulation(),
"Using deprecated link status - please update your DT");
/* Unless we fix it later, all links are defaulted to down */
result.u64 = 0;
if (octeon_is_simulation()) {
/* The simulator gives you a simulated 1Gbps full duplex link */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/*
* We don't have a PHY address, so attempt to use
* in-band status. It is really important that boards
* not supporting in-band status never get
* here. Reading broken in-band status tends to do bad
* things
*/
union cvmx_gmxx_rxx_rx_inbnd inband_status;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
inband_status.u64 =
cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
result.s.link_up = inband_status.s.status;
result.s.full_duplex = inband_status.s.duplex;
switch (inband_status.s.speed) {
case 0: /* 10 Mbps */
result.s.speed = 10;
break;
case 1: /* 100 Mbps */
result.s.speed = 100;
break;
case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
case 3: /* Illegal */
result.u64 = 0;
break;
}
} else {
/*
* We don't have a PHY address and we don't have
* in-band status. There is no way to determine the
* link speed. Return down assuming this port isn't
* wired
*/
result.u64 = 0;
}
/* If link is down, return all fields as zero. */
if (!result.s.link_up)
result.u64 = 0;
return result;
}
/*
* This function is called by cvmx_helper_interface_probe() after it
* determines the number of ports Octeon can support on a specific
* interface. This function is the per board location to override
* this value. It is called with the number of ports Octeon might
* support and should return the number of actual ports on the
* board.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @interface: Interface to probe
* @supported_ports:
* Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
 * simply be "supported_ports".
*/
int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
if (interface == 0)
return 2;
break;
case CVMX_BOARD_TYPE_BBGW_REF:
if (interface == 0)
return 2;
break;
case CVMX_BOARD_TYPE_NIC_XLE_4G:
if (interface == 0)
return 0;
break;
	/*
	 * The 2nd interface on the EBH5600 is connected to the Marvell
	 * switch, which we don't support. Disable ports connected to it.
	 */
case CVMX_BOARD_TYPE_EBH5600:
if (interface == 1)
return 0;
break;
}
return supported_ports;
}
/*
* Get the clock type used for the USB block based on board type.
* Used by the USB code for auto configuration of clock type.
*
* Return USB clock type enumeration
*/
enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void)
{
switch (cvmx_sysinfo_get()->board_type) {
case CVMX_BOARD_TYPE_BBGW_REF:
case CVMX_BOARD_TYPE_LANAI2_A:
case CVMX_BOARD_TYPE_LANAI2_U:
case CVMX_BOARD_TYPE_LANAI2_G:
case CVMX_BOARD_TYPE_NIC10E_66:
case CVMX_BOARD_TYPE_UBNT_E100:
return USB_CLOCK_TYPE_CRYSTAL_12;
case CVMX_BOARD_TYPE_NIC10E:
return USB_CLOCK_TYPE_REF_12;
default:
break;
}
/* Most boards except NIC10e use a 12MHz crystal */
if (OCTEON_IS_OCTEON2())
return USB_CLOCK_TYPE_CRYSTAL_12;
return USB_CLOCK_TYPE_REF_48;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-board.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for SGMII initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
/**
* Perform initialization required only once for an SGMII port.
*
* @interface: Interface to init
 * @index: Index of port on the interface
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
/* Disable GMX */
gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/*
* Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
* appropriate value. 1000BASE-X specifies a 10ms
* interval. SGMII specifies a 1.6ms interval.
*/
pcs_misc_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
pcsx_linkx_timer_count_reg.u64 =
cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
if (pcs_misc_ctl_reg.s.mode) {
/* 1000BASE-X */
pcsx_linkx_timer_count_reg.s.count =
(10000ull * clock_mhz) >> 10;
} else {
/* SGMII */
pcsx_linkx_timer_count_reg.s.count =
(1600ull * clock_mhz) >> 10;
}
cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
pcsx_linkx_timer_count_reg.u64);
/*
* Write the advertisement register to be used as the
* tx_Config_Reg<D15:D0> of the autonegotiation. In
* 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
* In SGMII PHY mode, tx_Config_Reg<D15:D0> is
* PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
* tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
* step can be skipped.
*/
if (pcs_misc_ctl_reg.s.mode) {
/* 1000BASE-X */
union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
pcsx_anx_adv_reg.u64 =
cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
pcsx_anx_adv_reg.s.rem_flt = 0;
pcsx_anx_adv_reg.s.pause = 3;
pcsx_anx_adv_reg.s.hfd = 1;
pcsx_anx_adv_reg.s.fd = 1;
cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
pcsx_anx_adv_reg.u64);
} else {
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
pcsx_miscx_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
if (pcsx_miscx_ctl_reg.s.mac_phy) {
/* PHY Mode */
union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
pcsx_sgmx_an_adv_reg.u64 =
cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
(index, interface));
pcsx_sgmx_an_adv_reg.s.link = 1;
pcsx_sgmx_an_adv_reg.s.dup = 1;
pcsx_sgmx_an_adv_reg.s.speed = 2;
cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
(index, interface),
pcsx_sgmx_an_adv_reg.u64);
} else {
/* MAC Mode - Nothing to do */
}
}
return 0;
}
/**
 * Initialize the SERDES link for the first time or after a loss
* of link.
*
* @interface: Interface to init
 * @index: Index of port on the interface
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
{
union cvmx_pcsx_mrx_control_reg control_reg;
/*
* Take PCS through a reset sequence.
* PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
* Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
* value of the other PCS*_MR*_CONTROL_REG bits). Read
* PCS*_MR*_CONTROL_REG[RESET] until it changes value to
* zero.
*/
control_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
control_reg.s.reset = 1;
cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
control_reg.u64);
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
"to finish reset\n",
interface, index);
return -1;
}
}
/*
* Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
* sgmii negotiation starts.
*/
control_reg.s.rst_an = 1;
control_reg.s.an_en = 1;
control_reg.s.pwr_dn = 0;
cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
control_reg.u64);
/*
* Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
* that sgmii autonegotiation is complete. In MAC mode this
* isn't an ethernet link, but a link between Octeon and the
* PHY.
*/
if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
10000)) {
/* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
return -1;
}
return 0;
}
/**
 * Configure an SGMII link to the specified speed after the SERDES
* link is up.
*
* @interface: Interface to init
 * @index: Index of port on the interface
* @link_info: Link state to configure
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
int index,
union cvmx_helper_link_info
link_info)
{
int is_enabled;
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
/* Disable GMX before we make any changes. Remember the enable state */
gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
is_enabled = gmxx_prtx_cfg.s.en;
gmxx_prtx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/* Wait for GMX to be idle */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
rx_idle, ==, 1, 10000)
|| CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
10000)) {
cvmx_dprintf
("SGMII%d: Timeout waiting for port %d to be idle\n",
interface, index);
return -1;
}
/* Read GMX CFG again to make sure the disable completed */
gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
/*
* Get the misc control for PCS. We will need to set the
* duplication amount.
*/
pcsx_miscx_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
/*
* Use GMXENO to force the link down if the status we get says
* it should be down.
*/
pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
/* Only change the duplex setting if the link is up */
if (link_info.s.link_up)
gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
/* Do speed based setting for GMX */
switch (link_info.s.speed) {
case 10:
gmxx_prtx_cfg.s.speed = 0;
gmxx_prtx_cfg.s.speed_msb = 1;
gmxx_prtx_cfg.s.slottime = 0;
/* Setting from GMX-603 */
pcsx_miscx_ctl_reg.s.samp_pt = 25;
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
break;
case 100:
gmxx_prtx_cfg.s.speed = 0;
gmxx_prtx_cfg.s.speed_msb = 0;
gmxx_prtx_cfg.s.slottime = 0;
pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
break;
case 1000:
gmxx_prtx_cfg.s.speed = 1;
gmxx_prtx_cfg.s.speed_msb = 0;
gmxx_prtx_cfg.s.slottime = 1;
pcsx_miscx_ctl_reg.s.samp_pt = 1;
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
break;
default:
break;
}
/* Write the new misc control for PCS */
cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
/* Write the new GMX settings with the port still disabled */
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
/* Read GMX CFG again to make sure the config completed */
gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
/* Restore the enabled / disabled state */
gmxx_prtx_cfg.s.en = is_enabled;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
return 0;
}
/**
* Bring up the SGMII interface to be ready for packet I/O but
* leave I/O disabled using the GMX override. This function
* follows the bringup documented in 10.6.3 of the manual.
*
* @interface: Interface to bringup
* @num_ports: Number of ports on the interface
*
* Returns Zero on success, negative on failure
*/
static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
{
int index;
__cvmx_helper_setup_gmx(interface, num_ports);
for (index = 0; index < num_ports; index++) {
int ipd_port = cvmx_helper_get_ipd_port(interface, index);
__cvmx_helper_sgmii_hardware_init_one_time(interface, index);
/* Linux kernel driver will call ....link_set with the
* proper link state. In the simulator there is no
* link state polling and hence it is set from
* here.
*/
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
__cvmx_helper_sgmii_link_set(ipd_port,
__cvmx_helper_sgmii_link_get(ipd_port));
}
return 0;
}
int __cvmx_helper_sgmii_enumerate(int interface)
{
return 4;
}
/**
* Probe a SGMII interface and determine the number of ports
* connected to it. The SGMII interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_sgmii_probe(int interface)
{
union cvmx_gmxx_inf_mode mode;
/*
* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
* interface needs to be enabled before IPD otherwise per port
* backpressure may not work properly
*/
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
mode.s.en = 1;
cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
return __cvmx_helper_sgmii_enumerate(interface);
}
/**
* Bringup and enable a SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_enable(int interface)
{
int num_ports = cvmx_helper_ports_on_interface(interface);
int index;
__cvmx_helper_sgmii_hardware_init(interface, num_ports);
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
gmxx_prtx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
gmxx_prtx_cfg.u64);
__cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
}
__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
__cvmx_interrupt_gmxx_enable(interface);
return 0;
}
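/*
 * Illustrative sketch, not part of the original sources: the usual order in
 * which the two helpers above are used for one SGMII interface. In the full
 * helper library this sequencing is driven by the global packet I/O
 * initialization; the stand-alone function below is only an example.
 */
#if 0
static int example_bringup_sgmii(int interface)
{
	int num_ports = __cvmx_helper_sgmii_probe(interface);

	if (num_ports <= 0)
		return -1;

	/* IPD is expected to be enabled before enabling the interface */
	return __cvmx_helper_sgmii_enable(interface);
}
#endif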
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
union cvmx_helper_link_info __cvmx_helper_sgmii_link_get(int ipd_port)
{
union cvmx_helper_link_info result;
union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
result.u64 = 0;
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
/* The simulator gives you a simulated 1Gbps full duplex link */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
pcsx_mrx_control_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
if (pcsx_mrx_control_reg.s.loopbck1) {
/* Force 1Gbps full duplex link for internal loopback */
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
}
pcs_misc_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
if (pcs_misc_ctl_reg.s.mode) {
/* 1000BASE-X */
/* FIXME */
} else {
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
pcsx_miscx_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
if (pcsx_miscx_ctl_reg.s.mac_phy) {
/* PHY Mode */
union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
/*
			 * Don't bother continuing if the SERDES low
* level link is down
*/
pcsx_mrx_status_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
(index, interface));
if (pcsx_mrx_status_reg.s.lnk_st == 0) {
if (__cvmx_helper_sgmii_hardware_init_link
(interface, index) != 0)
return result;
}
/* Read the autoneg results */
pcsx_anx_results_reg.u64 =
cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
(index, interface));
if (pcsx_anx_results_reg.s.an_cpt) {
/*
* Auto negotiation is complete. Set
* status accordingly.
*/
result.s.full_duplex =
pcsx_anx_results_reg.s.dup;
result.s.link_up =
pcsx_anx_results_reg.s.link_ok;
switch (pcsx_anx_results_reg.s.spd) {
case 0:
result.s.speed = 10;
break;
case 1:
result.s.speed = 100;
break;
case 2:
result.s.speed = 1000;
break;
default:
result.s.speed = 0;
result.s.link_up = 0;
break;
}
} else {
/*
* Auto negotiation isn't
* complete. Return link down.
*/
result.s.speed = 0;
result.s.link_up = 0;
}
} else { /* MAC Mode */
result = __cvmx_helper_board_link_get(ipd_port);
}
}
return result;
}
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_link_set(int ipd_port,
union cvmx_helper_link_info link_info)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
__cvmx_helper_sgmii_hardware_init_link(interface, index);
return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
link_info);
}
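/*
 * Illustrative sketch, not part of the original sources: a periodic link
 * poll in the style of the Linux ethernet driver - read the negotiated
 * state and program PCS/GMX to match. The IPD port is an assumption.
 */
#if 0
static void example_poll_sgmii_link(int ipd_port)
{
	union cvmx_helper_link_info link;

	link = __cvmx_helper_sgmii_link_get(ipd_port);
	__cvmx_helper_sgmii_link_set(ipd_port, link);
}
#endif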
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#include <asm/octeon/octeon.h>
enum octeon_feature_bits __octeon_feature_bits __read_mostly;
EXPORT_SYMBOL_GPL(__octeon_feature_bits);
/**
* Read a byte of fuse data
* @byte_addr: address to read
*
* Returns fuse value: 0 or 1
*/
static uint8_t __init cvmx_fuse_read_byte(int byte_addr)
{
union cvmx_mio_fus_rcmd read_cmd;
read_cmd.u64 = 0;
read_cmd.s.addr = byte_addr;
read_cmd.s.pend = 1;
cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
&& read_cmd.s.pend)
;
return read_cmd.s.dat;
}
/*
* Version of octeon_model_get_string() that takes buffer as argument,
* as running early in u-boot static/global variables don't work when
* running from flash.
*/
static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
char *buffer)
{
const char *family;
const char *core_model;
char pass[4];
int clock_mhz;
const char *suffix;
int num_cores;
union cvmx_mio_fus_dat2 fus_dat2;
union cvmx_mio_fus_dat3 fus_dat3;
char fuse_model[10];
uint32_t fuse_data = 0;
uint64_t l2d_fus3 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
num_cores = cvmx_octeon_num_cores();
	/* Make sure the non-existent devices look disabled */
switch ((chip_id >> 8) & 0xff) {
case 6: /* CN50XX */
case 2: /* CN30XX */
fus_dat3.s.nodfa_dte = 1;
fus_dat3.s.nozip = 1;
break;
case 4: /* CN57XX or CN56XX */
fus_dat3.s.nodfa_dte = 1;
break;
default:
break;
}
/* Make a guess at the suffix */
/* NSP = everything */
/* EXP = No crypto */
/* SCP = No DFA, No zip */
/* CP = No DFA, No crypto, No zip */
if (fus_dat3.s.nodfa_dte) {
if (fus_dat2.s.nocrypto)
suffix = "CP";
else
suffix = "SCP";
} else if (fus_dat2.s.nocrypto)
suffix = "EXP";
else
suffix = "NSP";
if (!fus_dat2.s.nocrypto)
__octeon_feature_bits |= OCTEON_HAS_CRYPTO;
/*
* Assume pass number is encoded using <5:3><2:0>. Exceptions
* will be fixed later.
*/
sprintf(pass, "%d.%d", (int)((chip_id >> 3) & 7) + 1, (int)chip_id & 7);
/*
* Use the number of cores to determine the last 2 digits of
* the model number. There are some exceptions that are fixed
* later.
*/
switch (num_cores) {
case 48:
core_model = "90";
break;
case 44:
core_model = "88";
break;
case 40:
core_model = "85";
break;
case 32:
core_model = "80";
break;
case 24:
core_model = "70";
break;
case 16:
core_model = "60";
break;
case 15:
core_model = "58";
break;
case 14:
core_model = "55";
break;
case 13:
core_model = "52";
break;
case 12:
core_model = "50";
break;
case 11:
core_model = "48";
break;
case 10:
core_model = "45";
break;
case 9:
core_model = "42";
break;
case 8:
core_model = "40";
break;
case 7:
core_model = "38";
break;
case 6:
core_model = "34";
break;
case 5:
core_model = "32";
break;
case 4:
core_model = "30";
break;
case 3:
core_model = "25";
break;
case 2:
core_model = "20";
break;
case 1:
core_model = "10";
break;
default:
core_model = "XX";
break;
}
/* Now figure out the family, the first two digits */
switch ((chip_id >> 8) & 0xff) {
case 0: /* CN38XX, CN37XX or CN36XX */
if (l2d_fus3) {
/*
* For some unknown reason, the 16 core one is
* called 37 instead of 36.
*/
if (num_cores >= 16)
family = "37";
else
family = "36";
} else
family = "38";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.X");
break;
case 1:
strcpy(pass, "2.X");
break;
case 3:
strcpy(pass, "3.X");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 1: /* CN31XX or CN3020 */
if ((chip_id & 0x10) || l2d_fus3)
family = "30";
else
family = "31";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.0");
break;
case 2:
strcpy(pass, "1.1");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 2: /* CN3010 or CN3005 */
family = "30";
/* A chip with half cache is an 05 */
if (l2d_fus3)
core_model = "05";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.0");
break;
case 2:
strcpy(pass, "1.1");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 3: /* CN58XX */
family = "58";
/* Special case. 4 core, half cache (CP with half cache) */
if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
core_model = "29";
/* Pass 1 uses different encodings for pass numbers */
if ((chip_id & 0xFF) < 0x8) {
switch (chip_id & 0x3) {
case 0:
strcpy(pass, "1.0");
break;
case 1:
strcpy(pass, "1.1");
break;
case 3:
strcpy(pass, "1.2");
break;
default:
strcpy(pass, "1.X");
break;
}
}
break;
case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
if (fus_dat2.cn56xx.raid_en) {
if (l2d_fus3)
family = "55";
else
family = "57";
if (fus_dat2.cn56xx.nocrypto)
suffix = "SP";
else
suffix = "SSP";
} else {
if (fus_dat2.cn56xx.nocrypto)
suffix = "CP";
else {
suffix = "NSP";
if (fus_dat3.s.nozip)
suffix = "SCP";
if (fus_dat3.cn38xx.bar2_en)
suffix = "NSPB2";
}
if (l2d_fus3)
family = "54";
else
family = "56";
}
break;
case 6: /* CN50XX */
family = "50";
break;
case 7: /* CN52XX */
if (l2d_fus3)
family = "51";
else
family = "52";
break;
case 0x93: /* CN61XX */
family = "61";
if (fus_dat2.cn61xx.nocrypto && fus_dat2.cn61xx.dorm_crypto)
suffix = "AP";
if (fus_dat2.cn61xx.nocrypto)
suffix = "CP";
else if (fus_dat2.cn61xx.dorm_crypto)
suffix = "DAP";
else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
break;
case 0x90: /* CN63XX */
family = "63";
if (fus_dat3.s.l2c_crip == 2)
family = "62";
if (num_cores == 6) /* Other core counts match generic */
core_model = "35";
if (fus_dat2.cn63xx.nocrypto)
suffix = "CP";
else if (fus_dat2.cn63xx.dorm_crypto)
suffix = "DAP";
else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
break;
case 0x92: /* CN66XX */
family = "66";
if (num_cores == 6) /* Other core counts match generic */
core_model = "35";
if (fus_dat2.cn66xx.nocrypto && fus_dat2.cn66xx.dorm_crypto)
suffix = "AP";
if (fus_dat2.cn66xx.nocrypto)
suffix = "CP";
else if (fus_dat2.cn66xx.dorm_crypto)
suffix = "DAP";
else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
break;
case 0x91: /* CN68XX */
family = "68";
if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn61xx.nozip)
suffix = "CP";
else if (fus_dat2.cn68xx.dorm_crypto)
suffix = "DAP";
else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else if (fus_dat2.cn68xx.nocrypto)
suffix = "SP";
else
suffix = "AAP";
break;
case 0x94: /* CNF71XX */
family = "F71";
if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
break;
case 0x95: /* CN78XX */
if (num_cores == 6) /* Other core counts match generic */
core_model = "35";
if (OCTEON_IS_MODEL(OCTEON_CN76XX))
family = "76";
else
family = "78";
if (fus_dat3.cn78xx.l2c_crip == 2)
family = "77";
if (fus_dat3.cn78xx.nozip
&& fus_dat3.cn78xx.nodfa_dte
&& fus_dat3.cn78xx.nohna_dte) {
if (fus_dat3.cn78xx.nozip &&
!fus_dat2.cn78xx.raid_en &&
fus_dat3.cn78xx.nohna_dte) {
suffix = "CP";
} else {
suffix = "SCP";
}
} else if (fus_dat2.cn78xx.raid_en == 0)
suffix = "HCP";
else
suffix = "AAP";
break;
case 0x96: /* CN70XX */
family = "70";
if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
family = "71";
if (fus_dat2.cn70xx.nocrypto)
suffix = "CP";
else if (fus_dat3.cn70xx.nodfa_dte)
suffix = "SCP";
else
suffix = "AAP";
break;
case 0x97: /* CN73XX */
if (num_cores == 6) /* Other core counts match generic */
core_model = "35";
family = "73";
if (fus_dat3.cn73xx.l2c_crip == 2)
family = "72";
if (fus_dat3.cn73xx.nozip
&& fus_dat3.cn73xx.nodfa_dte
&& fus_dat3.cn73xx.nohna_dte) {
if (!fus_dat2.cn73xx.raid_en)
suffix = "CP";
else
suffix = "SCP";
} else
suffix = "AAP";
break;
case 0x98: /* CN75XX */
family = "F75";
if (fus_dat3.cn78xx.nozip
&& fus_dat3.cn78xx.nodfa_dte
&& fus_dat3.cn78xx.nohna_dte)
suffix = "SCP";
else
suffix = "AAP";
break;
default:
family = "XX";
core_model = "XX";
strcpy(pass, "X.X");
suffix = "XXX";
break;
}
clock_mhz = octeon_get_clock_rate() / 1000000;
if (family[0] != '3') {
int fuse_base = 384 / 8;
if (family[0] == '6')
fuse_base = 832 / 8;
/* Check for model in fuses, overrides normal decode */
/* This is _not_ valid for Octeon CN3XXX models */
fuse_data |= cvmx_fuse_read_byte(fuse_base + 3);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(fuse_base + 2);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(fuse_base + 1);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(fuse_base);
if (fuse_data & 0x7ffff) {
int model = fuse_data & 0x3fff;
int suffix = (fuse_data >> 14) & 0x1f;
if (suffix && model) {
/* Have both number and suffix in fuses, so both */
sprintf(fuse_model, "%d%c", model, 'A' + suffix - 1);
core_model = "";
family = fuse_model;
} else if (suffix && !model) {
/* Only have suffix, so add suffix to 'normal' model number */
sprintf(fuse_model, "%s%c", core_model, 'A' + suffix - 1);
core_model = fuse_model;
} else {
/* Don't have suffix, so just use model from fuses */
sprintf(fuse_model, "%d", model);
core_model = "";
family = fuse_model;
}
}
}
sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
return buffer;
}
/**
* Given the chip processor ID from COP0, this function returns a
* string representing the chip model number. The string is of the
* form CNXXXXpX.X-FREQ-SUFFIX.
* - XXXX = The chip model number
* - X.X = Chip pass number
* - FREQ = Current frequency in Mhz
* - SUFFIX = NSP, EXP, SCP, SSP, or CP
*
* @chip_id: Chip ID
*
* Returns Model string
*/
const char *__init octeon_model_get_string(uint32_t chip_id)
{
static char buffer[32];
return octeon_model_get_string_buffer(chip_id, buffer);
}
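/*
 * Illustrative sketch, not part of the original sources: printing the model
 * string during early boot. The chip_id would normally come from the CP0
 * PRId register; it is taken as a parameter here to avoid assuming a
 * particular accessor.
 */
#if 0
static void __init example_print_model(uint32_t chip_id)
{
	cvmx_dprintf("Octeon model: %s\n", octeon_model_get_string(chip_id));
}
#endif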
| linux-master | arch/mips/cavium-octeon/executive/octeon-model.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Utility functions to decode Octeon's RSL_INT_BLOCKS
* interrupts into error messages.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#ifndef PRINT_ERROR
#define PRINT_ERROR(format, ...)
#endif
void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
/**
* Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
* CN58XX.
*
* @block: Interface to enable 0-1
*/
void __cvmx_interrupt_asxx_enable(int block)
{
int mask;
union cvmx_asxx_int_en csr;
/*
* CN38XX and CN58XX have two interfaces with 4 ports per
* interface. All other chips have a max of 3 ports on
* interface 0
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
mask = 0xf; /* Set enables for 4 ports */
else
mask = 0x7; /* Set enables for 3 ports */
/* Enable interface interrupts */
csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
csr.s.txpsh = mask;
csr.s.txpop = mask;
csr.s.ovrflw = mask;
cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
}
/**
* Enable GMX error reporting for the supplied interface
*
* @interface: Interface to enable
*/
void __cvmx_interrupt_gmxx_enable(int interface)
{
union cvmx_gmxx_inf_mode mode;
union cvmx_gmxx_tx_int_en gmx_tx_int_en;
int num_ports;
int index;
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
if (mode.s.en) {
switch (mode.cn52xx.mode) {
case 1: /* XAUI */
num_ports = 1;
break;
case 2: /* SGMII */
case 3: /* PICMG */
num_ports = 4;
break;
default: /* Disabled */
num_ports = 0;
break;
}
} else
num_ports = 0;
} else {
if (mode.s.en) {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/*
* SPI on CN38XX and CN58XX report all
* errors through port 0. RGMII needs
* to check all 4 ports
*/
if (mode.s.type)
num_ports = 1;
else
num_ports = 4;
} else {
/*
* CN30XX, CN31XX, and CN50XX have two
* or three ports. GMII and MII has 2,
* RGMII has three
*/
if (mode.s.type)
num_ports = 2;
else
num_ports = 3;
}
} else
num_ports = 0;
}
gmx_tx_int_en.u64 = 0;
if (num_ports) {
if (OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX))
gmx_tx_int_en.cn38xx.ncb_nxa = 1;
gmx_tx_int_en.s.pko_nxa = 1;
}
gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
for (index = 0; index < num_ports; index++)
__cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* This module provides system/board/application information obtained
* by the bootloader.
*/
#include <linux/export.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-sysinfo.h>
/*
* This structure defines the private state maintained by sysinfo module.
*/
static struct cvmx_sysinfo sysinfo; /* system information */
/*
* Returns the application information as obtained
* by the bootloader. This provides the core mask of the cores
* running the same application image, as well as the physical
* memory regions available to the core.
*/
struct cvmx_sysinfo *cvmx_sysinfo_get(void)
{
return &sysinfo;
}
EXPORT_SYMBOL(cvmx_sysinfo_get);
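/*
 * Illustrative sketch, not part of the original sources: the board type and
 * CPU clock held in this structure are what the other executive files key
 * off (see the cvmx_sysinfo_get() calls elsewhere in this directory).
 */
#if 0
static void example_show_sysinfo(void)
{
	struct cvmx_sysinfo *info = cvmx_sysinfo_get();

	cvmx_dprintf("board type %d, cpu clock %llu Hz\n",
		     (int)info->board_type,
		     (unsigned long long)info->cpu_clock_hz);
}
#endif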
| linux-master | arch/mips/cavium-octeon/executive/cvmx-sysinfo.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Support library for the hardware Packet Output unit.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
/*
* Internal state of packet output
*/
static int __cvmx_pko_int(int interface, int index)
{
switch (interface) {
case 0:
return index;
case 1:
return 4;
case 2:
return index + 0x08;
case 3:
return index + 0x0c;
case 4:
return index + 0x10;
case 5:
return 0x1c;
case 6:
return 0x1d;
case 7:
return 0x1e;
case 8:
return 0x1f;
default:
return -1;
}
}
static void __cvmx_pko_iport_config(int pko_port)
{
int queue;
const int num_queues = 1;
const int base_queue = pko_port;
const int static_priority_end = 1;
const int static_priority_base = 1;
for (queue = 0; queue < num_queues; queue++) {
union cvmx_pko_mem_iqueue_ptrs config;
cvmx_cmd_queue_result_t cmd_res;
uint64_t *buf_ptr;
config.u64 = 0;
config.s.index = queue;
config.s.qid = base_queue + queue;
config.s.ipid = pko_port;
config.s.tail = (queue == (num_queues - 1));
config.s.s_tail = (queue == static_priority_end);
config.s.static_p = (static_priority_base >= 0);
config.s.static_q = (queue <= static_priority_end);
config.s.qos_mask = 0xff;
cmd_res = cvmx_cmd_queue_initialize(
CVMX_CMD_QUEUE_PKO(base_queue + queue),
CVMX_PKO_MAX_QUEUE_DEPTH,
CVMX_FPA_OUTPUT_BUFFER_POOL,
(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
WARN(cmd_res,
"%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
__func__, (int)cmd_res, pko_port, base_queue,
num_queues, queue);
buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
CVMX_CMD_QUEUE_PKO(base_queue + queue));
config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
CVMX_SYNCWS;
cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
}
}
static void __cvmx_pko_queue_alloc_o68(void)
{
int port;
for (port = 0; port < 48; port++)
__cvmx_pko_iport_config(port);
}
static void __cvmx_pko_port_map_o68(void)
{
int port;
int interface, index;
cvmx_helper_interface_mode_t mode;
union cvmx_pko_mem_iport_ptrs config;
/*
* Initialize every iport with the invalid eid.
*/
config.u64 = 0;
config.s.eid = 31; /* Invalid */
for (port = 0; port < 128; port++) {
config.s.ipid = port;
cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
}
/*
* Set up PKO_MEM_IPORT_PTRS
*/
for (port = 0; port < 48; port++) {
interface = cvmx_helper_get_interface_num(port);
index = cvmx_helper_get_interface_index_num(port);
mode = cvmx_helper_interface_get_mode(interface);
if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
continue;
config.s.ipid = port;
config.s.qos_mask = 0xff;
config.s.crc = 1;
config.s.min_pkt = 1;
config.s.intr = __cvmx_pko_int(interface, index);
config.s.eid = config.s.intr;
config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
index : port;
cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
}
}
static void __cvmx_pko_chip_init(void)
{
int i;
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
__cvmx_pko_port_map_o68();
__cvmx_pko_queue_alloc_o68();
return;
}
/*
* Initialize queues
*/
for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
const uint64_t priority = 8;
cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
&priority);
}
}
/*
* Call before any other calls to initialize the packet
* output system. This does chip global config, and should only be
* done by one core.
*/
void cvmx_pko_initialize_global(void)
{
union cvmx_pko_reg_cmd_buf config;
/*
* Set the size of the PKO command buffers to an odd number of
* 64bit words. This allows the normal two word send to stay
* aligned and never span a command word buffer.
*/
config.u64 = 0;
config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
/*
* Chip-specific setup.
*/
__cvmx_pko_chip_init();
/*
* If we aren't using all of the queues optimize PKO's
* internal memory.
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)) {
int num_interfaces = cvmx_helper_get_number_of_interfaces();
int last_port =
cvmx_helper_get_last_ipd_port(num_interfaces - 1);
int max_queues =
cvmx_pko_get_base_queue(last_port) +
cvmx_pko_get_num_queues(last_port);
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
if (max_queues <= 32)
cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
else if (max_queues <= 64)
cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
} else {
if (max_queues <= 64)
cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
else if (max_queues <= 128)
cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
}
}
}
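/*
 * Illustrative sketch, not part of the original sources: the expected global
 * bring-up order for PKO. The port number, queue count and priority are
 * assumptions; in practice the helper layer derives them from the detected
 * interfaces.
 */
#if 0
static void example_pko_bringup(void)
{
	const uint64_t priority[1] = { 8 };

	cvmx_pko_initialize_global();
	/* one queue on port 0 with round-robin weight 8 */
	cvmx_pko_config_port(0, cvmx_pko_get_base_queue(0), 1, priority);
	cvmx_pko_enable();
}
#endif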
/*
* Enables the packet output hardware. It must already be
* configured.
*/
void cvmx_pko_enable(void)
{
union cvmx_pko_reg_flags flags;
flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
if (flags.s.ena_pko)
cvmx_dprintf
("Warning: Enabling PKO when PKO already enabled.\n");
flags.s.ena_dwb = 1;
flags.s.ena_pko = 1;
/*
* always enable big endian for 3-word command. Does nothing
* for 2-word.
*/
flags.s.store_be = 1;
cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
}
/*
* Disables the packet output. Does not affect any configuration.
*/
void cvmx_pko_disable(void)
{
union cvmx_pko_reg_flags pko_reg_flags;
pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
pko_reg_flags.s.ena_pko = 0;
cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
}
EXPORT_SYMBOL_GPL(cvmx_pko_disable);
/*
* Reset the packet output.
*/
static void __cvmx_pko_reset(void)
{
union cvmx_pko_reg_flags pko_reg_flags;
pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
pko_reg_flags.s.reset = 1;
cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
}
/*
* Shutdown and free resources required by packet output.
*/
void cvmx_pko_shutdown(void)
{
union cvmx_pko_mem_queue_ptrs config;
int queue;
cvmx_pko_disable();
for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
config.u64 = 0;
config.s.tail = 1;
config.s.index = 0;
config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
config.s.queue = queue & 0x7f;
config.s.qos_mask = 0;
config.s.buf_ptr = 0;
if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
union cvmx_pko_reg_queue_ptrs1 config1;
config1.u64 = 0;
config1.s.qid7 = queue >> 7;
cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
}
cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
}
__cvmx_pko_reset();
}
EXPORT_SYMBOL_GPL(cvmx_pko_shutdown);
/*
* Configure a output port and the associated queues for use.
*
* @port: Port to configure.
* @base_queue: First queue number to associate with this port.
* @num_queues: Number of queues to associate with this port
* @priority: Array of priority levels for each queue. Values are
 * allowed to be 0-8. A value of 8 gets 8 times the traffic
 * of a value of 1. A value of 0 indicates that the queue does
 * not participate in any rounds. These priorities can be changed
* on the fly while the pko is enabled. A priority of 9
* indicates that static priority should be used. If static
* priority is used all queues with static priority must be
* contiguous starting at the base_queue, and lower numbered
* queues have higher priority than higher numbered queues.
* There must be num_queues elements in the array.
*/
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
uint64_t num_queues,
const uint64_t priority[])
{
cvmx_pko_status_t result_code;
uint64_t queue;
union cvmx_pko_mem_queue_ptrs config;
union cvmx_pko_reg_queue_ptrs1 config1;
int static_priority_base = -1;
int static_priority_end = -1;
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
return CVMX_PKO_SUCCESS;
if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
&& (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
(unsigned long long)port);
return CVMX_PKO_INVALID_PORT;
}
if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
cvmx_dprintf
("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
(unsigned long long)(base_queue + num_queues));
return CVMX_PKO_INVALID_QUEUE;
}
if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
/*
* Validate the static queue priority setup and set
* static_priority_base and static_priority_end
* accordingly.
*/
for (queue = 0; queue < num_queues; queue++) {
/* Find first queue of static priority */
if (static_priority_base == -1
&& priority[queue] ==
CVMX_PKO_QUEUE_STATIC_PRIORITY)
static_priority_base = queue;
/* Find last queue of static priority */
if (static_priority_base != -1
&& static_priority_end == -1
&& priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
&& queue)
static_priority_end = queue - 1;
else if (static_priority_base != -1
&& static_priority_end == -1
&& queue == num_queues - 1)
/* all queues are static priority */
static_priority_end = queue;
/*
* Check to make sure all static priority
* queues are contiguous. Also catches some
* cases of static priorities not starting at
* queue 0.
*/
if (static_priority_end != -1
&& (int)queue > static_priority_end
&& priority[queue] ==
CVMX_PKO_QUEUE_STATIC_PRIORITY) {
cvmx_dprintf("ERROR: cvmx_pko_config_port: "
"Static priority queues aren't "
"contiguous or don't start at "
"base queue. q: %d, eq: %d\n",
(int)queue, static_priority_end);
return CVMX_PKO_INVALID_PRIORITY;
}
}
if (static_priority_base > 0) {
cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
"priority queues don't start at base "
"queue. sq: %d\n",
static_priority_base);
return CVMX_PKO_INVALID_PRIORITY;
}
#if 0
cvmx_dprintf("Port %d: Static priority queue base: %d, "
"end: %d\n", port,
static_priority_base, static_priority_end);
#endif
}
/*
* At this point, static_priority_base and static_priority_end
* are either both -1, or are valid start/end queue
* numbers.
*/
result_code = CVMX_PKO_SUCCESS;
#ifdef PKO_DEBUG
cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
#endif
for (queue = 0; queue < num_queues; queue++) {
uint64_t *buf_ptr = NULL;
config1.u64 = 0;
config1.s.idx3 = queue >> 3;
config1.s.qid7 = (base_queue + queue) >> 7;
config.u64 = 0;
config.s.tail = queue == (num_queues - 1);
config.s.index = queue;
config.s.port = port;
config.s.queue = base_queue + queue;
if (!cvmx_octeon_is_pass1()) {
config.s.static_p = static_priority_base >= 0;
config.s.static_q = (int)queue <= static_priority_end;
config.s.s_tail = (int)queue == static_priority_end;
}
/*
* Convert the priority into an enable bit field. Try
* to space the bits out evenly so the packet don't
* get grouped up
*/
switch ((int)priority[queue]) {
case 0:
config.s.qos_mask = 0x00;
break;
case 1:
config.s.qos_mask = 0x01;
break;
case 2:
config.s.qos_mask = 0x11;
break;
case 3:
config.s.qos_mask = 0x49;
break;
case 4:
config.s.qos_mask = 0x55;
break;
case 5:
config.s.qos_mask = 0x57;
break;
case 6:
config.s.qos_mask = 0x77;
break;
case 7:
config.s.qos_mask = 0x7f;
break;
case 8:
config.s.qos_mask = 0xff;
break;
case CVMX_PKO_QUEUE_STATIC_PRIORITY:
if (!cvmx_octeon_is_pass1()) {
config.s.qos_mask = 0xff;
break;
}
fallthrough; /* to the error case, when Pass 1 */
default:
cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
"priority %llu\n",
(unsigned long long)priority[queue]);
config.s.qos_mask = 0xff;
result_code = CVMX_PKO_INVALID_PRIORITY;
break;
}
if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
cvmx_cmd_queue_result_t cmd_res =
cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
(base_queue + queue),
CVMX_PKO_MAX_QUEUE_DEPTH,
CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
-
CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
* 8);
if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
switch (cmd_res) {
case CVMX_CMD_QUEUE_NO_MEMORY:
cvmx_dprintf("ERROR: "
"cvmx_pko_config_port: "
"Unable to allocate "
"output buffer.\n");
return CVMX_PKO_NO_MEMORY;
case CVMX_CMD_QUEUE_ALREADY_SETUP:
cvmx_dprintf
("ERROR: cvmx_pko_config_port: Port already setup.\n");
return CVMX_PKO_PORT_ALREADY_SETUP;
case CVMX_CMD_QUEUE_INVALID_PARAM:
default:
cvmx_dprintf
("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
}
}
buf_ptr =
(uint64_t *)
cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
(base_queue + queue));
config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
} else
config.s.buf_ptr = 0;
CVMX_SYNCWS;
if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
}
return result_code;
}
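/*
 * Illustrative sketch, not part of the original sources: a priority array
 * for a port with four queues where the first two queues use static
 * priority (and therefore must start at the base queue, as checked above)
 * and the last two share the remaining bandwidth 2:1. The port and base
 * queue numbers are assumptions.
 */
#if 0
static void example_pko_priorities(void)
{
	const uint64_t priority[4] = {
		CVMX_PKO_QUEUE_STATIC_PRIORITY,
		CVMX_PKO_QUEUE_STATIC_PRIORITY,
		8,	/* weighted round-robin */
		4,
	};

	cvmx_pko_config_port(2, 8, 4, priority);
}
#endif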
#ifdef PKO_DEBUG
/*
* Show map of ports -> queues for different cores.
*/
void cvmx_pko_show_queue_map()
{
int core, port;
int pko_output_ports = 36;
cvmx_dprintf("port");
for (port = 0; port < pko_output_ports; port++)
cvmx_dprintf("%3d ", port);
cvmx_dprintf("\n");
for (core = 0; core < CVMX_MAX_CORES; core++) {
cvmx_dprintf("\n%2d: ", core);
for (port = 0; port < pko_output_ports; port++) {
cvmx_dprintf("%3d ",
cvmx_pko_get_base_queue_per_core(port,
core));
}
}
cvmx_dprintf("\n");
}
#endif
/*
* Rate limit a PKO port to a max packets/sec. This function is only
* supported on CN51XX and higher, excluding CN58XX.
*
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
* limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
{
union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
pko_mem_port_rate0.u64 = 0;
pko_mem_port_rate0.s.pid = port;
pko_mem_port_rate0.s.rate_pkt =
cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
/* No cost per word since we are limited by packets/sec, not bits/sec */
pko_mem_port_rate0.s.rate_word = 0;
pko_mem_port_rate1.u64 = 0;
pko_mem_port_rate1.s.pid = port;
pko_mem_port_rate1.s.rate_lim =
((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
return 0;
}
/*
* Rate limit a PKO port to a max bits/sec. This function is only
* supported on CN51XX and higher, excluding CN58XX.
*
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
* limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
{
union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
pko_mem_port_rate0.u64 = 0;
pko_mem_port_rate0.s.pid = port;
/*
* Each packet has a 12 bytes of interframe gap, an 8 byte
* preamble, and a 4 byte CRC. These are not included in the
	 * per word count. Multiply by 8 to convert to bits and divide
* by 256 for limit granularity.
*/
pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
/* Each 8 byte word has 64bits */
pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
pko_mem_port_rate1.u64 = 0;
pko_mem_port_rate1.s.pid = port;
pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
return 0;
}
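/*
 * Illustrative sketch, not part of the original sources: capping a port by
 * packet rate or by bit rate. As a worked example of the math above, with a
 * 1 GHz core clock the packets/sec variant programs
 * rate_pkt = 1000000000 / 10000 / 16 = 6250. The port, rate and burst
 * values are assumptions.
 */
#if 0
static void example_pko_rate_limits(int port)
{
	/* at most ~10000 packets/sec, bursting up to 32 packets */
	cvmx_pko_rate_limit_packets(port, 10000, 32);

	/* or: at most ~100 Mbit/sec, bursting up to 64 kbit */
	cvmx_pko_rate_limit_bits(port, 100000000ull, 65536);
}
#endif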
| linux-master | arch/mips/cavium-octeon/executive/cvmx-pko.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Support functions for managing command queues used for
* various hardware blocks.
*/
#include <linux/kernel.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
/*
* This application uses this pointer to access the global queue
* state. It points to a bootmem named block.
*/
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
/*
* Initialize the Global queue state pointer.
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
char *alloc_name = "cvmx_cmd_queues";
extern uint64_t octeon_reserve32_memory;
if (likely(__cvmx_cmd_queue_state_ptr))
return CVMX_CMD_QUEUE_SUCCESS;
if (octeon_reserve32_memory)
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
octeon_reserve32_memory,
octeon_reserve32_memory +
(CONFIG_CAVIUM_RESERVE32 <<
20) - 1, 128, alloc_name);
else
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
128,
alloc_name);
if (__cvmx_cmd_queue_state_ptr)
memset(__cvmx_cmd_queue_state_ptr, 0,
sizeof(*__cvmx_cmd_queue_state_ptr));
else {
struct cvmx_bootmem_named_block_desc *block_desc =
cvmx_bootmem_find_named_block(alloc_name);
if (block_desc)
__cvmx_cmd_queue_state_ptr =
cvmx_phys_to_ptr(block_desc->base_addr);
else {
cvmx_dprintf
("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
alloc_name);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
}
return CVMX_CMD_QUEUE_SUCCESS;
}
/*
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @queue_id: Hardware command queue to initialize.
* @max_depth: Maximum outstanding commands that can be queued.
* @fpa_pool: FPA pool the command queues should come from.
* @pool_size: Size of each buffer in the FPA pool (bytes)
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
int max_depth, int fpa_pool,
int pool_size)
{
__cvmx_cmd_queue_state_t *qstate;
cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
if (result != CVMX_CMD_QUEUE_SUCCESS)
return result;
qstate = __cvmx_cmd_queue_get_state(queue_id);
if (qstate == NULL)
return CVMX_CMD_QUEUE_INVALID_PARAM;
/*
* We artificially limit max_depth to 1<<20 words. It is an
* arbitrary limit.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
if ((max_depth < 0) || (max_depth > 1 << 20))
return CVMX_CMD_QUEUE_INVALID_PARAM;
} else if (max_depth != 0)
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((fpa_pool < 0) || (fpa_pool > 7))
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((pool_size < 128) || (pool_size > 65536))
return CVMX_CMD_QUEUE_INVALID_PARAM;
/* See if someone else has already initialized the queue */
if (qstate->base_ptr_div128) {
if (max_depth != (int)qstate->max_depth) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"max_depth (%d).\n",
(int)qstate->max_depth);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (fpa_pool != qstate->fpa_pool) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"FPA pool (%u).\n",
qstate->fpa_pool);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Queue already initialized with different "
"FPA pool size (%u).\n",
(qstate->pool_size_m1 + 1) << 3);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_ALREADY_SETUP;
} else {
union cvmx_fpa_ctl_status status;
void *buffer;
status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
if (!status.s.enb) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"FPA is not enabled.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
buffer = cvmx_fpa_alloc(fpa_pool);
if (buffer == NULL) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
"Unable to allocate initial buffer.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
memset(qstate, 0, sizeof(*qstate));
qstate->max_depth = max_depth;
qstate->fpa_pool = fpa_pool;
qstate->pool_size_m1 = (pool_size >> 3) - 1;
qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
/*
* We zeroed the now serving field so we need to also
* zero the ticket.
*/
__cvmx_cmd_queue_state_ptr->
ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_SUCCESS;
}
}
/*
 * Shut down a queue and free its command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @queue_id: Queue to shutdown
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
if (qptr == NULL) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
"get queue information.\n");
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (cvmx_cmd_queue_length(queue_id) > 0) {
cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
"has data in it.\n");
return CVMX_CMD_QUEUE_FULL;
}
__cvmx_cmd_queue_lock(queue_id, qptr);
if (qptr->base_ptr_div128) {
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t) qptr->base_ptr_div128 << 7),
qptr->fpa_pool, 0);
qptr->base_ptr_div128 = 0;
}
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_SUCCESS;
}
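/*
 * Illustrative sketch, not part of the original sources: a guarded teardown
 * matching the "queue still has data" error path above. The caller is
 * responsible for stopping the hardware and letting the queue drain first.
 */
#if 0
static int example_teardown_queue(cvmx_cmd_queue_id_t queue_id)
{
	if (cvmx_cmd_queue_length(queue_id) > 0)
		return -1;

	if (cvmx_cmd_queue_shutdown(queue_id) != CVMX_CMD_QUEUE_SUCCESS)
		return -1;

	return 0;
}
#endif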
/*
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @queue_id: Hardware command queue to query
*
* Returns Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
if (CVMX_ENABLE_PARAMETER_CHECKING) {
if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
*/
switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
case CVMX_CMD_QUEUE_PKO_BASE:
/*
* FIXME: Need atomic lock on
* CVMX_PKO_REG_READ_IDX. Right now we are normally
* called with the queue lock, so that is a SLIGHT
* amount of protection.
*/
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
union cvmx_pko_mem_debug9 debug9;
debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
return debug9.cn38xx.doorbell;
} else {
union cvmx_pko_mem_debug8 debug8;
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
return debug8.cn50xx.doorbell;
}
case CVMX_CMD_QUEUE_ZIP:
case CVMX_CMD_QUEUE_DFA:
case CVMX_CMD_QUEUE_RAID:
/* FIXME: Implement other lengths */
return 0;
case CVMX_CMD_QUEUE_DMA_BASE:
{
union cvmx_npei_dmax_counts dmax_counts;
dmax_counts.u64 =
cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
(queue_id & 0x7));
return dmax_counts.s.dbell;
}
case CVMX_CMD_QUEUE_END:
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/*
* Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @queue_id: Command queue to query
*
* Returns Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
if (qptr && qptr->base_ptr_div128)
return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
else
return NULL;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Implementation of the Level 2 Cache (L2C) control,
* measurement, and debugging facilities.
*/
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
/*
* This spinlock is used internally to ensure that only one core is
* performing certain L2 operations at a time.
*
* NOTE: This only protects calls from within a single application -
* if multiple applications or operating systems are running, then it
* is up to the user program to coordinate between them.
*/
static cvmx_spinlock_t cvmx_l2c_spinlock;
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
uint32_t field;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
if (OCTEON_IS_MODEL(OCTEON_CN63XX))
return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
/*
* Use the lower two bits of the coreNumber to determine the
* bit offset of the UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
/*
* Return the UMSK[] field from the appropriate L2C_SPAR
* register based on the coreNumber.
*/
switch (core & 0xC) {
case 0x0:
return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
case 0x4:
return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
case 0x8:
return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
case 0xC:
return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
}
return 0;
}
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
uint32_t field;
uint32_t valid_mask;
valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error on some chips */
if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
return 0;
}
/*
* Use the lower two bits of core to determine the bit offset of the
* UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
/*
* Assign the new mask setting to the UMSK[] field in the appropriate
* L2C_SPAR register based on the core_num.
*
*/
switch (core & 0xC) {
case 0x0:
cvmx_write_csr(CVMX_L2C_SPAR0,
(cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
mask << field);
break;
case 0x4:
cvmx_write_csr(CVMX_L2C_SPAR1,
(cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
mask << field);
break;
case 0x8:
cvmx_write_csr(CVMX_L2C_SPAR2,
(cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
mask << field);
break;
case 0xC:
cvmx_write_csr(CVMX_L2C_SPAR3,
(cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
mask << field);
break;
}
return 0;
}
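/*
 * Example (illustrative sketch, under the UMSK semantics described above,
 * where a set bit blocks a way for the core): restrict core 0 to the lower
 * half of the L2 ways by blocking the upper half, then read the partition
 * back.
 *
 *	int assoc = cvmx_l2c_get_num_assoc();
 *	uint32_t block_upper = ((1 << assoc) - 1) & ~((1 << (assoc / 2)) - 1);
 *
 *	if (cvmx_l2c_set_core_way_partition(0, block_upper) == 0)
 *		cvmx_dprintf("core 0 UMSK: 0x%x\n",
 *			     cvmx_l2c_get_core_way_partition(0));
 */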
int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
uint32_t valid_mask;
valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error on some chips */
if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
if (OCTEON_IS_MODEL(OCTEON_CN63XX))
cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
else
cvmx_write_csr(CVMX_L2C_SPAR4,
(cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX))
return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
else
return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
uint32_t clear_on_read)
{
if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
union cvmx_l2c_pfctl pfctl;
pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
switch (counter) {
case 0:
pfctl.s.cnt0sel = event;
pfctl.s.cnt0ena = 1;
pfctl.s.cnt0rdclr = clear_on_read;
break;
case 1:
pfctl.s.cnt1sel = event;
pfctl.s.cnt1ena = 1;
pfctl.s.cnt1rdclr = clear_on_read;
break;
case 2:
pfctl.s.cnt2sel = event;
pfctl.s.cnt2ena = 1;
pfctl.s.cnt2rdclr = clear_on_read;
break;
case 3:
default:
pfctl.s.cnt3sel = event;
pfctl.s.cnt3ena = 1;
pfctl.s.cnt3rdclr = clear_on_read;
break;
}
cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
} else {
union cvmx_l2c_tadx_prf l2c_tadx_prf;
int tad;
cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
if (clear_on_read)
cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
switch (counter) {
case 0:
l2c_tadx_prf.s.cnt0sel = event;
break;
case 1:
l2c_tadx_prf.s.cnt1sel = event;
break;
case 2:
l2c_tadx_prf.s.cnt2sel = event;
break;
default:
case 3:
l2c_tadx_prf.s.cnt3sel = event;
break;
}
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
l2c_tadx_prf.u64);
}
}
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
switch (counter) {
case 0:
if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
return cvmx_read_csr(CVMX_L2C_PFC0);
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
return counter;
}
case 1:
if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
return cvmx_read_csr(CVMX_L2C_PFC1);
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
return counter;
}
case 2:
if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
return cvmx_read_csr(CVMX_L2C_PFC2);
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
return counter;
}
case 3:
default:
if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
return cvmx_read_csr(CVMX_L2C_PFC3);
else {
uint64_t counter = 0;
int tad;
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
return counter;
}
}
}
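/*
 * Example (illustrative sketch): program counter 0 for an event of
 * interest and read the accumulated count after a workload has run.
 * SOME_CVMX_L2C_EVENT is only a placeholder for a real value from
 * enum cvmx_l2c_event.
 *
 *	cvmx_l2c_config_perf(0, SOME_CVMX_L2C_EVENT, 0);
 *	... run the workload ...
 *	uint64_t count = cvmx_l2c_read_perf(0);
 */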
/*
* @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
*
* @addr: Address of base of memory region to read into L2 cache
* @len: Length (in bytes) of region to fault in
*/
static void fault_in(uint64_t addr, int len)
{
char *ptr;
/*
* Adjust addr and length so we get all cache lines even for
* small ranges spanning two cache lines.
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
ptr = cvmx_phys_to_ptr(addr);
/*
* Invalidate L1 cache to make sure all loads result in data
* being in L2.
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
READ_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
}
int cvmx_l2c_lock_line(uint64_t addr)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
uint64_t assoc = cvmx_l2c_get_num_assoc();
uint64_t tag = addr >> shift;
uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
uint64_t way;
union cvmx_l2c_tadx_tag l2c_tadx_tag;
CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);
/* Make sure we were able to lock the line */
for (way = 0; way < assoc; way++) {
CVMX_CACHE_LTGL2I(index | (way << shift), 0);
/* make sure CVMX_L2C_TADX_TAG is updated */
CVMX_SYNC;
l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
break;
}
/* Check if a valid line is found */
if (way >= assoc) {
/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
return -1;
}
/* Check if lock bit is not set */
if (!l2c_tadx_tag.s.lock) {
/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
return -1;
}
return way;
} else {
int retval = 0;
union cvmx_l2c_dbg l2cdbg;
union cvmx_l2c_lckbase lckbase;
union cvmx_l2c_lckoff lckoff;
union cvmx_l2t_err l2t_err;
cvmx_spinlock_lock(&cvmx_l2c_spinlock);
l2cdbg.u64 = 0;
lckbase.u64 = 0;
lckoff.u64 = 0;
/* Clear l2t error bits if set */
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
l2t_err.s.lckerr = 1;
l2t_err.s.lckerr2 = 1;
cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
addr &= ~CVMX_CACHE_LINE_MASK;
/* Set this core as debug core */
l2cdbg.s.ppnum = cvmx_get_core_num();
CVMX_SYNC;
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
cvmx_read_csr(CVMX_L2C_LCKOFF);
if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
lckbase.s.lck_base = addr_tmp >> 7;
} else {
lckbase.s.lck_base = addr >> 7;
}
lckbase.s.lck_ena = 1;
cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
/* Make sure it gets there */
cvmx_read_csr(CVMX_L2C_LCKBASE);
fault_in(addr, CVMX_CACHE_LINE_SIZE);
lckbase.s.lck_ena = 0;
cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
/* Make sure it gets there */
cvmx_read_csr(CVMX_L2C_LCKBASE);
/* Stop being debug core */
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
retval = 1; /* We were unable to lock the line */
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return retval;
}
}
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
int retval = 0;
/* Round start/end to cache line boundaries */
len += start & CVMX_CACHE_LINE_MASK;
start &= ~CVMX_CACHE_LINE_MASK;
len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
while (len) {
retval += cvmx_l2c_lock_line(start);
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
return retval;
}
void cvmx_l2c_flush(void)
{
uint64_t assoc, set;
uint64_t n_assoc, n_set;
n_set = cvmx_l2c_get_num_sets();
n_assoc = cvmx_l2c_get_num_assoc();
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
uint64_t address;
/* These may look like constants, but they aren't... */
int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
(assoc << assoc_shift) | (set << set_shift));
CVMX_CACHE_WBIL2I(address, 0);
}
}
} else {
for (set = 0; set < n_set; set++)
for (assoc = 0; assoc < n_assoc; assoc++)
cvmx_l2c_flush_line(assoc, set);
}
}
int cvmx_l2c_unlock_line(uint64_t address)
{
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
int assoc;
union cvmx_l2c_tag tag;
uint32_t tag_addr;
uint32_t index = cvmx_l2c_address_to_index(address);
tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
/*
* For 63XX, we can flush a line by using the physical
* address directly, so finding the cache line used by
* the address is only required to provide the proper
* return value for the function.
*/
for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
tag = cvmx_l2c_get_tag(assoc, index);
if (tag.s.V && (tag.s.addr == tag_addr)) {
CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
return tag.s.L;
}
}
} else {
int assoc;
union cvmx_l2c_tag tag;
uint32_t tag_addr;
uint32_t index = cvmx_l2c_address_to_index(address);
/* Compute portion of address that is stored in tag */
tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
tag = cvmx_l2c_get_tag(assoc, index);
if (tag.s.V && (tag.s.addr == tag_addr)) {
cvmx_l2c_flush_line(assoc, index);
return tag.s.L;
}
}
}
return 0;
}
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
int num_unlocked = 0;
/* Round start/end to cache line boundaries */
len += start & CVMX_CACHE_LINE_MASK;
start &= ~CVMX_CACHE_LINE_MASK;
len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
while (len > 0) {
num_unlocked += cvmx_l2c_unlock_line(start);
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
return num_unlocked;
}
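/*
 * Example (illustrative sketch, assuming buf is physically contiguous
 * since the lock/unlock routines operate on physical addresses): pin a
 * buffer in L2 while it is in heavy use, then release the lines again.
 * The return values of the lock routines aggregate per-line results and
 * their meaning differs between chip models, so they are ignored here.
 *
 *	uint64_t phys = cvmx_ptr_to_phys(buf);
 *
 *	cvmx_l2c_lock_mem_region(phys, len);
 *	... use buf while it stays resident in L2 ...
 *	cvmx_l2c_unlock_mem_region(phys, len);
 */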
/*
* Internal l2c tag types. These are converted to a generic structure
* that can be used on all chips.
*/
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
__BITFIELD_FIELD(uint64_t reserved:40,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:20, /* Phys addr (33..14) */
;))))))
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
__BITFIELD_FIELD(uint64_t reserved:41,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:19, /* Phys addr (33..15) */
;))))))
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
__BITFIELD_FIELD(uint64_t reserved:42,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:18, /* Phys addr (33..16) */
;))))))
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
__BITFIELD_FIELD(uint64_t reserved:43,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:17, /* Phys addr (33..17) */
;))))))
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
__BITFIELD_FIELD(uint64_t reserved:44,
__BITFIELD_FIELD(uint64_t V:1, /* Line valid */
__BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
__BITFIELD_FIELD(uint64_t L:1, /* Line locked */
__BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
__BITFIELD_FIELD(uint64_t addr:16, /* Phys addr (33..18) */
;))))))
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
};
/*
* @INTERNAL
 * Function to read an L2C tag. This code makes the current core
* the 'debug core' for the L2. This code must only be executed by
* 1 core at a time.
*
* @assoc: Association (way) of the tag to dump
* @index: Index of the cacheline
*
* Returns The Octeon model specific tag structure. This is
* translated by a wrapper function to a generic form that is
* easier for applications to use.
*/
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
uint64_t core = cvmx_get_core_num();
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
unsigned long flags;
union cvmx_l2c_dbg debug_val;
debug_val.u64 = 0;
/*
* For low core count parts, the core number is always small
* enough to stay in the correct field and not set any
* reserved bits.
*/
debug_val.s.ppnum = core;
debug_val.s.l2t = 1;
debug_val.s.set = assoc;
local_irq_save(flags);
/*
* Make sure core is quiet (no prefetches, etc.) before
* entering debug mode.
*/
CVMX_SYNC;
/* Flush L1 to make sure debug load misses L1 */
CVMX_DCACHE_INVALIDATE;
/*
* The following must be done in assembly as when in debug
* mode all data loads from L2 return special debug data, not
* normal memory contents. Also, interrupts must be disabled,
* since if an interrupt occurs while in debug mode the ISR
 * will get debug data from all its memory reads instead of
* the contents of memory.
*/
asm volatile (
".set push\n\t"
".set mips64\n\t"
".set noreorder\n\t"
"sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
"ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
"sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
"cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
".set pop"
: [tag_val] "=r" (tag_val)
: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
: "memory");
local_irq_restore(flags);
return tag_val;
}
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
union cvmx_l2c_tag tag;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
}
if ((int)index >= cvmx_l2c_get_num_sets()) {
cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
(int)index, cvmx_l2c_get_num_sets());
return tag;
}
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
union cvmx_l2c_tadx_tag l2c_tadx_tag;
uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
(association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
(index << CVMX_L2C_IDX_ADDR_SHIFT));
/*
* Use L2 cache Index load tag cache instruction, as
* hardware loads the virtual tag for the L2 cache
* block with the contents of L2C_TAD0_TAG
* register.
*/
CVMX_CACHE_LTGL2I(address, 0);
CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
tag.s.V = l2c_tadx_tag.s.valid;
tag.s.D = l2c_tadx_tag.s.dirty;
tag.s.L = l2c_tadx_tag.s.lock;
tag.s.U = l2c_tadx_tag.s.use;
tag.s.addr = l2c_tadx_tag.s.tag;
} else {
union __cvmx_l2c_tag tmp_tag;
/* __read_l2_tag is intended for internal use only */
tmp_tag = __read_l2_tag(association, index);
/*
* Convert all tag structure types to generic version,
* as it can represent all models.
*/
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
tag.s.V = tmp_tag.cn58xx.V;
tag.s.D = tmp_tag.cn58xx.D;
tag.s.L = tmp_tag.cn58xx.L;
tag.s.U = tmp_tag.cn58xx.U;
tag.s.addr = tmp_tag.cn58xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
tag.s.V = tmp_tag.cn38xx.V;
tag.s.D = tmp_tag.cn38xx.D;
tag.s.L = tmp_tag.cn38xx.L;
tag.s.U = tmp_tag.cn38xx.U;
tag.s.addr = tmp_tag.cn38xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
tag.s.V = tmp_tag.cn31xx.V;
tag.s.D = tmp_tag.cn31xx.D;
tag.s.L = tmp_tag.cn31xx.L;
tag.s.U = tmp_tag.cn31xx.U;
tag.s.addr = tmp_tag.cn31xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
tag.s.V = tmp_tag.cn30xx.V;
tag.s.D = tmp_tag.cn30xx.D;
tag.s.L = tmp_tag.cn30xx.L;
tag.s.U = tmp_tag.cn30xx.U;
tag.s.addr = tmp_tag.cn30xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
tag.s.V = tmp_tag.cn50xx.V;
tag.s.D = tmp_tag.cn50xx.D;
tag.s.L = tmp_tag.cn50xx.L;
tag.s.U = tmp_tag.cn50xx.U;
tag.s.addr = tmp_tag.cn50xx.addr;
} else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
}
}
return tag;
}
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
int indxalias = 0;
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_l2c_ctl l2c_ctl;
l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
indxalias = !l2c_ctl.s.disidxalias;
} else {
union cvmx_l2c_cfg l2c_cfg;
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
indxalias = l2c_cfg.s.idxalias;
}
if (indxalias) {
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
idx ^= idx / cvmx_l2c_get_num_sets();
idx ^= a_14_12;
} else {
idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
}
}
idx &= CVMX_L2C_IDX_MASK;
return idx;
}
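/*
 * Example (illustrative sketch): inspect every way of the set that a
 * physical address maps to, e.g. to check whether the address is
 * currently resident or locked.
 *
 *	uint32_t index = cvmx_l2c_address_to_index(addr);
 *	int way;
 *
 *	for (way = 0; way < cvmx_l2c_get_num_assoc(); way++) {
 *		union cvmx_l2c_tag tag = cvmx_l2c_get_tag(way, index);
 *
 *		if (tag.s.V)
 *			cvmx_dprintf("way %d%s\n", way,
 *				     tag.s.L ? " (locked)" : "");
 *	}
 */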
int cvmx_l2c_get_cache_size_bytes(void)
{
return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
CVMX_CACHE_LINE_SIZE;
}
/*
* Return log base 2 of the number of sets in the L2 cache
*/
int cvmx_l2c_get_set_bits(void)
{
int l2_set_bits;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
l2_set_bits = 10; /* 1024 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
l2_set_bits = 9; /* 512 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_set_bits = 8; /* 256 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
l2_set_bits = 7; /* 128 sets */
else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
l2_set_bits = 11; /* 2048 sets */
}
return l2_set_bits;
}
/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
return 1 << cvmx_l2c_get_set_bits();
}
/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
int l2_assoc;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
OCTEON_IS_MODEL(OCTEON_CN50XX) ||
OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_assoc = 8;
else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
l2_assoc = 16;
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_assoc = 4;
else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
l2_assoc = 8;
}
/* Check to see if part of the cache is disabled */
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
union cvmx_mio_fus_dat3 mio_fus_dat3;
mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
/*
* cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
 * <2> is not used for 63xx
* <1> disables 1/2 ways
* <0> disables 1/4 ways
* They are cumulative, so for 63xx:
* <1> <0>
* 0 0 16-way 2MB cache
* 0 1 12-way 1.5MB cache
* 1 0 8-way 1MB cache
* 1 1 4-way 512KB cache
*/
if (mio_fus_dat3.s.l2c_crip == 3)
l2_assoc = 4;
else if (mio_fus_dat3.s.l2c_crip == 2)
l2_assoc = 8;
else if (mio_fus_dat3.s.l2c_crip == 1)
l2_assoc = 12;
} else {
uint64_t l2d_fus3;
l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
/*
* Using shifts here, as bit position names are
* different for each model but they all mean the
* same.
*/
if ((l2d_fus3 >> 35) & 0x1)
l2_assoc = l2_assoc >> 2;
else if ((l2d_fus3 >> 34) & 0x1)
l2_assoc = l2_assoc >> 1;
}
return l2_assoc;
}
/*
* Flush a line from the L2 cache
* This should only be called from one core at a time, as this routine
* sets the core to the 'debug' core in order to flush the line.
*
* @assoc: Association (or way) to flush
* @index: Index to flush
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
/* Check the range of the index. */
if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
return;
}
/* Check the range of association. */
if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
return;
}
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
uint64_t address;
/* Create the address based on index and association.
* Bits<20:17> select the way of the cache block involved in
* the operation
 * Bits<16:7> of the effective address select the index
*/
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
(assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
(index << CVMX_L2C_IDX_ADDR_SHIFT));
CVMX_CACHE_WBIL2I(address, 0);
} else {
union cvmx_l2c_dbg l2cdbg;
l2cdbg.u64 = 0;
if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
l2cdbg.s.ppnum = cvmx_get_core_num();
l2cdbg.s.finv = 1;
l2cdbg.s.set = assoc;
cvmx_spinlock_lock(&cvmx_l2c_spinlock);
/*
* Enter debug mode, and make sure all other writes
* complete before we enter debug mode
*/
CVMX_SYNC;
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
index * CVMX_CACHE_LINE_SIZE),
0);
/* Exit debug mode */
CVMX_SYNC;
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-l2c.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Support library for the SPI
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-spxx-defs.h>
#include <asm/octeon/cvmx-stxx-defs.h>
#include <asm/octeon/cvmx-srxx-defs.h>
#define INVOKE_CB(function_p, args...) \
do { \
if (function_p) { \
res = function_p(args); \
if (res) \
return res; \
} \
} while (0)
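/*
 * For reference, a call such as INVOKE_CB(cvmx_spi_callbacks.reset_cb,
 * interface, mode) expands to:
 *
 *	if (cvmx_spi_callbacks.reset_cb) {
 *		res = cvmx_spi_callbacks.reset_cb(interface, mode);
 *		if (res)
 *			return res;
 *	}
 *
 * so a NULL callback is simply skipped and the first failing callback
 * aborts the whole start/restart sequence.
 */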
#if CVMX_ENABLE_DEBUG_PRINTS
static const char *modes[] =
{ "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
#endif
/* Default callbacks, can be overridden
* using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
*/
static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
.reset_cb = cvmx_spi_reset_cb,
.calendar_setup_cb = cvmx_spi_calendar_setup_cb,
.clock_detect_cb = cvmx_spi_clock_detect_cb,
.training_cb = cvmx_spi_training_cb,
.calendar_sync_cb = cvmx_spi_calendar_sync_cb,
.interface_up_cb = cvmx_spi_interface_up_cb
};
/*
* Get current SPI4 initialization callbacks
*
 * @callbacks: Pointer to the callbacks structure to fill
 *
 * The current callbacks are copied into the structure pointed to by
 * @callbacks.
*/
void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
{
memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
}
/*
* Set new SPI4 initialization callbacks
*
* @new_callbacks: Pointer to an updated callbacks structure.
*/
void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
{
memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
}
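/*
 * Example (illustrative sketch, where my_training_cb is a hypothetical
 * function supplied by the board code): a board that needs its own link
 * training can override just that one step while keeping the defaults
 * for everything else.
 *
 *	cvmx_spi_callbacks_t cb;
 *
 *	cvmx_spi_get_callbacks(&cb);
 *	cb.training_cb = my_training_cb;
 *	cvmx_spi_set_callbacks(&cb);
 */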
/*
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
 * Returns Zero on success, negative on failure.
*/
int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
int num_ports)
{
int res = -1;
if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
return res;
/* Callback to perform SPI4 reset */
INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
/* Callback to perform calendar setup */
INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
num_ports);
/* Callback to perform clock detection */
INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
/* Callback to perform SPI4 link training */
INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
/* Callback to perform calendar sync */
INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
timeout);
/* Callback to handle interface coming up */
INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
return res;
}
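/*
 * Example (illustrative sketch): bring up interface 0 as a full-duplex
 * SPI link with ten ports and a ten second timeout for clock detection
 * and training.
 *
 *	if (cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 10, 10) != 0)
 *		cvmx_dprintf("SPI0 failed to come up\n");
 */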
/*
* This routine restarts the SPI interface after it has lost synchronization
* with its correspondent system.
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
 * Returns Zero on success, negative on failure.
*/
int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
{
int res = -1;
if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
return res;
cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
/* Callback to perform SPI4 reset */
INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
/* NOTE: Calendar setup is not performed during restart */
/* Refer to cvmx_spi_start_interface() for the full sequence */
/* Callback to perform clock detection */
INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
/* Callback to perform SPI4 link training */
INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
/* Callback to perform calendar sync */
INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
timeout);
/* Callback to handle interface coming up */
INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
return res;
}
EXPORT_SYMBOL_GPL(cvmx_spi_restart_interface);
/*
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
{
union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
union cvmx_spxx_clk_ctl spxx_clk_ctl;
union cvmx_spxx_bist_stat spxx_bist_stat;
union cvmx_spxx_int_msk spxx_int_msk;
union cvmx_stxx_int_msk stxx_int_msk;
union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
int index;
uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
/* Disable SPI error events while we run BIST */
spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
/* Run BIST in the SPI interface */
cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
spxx_clk_ctl.u64 = 0;
spxx_clk_ctl.s.runbist = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
__delay(10 * MS);
spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
if (spxx_bist_stat.s.stat0)
cvmx_dprintf
("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
interface);
if (spxx_bist_stat.s.stat1)
cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
interface);
if (spxx_bist_stat.s.stat2)
cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
interface);
/* Clear the calendar table after BIST to fix parity errors */
for (index = 0; index < 32; index++) {
union cvmx_srxx_spi4_calx srxx_spi4_calx;
union cvmx_stxx_spi4_calx stxx_spi4_calx;
srxx_spi4_calx.u64 = 0;
srxx_spi4_calx.s.oddpar = 1;
cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
srxx_spi4_calx.u64);
stxx_spi4_calx.u64 = 0;
stxx_spi4_calx.s.oddpar = 1;
cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
stxx_spi4_calx.u64);
}
/* Re enable reporting of error interrupts */
cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
cvmx_write_csr(CVMX_STXX_INT_REG(interface),
cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
/* Setup the CLKDLY right in the middle */
spxx_clk_ctl.u64 = 0;
spxx_clk_ctl.s.seetrn = 0;
spxx_clk_ctl.s.clkdly = 0x10;
spxx_clk_ctl.s.runbist = 0;
spxx_clk_ctl.s.statdrv = 0;
/* This should always be on the opposite edge as statdrv */
spxx_clk_ctl.s.statrcv = 1;
spxx_clk_ctl.s.sndtrn = 0;
spxx_clk_ctl.s.drptrn = 0;
spxx_clk_ctl.s.rcvtrn = 0;
spxx_clk_ctl.s.srxdlck = 0;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
__delay(100 * MS);
/* Reset SRX0 DLL */
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
/* Waiting for Inf0 Spi4 RX DLL to lock */
__delay(100 * MS);
/* Enable dynamic alignment */
spxx_trn4_ctl.s.trntest = 0;
spxx_trn4_ctl.s.jitter = 1;
spxx_trn4_ctl.s.clr_boot = 1;
spxx_trn4_ctl.s.set_boot = 0;
if (OCTEON_IS_MODEL(OCTEON_CN58XX))
spxx_trn4_ctl.s.maxdist = 3;
else
spxx_trn4_ctl.s.maxdist = 8;
spxx_trn4_ctl.s.macro_en = 1;
spxx_trn4_ctl.s.mux_en = 1;
cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
spxx_dbg_deskew_ctl.u64 = 0;
cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
spxx_dbg_deskew_ctl.u64);
return 0;
}
/*
* Callback to setup calendar and miscellaneous settings before clock detection
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
int num_ports)
{
int port;
int index;
if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
union cvmx_srxx_com_ctl srxx_com_ctl;
union cvmx_srxx_spi4_stat srxx_spi4_stat;
/* SRX0 number of Ports */
srxx_com_ctl.u64 = 0;
srxx_com_ctl.s.prts = num_ports - 1;
srxx_com_ctl.s.st_en = 0;
srxx_com_ctl.s.inf_en = 0;
cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
		/* SRX0 Calendar Table. This round robins through all ports */
port = 0;
index = 0;
while (port < num_ports) {
union cvmx_srxx_spi4_calx srxx_spi4_calx;
srxx_spi4_calx.u64 = 0;
srxx_spi4_calx.s.prt0 = port++;
srxx_spi4_calx.s.prt1 = port++;
srxx_spi4_calx.s.prt2 = port++;
srxx_spi4_calx.s.prt3 = port++;
srxx_spi4_calx.s.oddpar =
~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
srxx_spi4_calx.u64);
index++;
}
srxx_spi4_stat.u64 = 0;
srxx_spi4_stat.s.len = num_ports;
srxx_spi4_stat.s.m = 1;
cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
srxx_spi4_stat.u64);
}
if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
union cvmx_stxx_arb_ctl stxx_arb_ctl;
union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
union cvmx_stxx_spi4_stat stxx_spi4_stat;
union cvmx_stxx_spi4_dat stxx_spi4_dat;
/* STX0 Config */
stxx_arb_ctl.u64 = 0;
stxx_arb_ctl.s.igntpa = 0;
stxx_arb_ctl.s.mintrn = 0;
cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
gmxx_tx_spi_max.u64 = 0;
gmxx_tx_spi_max.s.max1 = 8;
gmxx_tx_spi_max.s.max2 = 4;
gmxx_tx_spi_max.s.slice = 0;
cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
gmxx_tx_spi_max.u64);
gmxx_tx_spi_thresh.u64 = 0;
gmxx_tx_spi_thresh.s.thresh = 4;
cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
gmxx_tx_spi_thresh.u64);
gmxx_tx_spi_ctl.u64 = 0;
gmxx_tx_spi_ctl.s.tpa_clr = 0;
gmxx_tx_spi_ctl.s.cont_pkt = 0;
cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
gmxx_tx_spi_ctl.u64);
/* STX0 Training Control */
stxx_spi4_dat.u64 = 0;
/*Minimum needed by dynamic alignment */
stxx_spi4_dat.s.alpha = 32;
stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
stxx_spi4_dat.u64);
		/* STX0 Calendar Table. This round robins through all ports */
port = 0;
index = 0;
while (port < num_ports) {
union cvmx_stxx_spi4_calx stxx_spi4_calx;
stxx_spi4_calx.u64 = 0;
stxx_spi4_calx.s.prt0 = port++;
stxx_spi4_calx.s.prt1 = port++;
stxx_spi4_calx.s.prt2 = port++;
stxx_spi4_calx.s.prt3 = port++;
stxx_spi4_calx.s.oddpar =
~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
stxx_spi4_calx.u64);
index++;
}
stxx_spi4_stat.u64 = 0;
stxx_spi4_stat.s.len = num_ports;
stxx_spi4_stat.s.m = 1;
cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
stxx_spi4_stat.u64);
}
return 0;
}
/*
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
int clock_transitions;
union cvmx_spxx_clk_stat stat;
uint64_t timeout_time;
uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
/*
* Regardless of operating mode, both Tx and Rx clocks must be
* present for the SPI interface to operate.
*/
cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
/*
* Require 100 clock transitions in order to avoid any noise
* in the beginning.
*/
clock_transitions = 100;
do {
stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
/*
* We've seen a clock transition, so decrement
* the number we still need.
*/
clock_transitions--;
cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
stat.s.s4clk0 = 0;
stat.s.s4clk1 = 0;
}
if (cvmx_get_cycle() > timeout_time) {
cvmx_dprintf("SPI%d: Timeout\n", interface);
return -1;
}
} while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
/*
* Require 100 clock transitions in order to avoid any noise in the
* beginning.
*/
clock_transitions = 100;
do {
stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
/*
* We've seen a clock transition, so decrement
* the number we still need
*/
clock_transitions--;
cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
stat.s.d4clk0 = 0;
stat.s.d4clk1 = 0;
}
if (cvmx_get_cycle() > timeout_time) {
cvmx_dprintf("SPI%d: Timeout\n", interface);
return -1;
}
} while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
return 0;
}
/*
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
union cvmx_spxx_clk_stat stat;
uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
int rx_training_needed;
/* SRX0 & STX0 Inf0 Links are configured - begin training */
union cvmx_spxx_clk_ctl spxx_clk_ctl;
spxx_clk_ctl.u64 = 0;
spxx_clk_ctl.s.seetrn = 0;
spxx_clk_ctl.s.clkdly = 0x10;
spxx_clk_ctl.s.runbist = 0;
spxx_clk_ctl.s.statdrv = 0;
/* This should always be on the opposite edge as statdrv */
spxx_clk_ctl.s.statrcv = 1;
spxx_clk_ctl.s.sndtrn = 1;
spxx_clk_ctl.s.drptrn = 1;
spxx_clk_ctl.s.rcvtrn = 1;
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
__delay(1000 * MS);
/* SRX0 clear the boot bit */
spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
spxx_trn4_ctl.s.clr_boot = 1;
cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
/* Wait for the training sequence to complete */
cvmx_dprintf("SPI%d: Waiting for training\n", interface);
__delay(1000 * MS);
/* Wait a really long time here */
timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
/*
* The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
* We'll be pessimistic and wait for a lot more.
*/
rx_training_needed = 500;
do {
stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
if (stat.s.srxtrn && rx_training_needed) {
rx_training_needed--;
cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
stat.s.srxtrn = 0;
}
if (cvmx_get_cycle() > timeout_time) {
cvmx_dprintf("SPI%d: Timeout\n", interface);
return -1;
}
} while (stat.s.srxtrn == 0);
return 0;
}
/*
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
{
uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
/* SRX0 interface should be good, send calendar data */
union cvmx_srxx_com_ctl srxx_com_ctl;
cvmx_dprintf
("SPI%d: Rx is synchronized, start sending calendar data\n",
interface);
srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
srxx_com_ctl.s.inf_en = 1;
srxx_com_ctl.s.st_en = 1;
cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
}
if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
/* STX0 has achieved sync */
		/* The correspondent board should be sending calendar data */
/* Enable the STX0 STAT receiver. */
union cvmx_spxx_clk_stat stat;
uint64_t timeout_time;
union cvmx_stxx_com_ctl stxx_com_ctl;
stxx_com_ctl.u64 = 0;
stxx_com_ctl.s.st_en = 1;
cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
/* Waiting for calendar sync on STX0 STAT */
cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
interface, interface);
timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
/* SPX0_CLK_STAT - SPX0_CLK_STAT[STXCAL] should be 1 (bit10) */
do {
stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
if (cvmx_get_cycle() > timeout_time) {
cvmx_dprintf("SPI%d: Timeout\n", interface);
return -1;
}
} while (stat.s.stxcal == 0);
}
return 0;
}
/*
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
{
union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
union cvmx_srxx_com_ctl srxx_com_ctl;
srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
srxx_com_ctl.s.inf_en = 1;
cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
cvmx_dprintf("SPI%d: Rx is now up\n", interface);
}
if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
union cvmx_stxx_com_ctl stxx_com_ctl;
stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
stxx_com_ctl.s.inf_en = 1;
cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
cvmx_dprintf("SPI%d: Tx is now up\n", interface);
}
gmxx_rxx_frm_min.u64 = 0;
gmxx_rxx_frm_min.s.len = 64;
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
gmxx_rxx_frm_min.u64);
gmxx_rxx_frm_max.u64 = 0;
gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
gmxx_rxx_frm_max.u64);
gmxx_rxx_jabber.u64 = 0;
gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
return 0;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-spi.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2017 Cavium, Inc.
*/
/*
We install this program at the bootvector:
------------------------------------
.set noreorder
.set nomacro
.set noat
reset_vector:
dmtc0 $k0, $31, 0 # Save $k0 to DESAVE
dmtc0 $k1, $31, 3 # Save $k1 to KScratch2
mfc0 $k0, $12, 0 # Status
mfc0 $k1, $15, 1 # Ebase
ori $k0, 0x84 # Enable 64-bit addressing, set
# ERL (should already be set)
andi $k1, 0x3ff # mask out core ID
mtc0 $k0, $12, 0 # Status
sll $k1, 5
lui $k0, 0xbfc0
cache 17, 0($0) # Core-14345, clear L1 Dcache virtual
# tags if the core hit an NMI
ld $k0, 0x78($k0) # k0 <- (bfc00078) pointer to the reset vector
synci 0($0) # Invalidate ICache to get coherent
# view of target code.
daddu $k0, $k0, $k1
nop
ld $k0, 0($k0) # k0 <- core specific target address
dmfc0 $k1, $31, 3 # Restore $k1 from KScratch2
beqz $k0, wait_loop # Spin in wait loop
nop
jr $k0
nop
nop # NOPs needed here to fill delay slots
nop # on endian reversal of previous instructions
wait_loop:
wait
nop
b wait_loop
nop
nop
nop
------------------------------------
0000000000000000 <reset_vector>:
0: 40baf800 dmtc0 k0,c0_desave
4: 40bbf803 dmtc0 k1,c0_kscratch2
8: 401a6000 mfc0 k0,c0_status
c: 401b7801 mfc0 k1,c0_ebase
10: 375a0084 ori k0,k0,0x84
14: 337b03ff andi k1,k1,0x3ff
18: 409a6000 mtc0 k0,c0_status
1c: 001bd940 sll k1,k1,0x5
20: 3c1abfc0 lui k0,0xbfc0
24: bc110000 cache 0x11,0(zero)
28: df5a0078 ld k0,120(k0)
2c: 041f0000 synci 0(zero)
30: 035bd02d daddu k0,k0,k1
34: 00000000 nop
38: df5a0000 ld k0,0(k0)
3c: 403bf803 dmfc0 k1,c0_kscratch2
40: 13400005 beqz k0,58 <wait_loop>
44: 00000000 nop
48: 03400008 jr k0
4c: 00000000 nop
50: 00000000 nop
54: 00000000 nop
0000000000000058 <wait_loop>:
58: 42000020 wait
5c: 00000000 nop
60: 1000fffd b 58 <wait_loop>
64: 00000000 nop
68: 00000000 nop
6c: 00000000 nop
*/
#include <asm/octeon/cvmx-boot-vector.h>
static unsigned long long _cvmx_bootvector_data[16] = {
0x40baf80040bbf803ull, /* patch low order 8-bits if no KScratch*/
0x401a6000401b7801ull,
0x375a0084337b03ffull,
0x409a6000001bd940ull,
0x3c1abfc0bc110000ull,
0xdf5a0078041f0000ull,
0x035bd02d00000000ull,
0xdf5a0000403bf803ull, /* patch low order 8-bits if no KScratch*/
0x1340000500000000ull,
0x0340000800000000ull,
0x0000000000000000ull,
0x4200002000000000ull,
0x1000fffd00000000ull,
0x0000000000000000ull,
OCTEON_BOOT_MOVEABLE_MAGIC1,
0 /* To be filled in with address of vector block*/
};
/* 2^10 CPUs */
#define VECTOR_TABLE_SIZE (1024 * sizeof(struct cvmx_boot_vector_element))
static void cvmx_boot_vector_init(void *mem)
{
uint64_t kseg0_mem;
int i;
memset(mem, 0, VECTOR_TABLE_SIZE);
kseg0_mem = cvmx_ptr_to_phys(mem) | 0x8000000000000000ull;
for (i = 0; i < 15; i++) {
uint64_t v = _cvmx_bootvector_data[i];
if (OCTEON_IS_OCTEON1PLUS() && (i == 0 || i == 7))
			v &= 0xffffffff00000000ull; /* KScratch not available. */
cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8);
cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, v);
}
cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, 15 * 8);
cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, kseg0_mem);
cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000);
}
/**
* Get a pointer to the per-core table of reset vector pointers
*
*/
struct cvmx_boot_vector_element *cvmx_boot_vector_get(void)
{
struct cvmx_boot_vector_element *ret;
ret = cvmx_bootmem_alloc_named_range_once(VECTOR_TABLE_SIZE, 0,
(1ull << 32) - 1, 8, "__boot_vector1__", cvmx_boot_vector_init);
return ret;
}
EXPORT_SYMBOL(cvmx_boot_vector_get);
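/*
 * Example (illustrative sketch): the reset stub above loads the first
 * 64-bit word of the 32-byte element indexed by the core number and jumps
 * to it, so releasing a secondary core looks roughly like the following.
 * The target_ptr field name is assumed from the cvmx_boot_vector_element
 * definition in the matching header.
 *
 *	struct cvmx_boot_vector_element *vec = cvmx_boot_vector_get();
 *
 *	if (vec) {
 *		vec[core].target_ptr = (uint64_t)secondary_entry;
 *		CVMX_SYNCW;
 *	}
 */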
| linux-master | arch/mips/cavium-octeon/executive/cvmx-boot-vector.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Simple allocate only memory allocator. Used to allocate memory at
* application start time.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
#include <asm/octeon/cvmx-bootmem.h>
/*#define DEBUG */
static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
/* See header file for descriptions of functions */
/*
* This macro returns a member of the
* cvmx_bootmem_named_block_desc_t structure. These members can't
* be directly addressed as they might be in memory not directly
* reachable. In the case where bootmem is compiled with
* LINUX_HOST, the structure itself might be located on a remote
* Octeon. The argument "field" is the member name of the
* cvmx_bootmem_named_block_desc_t to read. Regardless of the type
* of the field, the return type is always a uint64_t. The "addr"
* parameter is the physical address of the structure.
*/
#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
__cvmx_bootmem_desc_get(addr, \
offsetof(struct cvmx_bootmem_named_block_desc, field), \
sizeof_field(struct cvmx_bootmem_named_block_desc, field))
/*
* This function is the implementation of the get macros defined
* for individual structure members. The argument are generated
* by the macros inorder to read only the needed memory.
*
* @param base 64bit physical address of the complete structure
* @param offset Offset from the beginning of the structure to the member being
* accessed.
* @param size Size of the structure member.
*
* @return Value of the structure member promoted into a uint64_t.
*/
static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset,
int size)
{
base = (1ull << 63) | (base + offset);
switch (size) {
case 4:
return cvmx_read64_uint32(base);
case 8:
return cvmx_read64_uint64(base);
default:
return 0;
}
}
/*
* Wrapper functions are provided for reading/writing the size and
 * next block values as these may not be directly addressable (in 32
 * bit applications, for instance). Offsets of data elements in the
 * bootmem list must match cvmx_bootmem_block_header_t.
*/
#define NEXT_OFFSET 0
#define SIZE_OFFSET 8
static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
{
cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
}
static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
{
cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
}
static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
{
return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
}
static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
{
return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
}
/*
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader within a specified
* address range. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated in the requested range.
*
* @size: Size in bytes of block to allocate
* @min_addr: defines the minimum address of the range
* @max_addr: defines the maximum address of the range
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
static void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
uint64_t min_addr, uint64_t max_addr)
{
int64_t address;
address =
cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
if (address > 0)
return cvmx_phys_to_ptr(address);
else
return NULL;
}
void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
uint64_t alignment)
{
return cvmx_bootmem_alloc_range(size, alignment, address,
address + size);
}
void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
uint64_t max_addr, uint64_t align,
char *name)
{
int64_t addr;
addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
align, name, 0);
if (addr >= 0)
return cvmx_phys_to_ptr(addr);
else
return NULL;
}
void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
{
return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
}
EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
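/*
 * Example (illustrative sketch, with a hypothetical block name): carve
 * out a 1 MiB, 128-byte aligned region that other software or a later
 * boot stage can find again by name.  Because this is an allocate-only
 * allocator, the block stays reserved for the life of the system.
 *
 *	void *buf = cvmx_bootmem_alloc_named(1 << 20, 128, "my_shared_block");
 *
 *	if (!buf)
 *		cvmx_dprintf("bootmem allocation failed\n");
 */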
void cvmx_bootmem_lock(void)
{
cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}
void cvmx_bootmem_unlock(void)
{
cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}
int cvmx_bootmem_init(void *mem_desc_ptr)
{
/* Here we set the global pointer to the bootmem descriptor
* block. This pointer will be used directly, so we will set
* it up to be directly usable by the application. It is set
* up as follows for the various runtime/ABI combinations:
*
* Linux 64 bit: Set XKPHYS bit
* Linux 32 bit: use mmap to create mapping, use virtual address
* CVMX 64 bit: use physical address directly
* CVMX 32 bit: use physical address directly
*
* Note that the CVMX environment assumes the use of 1-1 TLB
* mappings so that the physical addresses can be used
* directly
*/
if (!cvmx_bootmem_desc) {
#if defined(CVMX_ABI_64)
/* Set XKPHYS bit */
cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
}
return 0;
}
/*
* The cvmx_bootmem_phy* functions below return 64 bit physical
 * addresses, and expose more features than the cvmx_bootmem functions
 * above. These are required for full memory space access in 32 bit
 * applications, as well as for using some advanced features. Most
 * applications should not need to use these.
*/
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
uint64_t address_max, uint64_t alignment,
uint32_t flags)
{
uint64_t head_addr;
uint64_t ent_addr;
/* points to previous list entry, NULL current entry is head of list */
uint64_t prev_addr = 0;
uint64_t new_ent_addr = 0;
uint64_t desired_min_addr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
"min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
(unsigned long long)req_size,
(unsigned long long)address_min,
(unsigned long long)address_max,
(unsigned long long)alignment);
#endif
if (cvmx_bootmem_desc->major_version > 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
goto error_out;
}
/*
* Do a variety of checks to validate the arguments. The
* allocator code will later assume that these checks have
* been made. We validate that the requested constraints are
* not self-contradictory before we look through the list of
* available memory.
*/
/* 0 is not a valid req_size for this allocator */
if (!req_size)
goto error_out;
/* Round req_size up to mult of minimum alignment bytes */
req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
/*
* Convert !0 address_min and 0 address_max to special case of
* range that specifies an exact memory block to allocate. Do
* this before other checks and adjustments so that this
 * transformation will be validated.
*/
if (address_min && !address_max)
address_max = address_min + req_size;
else if (!address_min && !address_max)
address_max = ~0ull; /* If no limits given, use max limits */
/*
	 * Enforce minimum alignment (this also keeps the minimum free block
	 * req_size the same as the alignment req_size).
*/
if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
/*
* Adjust address minimum based on requested alignment (round
* up to meet alignment). Do this here so we can reject
* impossible requests up front. (NOP for address_min == 0)
*/
if (alignment)
address_min = ALIGN(address_min, alignment);
/*
* Reject inconsistent args. We have adjusted these, so this
* may fail due to our internal changes even if this check
* would pass for the values the user supplied.
*/
if (req_size > address_max - address_min)
goto error_out;
/* Walk through the list entries - first fit found is returned */
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
head_addr = cvmx_bootmem_desc->head_addr;
ent_addr = head_addr;
for (; ent_addr;
prev_addr = ent_addr,
ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
uint64_t usable_base, usable_max;
uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
if (cvmx_bootmem_phy_get_next(ent_addr)
&& ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
cvmx_dprintf("Internal bootmem_alloc() error: ent: "
"0x%llx, next: 0x%llx\n",
(unsigned long long)ent_addr,
(unsigned long long)
cvmx_bootmem_phy_get_next(ent_addr));
goto error_out;
}
/*
		 * Determine if this is an entry that can satisfy the
		 * request. Check to make sure the entry is large enough
		 * to satisfy the request.
*/
usable_base =
ALIGN(max(address_min, ent_addr), alignment);
usable_max = min(address_max, ent_addr + ent_size);
/*
* We should be able to allocate block at address
* usable_base.
*/
desired_min_addr = usable_base;
/*
* Determine if request can be satisfied from the
* current entry.
*/
if (!((ent_addr + ent_size) > usable_base
&& ent_addr < address_max
&& req_size <= usable_max - usable_base))
continue;
/*
* We have found an entry that has room to satisfy the
		 * request, so allocate it from this entry. If
		 * CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
		 * the end of this block rather than the beginning.
*/
if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
desired_min_addr = usable_max - req_size;
/*
* Align desired address down to required
* alignment.
*/
desired_min_addr &= ~(alignment - 1);
}
/* Match at start of entry */
if (desired_min_addr == ent_addr) {
if (req_size < ent_size) {
/*
* big enough to create a new block
* from top portion of block.
*/
new_ent_addr = ent_addr + req_size;
cvmx_bootmem_phy_set_next(new_ent_addr,
cvmx_bootmem_phy_get_next(ent_addr));
cvmx_bootmem_phy_set_size(new_ent_addr,
ent_size -
req_size);
/*
* Adjust next pointer as following
* code uses this.
*/
cvmx_bootmem_phy_set_next(ent_addr,
new_ent_addr);
}
/*
* adjust prev ptr or head to remove this
* entry from list.
*/
if (prev_addr)
cvmx_bootmem_phy_set_next(prev_addr,
cvmx_bootmem_phy_get_next(ent_addr));
else
/*
* head of list being returned, so
* update head ptr.
*/
cvmx_bootmem_desc->head_addr =
cvmx_bootmem_phy_get_next(ent_addr);
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return desired_min_addr;
}
/*
		 * The block being returned doesn't start at the beginning
		 * of this entry, so we will split a block off the front of
		 * it.  Create a new entry from the high portion of the
		 * block, so that the new entry starts at the desired
		 * address; add it to the list, then loop again to allocate
		 * from the new block.
*/
new_ent_addr = desired_min_addr;
cvmx_bootmem_phy_set_next(new_ent_addr,
cvmx_bootmem_phy_get_next
(ent_addr));
cvmx_bootmem_phy_set_size(new_ent_addr,
cvmx_bootmem_phy_get_size
(ent_addr) -
(desired_min_addr -
ent_addr));
cvmx_bootmem_phy_set_size(ent_addr,
desired_min_addr - ent_addr);
cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
/* Loop again to handle actual alloc from new block */
}
error_out:
/* We didn't find anything, so return error */
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return -1;
}
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
uint64_t cur_addr;
uint64_t prev_addr = 0; /* zero is invalid */
int retval = 0;
#ifdef DEBUG
cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
(unsigned long long)phy_addr, (unsigned long long)size);
#endif
if (cvmx_bootmem_desc->major_version > 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
return 0;
}
/* 0 is not a valid size for this allocator */
if (!size)
return 0;
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
cur_addr = cvmx_bootmem_desc->head_addr;
if (cur_addr == 0 || phy_addr < cur_addr) {
/* add at front of list - special case with changing head ptr */
if (cur_addr && phy_addr + size > cur_addr)
goto bootmem_free_done; /* error, overlapping section */
else if (phy_addr + size == cur_addr) {
/* Add to front of existing first block */
cvmx_bootmem_phy_set_next(phy_addr,
cvmx_bootmem_phy_get_next
(cur_addr));
cvmx_bootmem_phy_set_size(phy_addr,
cvmx_bootmem_phy_get_size
(cur_addr) + size);
cvmx_bootmem_desc->head_addr = phy_addr;
} else {
/* New block before first block. OK if cur_addr is 0 */
cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_desc->head_addr = phy_addr;
}
retval = 1;
goto bootmem_free_done;
}
/* Find place in list to add block */
while (cur_addr && phy_addr > cur_addr) {
prev_addr = cur_addr;
cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
}
if (!cur_addr) {
/*
* We have reached the end of the list, add on to end,
* checking to see if we need to combine with last
* block
*/
if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
phy_addr) {
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size
(prev_addr) + size);
} else {
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_phy_set_next(phy_addr, 0);
}
retval = 1;
goto bootmem_free_done;
} else {
/*
* insert between prev and cur nodes, checking for
* merge with either/both.
*/
if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
phy_addr) {
/* Merge with previous */
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size
(prev_addr) + size);
if (phy_addr + size == cur_addr) {
/* Also merge with current */
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size(cur_addr) +
cvmx_bootmem_phy_get_size(prev_addr));
cvmx_bootmem_phy_set_next(prev_addr,
cvmx_bootmem_phy_get_next(cur_addr));
}
retval = 1;
goto bootmem_free_done;
} else if (phy_addr + size == cur_addr) {
/* Merge with current */
cvmx_bootmem_phy_set_size(phy_addr,
cvmx_bootmem_phy_get_size
(cur_addr) + size);
cvmx_bootmem_phy_set_next(phy_addr,
cvmx_bootmem_phy_get_next
(cur_addr));
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
retval = 1;
goto bootmem_free_done;
}
/* It is a standalone block, add in between prev and cur */
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
}
retval = 1;
bootmem_free_done:
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return retval;
}
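/*
 * Illustrative sketch, not part of the original file: the physical-address
 * variants pair an allocation with an explicit free of the same (rounded)
 * size, because no size is recorded for anonymous blocks.  The size and
 * alignment below are made-up example values.
 */
static __maybe_unused void example_phy_alloc_free(void)
{
	int64_t pa = cvmx_bootmem_phy_alloc(0x2000, 0, 0, 0x1000, 0);

	if (pa >= 0)
		__cvmx_bootmem_phy_free(pa, 0x2000, 0);
}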
/*
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
* @name: Name of memory block to find. If NULL pointer given, then
* finds unused descriptor, if available.
*
* @flags: Flags to control options for the allocation.
*
* Returns Pointer to memory block descriptor, NULL if not found.
 * If NULL is returned when the name parameter is NULL, then no free
 * memory block descriptors are available.
*/
static struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
{
unsigned int i;
struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
/*
* Lock the structure to make sure that it is not being
* changed while we are examining it.
*/
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
/* Use XKPHYS for 64 bit linux */
named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
#ifdef DEBUG
cvmx_dprintf
("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
named_block_array_ptr);
#endif
if (cvmx_bootmem_desc->major_version == 3) {
for (i = 0;
i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
if ((name && named_block_array_ptr[i].size
&& !strncmp(name, named_block_array_ptr[i].name,
cvmx_bootmem_desc->named_block_name_len
- 1))
|| (!name && !named_block_array_ptr[i].size)) {
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return &(named_block_array_ptr[i]);
}
}
} else {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
}
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return NULL;
}
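/*
 * Illustrative sketch, not part of the original file: the finder above is
 * used in two ways.  With a real name it returns that block's descriptor;
 * with a NULL name it returns the first unused descriptor slot (size == 0),
 * which the allocator then fills in.
 */
static __maybe_unused struct cvmx_bootmem_named_block_desc *
example_find_free_named_slot(void)
{
	/* NULL name: first free named-block descriptor, or NULL if the table is full */
	return cvmx_bootmem_phy_named_block_find(NULL, 0);
}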
void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr,
uint64_t max_addr, uint64_t align,
char *name,
void (*init) (void *))
{
int64_t addr;
void *ptr;
uint64_t named_block_desc_addr;
named_block_desc_addr = (uint64_t)
cvmx_bootmem_phy_named_block_find(name,
(uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (named_block_desc_addr) {
addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
base_addr);
return cvmx_phys_to_ptr(addr);
}
addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
align, name,
(uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (addr < 0)
return NULL;
ptr = cvmx_phys_to_ptr(addr);
if (init)
init(ptr);
else
memset(ptr, 0, size);
return ptr;
}
EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once);
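/*
 * Illustrative sketch, not part of the original file: the "_once" variant is
 * convenient for shared state that several callers may try to create.  The
 * first call allocates and initializes the block; later calls return the
 * existing block.  The structure, marker value, name and 128-byte alignment
 * below are all invented for this example.
 */
struct example_shared_state {
	uint64_t magic;
	uint64_t counter;
};

static void example_shared_state_init(void *ptr)
{
	struct example_shared_state *s = ptr;

	s->magic = 0x4c4f4f4e;	/* arbitrary marker value */
	s->counter = 0;
}

static __maybe_unused struct example_shared_state *example_get_shared_state(void)
{
	return cvmx_bootmem_alloc_named_range_once(sizeof(struct example_shared_state),
						   0, 0, 128,
						   "example-shared-state",
						   example_shared_state_init);
}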
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
{
return cvmx_bootmem_phy_named_block_find(name, 0);
}
EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
/*
* Frees a named block.
*
* @name: name of block to free
* @flags: flags for passing options
*
* Returns 0 on failure
* 1 on success
*/
static int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
{
struct cvmx_bootmem_named_block_desc *named_block_ptr;
if (cvmx_bootmem_desc->major_version != 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
"%d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
return 0;
}
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
#endif
/*
* Take lock here, as name lookup/block free/name free need to
* be atomic.
*/
cvmx_bootmem_lock();
named_block_ptr =
cvmx_bootmem_phy_named_block_find(name,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (named_block_ptr) {
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
"%s, base: 0x%llx, size: 0x%llx\n",
name,
(unsigned long long)named_block_ptr->base_addr,
(unsigned long long)named_block_ptr->size);
#endif
__cvmx_bootmem_phy_free(named_block_ptr->base_addr,
named_block_ptr->size,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
named_block_ptr->size = 0;
/* Set size to zero to indicate block not used. */
}
cvmx_bootmem_unlock();
return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}
int cvmx_bootmem_free_named(char *name)
{
return cvmx_bootmem_phy_named_block_free(name, 0);
}
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
uint64_t max_addr,
uint64_t alignment,
char *name,
uint32_t flags)
{
int64_t addr_allocated;
struct cvmx_bootmem_named_block_desc *named_block_desc_ptr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: "
"0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
(unsigned long long)size,
(unsigned long long)min_addr,
(unsigned long long)max_addr,
(unsigned long long)alignment,
name);
#endif
if (cvmx_bootmem_desc->major_version != 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
"%d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
return -1;
}
/*
* Take lock here, as name lookup/block alloc/name add need to
* be atomic.
*/
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
/* Get pointer to first available named block descriptor */
named_block_desc_ptr =
cvmx_bootmem_phy_named_block_find(NULL,
flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
/*
	 * Check to see if the name is already in use; return an error if the
	 * name is taken or there is no more room for blocks.
*/
if (cvmx_bootmem_phy_named_block_find(name,
flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) {
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
return -1;
}
/*
	 * Round size up to a multiple of the minimum alignment. We need
* the actual size allocated to allow for blocks to be
* coalesced when they are freed. The alloc routine does the
* same rounding up on all allocations.
*/
size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr,
alignment,
flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (addr_allocated >= 0) {
named_block_desc_ptr->base_addr = addr_allocated;
named_block_desc_ptr->size = size;
strscpy(named_block_desc_ptr->name, name,
cvmx_bootmem_desc->named_block_name_len);
}
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock));
return addr_allocated;
}
struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void)
{
return cvmx_bootmem_desc;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-bootmem.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for NPI initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pip-defs.h>
/**
* Probe a NPI interface and determine the number of ports
* connected to it. The NPI interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_npi_probe(int interface)
{
#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
return 4;
else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
&& !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
/* The packet engines didn't exist before pass 2 */
return 4;
else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
&& !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
/* The packet engines didn't exist before pass 2 */
return 4;
#endif
return 0;
}
/**
* Bringup and enable a NPI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_npi_enable(int interface)
{
/*
* On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packets < 64 bytes and jumbo frames don't get
* errors.
*/
if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
!OCTEON_IS_MODEL(OCTEON_CN58XX)) {
int num_ports = cvmx_helper_ports_on_interface(interface);
int port;
for (port = 0; port < num_ports; port++) {
union cvmx_pip_prt_cfgx port_cfg;
int ipd_port =
cvmx_helper_get_ipd_port(interface, port);
port_cfg.u64 =
cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
port_cfg.s.maxerr_en = 0;
port_cfg.s.minerr_en = 0;
cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
port_cfg.u64);
}
}
/* Enables are controlled by the remote host, so nothing to do here */
return 0;
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-npi.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2009 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Automatically generated functions useful for enabling
* and decoding RSL_INT_BLOCKS interrupts.
*
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
#include <asm/octeon/cvmx-spxx-defs.h>
#include <asm/octeon/cvmx-stxx-defs.h>
#ifndef PRINT_ERROR
#define PRINT_ERROR(format, ...)
#endif
/**
* __cvmx_interrupt_gmxx_rxx_int_en_enable - enable all interrupt bits in cvmx_gmxx_rxx_int_en_t
* @index: interrupt register offset
* @block: interrupt register block_id
*/
void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
{
union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
gmx_rx_int_en.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping gmx_rx_int_en.s.reserved_29_63 */
gmx_rx_int_en.s.hg2cc = 1;
gmx_rx_int_en.s.hg2fld = 1;
gmx_rx_int_en.s.undat = 1;
gmx_rx_int_en.s.uneop = 1;
gmx_rx_int_en.s.unsop = 1;
gmx_rx_int_en.s.bad_term = 1;
gmx_rx_int_en.s.bad_seq = 1;
gmx_rx_int_en.s.rem_fault = 1;
gmx_rx_int_en.s.loc_fault = 1;
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
/* Skipping gmx_rx_int_en.s.reserved_9_9 */
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_5_6 */
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/* Skipping gmx_rx_int_en.s.reserved_20_63 */
gmx_rx_int_en.s.pause_drp = 1;
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_6_6 */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
/* Skipping gmx_rx_int_en.s.reserved_19_63 */
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping gmx_rx_int_en.s.reserved_20_63 */
gmx_rx_int_en.s.pause_drp = 1;
/*gmx_rx_int_en.s.phy_dupx = 1; */
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
gmx_rx_int_en.s.niberr = 1;
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
gmx_rx_int_en.s.alnerr = 1;
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
gmx_rx_int_en.s.maxerr = 1;
gmx_rx_int_en.s.carext = 1;
gmx_rx_int_en.s.minerr = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping gmx_rx_int_en.s.reserved_29_63 */
gmx_rx_int_en.s.hg2cc = 1;
gmx_rx_int_en.s.hg2fld = 1;
gmx_rx_int_en.s.undat = 1;
gmx_rx_int_en.s.uneop = 1;
gmx_rx_int_en.s.unsop = 1;
gmx_rx_int_en.s.bad_term = 1;
gmx_rx_int_en.s.bad_seq = 0;
gmx_rx_int_en.s.rem_fault = 1;
gmx_rx_int_en.s.loc_fault = 0;
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
gmx_rx_int_en.s.ovrerr = 1;
/* Skipping gmx_rx_int_en.s.reserved_9_9 */
gmx_rx_int_en.s.skperr = 1;
gmx_rx_int_en.s.rcverr = 1;
/* Skipping gmx_rx_int_en.s.reserved_5_6 */
/*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
gmx_rx_int_en.s.jabber = 1;
/* Skipping gmx_rx_int_en.s.reserved_2_2 */
gmx_rx_int_en.s.carext = 1;
/* Skipping gmx_rx_int_en.s.reserved_0_0 */
}
cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
}
/**
* __cvmx_interrupt_pcsx_intx_en_reg_enable - enable all interrupt bits in cvmx_pcsx_intx_en_reg_t
* @index: interrupt register offset
* @block: interrupt register block_id
*/
void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
{
union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
pcs_int_en_reg.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping pcs_int_en_reg.s.reserved_12_63 */
/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
pcs_int_en_reg.s.sync_bad_en = 1;
pcs_int_en_reg.s.an_bad_en = 1;
pcs_int_en_reg.s.rxlock_en = 1;
pcs_int_en_reg.s.rxbad_en = 1;
/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
pcs_int_en_reg.s.txbad_en = 1;
pcs_int_en_reg.s.txfifo_en = 1;
pcs_int_en_reg.s.txfifu_en = 1;
pcs_int_en_reg.s.an_err_en = 1;
/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping pcs_int_en_reg.s.reserved_12_63 */
/*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
pcs_int_en_reg.s.sync_bad_en = 1;
pcs_int_en_reg.s.an_bad_en = 1;
pcs_int_en_reg.s.rxlock_en = 1;
pcs_int_en_reg.s.rxbad_en = 1;
/*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
pcs_int_en_reg.s.txbad_en = 1;
pcs_int_en_reg.s.txfifo_en = 1;
pcs_int_en_reg.s.txfifu_en = 1;
pcs_int_en_reg.s.an_err_en = 1;
/*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
/*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
}
cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
}
/**
* __cvmx_interrupt_pcsxx_int_en_reg_enable - enable all interrupt bits in cvmx_pcsxx_int_en_reg_t
* @index: interrupt register block_id
*/
void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
{
union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
pcsx_int_en_reg.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
pcsx_int_en_reg.s.algnlos_en = 1;
pcsx_int_en_reg.s.synlos_en = 1;
pcsx_int_en_reg.s.bitlckls_en = 1;
pcsx_int_en_reg.s.rxsynbad_en = 1;
pcsx_int_en_reg.s.rxbad_en = 1;
pcsx_int_en_reg.s.txflt_en = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
/* Skipping pcsx_int_en_reg.s.reserved_6_63 */
pcsx_int_en_reg.s.algnlos_en = 1;
pcsx_int_en_reg.s.synlos_en = 1;
pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
pcsx_int_en_reg.s.rxsynbad_en = 1;
pcsx_int_en_reg.s.rxbad_en = 1;
pcsx_int_en_reg.s.txflt_en = 1;
}
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
}
/**
* __cvmx_interrupt_spxx_int_msk_enable - enable all interrupt bits in cvmx_spxx_int_msk_t
* @index: interrupt register block_id
*/
void __cvmx_interrupt_spxx_int_msk_enable(int index)
{
union cvmx_spxx_int_msk spx_int_msk;
cvmx_write_csr(CVMX_SPXX_INT_REG(index),
cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
spx_int_msk.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping spx_int_msk.s.reserved_12_63 */
spx_int_msk.s.calerr = 1;
spx_int_msk.s.syncerr = 1;
spx_int_msk.s.diperr = 1;
spx_int_msk.s.tpaovr = 1;
spx_int_msk.s.rsverr = 1;
spx_int_msk.s.drwnng = 1;
spx_int_msk.s.clserr = 1;
spx_int_msk.s.spiovr = 1;
/* Skipping spx_int_msk.s.reserved_2_3 */
spx_int_msk.s.abnorm = 1;
spx_int_msk.s.prtnxa = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping spx_int_msk.s.reserved_12_63 */
spx_int_msk.s.calerr = 1;
spx_int_msk.s.syncerr = 1;
spx_int_msk.s.diperr = 1;
spx_int_msk.s.tpaovr = 1;
spx_int_msk.s.rsverr = 1;
spx_int_msk.s.drwnng = 1;
spx_int_msk.s.clserr = 1;
spx_int_msk.s.spiovr = 1;
/* Skipping spx_int_msk.s.reserved_2_3 */
spx_int_msk.s.abnorm = 1;
spx_int_msk.s.prtnxa = 1;
}
cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
}
/**
* __cvmx_interrupt_stxx_int_msk_enable - enable all interrupt bits in cvmx_stxx_int_msk_t
* @index: interrupt register block_id
*/
void __cvmx_interrupt_stxx_int_msk_enable(int index)
{
union cvmx_stxx_int_msk stx_int_msk;
cvmx_write_csr(CVMX_STXX_INT_REG(index),
cvmx_read_csr(CVMX_STXX_INT_REG(index)));
stx_int_msk.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
/* Skipping stx_int_msk.s.reserved_8_63 */
stx_int_msk.s.frmerr = 1;
stx_int_msk.s.unxfrm = 1;
stx_int_msk.s.nosync = 1;
stx_int_msk.s.diperr = 1;
stx_int_msk.s.datovr = 1;
stx_int_msk.s.ovrbst = 1;
stx_int_msk.s.calpar1 = 1;
stx_int_msk.s.calpar0 = 1;
}
if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
/* Skipping stx_int_msk.s.reserved_8_63 */
stx_int_msk.s.frmerr = 1;
stx_int_msk.s.unxfrm = 1;
stx_int_msk.s.nosync = 1;
stx_int_msk.s.diperr = 1;
stx_int_msk.s.datovr = 1;
stx_int_msk.s.ovrbst = 1;
stx_int_msk.s.calpar1 = 1;
stx_int_msk.s.calpar0 = 1;
}
cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
}
| linux-master | arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c |
/***********************license start***************
* Author: Cavium Networks
*
* Contact: [email protected]
* This file is part of the OCTEON SDK
*
* Copyright (C) 2003-2018 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for XAUI initialization, configuration,
* and monitoring.
*
*/
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
int __cvmx_helper_xaui_enumerate(int interface)
{
union cvmx_gmxx_hg2_control gmx_hg2_control;
/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
if (gmx_hg2_control.s.hg2tx_en)
return 16;
else
return 1;
}
/*
* Probe a XAUI interface and determine the number of ports
* connected to it. The XAUI interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_xaui_probe(int interface)
{
int i;
union cvmx_gmxx_inf_mode mode;
/*
* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
* interface needs to be enabled before IPD otherwise per port
* backpressure may not work properly.
*/
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
mode.s.en = 1;
cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
__cvmx_helper_setup_gmx(interface, 1);
/*
* Setup PKO to support 16 ports for HiGig2 virtual
* ports. We're pointing all of the PKO packet ports for this
* interface to the XAUI. This allows us to use HiGig2
* backpressure per port.
*/
for (i = 0; i < 16; i++) {
union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
pko_mem_port_ptrs.u64 = 0;
/*
* We set each PKO port to have equal priority in a
* round robin fashion.
*/
pko_mem_port_ptrs.s.static_p = 0;
pko_mem_port_ptrs.s.qos_mask = 0xff;
/* All PKO ports map to the same XAUI hardware port */
pko_mem_port_ptrs.s.eid = interface * 4;
pko_mem_port_ptrs.s.pid = interface * 16 + i;
cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
}
return __cvmx_helper_xaui_enumerate(interface);
}
/*
* Bringup and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_xaui_enable(int interface)
{
union cvmx_gmxx_prtx_cfg gmx_cfg;
union cvmx_pcsxx_control1_reg xauiCtl;
union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
union cvmx_gmxx_tx_int_en gmx_tx_int_en;
union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
/* Setup PKND */
if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
}
/* (1) Interface has already been enabled. */
/* (2) Disable GMX. */
xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
xauiMiscCtl.s.gmxeno = 1;
cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
/* (3) Disable GMX and PCSX interrupts. */
gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
/* (4) Bring up the PCSX and GMX reconciliation layer. */
/* (4)a Set polarity and lane swapping. */
/* (4)b */
gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
	/* Enable better IFG packing; this improves performance */
gmxXauiTxCtl.s.dic_en = 1;
gmxXauiTxCtl.s.uni_en = 0;
cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
	/* (4)c Apply reset sequence */
xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
xauiCtl.s.lo_pwr = 0;
/* Issuing a reset here seems to hang some CN66XX/CN68XX chips. */
if (!OCTEON_IS_MODEL(OCTEON_CN66XX) &&
!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
xauiCtl.s.reset = 1;
cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
/* Wait for PCS to come out of reset */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
reset, ==, 0, 10000))
return -1;
/* Wait for PCS to be aligned */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSXX_10GBX_STATUS_REG(interface),
union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
return -1;
/* Wait for RX to be ready */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
status, ==, 0, 10000))
return -1;
/* (6) Configure GMX */
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.en = 0;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
/* Wait for GMX RX to be idle */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
rx_idle, ==, 1, 10000))
return -1;
/* Wait for GMX TX to be idle */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
tx_idle, ==, 1, 10000))
return -1;
/* GMX configure */
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.speed = 1;
gmx_cfg.s.speed_msb = 0;
gmx_cfg.s.slottime = 1;
cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
/* (7) Clear out any error state */
cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
/* Wait for receive link */
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
rcv_lnk, ==, 1, 10000))
return -1;
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
xmtflt, ==, 0, 10000))
return -1;
if (CVMX_WAIT_FOR_FIELD64
(CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
rcvflt, ==, 0, 10000))
return -1;
cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
/* (8) Enable packet reception */
xauiMiscCtl.s.gmxeno = 0;
cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
__cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
__cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
__cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
__cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
__cvmx_interrupt_gmxx_enable(interface);
return 0;
}
/*
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
union cvmx_helper_link_info __cvmx_helper_xaui_link_get(int ipd_port)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
union cvmx_helper_link_info result;
gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
pcsxx_status1_reg.u64 =
cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
result.u64 = 0;
/* Only return a link if both RX and TX are happy */
if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
(pcsxx_status1_reg.s.rcv_lnk == 1)) {
result.s.link_up = 1;
result.s.full_duplex = 1;
result.s.speed = 10000;
} else {
/* Disable GMX and PCSX interrupts. */
cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
}
return result;
}
/*
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get().
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_xaui_link_set(int ipd_port, union cvmx_helper_link_info link_info)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
/* If the link shouldn't be up, then just return */
if (!link_info.s.link_up)
return 0;
/* Do nothing if both RX and TX are happy */
if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
return 0;
/* Bring the link up */
return __cvmx_helper_xaui_enable(interface);
}
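/*
 * Illustrative sketch, not part of the original file: a caller polling the
 * XAUI link would read the auto-negotiated state with the getter above and,
 * if the link is up, push that state back through the matching setter.
 */
static __maybe_unused void example_xaui_link_poll(int ipd_port)
{
	union cvmx_helper_link_info info = __cvmx_helper_xaui_link_get(ipd_port);

	if (info.s.link_up)
		__cvmx_helper_xaui_link_set(ipd_port, info);
}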
| linux-master | arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Suspend support specific for mips.
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <[email protected]>
* Wu Zhangjin <[email protected]>
*/
#include <asm/sections.h>
#include <asm/fpu.h>
#include <asm/dsp.h>
static u32 saved_status;
struct pt_regs saved_regs;
void save_processor_state(void)
{
saved_status = read_c0_status();
if (is_fpu_owner())
save_fp(current);
save_dsp(current);
}
void restore_processor_state(void)
{
write_c0_status(saved_status);
if (is_fpu_owner())
restore_fp(current);
restore_dsp(current);
}
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end));
return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
| linux-master | arch/mips/power/cpu.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/tlbflush.h>
extern int restore_image(void);
int swsusp_arch_resume(void)
{
/* Avoid TLB mismatch during and after kernel resume */
local_flush_tlb_all();
return restore_image();
}
| linux-master | arch/mips/power/hibernate.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote Inc.
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/mipsregs.h>
#include <loongson.h>
#include <machine.h>
#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
#define LOONGSON_INT_BIT_INT0 (1 << 11)
#define LOONGSON_INT_BIT_INT1 (1 << 12)
/*
 * The generic i8259_irq() makes the kernel hang on booting. Since we cannot
* get the irq via the IRR directly, we access the ISR instead.
*/
int mach_i8259_irq(void)
{
int irq, isr;
irq = -1;
if ((LOONGSON_INTISR & LOONGSON_INTEN) & LOONGSON_INT_BIT_INT0) {
raw_spin_lock(&i8259A_lock);
isr = inb(PIC_MASTER_CMD) &
~inb(PIC_MASTER_IMR) & ~(1 << PIC_CASCADE_IR);
if (!isr)
isr = (inb(PIC_SLAVE_CMD) & ~inb(PIC_SLAVE_IMR)) << 8;
irq = ffs(isr) - 1;
if (unlikely(irq == 7)) {
/*
* This may be a spurious interrupt.
*
* Read the interrupt status register (ISR). If the most
* significant bit is not set then there is no valid
* interrupt.
*/
outb(0x0B, PIC_MASTER_ISR); /* ISR register */
if (~inb(PIC_MASTER_ISR) & 0x80)
irq = -1;
}
raw_spin_unlock(&i8259A_lock);
}
return irq;
}
EXPORT_SYMBOL(mach_i8259_irq);
static void i8259_irqdispatch(void)
{
int irq;
irq = mach_i8259_irq();
if (irq >= 0)
do_IRQ(irq);
else
spurious_interrupt();
}
void mach_irq_dispatch(unsigned int pending)
{
if (pending & CAUSEF_IP7)
do_IRQ(LOONGSON_TIMER_IRQ);
else if (pending & CAUSEF_IP6) { /* North Bridge, Perf counter */
bonito_irqdispatch();
} else if (pending & CAUSEF_IP3) /* CPU UART */
do_IRQ(LOONGSON_UART_IRQ);
else if (pending & CAUSEF_IP2) /* South Bridge */
i8259_irqdispatch();
else
spurious_interrupt();
}
static irqreturn_t ip6_action(int cpl, void *dev_id)
{
return IRQ_HANDLED;
}
void __init mach_init_irq(void)
{
	/* init all controllers
* 0-15 ------> i8259 interrupt
* 16-23 ------> mips cpu interrupt
* 32-63 ------> bonito irq
*/
/* setup cs5536 as high level trigger */
LOONGSON_INTPOL = LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1;
LOONGSON_INTEDGE &= ~(LOONGSON_INT_BIT_INT0 | LOONGSON_INT_BIT_INT1);
/* Sets the first-level interrupt dispatcher. */
mips_cpu_irq_init();
init_i8259_irqs();
bonito_irq_init();
/* setup north bridge irq (bonito) */
if (request_irq(LOONGSON_NORTH_BRIDGE_IRQ, ip6_action,
IRQF_SHARED | IRQF_NO_THREAD, "cascade", ip6_action))
pr_err("Failed to register north bridge cascade interrupt\n");
/* setup source bridge irq (i8259) */
if (request_irq(LOONGSON_SOUTH_BRIDGE_IRQ, no_action,
IRQF_NO_THREAD | IRQF_NO_SUSPEND, "cascade", NULL))
pr_err("Failed to register south bridge cascade interrupt\n");
}
| linux-master | arch/mips/loongson2ef/lemote-2f/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <asm/bootinfo.h>
#include <loongson.h>
void __init mach_prom_init_machtype(void)
{
	/* We share the same kernel image file among the Lemote 2F family
	 * of machines and provide the machtype= kernel command line option
	 * so that users can indicate their machine.  The latest PMON passes
	 * this command line automatically, and fortunately, up to now, we
	 * can get the machine type directly from the PMON_VER= command line
	 * for all machines except the NAS; on older machines this option
	 * helps the users a lot.
	 *
	 * If no "machtype=" is passed, get the machine type from "PMON_VER=".
	 * PMON_VER=LM8089	Lemote 8.9'' netbook
	 *	    LM8101	Lemote 10.1'' netbook
	 *	(The above two netbooks have the same kernel support)
	 *	    LM6XXX	Lemote FuLoong(2F) box series
	 *	    LM9XXX	Lemote LynLoong PC series
	 */
if (strstr(arcs_cmdline, "PMON_VER=LM")) {
if (strstr(arcs_cmdline, "PMON_VER=LM8"))
mips_machtype = MACH_LEMOTE_YL2F89;
else if (strstr(arcs_cmdline, "PMON_VER=LM6"))
mips_machtype = MACH_LEMOTE_FL2F;
else if (strstr(arcs_cmdline, "PMON_VER=LM9"))
mips_machtype = MACH_LEMOTE_LL2F;
else
mips_machtype = MACH_LEMOTE_NAS;
strcat(arcs_cmdline, " machtype=");
strcat(arcs_cmdline, get_system_type());
strcat(arcs_cmdline, " ");
}
}
| linux-master | arch/mips/loongson2ef/lemote-2f/machtype.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Basic KB3310B Embedded Controller support for the YeeLoong 2F netbook
*
* Copyright (C) 2008 Lemote Inc.
* Author: liujl <[email protected]>, 2008-04-20
*/
#include <linux/io.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include "ec_kb3310b.h"
static DEFINE_SPINLOCK(index_access_lock);
static DEFINE_SPINLOCK(port_access_lock);
unsigned char ec_read(unsigned short addr)
{
unsigned char value;
unsigned long flags;
spin_lock_irqsave(&index_access_lock, flags);
outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
outb((addr & 0x00ff), EC_IO_PORT_LOW);
value = inb(EC_IO_PORT_DATA);
spin_unlock_irqrestore(&index_access_lock, flags);
return value;
}
EXPORT_SYMBOL_GPL(ec_read);
void ec_write(unsigned short addr, unsigned char val)
{
unsigned long flags;
spin_lock_irqsave(&index_access_lock, flags);
outb((addr & 0xff00) >> 8, EC_IO_PORT_HIGH);
outb((addr & 0x00ff), EC_IO_PORT_LOW);
outb(val, EC_IO_PORT_DATA);
/* flush the write action */
inb(EC_IO_PORT_DATA);
spin_unlock_irqrestore(&index_access_lock, flags);
}
EXPORT_SYMBOL_GPL(ec_write);
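/*
 * Illustrative sketch, not part of the original file: EC registers are read
 * and written through the index/data port pair used above.  For example, the
 * lid state can be checked by reading a single EC register and comparing it
 * with the "lid open" value; REG_LID_DETECT and BIT_LID_DETECT_ON are
 * assumed to come from ec_kb3310b.h.
 */
static __maybe_unused int example_lid_is_open(void)
{
	return ec_read(REG_LID_DETECT) == BIT_LID_DETECT_ON;
}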
/*
* This function is used for EC command writes and corresponding status queries.
*/
int ec_query_seq(unsigned char cmd)
{
int timeout;
unsigned char status;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&port_access_lock, flags);
	/* make the chip go into reset mode */
udelay(EC_REG_DELAY);
outb(cmd, EC_CMD_PORT);
udelay(EC_REG_DELAY);
/* check if the command is received by ec */
timeout = EC_CMD_TIMEOUT;
status = inb(EC_STS_PORT);
while (timeout-- && (status & (1 << 1))) {
status = inb(EC_STS_PORT);
udelay(EC_REG_DELAY);
}
spin_unlock_irqrestore(&port_access_lock, flags);
if (timeout <= 0) {
printk(KERN_ERR "%s: deadable error : timeout...\n", __func__);
ret = -EINVAL;
} else
printk(KERN_INFO
"(%x/%d)ec issued command %d status : 0x%x\n",
timeout, EC_CMD_TIMEOUT - timeout, cmd, status);
return ret;
}
EXPORT_SYMBOL_GPL(ec_query_seq);
/*
* Send query command to EC to get the proper event number
*/
int ec_query_event_num(void)
{
return ec_query_seq(CMD_GET_EVENT_NUM);
}
EXPORT_SYMBOL(ec_query_event_num);
/*
* Get event number from EC
*
* NOTE: This routine must follow the query_event_num function in the
 * interrupt handler.
*/
int ec_get_event_num(void)
{
int timeout = 100;
unsigned char value;
unsigned char status;
udelay(EC_REG_DELAY);
status = inb(EC_STS_PORT);
udelay(EC_REG_DELAY);
while (timeout-- && !(status & (1 << 0))) {
status = inb(EC_STS_PORT);
udelay(EC_REG_DELAY);
}
if (timeout <= 0) {
pr_info("%s: get event number timeout.\n", __func__);
return -EINVAL;
}
value = inb(EC_DAT_PORT);
udelay(EC_REG_DELAY);
return value;
}
EXPORT_SYMBOL(ec_get_event_num);
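/*
 * Illustrative sketch, not part of the original file: in the SCI interrupt
 * path the event number is retrieved in two steps, as the comments above
 * describe - first issue the query command, then read the event number back
 * from the EC.
 */
static __maybe_unused int example_read_sci_event(void)
{
	if (ec_query_event_num() < 0)
		return -EINVAL;
	return ec_get_event_num();
}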
| linux-master | arch/mips/loongson2ef/lemote-2f/ec_kb3310b.c |
/*
* Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, [email protected]
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpufreq.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <asm/mach-loongson2ef/loongson.h>
enum {
DC_ZERO, DC_25PT = 2, DC_37PT, DC_50PT, DC_62PT, DC_75PT,
DC_87PT, DC_DISABLE, DC_RESV
};
struct cpufreq_frequency_table loongson2_clockmod_table[] = {
{0, DC_RESV, CPUFREQ_ENTRY_INVALID},
{0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
{0, DC_25PT, 0},
{0, DC_37PT, 0},
{0, DC_50PT, 0},
{0, DC_62PT, 0},
{0, DC_75PT, 0},
{0, DC_87PT, 0},
{0, DC_DISABLE, 0},
{0, DC_RESV, CPUFREQ_TABLE_END},
};
EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
int loongson2_cpu_set_rate(unsigned long rate_khz)
{
struct cpufreq_frequency_table *pos;
int regval;
cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
if (rate_khz == pos->frequency)
break;
if (rate_khz != pos->frequency)
return -ENOTSUPP;
regval = readl(LOONGSON_CHIPCFG);
regval = (regval & ~0x7) | (pos->driver_data - 1);
writel(regval, LOONGSON_CHIPCFG);
return 0;
}
EXPORT_SYMBOL_GPL(loongson2_cpu_set_rate);
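/*
 * Illustrative sketch, not part of the original file: a cpufreq driver built
 * on this table fills in the frequency fields at runtime and then programs a
 * chosen operating point through loongson2_cpu_set_rate().  The table index
 * used below (the DC_50PT, i.e. half-speed, entry) is an arbitrary example.
 */
static __maybe_unused int example_set_half_speed(void)
{
	return loongson2_cpu_set_rate(loongson2_clockmod_table[4].frequency);
}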
| linux-master | arch/mips/loongson2ef/lemote-2f/clock.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Board-specific reboot/shutdown routines
*
* Copyright (c) 2009 Philippe Vachon <[email protected]>
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <cs5536/cs5536.h>
#include "ec_kb3310b.h"
static void reset_cpu(void)
{
/*
	 * reset the cpu to full speed; this is needed when cpu frequency
	 * scaling is enabled
*/
writel(readl(LOONGSON_CHIPCFG) | 0x7, LOONGSON_CHIPCFG);
}
/* reset support for fuloong2f */
static void fl2f_reboot(void)
{
reset_cpu();
	/* send a reset signal to the south bridge.
	 *
	 * NOTE: if "Power Management" is enabled in the kernel, the rtl8169
	 * will not reset normally with this reset operation and will not
	 * work in PMON, but you can type the halt command and then reboot;
	 * it seems the hardware reset logic does not work normally.
	 */
{
u32 hi, lo;
_rdmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), &hi, &lo);
lo |= 0x00000001;
_wrmsr(DIVIL_MSR_REG(DIVIL_SOFT_RESET), hi, lo);
}
}
static void fl2f_shutdown(void)
{
u32 hi, lo, val;
int gpio_base;
/* get gpio base */
_rdmsr(DIVIL_MSR_REG(DIVIL_LBAR_GPIO), &hi, &lo);
gpio_base = lo & 0xff00;
/* make cs5536 gpio13 output enable */
val = inl(gpio_base + GPIOL_OUT_EN);
val &= ~(1 << (16 + 13));
val |= (1 << 13);
outl(val, gpio_base + GPIOL_OUT_EN);
mmiowb();
/* make cs5536 gpio13 output low level voltage. */
val = inl(gpio_base + GPIOL_OUT_VAL) & ~(1 << (13));
val |= (1 << (16 + 13));
outl(val, gpio_base + GPIOL_OUT_VAL);
mmiowb();
}
/* reset support for yeeloong2f and mengloong2f notebook */
static void ml2f_reboot(void)
{
reset_cpu();
	/* send a reset signal to the EC (embedded controller) */
ec_write(REG_RESET, BIT_RESET_ON);
}
#define yl2f89_reboot ml2f_reboot
/* the Menglong (7-inch) laptop has different shutdown logic from the 8.9-inch one */
#define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
#define EC_SHUTDOWN_IO_PORT_LOW 0xff2e
#define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
#define REG_SHUTDOWN_HIGH 0xFC
#define REG_SHUTDOWN_LOW 0x29
#define BIT_SHUTDOWN_ON (1 << 1)
static void ml2f_shutdown(void)
{
u8 val;
u64 i;
outb(REG_SHUTDOWN_HIGH, EC_SHUTDOWN_IO_PORT_HIGH);
outb(REG_SHUTDOWN_LOW, EC_SHUTDOWN_IO_PORT_LOW);
mmiowb();
val = inb(EC_SHUTDOWN_IO_PORT_DATA);
outb(val & (~BIT_SHUTDOWN_ON), EC_SHUTDOWN_IO_PORT_DATA);
mmiowb();
	/* we need to wait long enough here; how many microseconds are needed is unclear */
for (i = 0; i < 0x10000; i++)
delay();
outb(val | BIT_SHUTDOWN_ON, EC_SHUTDOWN_IO_PORT_DATA);
mmiowb();
}
static void yl2f89_shutdown(void)
{
/* cpu-gpio0 output low */
LOONGSON_GPIODATA &= ~0x00000001;
/* cpu-gpio0 as output */
LOONGSON_GPIOIE &= ~0x00000001;
}
void mach_prepare_reboot(void)
{
switch (mips_machtype) {
case MACH_LEMOTE_FL2F:
case MACH_LEMOTE_NAS:
case MACH_LEMOTE_LL2F:
fl2f_reboot();
break;
case MACH_LEMOTE_ML2F7:
ml2f_reboot();
break;
case MACH_LEMOTE_YL2F89:
yl2f89_reboot();
break;
default:
break;
}
}
void mach_prepare_shutdown(void)
{
switch (mips_machtype) {
case MACH_LEMOTE_FL2F:
case MACH_LEMOTE_NAS:
case MACH_LEMOTE_LL2F:
fl2f_shutdown();
break;
case MACH_LEMOTE_ML2F7:
ml2f_shutdown();
break;
case MACH_LEMOTE_YL2F89:
yl2f89_shutdown();
break;
default:
break;
}
}
| linux-master | arch/mips/loongson2ef/lemote-2f/reset.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr | 0x80000000;
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
if (dma_addr > 0x8fffffff)
return dma_addr;
return dma_addr & 0x0fffffff;
}
| linux-master | arch/mips/loongson2ef/lemote-2f/dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Lemote loongson2f family machines' specific suspend support
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin <[email protected]>
*/
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/i8042.h>
#include <linux/export.h>
#include <asm/i8259.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <cs5536/cs5536_mfgpt.h>
#include "ec_kb3310b.h"
#define I8042_KBD_IRQ 1
#define I8042_CTR_KBDINT 0x01
#define I8042_CTR_KBDDIS 0x10
static unsigned char i8042_ctr;
static int i8042_enable_kbd_port(void)
{
if (i8042_command(&i8042_ctr, I8042_CMD_CTL_RCTR)) {
pr_err("i8042.c: Can't read CTR while enabling i8042 kbd port."
"\n");
return -EIO;
}
i8042_ctr &= ~I8042_CTR_KBDDIS;
i8042_ctr |= I8042_CTR_KBDINT;
if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) {
i8042_ctr &= ~I8042_CTR_KBDINT;
i8042_ctr |= I8042_CTR_KBDDIS;
pr_err("i8042.c: Failed to enable KBD port.\n");
return -EIO;
}
return 0;
}
void setup_wakeup_events(void)
{
int irq_mask;
switch (mips_machtype) {
case MACH_LEMOTE_ML2F7:
case MACH_LEMOTE_YL2F89:
/* open the keyboard irq in i8259A */
outb((0xff & ~(1 << I8042_KBD_IRQ)), PIC_MASTER_IMR);
irq_mask = inb(PIC_MASTER_IMR);
/* enable keyboard port */
i8042_enable_kbd_port();
/* Wakeup CPU via SCI lid open event */
outb(irq_mask & ~(1 << PIC_CASCADE_IR), PIC_MASTER_IMR);
inb(PIC_MASTER_IMR);
outb(0xff & ~(1 << (SCI_IRQ_NUM - 8)), PIC_SLAVE_IMR);
inb(PIC_SLAVE_IMR);
break;
default:
break;
}
}
static struct delayed_work lid_task;
static int initialized;
/* yeeloong_report_lid_status will be implemented in yeeloong_laptop.c */
sci_handler yeeloong_report_lid_status;
EXPORT_SYMBOL(yeeloong_report_lid_status);
static void yeeloong_lid_update_task(struct work_struct *work)
{
if (yeeloong_report_lid_status)
yeeloong_report_lid_status(BIT_LID_DETECT_ON);
}
int wakeup_loongson(void)
{
int irq;
/* query the interrupt number */
irq = mach_i8259_irq();
if (irq < 0)
return 0;
printk(KERN_INFO "%s: irq = %d\n", __func__, irq);
if (irq == I8042_KBD_IRQ)
return 1;
else if (irq == SCI_IRQ_NUM) {
int ret, sci_event;
/* query the event number */
ret = ec_query_seq(CMD_GET_EVENT_NUM);
if (ret < 0)
return 0;
sci_event = ec_get_event_num();
if (sci_event < 0)
return 0;
if (sci_event == EVENT_LID) {
int lid_status;
/* check the LID status */
lid_status = ec_read(REG_LID_DETECT);
/* wakeup cpu when people open the LID */
if (lid_status == BIT_LID_DETECT_ON) {
/* If we call it directly here, the WARNING
* will be sent out by getnstimeofday
* via "WARN_ON(timekeeping_suspended);"
* because we can not schedule in suspend mode.
*/
if (initialized == 0) {
INIT_DELAYED_WORK(&lid_task,
yeeloong_lid_update_task);
initialized = 1;
}
schedule_delayed_work(&lid_task, 1);
return 1;
}
}
}
return 0;
}
void __weak mach_suspend(void)
{
disable_mfgpt0_counter();
}
void __weak mach_resume(void)
{
enable_mfgpt0_counter();
}
| linux-master | arch/mips/loongson2ef/lemote-2f/pm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/interrupt.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <loongson.h>
static void i8259_irqdispatch(void)
{
int irq;
irq = i8259_irq();
if (irq >= 0)
do_IRQ(irq);
else
spurious_interrupt();
}
asmlinkage void mach_irq_dispatch(unsigned int pending)
{
if (pending & CAUSEF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP6) /* perf counter loverflow */
return;
else if (pending & CAUSEF_IP5)
i8259_irqdispatch();
else if (pending & CAUSEF_IP2)
bonito_irqdispatch();
else
spurious_interrupt();
}
void __init mach_init_irq(void)
{
int irq;
/* init all controllers
* 0-15 ------> i8259 interrupt
* 16-23 ------> mips cpu interrupt
* 32-63 ------> bonito irq
*/
/* most bonito irqs should be level triggered */
LOONGSON_INTEDGE = LOONGSON_ICU_SYSTEMERR | LOONGSON_ICU_MASTERERR |
LOONGSON_ICU_RETRYERR | LOONGSON_ICU_MBOXES;
/* Sets the first-level interrupt dispatcher. */
mips_cpu_irq_init();
init_i8259_irqs();
bonito_irq_init();
/* bonito irq at IP2 */
irq = MIPS_CPU_IRQ_BASE + 2;
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", irq);
/* 8259 irq at IP5 */
irq = MIPS_CPU_IRQ_BASE + 5;
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to request irq %d (cascade)\n", irq);
}
| linux-master | arch/mips/loongson2ef/fuloong-2e/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Board-specific reboot/shutdown routines
* Copyright (c) 2009 Philippe Vachon <[email protected]>
*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <loongson.h>
void mach_prepare_reboot(void)
{
LOONGSON_GENCFG &= ~(1 << 2);
LOONGSON_GENCFG |= (1 << 2);
}
void mach_prepare_shutdown(void)
{
}
| linux-master | arch/mips/loongson2ef/fuloong-2e/reset.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-direct.h>
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr | 0x80000000;
}
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
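/* Clear bit 31 to undo the window offset applied by phys_to_dma(). */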
return dma_addr & 0x7fffffff;
}
| linux-master | arch/mips/loongson2ef/fuloong-2e/dma.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Ralf Baechle ([email protected])
*
* Copyright (C) 2009 Lemote, Inc.
* Author: Yan hua ([email protected])
* Author: Wu Zhangjin ([email protected])
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/serial_8250.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <machine.h>
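/* PORT() describes a UART reached via port I/O; PORT_M() a memory-mapped one whose base addresses are filled in by serial_init() at boot. */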
#define PORT(int, clk) \
{ \
.irq = int, \
.uartclk = clk, \
.iotype = UPIO_PORT, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
#define PORT_M(int, clk) \
{ \
.irq = MIPS_CPU_IRQ_BASE + (int), \
.uartclk = clk, \
.iotype = UPIO_MEM, \
.membase = (void __iomem *)NULL, \
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
.regshift = 0, \
}
static struct plat_serial8250_port uart8250_data[MACH_LOONGSON_END + 1] = {
[MACH_LOONGSON_UNKNOWN] = {},
[MACH_LEMOTE_FL2E] = PORT(4, 1843200),
[MACH_LEMOTE_FL2F] = PORT(3, 1843200),
[MACH_LEMOTE_ML2F7] = PORT_M(3, 3686400),
[MACH_LEMOTE_YL2F89] = PORT_M(3, 3686400),
[MACH_DEXXON_GDIUM2F10] = PORT_M(3, 3686400),
[MACH_LEMOTE_NAS] = PORT_M(3, 3686400),
[MACH_LEMOTE_LL2F] = PORT(3, 1843200),
[MACH_LOONGSON_END] = {},
};
static struct platform_device uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
};
static int __init serial_init(void)
{
unsigned char iotype;
iotype = uart8250_data[mips_machtype].iotype;
if (UPIO_MEM == iotype) {
uart8250_data[mips_machtype].mapbase =
loongson_uart_base;
uart8250_data[mips_machtype].membase =
(void __iomem *)_loongson_uart_base;
}
else if (UPIO_PORT == iotype)
uart8250_data[mips_machtype].iobase =
loongson_uart_base - LOONGSON_PCIIO_BASE;
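/* Zero the entry that follows so the 8250 driver sees a terminating sentinel. */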
memset(&uart8250_data[mips_machtype + 1], 0,
sizeof(struct plat_serial8250_port));
uart8250_device.dev.platform_data = &uart8250_data[mips_machtype];
return platform_device_register(&uart8250_device);
}
module_init(serial_init);
static void __exit serial_exit(void)
{
platform_device_unregister(&uart8250_device);
}
module_exit(serial_exit);
| linux-master | arch/mips/loongson2ef/common/serial.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <loongson.h>
/*
* the first level int-handler will jump here if it is a bonito irq
*/
void bonito_irqdispatch(void)
{
u32 int_status;
int i;
/* workaround the IO DMA problem: let the CPU loop to allow DMA to finish */
int_status = LOONGSON_INTISR;
while (int_status & (1 << 10)) {
udelay(1);
int_status = LOONGSON_INTISR;
}
/* Get pending sources, masked by current enables */
int_status = LOONGSON_INTISR & LOONGSON_INTEN;
if (int_status) {
i = __ffs(int_status);
do_IRQ(LOONGSON_IRQ_BASE + i);
}
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending;
pending = read_c0_cause() & read_c0_status() & ST0_IM;
/* machine-specific plat_irq_dispatch */
mach_irq_dispatch(pending);
}
void __init arch_init_irq(void)
{
/*
* Clear all of the interrupts while we change the table around a bit.
* int-handler is not on bootstrap
*/
clear_c0_status(ST0_IM | ST0_BEV);
/* no steer */
LOONGSON_INTSTEER = 0;
/*
* Mask out all interrupts by writing "1" to every bit position in
* the interrupt reset reg.
*/
LOONGSON_INTENCLR = ~0;
/* machine specific irq init */
mach_init_irq();
}
| linux-master | arch/mips/loongson2ef/common/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Lemote Fuloong platform support
*
* Copyright(c) 2010 Arnaud Patard <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mc146818rtc.h>
static struct resource loongson_rtc_resources[] = {
{
.start = RTC_PORT(0),
.end = RTC_PORT(1),
.flags = IORESOURCE_IO,
}, {
.start = RTC_IRQ,
.end = RTC_IRQ,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device loongson_rtc_device = {
.name = "rtc_cmos",
.id = -1,
.resource = loongson_rtc_resources,
.num_resources = ARRAY_SIZE(loongson_rtc_resources),
};
static int __init loongson_rtc_platform_init(void)
{
platform_device_register(&loongson_rtc_device);
return 0;
}
device_initcall(loongson_rtc_platform_init);
| linux-master | arch/mips/loongson2ef/common/rtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2001 MontaVista Software Inc.
* Author: Jun Sun, [email protected] or [email protected]
* Copyright (C) 2000, 2001 Ralf Baechle ([email protected])
*
* Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <loongson.h>
static inline void bonito_irq_enable(struct irq_data *d)
{
LOONGSON_INTENSET = (1 << (d->irq - LOONGSON_IRQ_BASE));
mmiowb();
}
static inline void bonito_irq_disable(struct irq_data *d)
{
LOONGSON_INTENCLR = (1 << (d->irq - LOONGSON_IRQ_BASE));
mmiowb();
}
static struct irq_chip bonito_irq_type = {
.name = "bonito_irq",
.irq_mask = bonito_irq_disable,
.irq_unmask = bonito_irq_enable,
};
void bonito_irq_init(void)
{
u32 i;
for (i = LOONGSON_IRQ_BASE; i < LOONGSON_IRQ_BASE + 32; i++)
irq_set_chip_and_handler(i, &bonito_irq_type,
handle_level_irq);
#ifdef CONFIG_CPU_LOONGSON2E
i = LOONGSON_IRQ_BASE + 10;
if (request_irq(i, no_action, 0, "dma_timeout", NULL))
pr_err("Failed to request irq %d (dma_timeout)\n", i);
#endif
}
| linux-master | arch/mips/loongson2ef/common/bonito-irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/memblock.h>
#include <asm/bootinfo.h>
#include <asm/traps.h>
#include <asm/smp-ops.h>
#include <asm/cacheflush.h>
#include <asm/fw/fw.h>
#include <loongson.h>
/* Loongson CPU address windows config space base address */
unsigned long __maybe_unused _loongson_addrwincfg_base;
static void __init mips_nmi_setup(void)
{
void *base;
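/* Install the NMI handler at CAC_BASE + 0x380 and make the I-cache coherent. */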
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
void __init prom_init(void)
{
#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
_loongson_addrwincfg_base = (unsigned long)
ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
#endif
fw_init_cmdline();
prom_init_machtype();
prom_init_env();
/* init base address of io space */
set_io_port_base((unsigned long)
ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
prom_init_memory();
/* init the UART base address */
prom_init_uart_base();
board_nmi_handler_setup = mips_nmi_setup;
}
| linux-master | arch/mips/loongson2ef/common/init.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, [email protected]
*/
#include <linux/pci.h>
#include <pci.h>
#include <loongson.h>
static struct resource loongson_pci_mem_resource = {
.name = "pci memory space",
.start = LOONGSON_PCI_MEM_START,
.end = LOONGSON_PCI_MEM_END,
.flags = IORESOURCE_MEM,
};
static struct resource loongson_pci_io_resource = {
.name = "pci io space",
.start = LOONGSON_PCI_IO_START,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
};
static struct pci_controller loongson_pci_controller = {
.pci_ops = &loongson_pci_ops,
.io_resource = &loongson_pci_io_resource,
.mem_resource = &loongson_pci_mem_resource,
.mem_offset = 0x00000000UL,
.io_offset = 0x00000000UL,
};
static void __init setup_pcimap(void)
{
/*
* local to PCI mapping for CPU accessing PCI space
* CPU address space [256M,448M] is window for accessing pci space
* we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M]
*
* pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
* [<2G] [384M,448M] [320M,384M] [0M,64M]
*/
LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
LOONGSON_PCIMAP_WIN(1, LOONGSON_PCILO1_BASE) |
LOONGSON_PCIMAP_WIN(0, 0);
/*
* PCI-DMA to local mapping: [2G,2G+256M] -> [0M,256M]
*/
LOONGSON_PCIBASE0 = 0x80000000ul; /* base: 2G -> mmap: 0M */
/* size: 256M, burst transmission, pre-fetch enable, 64bit */
LOONGSON_PCI_HIT0_SEL_L = 0xc000000cul;
LOONGSON_PCI_HIT0_SEL_H = 0xfffffffful;
LOONGSON_PCI_HIT1_SEL_L = 0x00000006ul; /* set this BAR as invalid */
LOONGSON_PCI_HIT1_SEL_H = 0x00000000ul;
LOONGSON_PCI_HIT2_SEL_L = 0x00000006ul; /* set this BAR as invalid */
LOONGSON_PCI_HIT2_SEL_H = 0x00000000ul;
/* avoid deadlock of PCI reading/writing lock operation */
LOONGSON_PCI_ISR4C = 0xd2000001ul;
/* do not revoke GNT to break a PCI transfer while the device has not
deasserted its GNT; some broken devices cannot handle that */
LOONGSON_PXARB_CFG = 0x00fe0105ul;
#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
/*
* set cpu addr window2 to map CPU address space to PCI address space
*/
LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);
#endif
}
static int __init pcibios_init(void)
{
setup_pcimap();
loongson_pci_controller.io_map_base = mips_io_port_base;
register_pci_controller(&loongson_pci_controller);
return 0;
}
arch_initcall(pcibios_init);
| linux-master | arch/mips/loongson2ef/common/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <mem.h>
#include <pci.h>
u32 memsize, highmemsize;
void __init prom_init_memory(void)
{
memblock_add(0x0, (memsize << 20));
#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
{
int bit;
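/* Compute a power-of-two window size (in bytes) covering all of memory, which is given in MB. */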
bit = fls(memsize + highmemsize);
if (bit != ffs(memsize + highmemsize))
bit += 20;
else
bit = bit + 20 - 1;
/* set cpu window3 to map CPU to DDR: 2G -> 2G */
LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
0x80000000ul, (1 << bit));
mmiowb();
}
#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */
#ifdef CONFIG_64BIT
if (highmemsize > 0)
memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20);
#endif /* CONFIG_64BIT */
}
| linux-master | arch/mips/loongson2ef/common/mem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*
* Copyright (c) 2009 Zhang Le <[email protected]>
*/
#include <linux/errno.h>
#include <asm/bootinfo.h>
#include <loongson.h>
#include <machine.h>
/* please ensure the length of the machtype string is less than 50 */
#define MACHTYPE_LEN 50
static const char *system_types[] = {
[MACH_LOONGSON_UNKNOWN] = "unknown loongson machine",
[MACH_LEMOTE_FL2E] = "lemote-fuloong-2e-box",
[MACH_LEMOTE_FL2F] = "lemote-fuloong-2f-box",
[MACH_LEMOTE_ML2F7] = "lemote-mengloong-2f-7inches",
[MACH_LEMOTE_YL2F89] = "lemote-yeeloong-2f-8.9inches",
[MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f",
[MACH_LEMOTE_NAS] = "lemote-nas-2f",
[MACH_LEMOTE_LL2F] = "lemote-lynloong-2f",
[MACH_LOONGSON_END] = NULL,
};
const char *get_system_type(void)
{
return system_types[mips_machtype];
}
void __weak __init mach_prom_init_machtype(void)
{
}
void __init prom_init_machtype(void)
{
char *p, str[MACHTYPE_LEN + 1];
int machtype = MACH_LEMOTE_FL2E;
mips_machtype = LOONGSON_MACHTYPE;
p = strstr(arcs_cmdline, "machtype=");
if (!p) {
mach_prom_init_machtype();
return;
}
p += strlen("machtype=");
strncpy(str, p, MACHTYPE_LEN);
str[MACHTYPE_LEN] = '\0';
p = strstr(str, " ");
if (p)
*p = '\0';
for (; system_types[machtype]; machtype++)
if (strstr(system_types[machtype], str)) {
mips_machtype = machtype;
break;
}
}
| linux-master | arch/mips/loongson2ef/common/machtype.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, [email protected]
*/
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/platform_device.h>
static struct platform_device loongson2_cpufreq_device = {
.name = "loongson2_cpufreq",
.id = -1,
};
static int __init loongson2_cpufreq_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
/* Only the 2F revision and its successors support CPUFreq */
if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON2F)
return platform_device_register(&loongson2_cpufreq_device);
return -ENODEV;
}
arch_initcall(loongson2_cpufreq_init);
| linux-master | arch/mips/loongson2ef/common/platform.c |