python_code | repo_name | file_path |
---|---|---|
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>
#include "decode-insn.h"
#include "simulate-insn.h"
/* Return:
* INSN_REJECTED If instruction is one not allowed to kprobe,
* INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
*/
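/*
 * Each CSKY_INSN_SET_SIMULATE(name, insn) line below is assumed (based on
 * simulate-insn.h) to be a match-and-return helper: if @insn matches the
 * named branch/jump encoding it installs the corresponding simulate handler
 * in @api and returns INSN_GOOD_NO_SLOT from this function; otherwise it
 * falls through, and an instruction that matches nothing is INSN_GOOD.
 */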
enum probe_insn __kprobes
csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
{
probe_opcode_t insn = le32_to_cpu(*addr);
CSKY_INSN_SET_SIMULATE(br16, insn);
CSKY_INSN_SET_SIMULATE(bt16, insn);
CSKY_INSN_SET_SIMULATE(bf16, insn);
CSKY_INSN_SET_SIMULATE(jmp16, insn);
CSKY_INSN_SET_SIMULATE(jsr16, insn);
CSKY_INSN_SET_SIMULATE(lrw16, insn);
CSKY_INSN_SET_SIMULATE(pop16, insn);
CSKY_INSN_SET_SIMULATE(br32, insn);
CSKY_INSN_SET_SIMULATE(bt32, insn);
CSKY_INSN_SET_SIMULATE(bf32, insn);
CSKY_INSN_SET_SIMULATE(jmp32, insn);
CSKY_INSN_SET_SIMULATE(jsr32, insn);
CSKY_INSN_SET_SIMULATE(lrw32, insn);
CSKY_INSN_SET_SIMULATE(pop32, insn);
CSKY_INSN_SET_SIMULATE(bez32, insn);
CSKY_INSN_SET_SIMULATE(bnez32, insn);
CSKY_INSN_SET_SIMULATE(bnezad32, insn);
CSKY_INSN_SET_SIMULATE(bhsz32, insn);
CSKY_INSN_SET_SIMULATE(bhz32, insn);
CSKY_INSN_SET_SIMULATE(blsz32, insn);
CSKY_INSN_SET_SIMULATE(blz32, insn);
CSKY_INSN_SET_SIMULATE(bsr32, insn);
CSKY_INSN_SET_SIMULATE(jmpi32, insn);
CSKY_INSN_SET_SIMULATE(jsri32, insn);
return INSN_GOOD;
}
| linux-master | arch/csky/kernel/probes/decode-insn.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2016 Pratyush Anand <[email protected]>
*/
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
#define UPROBE_TRAP_NR UINT_MAX
bool is_swbp_insn(uprobe_opcode_t *insn)
{
return (*insn & 0xffff) == UPROBE_SWBP_INSN;
}
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
probe_opcode_t insn;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
auprobe->insn_size = is_insn32(insn) ? 4 : 2;
switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
case INSN_REJECTED:
return -EINVAL;
case INSN_GOOD_NO_SLOT:
auprobe->simulate = true;
break;
default:
break;
}
return 0;
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
utask->autask.saved_trap_no = current->thread.trap_no;
current->thread.trap_no = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
current->thread.trap_no = utask->autask.saved_trap_no;
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
user_disable_single_step(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
if (t->thread.trap_no != UPROBE_TRAP_NR)
return true;
return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
probe_opcode_t insn;
unsigned long addr;
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
addr = instruction_pointer(regs);
if (auprobe->api.handler)
auprobe->api.handler(insn, addr, regs);
return true;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
current->thread.trap_no = utask->autask.saved_trap_no;
/*
* Task has received a fatal signal, so reset back to probed
* address.
*/
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return regs->usp <= ret->stack;
else
return regs->usp < ret->stack;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
struct pt_regs *regs)
{
unsigned long ra;
ra = regs->lr;
regs->lr = trampoline_vaddr;
return ra;
}
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
int uprobe_breakpoint_handler(struct pt_regs *regs)
{
if (uprobe_pre_sstep_notifier(regs))
return 1;
return 0;
}
int uprobe_single_step_handler(struct pt_regs *regs)
{
if (uprobe_post_sstep_notifier(regs))
return 1;
return 0;
}
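/*
 * None of the hooks above are called directly by modules; they are reached
 * once userspace arms a uprobe, e.g. through the tracefs uprobe_events
 * interface (binary path and offset below are only placeholders):
 *
 *   echo 'p:myprobe /bin/bash:0x4710' > /sys/kernel/tracing/uprobe_events
 *   echo 1 > /sys/kernel/tracing/events/uprobes/myprobe/enable
 */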
| linux-master | arch/csky/kernel/probes/uprobes.c |
// SPDX-License-Identifier: GPL-2.0+
#define pr_fmt(fmt) "kprobes: " fmt
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
struct csky_insn_patch {
kprobe_opcode_t *addr;
u32 opcode;
atomic_t cpu_count;
};
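/*
 * patch_text_cb() runs on every online CPU via stop_machine(): the last CPU
 * to arrive (cpu_count reaching num_online_cpus()) performs the text write
 * and dcache writeback, then bumps the counter once more to release the
 * other CPUs, which spin until then; finally every CPU invalidates its own
 * icache for the patched range.
 */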
static int __kprobes patch_text_cb(void *priv)
{
struct csky_insn_patch *param = priv;
unsigned int addr = (unsigned int)param->addr;
if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
*(u16 *) addr = cpu_to_le16(param->opcode);
dcache_wb_range(addr, addr + 2);
atomic_inc(&param->cpu_count);
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
}
icache_inv_range(addr, addr + 2);
return 0;
}
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
p->ainsn.api.restore = (unsigned long)p->addr + offset;
patch_text(p->ainsn.api.insn, p->opcode);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (p->ainsn.api.handler)
p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
post_kprobe_handler(kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
if (probe_addr & 0x1)
return -EILSEQ;
/* copy instruction */
p->opcode = le32_to_cpu(*p->addr);
/* decode instruction */
switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
case INSN_REJECTED: /* insn not supported */
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
p->ainsn.api.insn = get_insn_slot();
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
}
/* prepare the instruction */
if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
return 0;
}
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
patch_text(p->addr, USR_BKPT);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.api.insn) {
free_insn_slot(p->ainsn.api.insn, 0);
p->ainsn.api.insn = NULL;
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupts on the local CPU, there is a chance of an
* interrupt occurring in the window between the exception return and the
* start of the out-of-line single step, which would result in wrongly
* single-stepping into the interrupt handler.
*/
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_sr = regs->sr;
regs->sr &= ~BIT(6);
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
regs->sr = kcb->saved_sr;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
kcb->ss_ctx.ss_pending = true;
kcb->ss_ctx.match_addr = addr + offset;
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
kcb->ss_ctx.ss_pending = false;
kcb->ss_ctx.match_addr = 0;
}
#define TRACE_MODE_SI BIT(14)
#define TRACE_MODE_MASK ~(0x3 << 14)
#define TRACE_MODE_RUN 0
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
unsigned long slot;
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
kcb->kprobe_status = KPROBE_HIT_SS;
}
if (p->ainsn.api.insn) {
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
set_ss_context(kcb, slot, p); /* mark pending ss */
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
instruction_pointer_set(regs, slot);
} else {
/* insn simulation */
arch_simulate_insn(p, regs);
}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
pr_warn("Failed to recover from reentered kprobes.\n");
dump_kprobe(p);
BUG();
break;
default:
WARN_ON(1);
return 0;
}
return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
if (!cur)
return;
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->pc = cur->ainsn.api.restore;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return;
}
/* call post handler */
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler) {
/* post_handler can hit breakpoint and single step
* again, so we enable D-flag for recursive exception.
*/
cur->post_handler(cur, regs, 0);
}
reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the ip points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long) cur->addr;
BUG_ON(!instruction_pointer(regs));
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
}
return 0;
}
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
unsigned long addr = instruction_pointer(regs);
kcb = get_kprobe_ctlblk();
cur_kprobe = kprobe_running();
p = get_kprobe((kprobe_opcode_t *) addr);
if (p) {
if (cur_kprobe) {
if (reenter_kprobe(p, regs, kcb))
return 1;
} else {
/* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
* pre-handler and it returned non-zero, it will
* modify the execution path and no need to single
* stepping. Let's just reset current kprobe and exit.
*
* pre_handler can hit a breakpoint and can step thru
* before return.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
else
reset_current_kprobe();
}
return 1;
}
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
* Return back to original instruction, and continue.
*/
return 0;
}
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if ((kcb->ss_ctx.ss_pending)
&& (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
clear_ss_context(kcb); /* clear pending ss */
kprobes_restore_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
post_kprobe_handler(kcb, regs);
return 1;
}
return 0;
}
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
*/
int __init arch_populate_kprobe_blacklist(void)
{
int ret;
ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
(unsigned long)__irqentry_text_end);
return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
return (void *)kretprobe_trampoline_handler(regs, NULL);
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->lr;
ri->fp = NULL;
regs->lr = (unsigned long) &__kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
int __init arch_init_kprobes(void)
{
return 0;
}
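/*
 * Minimal usage sketch (illustrative only; the probed symbol is just an
 * example): registering a kprobe like this is what ends up exercising
 * arch_prepare_kprobe(), kprobe_breakpoint_handler() and
 * kprobe_single_step_handler() above.
 */
static struct kprobe example_kp = {
	.symbol_name = "kernel_clone",
};

static int __init example_kprobe_init(void)
{
	return register_kprobe(&example_kp);
}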
| linux-master | arch/csky/kernel/probes/kprobes.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/time.h>
#include <linux/types.h>
extern
int __vdso_clock_gettime(clockid_t clock,
struct old_timespec32 *ts);
int __vdso_clock_gettime(clockid_t clock,
struct old_timespec32 *ts)
{
return __cvdso_clock_gettime32(clock, ts);
}
int __vdso_clock_gettime64(clockid_t clock,
struct __kernel_timespec *ts);
int __vdso_clock_gettime64(clockid_t clock,
struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
extern
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz);
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
extern
int __vdso_clock_getres(clockid_t clock_id,
struct old_timespec32 *res);
int __vdso_clock_getres(clockid_t clock_id,
struct old_timespec32 *res)
{
return __cvdso_clock_getres_time32(clock_id, res);
}
| linux-master | arch/csky/kernel/vdso/vgettimeofday.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
unsigned int notrace __bswapsi2(unsigned int u)
{
return ___constant_swab32(u);
}
EXPORT_SYMBOL(__bswapsi2);
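/* Example: __bswapsi2(0x12345678) == 0x78563412 - a full 32-bit byte reversal. */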
| linux-master | arch/csky/abiv1/bswapsi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
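/*
 * Worked example, assuming SHMLBA == 0x4000 and PAGE_SHIFT == 12 (values are
 * illustrative): COLOUR_ALIGN(0x12345, 3) rounds 0x12345 up to 0x14000 and
 * adds (3 << 12) & 0x3fff == 0x3000, giving 0x17000, so the mapping keeps
 * the same cache colour as page offset 3 of the object.
 */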
/*
* We need to ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches. We need to ensure that
* a specific page of an object is always mapped at a multiple of
* SHMLBA bytes.
*
* We unconditionally provide this function for all cases.
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if either the I or D
* caches alias.
*/
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
| linux-master | arch/csky/abiv1/mmap.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/export.h>
#include <linux/compiler.h>
#include <uapi/linux/swab.h>
unsigned long long notrace __bswapdi2(unsigned long long u)
{
return ___constant_swab64(u);
}
EXPORT_SYMBOL(__bswapdi2);
| linux-master | arch/csky/abiv1/bswapdi.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/tlbflush.h>
#define PG_dcache_clean PG_arch_1
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
if (is_zero_pfn(folio_pfn(folio)))
return;
mapping = folio_flush_mapping(folio);
if (mapping && !folio_mapped(folio))
clear_bit(PG_dcache_clean, &folio->flags);
else {
dcache_wbinv_all();
if (mapping)
icache_inv_all();
set_bit(PG_dcache_clean, &folio->flags);
}
}
EXPORT_SYMBOL(flush_dcache_folio);
void flush_dcache_page(struct page *page)
{
flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep, unsigned int nr)
{
unsigned long pfn = pte_pfn(*ptep);
struct folio *folio;
flush_tlb_page(vma, addr);
if (!pfn_valid(pfn))
return;
if (is_zero_pfn(pfn))
return;
folio = page_folio(pfn_to_page(pfn));
if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
dcache_wbinv_all();
if (folio_flush_mapping(folio)) {
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
dcache_wbinv_all();
if (vma->vm_flags & VM_EXEC)
icache_inv_all();
}
| linux-master | arch/csky/abiv1/cacheflush.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
static int align_kern_enable = 1;
static int align_usr_enable = 1;
static int align_kern_count = 0;
static int align_usr_count = 0;
static inline uint32_t get_ptreg(struct pt_regs *regs, uint32_t rx)
{
return rx == 15 ? regs->lr : *((uint32_t *)&(regs->a0) - 2 + rx);
}
static inline void put_ptreg(struct pt_regs *regs, uint32_t rx, uint32_t val)
{
if (rx == 15)
regs->lr = val;
else
*((uint32_t *)&(regs->a0) - 2 + rx) = val;
}
/*
* Get byte-value from addr and set it to *valp.
*
* Success: return 0
* Failure: return 1
*/
static int ldb_asm(uint32_t addr, uint32_t *valp)
{
uint32_t val;
int err;
asm volatile (
"movi %0, 0\n"
"1:\n"
"ldb %1, (%2)\n"
"br 3f\n"
"2:\n"
"movi %0, 1\n"
"br 3f\n"
".section __ex_table,\"a\"\n"
".align 2\n"
".long 1b, 2b\n"
".previous\n"
"3:\n"
: "=&r"(err), "=r"(val)
: "r" (addr)
);
*valp = val;
return err;
}
/*
* Put byte-value to addr.
*
* Success: return 0
* Failure: return 1
*/
static int stb_asm(uint32_t addr, uint32_t val)
{
int err;
asm volatile (
"movi %0, 0\n"
"1:\n"
"stb %1, (%2)\n"
"br 3f\n"
"2:\n"
"movi %0, 1\n"
"br 3f\n"
".section __ex_table,\"a\"\n"
".align 2\n"
".long 1b, 2b\n"
".previous\n"
"3:\n"
: "=&r"(err)
: "r"(val), "r" (addr)
);
return err;
}
/*
* Get half-word from [rx + imm]
*
* Success: return 0
* Failure: return 1
*/
static int ldh_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
{
uint32_t byte0, byte1;
if (ldb_asm(addr, &byte0))
return 1;
addr += 1;
if (ldb_asm(addr, &byte1))
return 1;
byte0 |= byte1 << 8;
put_ptreg(regs, rz, byte0);
return 0;
}
/*
* Store half-word to [rx + imm]
*
* Success: return 0
* Failure: return 1
*/
static int sth_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
{
uint32_t byte0, byte1;
byte0 = byte1 = get_ptreg(regs, rz);
byte0 &= 0xff;
if (stb_asm(addr, byte0))
return 1;
addr += 1;
byte1 = (byte1 >> 8) & 0xff;
if (stb_asm(addr, byte1))
return 1;
return 0;
}
/*
* Get word from [rx + imm]
*
* Success: return 0
* Failure: return 1
*/
static int ldw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
{
uint32_t byte0, byte1, byte2, byte3;
if (ldb_asm(addr, &byte0))
return 1;
addr += 1;
if (ldb_asm(addr, &byte1))
return 1;
addr += 1;
if (ldb_asm(addr, &byte2))
return 1;
addr += 1;
if (ldb_asm(addr, &byte3))
return 1;
byte0 |= byte1 << 8;
byte0 |= byte2 << 16;
byte0 |= byte3 << 24;
put_ptreg(regs, rz, byte0);
return 0;
}
/*
* Store word to [rx + imm]
*
* Success: return 0
* Failure: return 1
*/
static int stw_c(struct pt_regs *regs, uint32_t rz, uint32_t addr)
{
uint32_t byte0, byte1, byte2, byte3;
byte0 = byte1 = byte2 = byte3 = get_ptreg(regs, rz);
byte0 &= 0xff;
if (stb_asm(addr, byte0))
return 1;
addr += 1;
byte1 = (byte1 >> 8) & 0xff;
if (stb_asm(addr, byte1))
return 1;
addr += 1;
byte2 = (byte2 >> 16) & 0xff;
if (stb_asm(addr, byte2))
return 1;
addr += 1;
byte3 = (byte3 >> 24) & 0xff;
if (stb_asm(addr, byte3))
return 1;
return 0;
}
extern int fixup_exception(struct pt_regs *regs);
#define OP_LDH 0xc000
#define OP_STH 0xd000
#define OP_LDW 0x8000
#define OP_STW 0x9000
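/*
 * The 16-bit instruction is decoded below as opcode[15:12], rz[11:8],
 * imm[7:4], rx[3:0]. Worked example (encoding is illustrative): tmp == 0x8732
 * decodes to OP_LDW with rz == 7, imm == 3, rx == 2, so the access address
 * becomes reg[r2] + (3 << 2).
 */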
void csky_alignment(struct pt_regs *regs)
{
int ret;
uint16_t tmp;
uint32_t opcode = 0;
uint32_t rx = 0;
uint32_t rz = 0;
uint32_t imm = 0;
uint32_t addr = 0;
if (!user_mode(regs))
goto kernel_area;
if (!align_usr_enable) {
pr_err("%s user disabled.\n", __func__);
goto bad_area;
}
align_usr_count++;
ret = get_user(tmp, (uint16_t *)instruction_pointer(regs));
if (ret) {
pr_err("%s get_user failed.\n", __func__);
goto bad_area;
}
goto good_area;
kernel_area:
if (!align_kern_enable) {
pr_err("%s kernel disabled.\n", __func__);
goto bad_area;
}
align_kern_count++;
tmp = *(uint16_t *)instruction_pointer(regs);
good_area:
opcode = (uint32_t)tmp;
rx = opcode & 0xf;
imm = (opcode >> 4) & 0xf;
rz = (opcode >> 8) & 0xf;
opcode &= 0xf000;
if (rx == 0 || rx == 1 || rz == 0 || rz == 1)
goto bad_area;
switch (opcode) {
case OP_LDH:
addr = get_ptreg(regs, rx) + (imm << 1);
ret = ldh_c(regs, rz, addr);
break;
case OP_LDW:
addr = get_ptreg(regs, rx) + (imm << 2);
ret = ldw_c(regs, rz, addr);
break;
case OP_STH:
addr = get_ptreg(regs, rx) + (imm << 1);
ret = sth_c(regs, rz, addr);
break;
case OP_STW:
addr = get_ptreg(regs, rx) + (imm << 2);
ret = stw_c(regs, rz, addr);
break;
}
if (ret)
goto bad_area;
regs->pc += 2;
return;
bad_area:
if (!user_mode(regs)) {
if (fixup_exception(regs))
return;
bust_spinlocks(1);
pr_alert("%s opcode: %x, rz: %d, rx: %d, imm: %d, addr: %x.\n",
__func__, opcode, rz, rx, imm, addr);
show_regs(regs);
bust_spinlocks(0);
make_task_dead(SIGKILL);
}
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
}
static struct ctl_table alignment_tbl[5] = {
{
.procname = "kernel_enable",
.data = &align_kern_enable,
.maxlen = sizeof(align_kern_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "user_enable",
.data = &align_usr_enable,
.maxlen = sizeof(align_usr_enable),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "kernel_count",
.data = &align_kern_count,
.maxlen = sizeof(align_kern_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{
.procname = "user_count",
.data = &align_usr_count,
.maxlen = sizeof(align_usr_count),
.mode = 0666,
.proc_handler = &proc_dointvec
},
{}
};
static int __init csky_alignment_init(void)
{
register_sysctl_init("csky/csky_alignment", alignment_tbl);
return 0;
}
arch_initcall(csky_alignment_init);
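/*
 * With the registration above the knobs should appear under
 * /proc/sys/csky/csky_alignment/ (kernel_enable, user_enable, kernel_count,
 * user_count); e.g. "echo 0 > /proc/sys/csky/csky_alignment/user_enable"
 * makes unaligned user accesses raise SIGBUS instead of being fixed up.
 */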
| linux-master | arch/csky/abiv1/alignment.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC simulation Platform support code
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/init.h>
#include <asm/mach_desc.h>
/*----------------------- Machine Descriptions ------------------------------
*
* Machine description is simply a set of platform/board specific callbacks
* This is not directly related to DeviceTree based dynamic device creation,
* however as part of early device tree scan, we also select the right
* callback set, by matching the DT compatible name.
*/
static const char *simulation_compat[] __initconst = {
#ifdef CONFIG_ISA_ARCOMPACT
"snps,nsim",
"snps,nsimosci",
#else
"snps,nsimosci_hs",
"snps,zebu_hs",
#endif
NULL,
};
MACHINE_START(SIMULATION, "simulation")
.dt_compat = simulation_compat,
MACHINE_END
| linux-master | arch/arc/plat-sim/platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC HSDK Platform support code
*
* Copyright (C) 2017 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <asm/arcregs.h>
#include <asm/io.h>
#include <asm/mach_desc.h>
int arc_hsdk_axi_dmac_coherent __section(".data") = 0;
#define ARC_CCM_UNUSED_ADDR 0x60000000
#define ARC_PERIPHERAL_BASE 0xf0000000
#define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000)
#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
#define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000)
static void __init hsdk_enable_gpio_intc_wire(void)
{
/*
* Peripherals on CPU Card are wired to cpu intc via intermediate
* DW APB GPIO blocks (mainly for debouncing)
*
* ---------------------
* | snps,archs-intc |
* ---------------------
* |
* ----------------------
* | snps,archs-idu-intc |
* ----------------------
* | | | | |
* | [eth] [USB] [... other peripherals]
* |
* -------------------
* | snps,dw-apb-intc |
* -------------------
* | | | |
* [Bt] [HAPS] [... other peripherals]
*
* The current implementation of the "irq-dw-apb-ictl" driver doesn't work
* well with stacked INTCs. In particular, problems happen if its master
* INTC is not yet instantiated. See discussion here -
* https://lore.kernel.org/lkml/[email protected]
*
* So setup the first gpio block as a passive pass thru and hide it from
* DT hardware topology - connect intc directly to cpu intc
* The GPIO "wire" needs to be init nevertheless (here)
*
* One side adv is that peripheral interrupt handling avoids one nested
* intc ISR hop
*
* According to HSDK User's Manual [1], "Table 2 Interrupt Mapping"
* we have the following GPIO input lines used as sources of interrupt:
* - GPIO[0] - Bluetooth interrupt of RS9113 module
* - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
* - GPIO[3] - Audio codec (MAX9880A) interrupt
* - GPIO[8-23] - Available on Arduino and PMOD_x headers
* For now there's no use of Arduino and PMOD_x headers in Linux
* use-case so we only enable lines 0, 2 and 3.
*
* [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
*/
#define GPIO_INTEN (HSDK_GPIO_INTC + 0x30)
#define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34)
#define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38)
#define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c)
#define GPIO_INT_CONNECTED_MASK 0x0d
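/* 0x0d == 0b1101: GPIO lines 0, 2 and 3 from the list above */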
iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
}
static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
{
void *fdt = initial_boot_params;
const void *prop;
int node, ret;
bool dt_coh_set;
node = fdt_path_offset(fdt, path);
if (node < 0)
goto tweak_fail;
prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
if (!prop && ret != -FDT_ERR_NOTFOUND)
goto tweak_fail;
dt_coh_set = ret != -FDT_ERR_NOTFOUND;
ret = 0;
/* need to remove "dma-coherent" property */
if (dt_coh_set && !coherent)
ret = fdt_delprop(fdt, node, "dma-coherent");
/* need to set "dma-coherent" property */
if (!dt_coh_set && coherent)
ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
if (ret < 0)
goto tweak_fail;
return 0;
tweak_fail:
pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
return -EFAULT;
}
enum hsdk_axi_masters {
M_HS_CORE = 0,
M_HS_RTT,
M_AXI_TUN,
M_HDMI_VIDEO,
M_HDMI_AUDIO,
M_USB_HOST,
M_ETHERNET,
M_SDIO,
M_GPU,
M_DMAC_0,
M_DMAC_1,
M_DVFS
};
#define UPDATE_VAL 1
/*
* This is modified configuration of AXI bridge. Default settings
* are specified in "Table 111 CREG Address Decoder register reset values".
*
* AXI_M_m_SLV{0|1} - Slave Select register for master 'm'.
* Possible slaves are:
* - 0 => no slave selected
* - 1 => DDR controller port #1
* - 2 => SRAM controller
* - 3 => AXI tunnel
* - 4 => EBI controller
* - 5 => ROM controller
* - 6 => AXI2APB bridge
* - 7 => DDR controller port #2
* - 8 => DDR controller port #3
* - 9 => HS38x4 IOC
* - 10 => HS38x4 DMI
* AXI_M_m_OFFSET{0|1} - Addr Offset register for master 'm'
*
* Please read ARC HS Development IC Specification, section 17.2 for more
* information about apertures configuration.
*
* m master AXI_M_m_SLV0 AXI_M_m_SLV1 AXI_M_m_OFFSET0 AXI_M_m_OFFSET1
* 0 HS (CBU) 0x11111111 0x63111111 0xFEDCBA98 0x0E543210
* 1 HS (RTT) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 2 AXI Tunnel 0x88888888 0x88888888 0xFEDCBA98 0x76543210
* 3 HDMI-VIDEO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 4 HDMI-AUDIO 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 5 USB-HOST 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 6 ETHERNET 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 7 SDIO 0x77777777 0x77999999 0xFEDCBA98 0x76DCBA98
* 8 GPU 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 9 DMAC (port #1) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 10 DMAC (port #2) 0x77777777 0x77777777 0xFEDCBA98 0x76543210
* 11 DVFS 0x00000000 0x60000000 0x00000000 0x00000000
*/
#define CREG_AXI_M_SLV0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m)))
#define CREG_AXI_M_SLV1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x04))
#define CREG_AXI_M_OFT0(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x08))
#define CREG_AXI_M_OFT1(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x0C))
#define CREG_AXI_M_UPDT(m) ((void __iomem *)(CREG_BASE + 0x20 * (m) + 0x14))
#define CREG_AXI_M_HS_CORE_BOOT ((void __iomem *)(CREG_BASE + 0x010))
#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
static void __init hsdk_init_memory_bridge_axi_dmac(void)
{
bool coherent = !!arc_hsdk_axi_dmac_coherent;
u32 axi_m_slv1, axi_m_oft1;
/*
* Don't tweak memory bridge configuration if we failed to tweak DTB
* as we would otherwise end up in an inconsistent state.
*/
if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
return;
if (coherent) {
axi_m_slv1 = 0x77999999;
axi_m_oft1 = 0x76DCBA98;
} else {
axi_m_slv1 = 0x77777777;
axi_m_oft1 = 0x76543210;
}
writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
}
static void __init hsdk_init_memory_bridge(void)
{
u32 reg;
/*
* M_HS_CORE has one unique register - BOOT.
* We need to clean boot mirror (BOOT[1:0]) bits in them to avoid first
* aperture to be masked by 'boot mirror'.
*/
reg = readl(CREG_AXI_M_HS_CORE_BOOT) & (~0x3);
writel(reg, CREG_AXI_M_HS_CORE_BOOT);
writel(0x11111111, CREG_AXI_M_SLV0(M_HS_CORE));
writel(0x63111111, CREG_AXI_M_SLV1(M_HS_CORE));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_CORE));
writel(0x0E543210, CREG_AXI_M_OFT1(M_HS_CORE));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_CORE));
writel(0x77777777, CREG_AXI_M_SLV0(M_HS_RTT));
writel(0x77777777, CREG_AXI_M_SLV1(M_HS_RTT));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HS_RTT));
writel(0x76543210, CREG_AXI_M_OFT1(M_HS_RTT));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HS_RTT));
writel(0x88888888, CREG_AXI_M_SLV0(M_AXI_TUN));
writel(0x88888888, CREG_AXI_M_SLV1(M_AXI_TUN));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_AXI_TUN));
writel(0x76543210, CREG_AXI_M_OFT1(M_AXI_TUN));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_AXI_TUN));
writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_VIDEO));
writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_VIDEO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_VIDEO));
writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_VIDEO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_VIDEO));
writel(0x77777777, CREG_AXI_M_SLV0(M_HDMI_AUDIO));
writel(0x77777777, CREG_AXI_M_SLV1(M_HDMI_AUDIO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_HDMI_AUDIO));
writel(0x76543210, CREG_AXI_M_OFT1(M_HDMI_AUDIO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_HDMI_AUDIO));
writel(0x77777777, CREG_AXI_M_SLV0(M_USB_HOST));
writel(0x77999999, CREG_AXI_M_SLV1(M_USB_HOST));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_USB_HOST));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_USB_HOST));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_USB_HOST));
writel(0x77777777, CREG_AXI_M_SLV0(M_ETHERNET));
writel(0x77999999, CREG_AXI_M_SLV1(M_ETHERNET));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_ETHERNET));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_ETHERNET));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_ETHERNET));
writel(0x77777777, CREG_AXI_M_SLV0(M_SDIO));
writel(0x77999999, CREG_AXI_M_SLV1(M_SDIO));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_SDIO));
writel(0x76DCBA98, CREG_AXI_M_OFT1(M_SDIO));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_SDIO));
writel(0x77777777, CREG_AXI_M_SLV0(M_GPU));
writel(0x77777777, CREG_AXI_M_SLV1(M_GPU));
writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_GPU));
writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
hsdk_init_memory_bridge_axi_dmac();
/*
* PAE remapping for DMA clients does not work due to an RTL bug, so
* CREG_PAE register must be programmed to all zeroes, otherwise it
* will cause problems with DMA to/from peripherals even if PAE40 is
* not used.
*/
writel(0x00000000, CREG_PAE);
writel(UPDATE_VAL, CREG_PAE_UPDT);
}
static void __init hsdk_init_early(void)
{
hsdk_init_memory_bridge();
/*
* Switch SDIO external ciu clock divider from default div-by-8 to
* minimum possible div-by-2.
*/
iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
hsdk_enable_gpio_intc_wire();
}
static const char *hsdk_compat[] __initconst = {
"snps,hsdk",
NULL,
};
MACHINE_START(SIMULATION, "hsdk")
.dt_compat = hsdk_compat,
.init_early = hsdk_init_early,
MACHINE_END
| linux-master | arch/arc/plat-hsdk/platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* AXS101/AXS103 Software Development Platform
*
* Copyright (C) 2013-15 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <asm/asm-offsets.h>
#include <asm/io.h>
#include <asm/mach_desc.h>
#include <soc/arc/mcip.h>
#define AXS_MB_CGU 0xE0010000
#define AXS_MB_CREG 0xE0011000
#define CREG_MB_IRQ_MUX (AXS_MB_CREG + 0x214)
#define CREG_MB_SW_RESET (AXS_MB_CREG + 0x220)
#define CREG_MB_VER (AXS_MB_CREG + 0x230)
#define CREG_MB_CONFIG (AXS_MB_CREG + 0x234)
#define AXC001_CREG 0xF0001000
#define AXC001_GPIO_INTC 0xF0003000
static void __init axs10x_enable_gpio_intc_wire(void)
{
/*
* Peripherals on CPU Card and Mother Board are wired to cpu intc via
* intermediate DW APB GPIO blocks (mainly for debouncing)
*
* ---------------------
* | snps,arc700-intc |
* ---------------------
* | #7 | #15
* ------------------- -------------------
* | snps,dw-apb-gpio | | snps,dw-apb-gpio |
* ------------------- -------------------
* | #12 |
* | [ Debug UART on cpu card ]
* |
* ------------------------
* | snps,dw-apb-intc (MB)|
* ------------------------
* | | | |
* [eth] [uart] [... other perip on Main Board]
*
* The current implementation of the "irq-dw-apb-ictl" driver doesn't work
* well with stacked INTCs. In particular, problems happen if its master
* INTC is not yet instantiated. See discussion here -
* https://lore.kernel.org/lkml/[email protected]
*
* So setup the first gpio block as a passive pass thru and hide it from
* DT hardware topology - connect MB intc directly to cpu intc
* The GPIO "wire" needs to be init nevertheless (here)
*
* One side adv is that peripheral interrupt handling avoids one nested
* intc ISR hop
*/
#define GPIO_INTEN (AXC001_GPIO_INTC + 0x30)
#define GPIO_INTMASK (AXC001_GPIO_INTC + 0x34)
#define GPIO_INTTYPE_LEVEL (AXC001_GPIO_INTC + 0x38)
#define GPIO_INT_POLARITY (AXC001_GPIO_INTC + 0x3c)
#define MB_TO_GPIO_IRQ 12
iowrite32(~(1 << MB_TO_GPIO_IRQ), (void __iomem *) GPIO_INTMASK);
iowrite32(0, (void __iomem *) GPIO_INTTYPE_LEVEL);
iowrite32(~0, (void __iomem *) GPIO_INT_POLARITY);
iowrite32(1 << MB_TO_GPIO_IRQ, (void __iomem *) GPIO_INTEN);
}
static void __init axs10x_print_board_ver(unsigned int creg, const char *str)
{
union ver {
struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:11, y:12, m:4, d:5;
#else
unsigned int d:5, m:4, y:12, pad:11;
#endif
};
unsigned int val;
} board;
board.val = ioread32((void __iomem *)creg);
pr_info("AXS: %s FPGA Date: %u-%u-%u\n", str, board.d, board.m,
board.y);
}
static void __init axs10x_early_init(void)
{
int mb_rev;
char mb[32];
/* Determine motherboard version */
if (ioread32((void __iomem *) CREG_MB_CONFIG) & (1 << 28))
mb_rev = 3; /* HT-3 (rev3.0) */
else
mb_rev = 2; /* HT-2 (rev2.0) */
axs10x_enable_gpio_intc_wire();
scnprintf(mb, 32, "MainBoard v%d", mb_rev);
axs10x_print_board_ver(CREG_MB_VER, mb);
}
#ifdef CONFIG_AXS101
#define CREG_CPU_ADDR_770 (AXC001_CREG + 0x20)
#define CREG_CPU_ADDR_TUNN (AXC001_CREG + 0x60)
#define CREG_CPU_ADDR_770_UPD (AXC001_CREG + 0x34)
#define CREG_CPU_ADDR_TUNN_UPD (AXC001_CREG + 0x74)
#define CREG_CPU_ARC770_IRQ_MUX (AXC001_CREG + 0x114)
#define CREG_CPU_GPIO_UART_MUX (AXC001_CREG + 0x120)
/*
* Set up System Memory Map for ARC cpu / peripherals controllers
*
* Each AXI master has a 4GB memory map specified as 16 apertures of 256MB, each
* of which maps to a corresponding 256MB aperture in Target slave memory map.
*
* e.g. ARC cpu AXI Master's aperture 8 (0x8000_0000) is mapped to aperture 0
* (0x0000_0000) of DDR Port 0 (slave #1)
*
* Access from cpu to MB controllers such as GMAC is setup using AXI Tunnel:
* which has master/slaves on both ends.
* e.g. aperture 14 (0xE000_0000) of ARC cpu is mapped to aperture 14
* (0xE000_0000) of CPU Card AXI Tunnel slave (slave #3) which is mapped to
* MB AXI Tunnel Master, which also has a mem map setup
*
* In the reverse direction, MB AXI Masters (e.g. GMAC) mem map is setup
* to map to MB AXI Tunnel slave which connects to CPU Card AXI Tunnel Master
*/
struct aperture {
unsigned int slave_sel:4, slave_off:4, pad:24;
};
/* CPU Card target slaves */
#define AXC001_SLV_NONE 0
#define AXC001_SLV_DDR_PORT0 1
#define AXC001_SLV_SRAM 2
#define AXC001_SLV_AXI_TUNNEL 3
#define AXC001_SLV_AXI2APB 6
#define AXC001_SLV_DDR_PORT1 7
/* MB AXI Target slaves */
#define AXS_MB_SLV_NONE 0
#define AXS_MB_SLV_AXI_TUNNEL_CPU 1
#define AXS_MB_SLV_AXI_TUNNEL_HAPS 2
#define AXS_MB_SLV_SRAM 3
#define AXS_MB_SLV_CONTROL 4
/* MB AXI masters */
#define AXS_MB_MST_TUNNEL_CPU 0
#define AXS_MB_MST_USB_OHCI 10
/*
* memmap for ARC core on CPU Card
*/
static const struct aperture axc001_memmap[16] = {
{AXC001_SLV_AXI_TUNNEL, 0x0},
{AXC001_SLV_AXI_TUNNEL, 0x1},
{AXC001_SLV_SRAM, 0x0}, /* 0x2000_0000: Local SRAM */
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_DDR_PORT0, 0x0}, /* 0x8000_0000: DDR 0..256M */
{AXC001_SLV_DDR_PORT0, 0x1}, /* 0x9000_0000: DDR 256..512M */
{AXC001_SLV_DDR_PORT0, 0x2},
{AXC001_SLV_DDR_PORT0, 0x3},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_AXI_TUNNEL, 0xD},
{AXC001_SLV_AXI_TUNNEL, 0xE}, /* MB: CREG, CGU... */
{AXC001_SLV_AXI2APB, 0x0}, /* CPU Card local CREG, CGU... */
};
/*
* memmap for CPU Card AXI Tunnel Master (for access by MB controllers)
* GMAC (MB) -> MB AXI Tunnel slave -> CPU Card AXI Tunnel Master -> DDR
*/
static const struct aperture axc001_axi_tunnel_memmap[16] = {
{AXC001_SLV_AXI_TUNNEL, 0x0},
{AXC001_SLV_AXI_TUNNEL, 0x1},
{AXC001_SLV_SRAM, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_DDR_PORT1, 0x0},
{AXC001_SLV_DDR_PORT1, 0x1},
{AXC001_SLV_DDR_PORT1, 0x2},
{AXC001_SLV_DDR_PORT1, 0x3},
{AXC001_SLV_NONE, 0x0},
{AXC001_SLV_AXI_TUNNEL, 0xD},
{AXC001_SLV_AXI_TUNNEL, 0xE},
{AXC001_SLV_AXI2APB, 0x0},
};
/*
* memmap for MB AXI Masters
* Same mem map for all perip controllers as well as MB AXI Tunnel Master
*/
static const struct aperture axs_mb_memmap[16] = {
{AXS_MB_SLV_SRAM, 0x0},
{AXS_MB_SLV_SRAM, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_AXI_TUNNEL_CPU, 0x8}, /* DDR on CPU Card */
{AXS_MB_SLV_AXI_TUNNEL_CPU, 0x9}, /* DDR on CPU Card */
{AXS_MB_SLV_AXI_TUNNEL_CPU, 0xA},
{AXS_MB_SLV_AXI_TUNNEL_CPU, 0xB},
{AXS_MB_SLV_NONE, 0x0},
{AXS_MB_SLV_AXI_TUNNEL_HAPS, 0xD},
{AXS_MB_SLV_CONTROL, 0x0}, /* MB Local CREG, CGU... */
{AXS_MB_SLV_AXI_TUNNEL_CPU, 0xF},
};
static noinline void __init
axs101_set_memmap(void __iomem *base, const struct aperture map[16])
{
unsigned int slave_select, slave_offset;
int i;
slave_select = slave_offset = 0;
for (i = 0; i < 8; i++) {
slave_select |= map[i].slave_sel << (i << 2);
slave_offset |= map[i].slave_off << (i << 2);
}
iowrite32(slave_select, base + 0x0); /* SLV0 */
iowrite32(slave_offset, base + 0x8); /* OFFSET0 */
slave_select = slave_offset = 0;
for (i = 0; i < 8; i++) {
slave_select |= map[i+8].slave_sel << (i << 2);
slave_offset |= map[i+8].slave_off << (i << 2);
}
iowrite32(slave_select, base + 0x4); /* SLV1 */
iowrite32(slave_offset, base + 0xC); /* OFFSET1 */
}
static void __init axs101_early_init(void)
{
int i;
/* ARC 770D memory view */
axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_770, axc001_memmap);
iowrite32(1, (void __iomem *) CREG_CPU_ADDR_770_UPD);
/* AXI tunnel memory map (incoming traffic from MB into CPU Card */
axs101_set_memmap((void __iomem *) CREG_CPU_ADDR_TUNN,
axc001_axi_tunnel_memmap);
iowrite32(1, (void __iomem *) CREG_CPU_ADDR_TUNN_UPD);
/* MB peripherals memory map */
for (i = AXS_MB_MST_TUNNEL_CPU; i <= AXS_MB_MST_USB_OHCI; i++)
axs101_set_memmap((void __iomem *) AXS_MB_CREG + (i << 4),
axs_mb_memmap);
iowrite32(0x3ff, (void __iomem *) AXS_MB_CREG + 0x100); /* Update */
/* GPIO pins 18 and 19 are used as UART rx and tx, respectively. */
iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
/* Set up the MB interrupt system: mux interrupts to GPIO7 */
iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
/* reset ethernet and ULPI interfaces */
iowrite32(0x18, (void __iomem *) CREG_MB_SW_RESET);
/* map GPIO 14:10 to ARC 9:5 (IRQ mux change for MB v2 onwards) */
iowrite32(0x52, (void __iomem *) CREG_CPU_ARC770_IRQ_MUX);
axs10x_early_init();
}
#endif /* CONFIG_AXS101 */
#ifdef CONFIG_AXS103
#define AXC003_CREG 0xF0001000
#define AXC003_MST_AXI_TUNNEL 0
#define AXC003_MST_HS38 1
#define CREG_CPU_AXI_M0_IRQ_MUX (AXC003_CREG + 0x440)
#define CREG_CPU_GPIO_UART_MUX (AXC003_CREG + 0x480)
#define CREG_CPU_TUN_IO_CTRL (AXC003_CREG + 0x494)
static void __init axs103_early_init(void)
{
#ifdef CONFIG_ARC_MCIP
/*
* AXS103 configurations for SMP/QUAD configurations share device tree
* which defaults to 100 MHz. However recent failures of Quad config
* revealed P&R timing violations so clamp it down to safe 50 MHz
* Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
* of fudging the freq in DT
*/
#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000
unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
if (num_cores > 2) {
u32 freq;
int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
const struct fdt_property *prop;
prop = fdt_get_property(initial_boot_params, off,
"assigned-clock-rates", NULL);
freq = be32_to_cpu(*(u32 *)(prop->data));
/* Patching .dtb in-place with new core clock value */
if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
fdt_setprop_inplace(initial_boot_params, off,
"assigned-clock-rates", &freq, sizeof(freq));
}
}
#endif
/* Memory maps already config in pre-bootloader */
/* set GPIO mux to UART */
iowrite32(0x01, (void __iomem *) CREG_CPU_GPIO_UART_MUX);
iowrite32((0x00100000U | 0x000C0000U | 0x00003322U),
(void __iomem *) CREG_CPU_TUN_IO_CTRL);
/* Set up the AXS_MB interrupt system.*/
iowrite32(12, (void __iomem *) (CREG_CPU_AXI_M0_IRQ_MUX
+ (AXC003_MST_HS38 << 2)));
/* connect ICTL - Main Board with GPIO line */
iowrite32(0x01, (void __iomem *) CREG_MB_IRQ_MUX);
axs10x_print_board_ver(AXC003_CREG + 4088, "AXC003 CPU Card");
axs10x_early_init();
}
#endif
#ifdef CONFIG_AXS101
static const char *axs101_compat[] __initconst = {
"snps,axs101",
NULL,
};
MACHINE_START(AXS101, "axs101")
.dt_compat = axs101_compat,
.init_early = axs101_early_init,
MACHINE_END
#endif /* CONFIG_AXS101 */
#ifdef CONFIG_AXS103
static const char *axs103_compat[] __initconst = {
"snps,axs103",
NULL,
};
MACHINE_START(AXS103, "axs103")
.dt_compat = axs103_compat,
.init_early = axs103_early_init,
MACHINE_END
/*
* For the VDK OS-kit, to get the offset to pid and command fields
*/
char coware_swa_pid_offset[TASK_PID];
char coware_swa_comm_offset[TASK_COMM];
#endif /* CONFIG_AXS103 */
| linux-master | arch/arc/plat-axs10x/axs10x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Abilis Systems TB10x platform initialisation
*
* Copyright (C) Abilis Systems 2012
*
* Author: Christian Ruppert <[email protected]>
*/
#include <linux/init.h>
#include <asm/mach_desc.h>
static const char *tb10x_compat[] __initdata = {
"abilis,arc-tb10x",
NULL,
};
MACHINE_START(TB10x, "tb10x")
.dt_compat = tb10x_compat,
MACHINE_END
| linux-master | arch/arc/plat-tb10x/tb10x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC700 mmap
*
* (started from arm version - for VIPT alias handling)
*
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <asm/cacheflush.h>
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
* We need to ensure that
* a specific page of an object is always mapped at a multiple of
* SHMLBA bytes.
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if D cache aliases.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (len > TASK_SIZE)
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_U_NONE,
[VM_READ] = PAGE_U_R,
[VM_WRITE] = PAGE_U_R,
[VM_WRITE | VM_READ] = PAGE_U_R,
[VM_EXEC] = PAGE_U_X_R,
[VM_EXEC | VM_READ] = PAGE_U_X_R,
[VM_EXEC | VM_WRITE] = PAGE_U_X_R,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
[VM_SHARED] = PAGE_U_NONE,
[VM_SHARED | VM_READ] = PAGE_U_R,
[VM_SHARED | VM_WRITE] = PAGE_U_W_R,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
[VM_SHARED | VM_EXEC] = PAGE_U_X_R,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/arc/mm/mmap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#endif
#include <linux/of_fdt.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
EXPORT_SYMBOL(empty_zero_page);
static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
static phys_addr_t high_mem_start;
static phys_addr_t high_mem_sz;
unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
long __init arc_get_mem_sz(void)
{
return low_mem_sz;
}
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
low_mem_sz = memparse(str, NULL) & PAGE_MASK;
/* early console might not be setup yet - it will show up later */
pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(low_mem_sz));
return 0;
}
early_param("mem", setup_mem_sz);
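/* e.g. booting with "mem=256M" caps low memory at 256 MiB (the value is page-aligned by the mask above) */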
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
int in_use = 0;
if (!low_mem_sz) {
if (base != low_mem_start)
panic("CONFIG_LINUX_RAM_BASE != DT memory { }");
low_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 0, MEMBLOCK_NONE);
} else {
#ifdef CONFIG_HIGHMEM
high_mem_start = base;
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1, MEMBLOCK_NONE);
memblock_reserve(base, size);
#endif
}
pr_info("Memory @ %llx [%lldM] %s\n",
base, TO_MB(size), !in_use ? "Not used":"");
}
/*
* First memory setup routine called from setup_arch()
* 1. setup swapper's mm @init_mm
* 2. Count the pages we have and setup bootmem allocator
* 3. zone setup
*/
void __init setup_arch_memory(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
/* Last usable page of low mem */
max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
/*------------- bootmem allocator setup -----------------------*/
/*
* seed the bootmem allocator after any DT memory node parsing or
* "mem=xxx" cmdline overrides have potentially updated @arc_mem_sz
*
* Only low mem is added, otherwise we have crashes when allocating
* mem_map[] itself. NO_BOOTMEM allocates mem_map[] at the end of
* avail memory, ending in highmem with a > 32-bit address. However
* it then tries to memset it with a truncated 32-bit handle, causing
* the crash
*/
memblock_reserve(CONFIG_LINUX_LINK_BASE,
__pa(_end) - CONFIG_LINUX_LINK_BASE);
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size) {
memblock_reserve(phys_initrd_start, phys_initrd_size);
initrd_start = (unsigned long)__va(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
#endif
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
memblock_dump_all();
/*----------------- node/zones setup --------------------------*/
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
/*
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
* than addresses in normal aka low memory (0x8000_0000 based).
* Even with PAE, the huge peripheral space hole would waste a lot of
* mem with single contiguous mem_map[].
* Thus when HIGHMEM on ARC is enabled the memory map corresponding
* to the hole is freed and ARC specific version of pfn_valid()
* handles the hole in the memory map.
*/
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
/*
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
* For HIGHMEM without PAE max_high_pfn should be less than
* min_low_pfn to guarantee that these two regions don't overlap.
* For PAE case highmem is greater than lowmem, so it is natural
* to use max_high_pfn.
*
* In both cases, holes should be handled by pfn_valid().
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
#else /* CONFIG_HIGHMEM */
/* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
max_mapnr = max_low_pfn - min_low_pfn;
#endif /* CONFIG_HIGHMEM */
free_area_init(max_zone_pfn);
}
static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
memblock_phys_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
}
/*
* mem_init - initializes memory
*
* Frees up bootmem
* Calculates and displays memory available/used
*/
void __init mem_init(void)
{
memblock_free_all();
highmem_init();
BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
}
#ifdef CONFIG_HIGHMEM
int pfn_valid(unsigned long pfn)
{
return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
(pfn >= min_low_pfn && pfn <= max_low_pfn);
}
EXPORT_SYMBOL(pfn_valid);
#endif
| linux-master | arch/arc/mm/init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Borrowed heavily from MIPS
*/
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup) {
regs->ret = fixup->fixup;
return 1;
}
return 0;
}
| linux-master | arch/arc/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
/*
* ARCH specific callbacks for generic noncoherent DMA ops
* - hardware IOC not available (or "dma-coherent" not set for device in DT)
* - But still handle both coherent and non-coherent requests from caller
*
* For DMA coherent hardware (IOC) generic code suffices
*/
void arch_dma_prep_coherent(struct page *page, size_t size)
{
/*
* Evict any existing L1 and/or L2 lines for the backing page
* in case it was used earlier as a normal "cached" page.
* Yeah this bit us - STAR 9000898266
*
* Although core does call flush_cache_vmap(), it gets kvaddr hence
* can't be used to efficiently flush L1 and/or L2 which need paddr
* Currently flush_cache_vmap nukes the L1 cache completely which
* will be optimized as a separate commit
*/
dma_cache_wback_inv(page_to_phys(page), size);
}
/*
* Cache operations depending on function and direction argument, inspired by
* https://lore.kernel.org/lkml/[email protected]
* "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
* dma-mapping: provide a generic dma-noncoherent implementation)"
*
*              |  map          ==  for_device      |  unmap        ==  for_cpu
*              |---------------------------------------------------------------
*   TO_DEV     |  writeback        writeback       |  none             none
*   FROM_DEV   |  invalidate       invalidate      |  invalidate*      invalidate*
*   BIDIR      |  writeback+inv    writeback+inv   |  invalidate       invalidate
*
* [*] needed for CPU speculative prefetches
*
* NOTE: we don't check the validity of direction argument as it is done in
* upper layer functions (in include/linux/dma-mapping.h)
*/
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback(paddr, size);
break;
case DMA_FROM_DEVICE:
dma_cache_inv(paddr, size);
break;
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv(paddr, size);
break;
default:
break;
}
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
break;
/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
dma_cache_inv(paddr, size);
break;
default:
break;
}
}
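/*
 * Worked example of the table above, assuming a streaming mapping taken
 * through the generic dma-noncoherent path (which funnels into these hooks):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);	-> wback L1 (+SLC)
 *	... device reads the buffer ...
 *	dma_unmap_single(dev, h, len, DMA_TO_DEVICE);		-> no cache op
 *
 * whereas a DMA_FROM_DEVICE mapping invalidates on both map and unmap, the
 * latter to discard lines the CPU may have speculatively prefetched while
 * the device was still writing.
 */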
/*
* Plug in direct dma map ops.
*/
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
* with memory - eliding need for any explicit cache maintenance of
* DMA buffers.
*/
if (is_isa_arcv2() && ioc_enable && coherent)
dev->dma_coherent = true;
dev_info(dev, "use %scoherent DMA ops\n",
dev->dma_coherent ? "" : "non");
}
| linux-master | arch/arc/mm/dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cache.h>
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
if (is_isa_arcompact()) {
if (paddr >= ARC_UNCACHED_ADDR_SPACE)
return true;
} else if (paddr >= perip_base && paddr <= perip_end) {
return true;
}
return false;
}
void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
/*
	 * If the region is h/w uncached, MMU mapping can be elided as an optimization
* The cast to u32 is fine as this region can only be inside 4GB
*/
if (arc_uncached_addr_space(paddr))
return (void __iomem *)(u32)paddr;
return ioremap_prot(paddr, size,
pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);
/*
* ioremap with access flags
 * Cache semantics wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses in
 * the ARC hardware uncached region, this one still goes through the MMU as the
 * caller might need finer access control (R/W/X)
*/
void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
unsigned long flags)
{
pgprot_t prot = __pgprot(flags);
/* force uncached */
return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);
void iounmap(volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
return;
generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
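/*
 * Typical use, sketched for a hypothetical peripheral at PERIP_REG_BASE
 * (the names here are illustrative only):
 *
 *	void __iomem *regs = ioremap(PERIP_REG_BASE, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(ctl, regs + CTL_OFF);
 *	iounmap(regs);
 *
 * If PERIP_REG_BASE falls inside the hardware uncached aperture the same
 * pointer is handed back without creating any MMU mapping, and the later
 * iounmap() is a no-op.
 */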
| linux-master | arch/arc/mm/ioremap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC Cache Management
*
* Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH 1
#endif
static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
static struct cpuinfo_arc_cache {
unsigned int sz_k, line_len, colors;
} ic_info, dc_info, slc_info;
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
struct cpuinfo_arc_cache *p_slc = &slc_info;
struct bcr_identity ident;
struct bcr_generic sbcr;
struct bcr_clust_cfg cbcr;
struct bcr_volatile vol;
int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
if (cbcr.c) {
ioc_exists = 1;
/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
* simultaneously. This happens because as of today IOC aperture covers
* only ZONE_NORMAL (low mem) and any dma transactions outside this
* region won't be HW coherent.
* If we want to use both IOC and ZONE_HIGHMEM we can use
* bounce_buffer to handle dma transactions to HIGHMEM.
* Also it is possible to modify dma_direct cache ops or increase IOC
* aperture size if we are planning to use HIGHMEM without PAE.
*/
if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
ioc_enable = 0;
} else {
ioc_enable = 0;
}
READ_BCR(AUX_IDENTITY, ident);
/* HS 2.0 didn't have AUX_VOL */
if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
perip_base,
IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
return n;
}
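/*
 * Example decode (field values assumed for illustration): slc_cfg.sz == 2
 * yields an SLC of 128 << 2 = 512K and slc_cfg.lsz == 0 selects 128B lines,
 * i.e. a "SLC: 512K, 128B Line" boot print from the code above.
 */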
int arc_cache_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
struct bcr_cache ibcr, dbcr;
int vipt, assoc;
int n = 0;
READ_BCR(ARC_REG_IC_BCR, ibcr);
if (!ibcr.ver)
goto dc_chk;
if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
assoc = 2; /* Fixed to 2w set assoc */
} else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
n += scnprintf(buf + n, len - n,
"I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
p_ic->sz_k, assoc, p_ic->line_len,
p_ic->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
READ_BCR(ARC_REG_DC_BCR, dbcr);
if (!dbcr.ver)
goto slc_chk;
if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
vipt = 1;
assoc = 4; /* Fixed to 4w set assoc */
p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
} else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
vipt = 0;
assoc = 1 << dbcr.config; /* 1,2,4,8 */
p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
if (is_isa_arcv2())
n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
return n;
}
/*
* Line Operation on {I,D}-Cache
*/
#define OP_INV 0x1
#define OP_FLUSH 0x2
#define OP_FLUSH_N_INV 0x3
#define OP_INV_IC 0x4
/*
* Cache Flush programming model
*
* ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
 * Programming model requires both paddr and vaddr irrespective of aliasing
* considerations:
* - vaddr in {I,D}C_IV?L
* - paddr in {I,D}C_PTAG
*
* In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
* Programming model is different for aliasing vs. non-aliasing I$
* - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
* - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
*
* - If PAE40 is enabled, independent of aliasing considerations, the higher
 *   bits need to be written into PTAG_HI
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd, aux_tag;
int num_lines;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
aux_tag = ARC_REG_IC_PTAG;
} else {
aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
aux_tag = ARC_REG_DC_PTAG;
}
/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to the cache line and an integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of the line size (being page sized).
*/
if (!full_page) {
sz += paddr & ~CACHE_LINE_MASK;
paddr &= CACHE_LINE_MASK;
vaddr &= CACHE_LINE_MASK;
}
num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
/*
* MMUv3, cache ops require paddr in PTAG reg
* if V-P const for loop, PTAG can be written once outside loop
*/
if (full_page)
write_aux_reg(aux_tag, paddr);
/*
* This is technically for MMU v4, using the MMU v3 programming model
* Special work for HS38 aliasing I-cache configuration with PAE40
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
* Note that PTAG_HI is hoisted outside the line loop
*/
if (is_pae40_enabled() && op == OP_INV_IC)
write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
while (num_lines-- > 0) {
if (!full_page) {
write_aux_reg(aux_tag, paddr);
paddr += L1_CACHE_BYTES;
}
write_aux_reg(aux_cmd, vaddr);
vaddr += L1_CACHE_BYTES;
}
}
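/*
 * Worked example of the floor/ceil handling above for a partial request,
 * assuming 64B L1 lines: paddr = 0x8000_0043, sz = 0x50
 *	sz    += 0x43 & 0x3f = 0x03	->  0x53
 *	paddr &= ~0x3f			->  0x8000_0040
 *	num_lines = DIV_ROUND_UP(0x53, 0x40) = 2
 * so both lines touched by the original [0x43, 0x93) byte range get the op.
 */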
#ifndef USE_RGN_FLSH
/*
*/
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page)
{
unsigned int aux_cmd;
int num_lines;
if (op == OP_INV_IC) {
aux_cmd = ARC_REG_IC_IVIL;
} else {
/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
}
/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr aligned to the cache line and an integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of the line size (being page sized).
*/
if (!full_page) {
sz += paddr & ~CACHE_LINE_MASK;
paddr &= CACHE_LINE_MASK;
}
num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
/*
* For HS38 PAE40 configuration
* - upper 8 bits of paddr need to be written into PTAG_HI
* - (and needs to be written before the lower 32 bits)
*/
if (is_pae40_enabled()) {
if (op == OP_INV_IC)
/*
* Non aliasing I-cache in HS38,
* aliasing I-cache handled in __cache_line_loop_v3()
*/
write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
else
write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
}
while (num_lines-- > 0) {
write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
}
}
#else
/*
* optimized flush operation which takes a region as opposed to iterating per line
*/
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page)
{
unsigned int s, e;
/* Only for Non aliasing I-cache in HS38 */
if (op == OP_INV_IC) {
s = ARC_REG_IC_IVIR;
e = ARC_REG_IC_ENDR;
} else {
s = ARC_REG_DC_STARTR;
e = ARC_REG_DC_ENDR;
}
if (!full_page) {
/* for any leading gap between @paddr and start of cache line */
sz += paddr & ~CACHE_LINE_MASK;
paddr &= CACHE_LINE_MASK;
/*
* account for any trailing gap to end of cache line
* this is equivalent to DIV_ROUND_UP() in line ops above
*/
sz += L1_CACHE_BYTES - 1;
}
if (is_pae40_enabled()) {
/* TBD: check if crossing 4TB boundary */
if (op == OP_INV_IC)
write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
else
write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
}
/* ENDR needs to be set ahead of START */
write_aux_reg(e, paddr + sz); /* ENDR is exclusive */
write_aux_reg(s, paddr);
/* caller waits on DC_CTRL.FS */
}
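/*
 * Worked example for the same partial request as the per-line variant,
 * assuming 64B lines: paddr = 0x8000_0043, sz = 0x50
 *	sz    += 0x03 (leading gap), then += 0x3f (trailing pad)  ->  0x92
 *	paddr &= ~0x3f						  ->  0x8000_0040
 *	ENDR  = paddr + sz = 0x8000_00d2 (exclusive), STARTR = 0x8000_0040
 * The trailing pad guarantees the last partially covered line lands below
 * the exclusive ENDR, letting the hardware region walk replace the software
 * per-line loop.
 */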
#endif
#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop __cache_line_loop_v3
#else
#define __cache_line_loop __cache_line_loop_v4
#endif
#ifdef CONFIG_ARC_HAS_DCACHE
/***************************************************************
* Machine specific helpers for Entire D-Cache or Per Line ops
*/
#ifndef USE_RGN_FLSH
/*
 * this version avoids an extra read/write of DC_CTRL for flush or invalidate ops
 * in the non-region-flush regime (such as for ARCompact)
*/
static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
/* Dcache provides 2 cmd: FLUSH or INV
* INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
* flush-n-inv is achieved by INV cmd but with IM=1
* So toggle INV sub-mode depending on op request and default
*/
const unsigned int ctl = ARC_REG_DC_CTRL;
write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
}
}
#else
static inline void __before_dc_op(const int op)
{
const unsigned int ctl = ARC_REG_DC_CTRL;
unsigned int val = read_aux_reg(ctl);
if (op == OP_FLUSH_N_INV) {
val |= DC_CTRL_INV_MODE_FLUSH;
}
if (op != OP_INV_IC) {
/*
* Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
* combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
*/
val &= ~DC_CTRL_RGN_OP_MSK;
if (op & OP_INV)
val |= DC_CTRL_RGN_OP_INV;
}
write_aux_reg(ctl, val);
}
#endif
static inline void __after_dc_op(const int op)
{
if (op & OP_FLUSH) {
const unsigned int ctl = ARC_REG_DC_CTRL;
unsigned int reg;
/* flush / flush-n-inv both wait */
while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
;
/* Switch back to default Invalidate mode */
if (op == OP_FLUSH_N_INV)
write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
}
}
/*
* Operation on Entire D-Cache
* @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
* Note that constant propagation ensures all the checks are gone
* in generated code
*/
static inline void __dc_entire_op(const int op)
{
int aux;
__before_dc_op(op);
if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
aux = ARC_REG_DC_IVDC;
else
aux = ARC_REG_DC_FLSH;
write_aux_reg(aux, 0x1);
__after_dc_op(op);
}
static inline void __dc_disable(void)
{
const int r = ARC_REG_DC_CTRL;
__dc_entire_op(OP_FLUSH_N_INV);
write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}
static void __dc_enable(void)
{
const int r = ARC_REG_DC_CTRL;
write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
/*
* D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
*/
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op)
{
const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
__before_dc_op(op);
__cache_line_loop(paddr, vaddr, sz, op, full_page);
__after_dc_op(op);
local_irq_restore(flags);
}
#else
#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
#endif /* CONFIG_ARC_HAS_DCACHE */
#ifdef CONFIG_ARC_HAS_ICACHE
static inline void __ic_entire_inv(void)
{
write_aux_reg(ARC_REG_IC_IVIC, 1);
read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
}
static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
unsigned long flags;
local_irq_save(flags);
(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
local_irq_restore(flags);
}
#ifndef CONFIG_SMP
#define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
#else
struct ic_inv_args {
phys_addr_t paddr, vaddr;
int sz;
};
static void __ic_line_inv_vaddr_helper(void *info)
{
struct ic_inv_args *ic_inv = info;
__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}
static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz)
{
struct ic_inv_args ic_inv = {
.paddr = paddr,
.vaddr = vaddr,
.sz = sz
};
on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}
#endif /* CONFIG_SMP */
#else /* !CONFIG_ARC_HAS_ICACHE */
#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)
#endif /* CONFIG_ARC_HAS_ICACHE */
static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
* SLC is shared between all cores and concurrent aux operations from
* multiple cores need to be serialized using a spinlock
* A concurrent operation can be silently ignored and/or the old/new
* operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
* below)
*/
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
phys_addr_t end;
spin_lock_irqsave(&lock, flags);
/*
* The Region Flush operation is specified by CTRL.RGN_OP[11..9]
* - b'000 (default) is Flush,
* - b'001 is Invalidate if CTRL.IM == 0
* - b'001 is Flush-n-Invalidate if CTRL.IM == 1
*/
ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
/* Don't rely on default value of IM bit */
if (!(op & OP_FLUSH)) /* i.e. OP_INV */
ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
else
ctrl |= SLC_CTRL_IM;
if (op & OP_INV)
ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */
else
ctrl &= ~SLC_CTRL_RGN_OP_INV;
write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
/*
* Lower bits are ignored, no need to clip
* END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz
*/
end = paddr + sz + l2_line_sz - 1;
if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
if (is_pae40_enabled())
write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
read_aux_reg(ARC_REG_SLC_CTRL);
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
spin_unlock_irqrestore(&lock, flags);
#endif
}
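/*
 * Example of the END fixup above, assuming a 128B SLC line: a 1 byte flush
 * at paddr 0x8000_0000 would otherwise program START == END; adding
 * sz + l2_line_sz - 1 = 0x80 gives END = 0x8000_0080, i.e. one full line,
 * which is the smallest granule the region op can express.
 */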
static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
* SLC is shared between all cores and concurrent aux operations from
* multiple cores need to be serialized using a spinlock
* A concurrent operation can be silently ignored and/or the old/new
* operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
* below)
*/
static DEFINE_SPINLOCK(lock);
const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
unsigned int ctrl, cmd;
unsigned long flags;
int num_lines;
spin_lock_irqsave(&lock, flags);
ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
/* Don't rely on default value of IM bit */
if (!(op & OP_FLUSH)) /* i.e. OP_INV */
ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
else
ctrl |= SLC_CTRL_IM;
write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
sz += paddr & ~SLC_LINE_MASK;
paddr &= SLC_LINE_MASK;
num_lines = DIV_ROUND_UP(sz, l2_line_sz);
while (num_lines-- > 0) {
write_aux_reg(cmd, paddr);
paddr += l2_line_sz;
}
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
read_aux_reg(ARC_REG_SLC_CTRL);
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
spin_unlock_irqrestore(&lock, flags);
#endif
}
#define slc_op(paddr, sz, op) slc_op_rgn(paddr, sz, op)
noinline static void slc_entire_op(const int op)
{
unsigned int ctrl, r = ARC_REG_SLC_CTRL;
ctrl = read_aux_reg(r);
if (!(op & OP_FLUSH)) /* i.e. OP_INV */
ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
else
ctrl |= SLC_CTRL_IM;
write_aux_reg(r, ctrl);
if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
else
write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);
/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
read_aux_reg(r);
/* Important to wait for flush to complete */
while (read_aux_reg(r) & SLC_CTRL_BUSY);
}
static inline void arc_slc_disable(void)
{
const int r = ARC_REG_SLC_CTRL;
slc_entire_op(OP_FLUSH_N_INV);
write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}
static inline void arc_slc_enable(void)
{
const int r = ARC_REG_SLC_CTRL;
write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}
/***********************************************************
* Exported APIs
*/
/*
* Handle cache congruency of kernel and userspace mappings of page when kernel
* writes-to/reads-from
*
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
* -In SMP, if hardware caches are coherent
*
* There's a corollary case, where kernel READs from a userspace mapped page.
* If the U-mapping is not congruent to K-mapping, former needs flushing.
*/
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &folio->flags);
return;
}
/* don't handle anon pages here */
mapping = folio_flush_mapping(folio);
if (!mapping)
return;
/*
* pagecache page, file not yet mapped to userspace
* Make a note that K-mapping is dirty
*/
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &folio->flags);
} else if (folio_mapped(folio)) {
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)folio_address(folio);
unsigned long vaddr = folio_pos(folio);
/*
* vaddr is not actually the virtual address, but is
* congruent to every user mapping.
*/
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_pages(paddr, vaddr,
folio_nr_pages(folio));
}
}
EXPORT_SYMBOL(flush_dcache_folio);
void flush_dcache_page(struct page *page)
{
return flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/*
* DMA ops for systems with L1 cache only
* Make memory coherent with L1 cache by flushing/invalidating L1 lines
*/
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_INV);
}
static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_FLUSH);
}
/*
* DMA ops for systems with both L1 and L2 caches, but without IOC
* Both L1 and L2 lines need to be explicitly flushed/invalidated
*/
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
slc_op(start, sz, OP_FLUSH_N_INV);
}
static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_INV);
slc_op(start, sz, OP_INV);
}
static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
__dc_line_op_k(start, sz, OP_FLUSH);
slc_op(start, sz, OP_FLUSH);
}
/*
* Exported DMA API
*/
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);
void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);
void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
/*
* This is API for making I/D Caches consistent when modifying
* kernel code (loadable modules, kprobes, kgdb...)
* This is called on insmod, with kernel virtual address for CODE of
* the module. ARC cache maintenance ops require PHY address thus we
* need to convert vmalloc addr to PHY addr
*/
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
unsigned int tot_sz;
WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
/* Shortcut for bigger flush ranges.
* Here we don't care if this was kernel virtual or phy addr
*/
tot_sz = kend - kstart;
if (tot_sz > PAGE_SIZE) {
flush_cache_all();
return;
}
/* Case: Kernel Phy addr (0x8000_0000 onwards) */
if (likely(kstart > PAGE_OFFSET)) {
/*
* The 2nd arg despite being paddr will be used to index icache
* This is OK since no alternate virtual mappings will exist
* given the callers for this case: kprobe/kgdb in built-in
* kernel code only.
*/
__sync_icache_dcache(kstart, kstart, kend - kstart);
return;
}
/*
* Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
* (1) ARC Cache Maintenance ops only take Phy addr, hence special
* handling of kernel vaddr.
*
* (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
* it still needs to handle a 2 page scenario, where the range
* straddles across 2 virtual pages and hence need for loop
*/
while (tot_sz > 0) {
unsigned int off, sz;
unsigned long phy, pfn;
off = kstart % PAGE_SIZE;
pfn = vmalloc_to_pfn((void *)kstart);
phy = (pfn << PAGE_SHIFT) + off;
sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
__sync_icache_dcache(phy, kstart, sz);
kstart += sz;
tot_sz -= sz;
}
}
EXPORT_SYMBOL(flush_icache_range);
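/*
 * Example of the vmalloc-vaddr loop above for a module range straddling a
 * page boundary, assuming 8K pages: kstart = 0x7008_1ff0, kend = 0x7008_2010
 *	pass 1: off = 0x1ff0, sz = 0x10, phy = (pfn << 13) + 0x1ff0
 *	pass 2: off = 0x0,    sz = 0x10, pfn looked up afresh for the next page
 * since the two virtual pages may well map to discontiguous physical pages.
 */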
/*
* General purpose helper to make I and D cache lines consistent.
* @paddr is phy addr of region
* @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
* However in one instance, when called by kprobe (for a breakpt in
* builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
* use a paddr to index the cache (despite VIPT). This is fine since a
* builtin kernel page will not have any virtual mappings.
* kprobe on loadable module will be kernel vaddr.
*/
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
__ic_line_inv_vaddr(paddr, vaddr, len);
}
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
__ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
}
/*
* wrapper to clearout kernel or userspace mappings of a page
* For kernel mappings @vaddr == @paddr
*/
void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
__dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
}
noinline void flush_cache_all(void)
{
unsigned long flags;
local_irq_save(flags);
__ic_entire_inv();
__dc_entire_op(OP_FLUSH_N_INV);
local_irq_restore(flags);
}
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
void flush_cache_mm(struct mm_struct *mm)
{
flush_cache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
__flush_dcache_pages(paddr, u_vaddr, 1);
if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, u_vaddr, 1);
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_cache_all();
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
__flush_dcache_pages((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page), 1);
}
#endif
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
struct folio *src = page_folio(from);
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;
/*
	 * If the SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical page
	 * so that the K-mapping in the memcpy below sees the right data
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
*
* For !VIPT cache, all of this gets compiled out as
* addr_not_cache_congruent() is 0
*/
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
clean_src_k_mappings = 1;
}
copy_page(kto, kfrom);
/*
* Mark DST page K-mapping as dirty for a later finalization by
* update_mmu_cache(). Although the finalization could have been done
* here as well (given that both vaddr/paddr are available).
* But update_mmu_cache() already has code to do that for other
* non copied user pages (e.g. read faults which wire in pagecache page
* directly).
*/
clear_bit(PG_dc_clean, &dst->flags);
/*
* if SRC was already usermapped and non-congruent to kernel mapping
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
__flush_dcache_pages((unsigned long)kfrom,
(unsigned long)kfrom, 1);
} else {
clear_bit(PG_dc_clean, &src->flags);
}
kunmap_atomic(kto);
kunmap_atomic(kfrom);
}
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
struct folio *folio = page_folio(page);
clear_page(to);
clear_bit(PG_dc_clean, &folio->flags);
}
EXPORT_SYMBOL(clear_user_page);
/**********************************************************************
* Explicit Cache flush request from user space via syscall
* Needed for JITs which generate code on the fly
*/
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
/* TBD: optimize this */
flush_cache_all();
return 0;
}
/*
* IO-Coherency (IOC) setup rules:
*
* 1. Needs to be at system level, so only once by Master core
* Non-Masters need not be accessing caches at that time
* - They are either HALT_ON_RESET and kick started much later or
* - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
* doesn't perturb caches or coherency unit
*
* 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
* otherwise any straggler data might behave strangely post IOC enabling
*
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
/*
* If IOC was already enabled (due to bootloader) it technically needs to
* be reconfigured with aperture base,size corresponding to Linux memory map
* which will certainly be different than uboot's. But disabling and
* reenabling IOC when DMA might be potentially active is tricky business.
* To avoid random memory issues later, just panic here and ask user to
* upgrade bootloader to one which doesn't enable IOC
*/
if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
panic("IOC already enabled, please upgrade bootloader!\n");
if (!ioc_enable)
return;
/* Flush + invalidate + disable L1 dcache */
__dc_disable();
/* Flush + invalidate SLC */
if (read_aux_reg(ARC_REG_SLC_BCR))
slc_entire_op(OP_FLUSH_N_INV);
/*
* currently IOC Aperture covers entire DDR
* TBD: fix for PGU + 1GB of low mem
* TBD: fix for PAE
*/
mem_sz = arc_get_mem_sz();
if (!is_power_of_2(mem_sz) || mem_sz < 4096)
panic("IOC Aperture size must be power of 2 larger than 4KB");
/*
* IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
* so setting 0x11 implies 512MB, 0x12 implies 1GB...
*/
write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
/* for now assume kernel base is start of IOC aperture */
ioc_base = CONFIG_LINUX_RAM_BASE;
if (ioc_base % mem_sz != 0)
panic("IOC Aperture start must be aligned to the size of the aperture");
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
/* Re-enable L1 dcache */
__dc_enable();
}
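/*
 * Aperture size encoding example: for mem_sz = 512M,
 *	order_base_2(512M >> 10) - 2 = order_base_2(512K) - 2 = 19 - 2 = 0x11
 * which is exactly the "0x11 implies 512MB" case noted above; 1GB would
 * likewise encode to 0x12.
 */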
/*
* Cache related boot time checks/setups only needed on master CPU:
* - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
* Assume SMP only, so all cores will have same cache config. A check on
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
static noinline void __init arc_cache_init_master(void)
{
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
if (ic->line_len != L1_CACHE_BYTES)
panic("ICache line [%d] != kernel Config [%d]",
ic->line_len, L1_CACHE_BYTES);
/*
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
if (dc->line_len != L1_CACHE_BYTES)
panic("DCache line [%d] != kernel Config [%d]",
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
}
}
/*
* Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
* or equal to any cache line length.
*/
BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
"SMP_CACHE_BYTES must be >= any cache line length");
if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
panic("L2 Cache line [%d] > kernel Config [%d]\n",
l2_line_sz, SMP_CACHE_BYTES);
/* Note that SLC disable not formally supported till HS 3.0 */
if (is_isa_arcv2() && l2_line_sz && !slc_enable)
arc_slc_disable();
if (is_isa_arcv2() && ioc_exists)
arc_ioc_setup();
if (is_isa_arcv2() && l2_line_sz && slc_enable) {
__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
__dma_cache_inv = __dma_cache_inv_slc;
__dma_cache_wback = __dma_cache_wback_slc;
} else {
__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
__dma_cache_inv = __dma_cache_inv_l1;
__dma_cache_wback = __dma_cache_wback_l1;
}
/*
* In case of IOC (say IOC+SLC case), pointers above could still be set
* but end up not being relevant as the first function in chain is not
* called at all for devices using coherent DMA.
* arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
*/
}
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
if (!cpu)
arc_cache_init_master();
/*
* In PAE regime, TLB and cache maintenance ops take wider addresses
* And even if PAE is not enabled in kernel, the upper 32-bits still need
* to be zeroed to keep the ops sane.
* As an optimization for more common !PAE enabled case, zero them out
* once at init, rather than checking/setting to 0 for every runtime op
*/
if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
if (l2_line_sz) {
write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
}
}
}
| linux-master | arch/arc/mm/cache.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
* HIGHMEM API:
*
* kmap() API provides sleep semantics hence referred to as "permanent maps"
* It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
* for book-keeping
*
* kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
* shortlived ala "temporary mappings" which historically were implemented as
* fixmaps (compile time addr etc). Their book-keeping is done per cpu.
*
* Both these facts combined (preemption disabled and per-cpu allocation)
* means the total number of concurrent fixmaps will be limited to max
* such allocations in a single control path. Thus KM_TYPE_NR (another
* historic relic) is a small'ish number which caps max percpu fixmaps
*
* ARC HIGHMEM Details
*
* - the kernel vaddr space from 0x7z to 0x8z (currently used by vmalloc/module)
* is now shared between vmalloc and kmap (non overlapping though)
*
* - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper PGD
* This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
* 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
*
* - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
* CPU. So the number of CPUs sharing a single PTE page is limited.
*
* - pkmap being preemptible, in theory could do with more than 256 concurrent
* mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
* the PGD and only works with a single page table @pkmap_page_table, hence
* sets the limit
*/
extern pte_t * pkmap_page_table;
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
pmd_t *pmd_k = pmd_off_k(kvaddr);
pte_t *pte_k;
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
alloc_kmap_pgtable(FIXMAP_BASE);
}
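/*
 * Address budget example for the typical 8K page, 11:8:13 split mentioned
 * above: one page table holds PTRS_PER_PTE = 256 entries, so
 *	pkmap window  = 256 * 8K = 2M	(LAST_PKMAP slots)
 *	fixmap window = another 2M, carved into per-cpu kmap_local slots
 * which is why the BUILD_BUG_ON checks above cap both slot counts at
 * PTRS_PER_PTE.
 */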
| linux-master | arch/arc/mm/highmem.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>
/*
* kernel virtual address is required to implement vmalloc/pkmap/fixmap
* Refer to asm/processor.h for System Memory Map
*
* It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
* from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
*/
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pgd = pgd_offset(current->active_mm, address);
pgd_k = pgd_offset_k(address);
if (pgd_none (*pgd_k))
goto bad_area;
if (!pgd_present(*pgd))
set_pgd(pgd, *pgd_k);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (p4d_none(*p4d_k))
goto bad_area;
if (!p4d_present(*p4d))
set_p4d(p4d, *p4d_k);
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (pud_none(*pud_k))
goto bad_area;
if (!pud_present(*pud))
set_pud(pud, *pud_k);
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (pmd_none(*pmd_k))
goto bad_area;
if (!pmd_present(*pmd))
set_pmd(pmd, *pmd_k);
/* XXX: create the TLB entry here */
return 0;
bad_area:
return 1;
}
void do_page_fault(unsigned long address, struct pt_regs *regs)
{
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
int sig, si_code = SEGV_MAPERR;
unsigned int write = 0, exec = 0, mask;
vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */
unsigned int flags; /* handle_mm_fault() input */
/*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if (address >= VMALLOC_START && !user_mode(regs)) {
if (unlikely(handle_kernel_vaddr_fault(address)))
goto no_context;
else
return;
}
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (faulthandler_disabled() || !mm)
goto no_context;
if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
else if ((regs->ecr.vec == ECR_V_PROTV) &&
(regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (write)
flags |= FAULT_FLAG_WRITE;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
goto bad_area_nosemaphore;
/*
* vm_area is good, now check permissions for this memory access
*/
mask = VM_READ;
if (write)
mask = VM_WRITE;
if (exec)
mask = VM_EXEC;
if (!(vma->vm_flags & mask)) {
si_code = SEGV_ACCERR;
goto bad_area;
}
fault = handle_mm_fault(vma, address, flags, regs);
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
/*
* Fault retry nuances, mmap_lock already relinquished by core mm
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
/*
* Major/minor page fault accounting
* (in case of retry we only land here once)
*/
if (likely(!(fault & VM_FAULT_ERROR)))
/* Normal return path: fault Handled Gracefully */
return;
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
if (fault & VM_FAULT_SIGBUS) {
sig = SIGBUS;
si_code = BUS_ADRERR;
}
else {
sig = SIGSEGV;
}
tsk->thread.fault_address = address;
force_sig_fault(sig, si_code, (void __user *)address);
return;
no_context:
if (fixup_exception(regs))
return;
die("Oops", regs, address);
}
| linux-master | arch/arc/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
*/
#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
static struct cpuinfo_arc_mmu {
unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
* Caller needs to setup Index Reg (manually or via getIndex)
*/
static inline void __tlb_entry_erase(void)
{
write_aux_reg(ARC_REG_TLBPD1, 0);
if (is_pae40_enabled())
write_aux_reg(ARC_REG_TLBPD1HI, 0);
write_aux_reg(ARC_REG_TLBPD0, 0);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
static void utlb_invalidate(void)
{
write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}
#ifdef CONFIG_ARC_MMU_V3
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
unsigned int idx;
write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
idx = read_aux_reg(ARC_REG_TLBINDEX);
return idx;
}
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
unsigned int idx;
/* Locate the TLB entry for this vaddr + ASID */
idx = tlb_entry_lkup(vaddr_n_asid);
/* No error means entry found, zero it out */
if (likely(!(idx & TLB_LKUP_ERR))) {
__tlb_entry_erase();
} else {
/* Duplicate entry error */
WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
vaddr_n_asid);
}
}
static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
unsigned int idx;
/*
* First verify if entry for this vaddr+ASID already exists
* This also sets up PD0 (vaddr, ASID..) for final commit
*/
idx = tlb_entry_lkup(pd0);
/*
* If Not already present get a free slot from MMU.
* Otherwise, Probe would have located the entry and set INDEX Reg
* with existing location. This will cause Write CMD to over-write
* existing entry with new PD0 and PD1
*/
if (likely(idx & TLB_LKUP_ERR))
write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
/* setup the other half of TLB entry (pfn, rwx..) */
write_aux_reg(ARC_REG_TLBPD1, pd1);
/*
* Commit the Entry to MMU
* It doesn't sound safe to use the TLBWriteNI cmd here
* which doesn't flush uTLBs. I'd rather be safe than sorry.
*/
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
#else /* MMUv4 */
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}
static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
write_aux_reg(ARC_REG_TLBPD0, pd0);
if (!is_pae40_enabled()) {
write_aux_reg(ARC_REG_TLBPD1, pd1);
} else {
write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
}
write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}
#endif
/*
* Un-conditionally (without lookup) erase the entire MMU contents
*/
noinline void local_flush_tlb_all(void)
{
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
local_irq_save(flags);
/* Load PD0 and PD1 with template for a Blank Entry */
write_aux_reg(ARC_REG_TLBPD1, 0);
if (is_pae40_enabled())
write_aux_reg(ARC_REG_TLBPD1HI, 0);
write_aux_reg(ARC_REG_TLBPD0, 0);
for (entry = 0; entry < num_tlb; entry++) {
/* write this entry to the TLB */
write_aux_reg(ARC_REG_TLBINDEX, entry);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
}
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
const int stlb_idx = 0x800;
/* Blank sTLB entry */
write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);
for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
write_aux_reg(ARC_REG_TLBINDEX, entry);
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
}
}
utlb_invalidate();
local_irq_restore(flags);
}
/*
* Flush the entire MM for userland. The fastest way is to move to Next ASID
*/
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
/*
* Small optimisation courtesy IA64
* flush_mm called during fork,exit,munmap etc, multiple times as well.
* Only for fork( ) do we need to move parent to a new MMU ctxt,
* all other cases are NOPs, hence this check.
*/
if (atomic_read(&mm->mm_users) == 0)
return;
/*
* - Move to a new ASID, but only if the mm is still wired in
* (Android Binder ended up calling this for vma->mm != tsk->mm,
* causing h/w - s/w ASID to get out of sync)
* - Also get_new_mmu_context() new implementation allocates a new
* ASID only if it is not allocated already - so unallocate first
*/
destroy_context(mm);
if (current->mm == mm)
get_new_mmu_context(mm);
}
/*
* Flush a Range of TLB entries for userland.
* @start is inclusive, while @end is exclusive
* Difference between this and Kernel Range Flush is
* -Here the fastest way (if range is too large) is to move to next ASID
* without doing any explicit Shootdown
* -In case of kernel Flush, entry has to be shot down explicitly
*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
const unsigned int cpu = smp_processor_id();
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
* since this is heuristics based anyways
*/
if (unlikely((end - start) >= PAGE_SIZE * 32)) {
local_flush_tlb_mm(vma->vm_mm);
return;
}
/*
* @start moved to page start: this alone suffices for checking
* loop end condition below, w/o need for aligning @end to end
* e.g. 2000 to 4001 will anyhow loop twice
*/
start &= PAGE_MASK;
local_irq_save(flags);
if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
while (start < end) {
tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
start += PAGE_SIZE;
}
}
local_irq_restore(flags);
}
/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
* @start, @end interpreted as kvaddr
* Interestingly, shared TLB entries can also be flushed using just
* @start,@end alone (interpreted as user vaddr), although technically SASID
* is also needed. However our smart TLbProbe lookup takes care of that.
*/
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long flags;
/* exactly same as above, except for TLB entry not taking ASID */
if (unlikely((end - start) >= PAGE_SIZE * 32)) {
local_flush_tlb_all();
return;
}
start &= PAGE_MASK;
local_irq_save(flags);
while (start < end) {
tlb_entry_erase(start);
start += PAGE_SIZE;
}
local_irq_restore(flags);
}
/*
* Delete TLB entry in MMU for a given page (??? address)
* NOTE One TLB entry contains translation for single PAGE
*/
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
const unsigned int cpu = smp_processor_id();
unsigned long flags;
/* Note that it is critical that interrupts are DISABLED between
* checking the ASID and using it flush the TLB entry
*/
local_irq_save(flags);
if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
}
local_irq_restore(flags);
}
#ifdef CONFIG_SMP
struct tlb_args {
struct vm_area_struct *ta_vma;
unsigned long ta_start;
unsigned long ta_end;
};
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = arg;
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = arg;
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
struct tlb_args *ta = arg;
local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif
static inline void ipi_flush_tlb_kernel_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
void flush_tlb_all(void)
{
on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}
void flush_tlb_mm(struct mm_struct *mm)
{
on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
mm, 1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
struct tlb_args ta = {
.ta_vma = vma,
.ta_start = uaddr
};
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct tlb_args ta = {
.ta_vma = vma,
.ta_start = start,
.ta_end = end
};
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct tlb_args ta = {
.ta_vma = vma,
.ta_start = start,
.ta_end = end
};
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct tlb_args ta = {
.ta_start = start,
.ta_end = end
};
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
/*
* Routine to create a TLB entry
*/
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
unsigned long pd0;
phys_addr_t pd1;
/*
* create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
* -completes the lazy write to SASID reg (again valid for curr tsk)
*
* Removing the assumption involves
* -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
* Lets see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).
* However the stack vaddr is soon relocated (randomization) and
* move_page_tables() tries to undo that TLB entry.
* Thus not creating TLB entry is not any worse.
*
* 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
* breakpoint in debugged task. Not creating a TLB now is not
* performance critical.
*
* Both the cases above are not good enough for code churn.
*/
if (current->active_mm != vma->vm_mm)
return;
local_irq_save(flags);
vaddr &= PAGE_MASK;
/* update this PTE credentials */
pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
/* Create HW TLB(PD0,PD1) from PTE */
/* ASID for this task */
asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
/*
* ARC MMU provides fully orthogonal access bits for K/U mode,
* however Linux only saves 1 set to save PTE real-estate
* Here we convert 3 PTE bits into 6 MMU bits:
* -Kernel only entries have Kr Kw Kx 0 0 0
* -User entries have mirrored K and U bits
*/
rwx = pte_val(*ptep) & PTE_BITS_RWX;
if (pte_val(*ptep) & _PAGE_GLOBAL)
rwx <<= 3; /* r w x => Kr Kw Kx 0 0 0 */
else
rwx |= (rwx << 3); /* r w x => Kr Kw Kx Ur Uw Ux */
pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
tlb_entry_insert(pd0, pd1);
local_irq_restore(flags);
}
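/*
 * Example of the rwx expansion above: a user page with PTE r/w/x = 0b111
 * and !_PAGE_GLOBAL becomes Kr Kw Kx Ur Uw Ux = 0b111111 in PD1, while a
 * global (kernel) mapping with the same PTE bits becomes 0b111000, i.e.
 * no user mode access despite identical Linux protection bits.
 */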
/*
* Called at the end of pagefault, for a userspace mapped page
* -pre-install the corresponding TLB entry into MMU
* -Finalize the delayed D-cache flush of kernel mapping of page due to
* flush_dcache_page(), copy_user_page()
*
* Note that flush (when done) involves both WBACK - so physical page is
* in sync as well as INV - so any non-congruent aliases don't remain
*/
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
if (page == ZERO_PAGE(0)) {
return;
}
/*
* Exec page : Independent of aliasing/page-color considerations,
* since icache doesn't snoop dcache on ARC, any dirty
* K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
* !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
*/
if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {
unsigned long offset = offset_in_folio(folio, paddr);
nr = folio_nr_pages(folio);
paddr -= offset;
vaddr -= offset;
/* wback + inv dcache lines (K-mapping) */
__flush_dcache_pages(paddr, paddr, nr);
/* invalidate any existing icache lines (U-mapping) */
if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, vaddr, nr);
}
}
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* MMUv4 in HS38x cores supports Super Pages which are basis for Linux THP
* support.
*
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
* new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but is fixed once
 * the RTL is built.
*
* The exact THP size a Linux configuration will support is a function of:
* - MMU page size (typical 8K, RTL fixed)
* - software page walker address split between PGD:PTE:PFN (typical
* 11:8:13, but can be changed with 1 line)
* So for above default, THP size supported is 8K * (2^8) = 2M
*
* Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
* reduces to 1 level (as PTE is folded into PGD and canonically referred
* to as PMD).
* Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
*/
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
pte_t pte = __pte(pmd_val(*pmd));
update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned int cpu;
unsigned long flags;
local_irq_save(flags);
cpu = smp_processor_id();
if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
unsigned int asid = hw_pid(vma->vm_mm, cpu);
/* No need to loop here: this will always be for 1 Huge Page */
tlb_entry_erase(start | _PAGE_HW_SZ | asid);
}
local_irq_restore(flags);
}
#endif
/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
*/
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned int bcr, u_dtlb, u_itlb, sasid;
struct bcr_mmu_3 *mmu3;
struct bcr_mmu_4 *mmu4;
char super_pg[64] = "";
int n = 0;
bcr = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (bcr >> 24);
if (is_isa_arcompact() && mmu->ver == 3) {
mmu3 = (struct bcr_mmu_3 *)&bcr;
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu->sets = 1 << mmu3->sets;
mmu->ways = 1 << mmu3->ways;
u_dtlb = mmu3->u_dtlb;
u_itlb = mmu3->u_itlb;
sasid = mmu3->sasid;
} else {
mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
u_dtlb = mmu4->u_dtlb * 4;
u_itlb = mmu4->u_itlb * 4;
sasid = mmu4->sasid;
mmu->pae = mmu4->pae;
}
if (mmu->s_pg_sz_m)
scnprintf(super_pg, 64, "/%dM%s",
mmu->s_pg_sz_m,
IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
"MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
mmu->sets, mmu->ways,
u_dtlb, u_itlb,
IS_AVAIL1(sasid, ", SASID"),
IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return n;
}
int pae40_exist_but_not_enab(void)
{
return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
/*
* Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
/*
* stack top size sanity check,
* Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
/*
* Ensure that MMU features assumed by kernel exist in hardware.
* - For older ARC700 cpus, only v3 supported
* - For HS cpus, v4 was baseline and v5 is backwards compatible
* (will run older software).
*/
if (is_isa_arcompact() && mmu->ver == 3)
compat = 1;
else if (is_isa_arcv2() && mmu->ver >= 4)
compat = 1;
if (!compat)
panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
(unsigned long)TO_MB(HPAGE_PMD_SIZE));
if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
panic("Hardware doesn't support PAE40\n");
/* Enable the MMU with ASID 0 */
mmu_setup_asid(NULL, 0);
/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
mmu_setup_pgd(NULL, swapper_pg_dir);
if (pae40_exist_but_not_enab())
write_aux_reg(ARC_REG_TLBPD1HI, 0);
}
/*
* TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
* The mapping is Column-first.
* --------------------- -----------
* |way0|way1|way2|way3| |way0|way1|
* --------------------- -----------
* [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
* [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
* ~ ~ ~ ~
* [set127] | 508| 509| 510| 511| | 254| 255|
* --------------------- -----------
 * For normal operations we don't (must not) care how the above works since
* MMU cmd getIndex(vaddr) abstracts that out.
* However for walking WAYS of a SET, we need to know this
*/
#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
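/*
 * Worked example (editor's illustration, not in the original source): with a
 * 4-way JTLB, the entry at set 2, way 3 sits at linear index
 * SET_WAY_TO_IDX(mmu, 2, 3) == 2 * 4 + 3 == 11, matching the column-first
 * table drawn above.
 */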
/* Handling of Duplicate PD (TLB entry) in MMU.
* -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU does not complain at the time of duplicate PD installation, but at the
* time of lookup matching multiple ways.
* -Ideally these should never happen - but if they do - workaround by deleting
* the duplicate one.
 * -Knob to be verbose about it. (TODO: hook it up to debugfs)
*/
volatile int dup_pd_silent; /* Be silent about it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;
n_ways = min(n_ways, 4);
BUG_ON(mmu->ways > 4);
local_irq_save(flags);
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
int is_valid, way;
unsigned int pd0[4];
/* read out all the ways of current set */
for (way = 0, is_valid = 0; way < n_ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
is_valid |= pd0[way] & _PAGE_PRESENT;
pd0[way] &= PAGE_MASK;
}
/* If all the WAYS in SET are empty, skip to next SET */
if (!is_valid)
continue;
/* Scan the set for duplicate ways: needs a nested loop */
for (way = 0; way < n_ways - 1; way++) {
int n;
if (!pd0[way])
continue;
for (n = way + 1; n < n_ways; n++) {
if (pd0[way] != pd0[n])
continue;
if (!dup_pd_silent)
pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
pd0[way], set, way, n);
/*
* clear entry @way and not @n.
* This is critical to our optimised loop
*/
pd0[way] = 0;
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
__tlb_entry_erase();
}
}
}
local_irq_restore(flags);
}
| linux-master | arch/arc/mm/tlb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* fpu.c - save/restore of Floating Point Unit Registers on task switch
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/sched.h>
#include <asm/fpu.h>
#ifdef CONFIG_ISA_ARCOMPACT
/*
* To save/restore FPU regs, simplest scheme would use LR/SR insns.
* However since SR serializes the pipeline, an alternate "hack" can be used
* which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
*
* Store to 64bit dpfp1 reg from a pair of core regs:
* dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
*
* Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
* mov_s r3, 0
* daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
* dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
* dexcl1 0, r1, r0 ; restore dpfp1 to orig value
*
* However we can tweak the read, so that read-out of outgoing task's FPU regs
* and write of incoming task's regs happen in one shot. So all the work is
* done before context switch
*/
void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
const unsigned int zero = 0;
__asm__ __volatile__(
"daddh11 %0, %2, %2\n"
"dexcl1 %1, %3, %4\n"
: "=&r" (*(saveto + 1)), /* early clobber must here */
"=&r" (*(saveto))
: "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
);
__asm__ __volatile__(
"daddh22 %0, %2, %2\n"
"dexcl2 %1, %3, %4\n"
: "=&r"(*(saveto + 3)), /* early clobber must here */
"=&r"(*(saveto + 2))
: "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
);
}
#else
void fpu_init_task(struct pt_regs *regs)
{
const unsigned int fwe = 0x80000000;
/* default rounding mode */
write_aux_reg(ARC_REG_FPU_CTRL, 0x100);
/* Initialize to zero: setting requires FWE be set */
write_aux_reg(ARC_REG_FPU_STATUS, fwe);
}
void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
{
struct arc_fpu *save = &prev->thread.fpu;
struct arc_fpu *restore = &next->thread.fpu;
const unsigned int fwe = 0x80000000;
save->ctrl = read_aux_reg(ARC_REG_FPU_CTRL);
save->status = read_aux_reg(ARC_REG_FPU_STATUS);
write_aux_reg(ARC_REG_FPU_CTRL, restore->ctrl);
write_aux_reg(ARC_REG_FPU_STATUS, (fwe | restore->status));
}
#endif
| linux-master | arch/arc/kernel/fpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*
* Based on reduced version of METAG
*/
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/mach_desc.h>
#include <asm/serial.h>
#ifdef CONFIG_SERIAL_EARLYCON
static unsigned int __initdata arc_base_baud;
unsigned int __init arc_early_base_baud(void)
{
return arc_base_baud/16;
}
static void __init arc_set_early_base_baud(unsigned long dt_root)
{
if (of_flat_dt_is_compatible(dt_root, "abilis,arc-tb10x"))
arc_base_baud = 166666666; /* Fixed 166.6MHz clk (TB10x) */
else if (of_flat_dt_is_compatible(dt_root, "snps,arc-sdp") ||
of_flat_dt_is_compatible(dt_root, "snps,hsdk"))
arc_base_baud = 33333333; /* Fixed 33MHz clk (AXS10x & HSDK) */
else
arc_base_baud = 50000000; /* Fixed default 50MHz */
}
#else
#define arc_set_early_base_baud(dt_root)
#endif
static const void * __init arch_get_next_mach(const char *const **match)
{
static const struct machine_desc *mdesc = __arch_info_begin;
const struct machine_desc *m = mdesc;
if (m >= __arch_info_end)
return NULL;
mdesc++;
*match = m->dt_compat;
return m;
}
/**
 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
* @dt: virtual address pointer to dt blob
*
* If a dtb was passed to the kernel, then use it to choose the correct
* machine_desc and to setup the system.
*/
const struct machine_desc * __init setup_machine_fdt(void *dt)
{
const struct machine_desc *mdesc;
unsigned long dt_root;
if (!early_init_dt_scan(dt))
return NULL;
mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
if (!mdesc)
machine_halt();
dt_root = of_get_flat_dt_root();
arc_set_early_base_baud(dt_root);
return mdesc;
}
| linux-master | arch/arc/kernel/devtree.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Amit Bhor, Kanika Nema: Codito Technologies 2004
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>
#include <asm/fpu.h>
SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
return 0;
}
/*
* We return the user space TLS data ptr as sys-call return code
 * Ideally it should be copied to user.
* However we can cheat by the fact that some sys-calls do return
* absurdly high values
 * Since the tls data ptr is not going to be in the range of 0xFFFF_xxxx
* it won't be considered a sys-call error
* and it will be loads better than copy-to-user, which is a definite
* D-TLB Miss
*/
SYSCALL_DEFINE0(arc_gettls)
{
return task_thread_info(current)->thr_ptr;
}
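/*
 * Usage sketch (editor's addition, illustrative only - assumes the ARC
 * specific syscall numbers exported by the uapi headers): userspace TLS
 * setup would go through the raw syscall interface, roughly
 *
 *	syscall(__NR_arc_settls, tls_block);
 *	void *tls = (void *)syscall(__NR_arc_gettls);
 *
 * relying on the "absurdly high value" argument above to tell the returned
 * pointer apart from an errno.
 */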
SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new)
{
struct pt_regs *regs = current_pt_regs();
u32 uval;
int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
/* Z indicates to userspace if operation succeeded */
regs->status32 &= ~STATUS_Z_MASK;
ret = access_ok(uaddr, sizeof(*uaddr));
if (!ret)
goto fail;
again:
preempt_disable();
ret = __get_user(uval, uaddr);
if (ret)
goto fault;
if (uval != expected)
goto out;
ret = __put_user(new, uaddr);
if (ret)
goto fault;
regs->status32 |= STATUS_Z_MASK;
out:
preempt_enable();
return uval;
fault:
preempt_enable();
if (unlikely(ret != -EFAULT))
goto fail;
mmap_read_lock(current->mm);
ret = fixup_user_fault(current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
mmap_read_unlock(current->mm);
if (likely(!ret))
goto again;
fail:
force_sig(SIGSEGV);
return ret;
}
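/*
 * Minimal usage sketch (editor's addition, illustrative only - assumes
 * __NR_arc_usr_cmpxchg from the ARC uapi headers): a naive userspace
 * spinlock acquire built on this emulation would loop roughly as
 *
 *	while (syscall(__NR_arc_usr_cmpxchg, &lock, 0, 1) != 0)
 *		;	// old value non-zero: somebody else holds it
 *
 * with the Z flag set above giving libc an extra success indication.
 */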
#ifdef CONFIG_ISA_ARCV2
void arch_cpu_idle(void)
{
/* Re-enable interrupts <= default irq priority before committing SLEEP */
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
__asm__ __volatile__(
"sleep %0 \n"
:
:"I"(arg)); /* can't be "r" has to be embedded const */
raw_local_irq_disable();
}
#else /* ARC700 */
void arch_cpu_idle(void)
{
/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n");
raw_local_irq_disable();
}
#endif
asmlinkage void ret_from_fork(void);
/*
* Copy architecture-specific thread state
*
* Layout of Child kernel mode stack as setup at the end of this function is
*
* | ... |
* | ... |
* | unused |
* | |
* ------------------
* | r25 | <==== top of Stack (thread_info.ksp)
* ~ ~
* | --to-- | (CALLEE Regs of kernel mode)
* | r13 |
* ------------------
* | fp |
* | blink | @ret_from_fork
* ------------------
* | |
* ~ ~
* ~ ~
* | |
* ------------------
* | r12 |
* ~ ~
* | --to-- | (scratch Regs of user mode)
* | r0 |
* ------------------
* | SP |
* | orig_r0 |
* | event/ECR |
* ------------------ <===== END of PAGE
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *c_regs; /* child's pt_regs */
unsigned long *childksp; /* to unwind out of __switch_to() */
struct callee_regs *c_callee; /* child's callee regs */
	struct callee_regs *parent_callee;	/* parent's callee */
struct pt_regs *regs = current_pt_regs();
/* Mark the specific anchors to begin with (see pic above) */
c_regs = task_pt_regs(p);
childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
c_callee = ((struct callee_regs *)childksp) - 1;
/*
* __switch_to() uses thread_info.ksp to start unwinding stack
* For kernel threads we don't need to create callee regs, the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to anyways unwinds callee regs, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
task_thread_info(p)->ksp = (unsigned long)c_callee; /* THREAD_INFO_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
childksp[1] = (unsigned long)ret_from_fork; /* blink */
if (unlikely(args->fn)) {
memset(c_regs, 0, sizeof(struct pt_regs));
c_callee->r13 = (unsigned long)args->fn_arg;
c_callee->r14 = (unsigned long)args->fn;
return 0;
}
/*--------- User Task Only --------------*/
/* __switch_to expects FP(0), BLINK(return addr) at top of stack */
childksp[0] = 0; /* for POP fp */
childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
/* Copy parents pt regs on child's kernel mode stack */
*c_regs = *regs;
if (usp)
c_regs->sp = usp;
c_regs->r0 = 0; /* fork returns 0 in child */
parent_callee = ((struct callee_regs *)regs) - 1;
*c_callee = *parent_callee;
if (unlikely(clone_flags & CLONE_SETTLS)) {
/*
* set task's userland tls data ptr from 4th arg
	 * clone C-lib call is different from clone sys-call
*/
task_thread_info(p)->thr_ptr = tls;
} else {
/* Normal fork case: set parent's TLS ptr in child */
task_thread_info(p)->thr_ptr =
task_thread_info(current)->thr_ptr;
}
/*
* setup usermode thread pointer #1:
* when child is picked by scheduler, __switch_to() uses @c_callee to
* populate usermode callee regs: this works (despite being in a kernel
* function) since special return path for child @ret_from_fork()
* ensures those regs are not clobbered all the way to RTIE to usermode
*/
c_callee->r25 = task_thread_info(p)->thr_ptr;
return 0;
}
/*
* Do necessary setup to start up a new user task
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
{
regs->sp = usp;
regs->ret = pc;
/*
* [U]ser Mode bit set
* [L] ZOL loop inhibited to begin with - cleared by a LP insn
* Interrupts enabled
*/
regs->status32 = STATUS_U_MASK | STATUS_L_MASK | ISA_INIT_STATUS_BITS;
fpu_init_task(regs);
/* bogus seed values for debugging */
regs->lp_start = 0x10;
regs->lp_end = 0x80;
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
if (x->e_machine != EM_ARC_INUSE) {
pr_err("ELF not built for %s ISA\n",
is_isa_arcompact() ? "ARCompact":"ARCv2");
return 0;
}
eflags = x->e_flags;
if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
pr_err("ABI mismatch - you need newer toolchain\n");
force_fatal_sig(SIGSEGV);
return 0;
}
return 1;
}
EXPORT_SYMBOL(elf_check_arch);
| linux-master | arch/arc/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
#define sys_clone3 sys_clone3_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[NR_syscalls] = {
[0 ... NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
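/*
 * Editor's note: the designated initializer above first points every slot at
 * sys_ni_syscall; the __SYSCALL() expansions pulled in from <asm/unistd.h>
 * then overwrite the slots ARC actually implements.
 */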
| linux-master | arch/arc/kernel/sys.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <asm/mach_desc.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
/*
* Late Interrupt system init called from start_kernel for Boot CPU only
*
* Since slab must already be initialized, platforms can start doing any
 * needed request_irq()s
*/
void __init init_IRQ(void)
{
/*
* process the entire interrupt tree in one go
* Any external intc will be setup provided DT chains them
* properly
*/
irqchip_init();
#ifdef CONFIG_SMP
/* a SMP H/w block could do IPI IRQ request here */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
#endif
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
}
/*
* "C" Entry point for any ARC ISR, called from low level vector handler
* @irq is the vector number read from ICAUSE reg of on-chip intc
*/
void arch_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
{
struct pt_regs *old_regs;
irq_enter();
old_regs = set_irq_regs(regs);
generic_handle_domain_irq(NULL, hwirq);
set_irq_regs(old_regs);
irq_exit();
}
| linux-master | arch/arc/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
*
* Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>
static DEFINE_RAW_SPINLOCK(mcip_lock);
#ifdef CONFIG_SMP
static char smp_cpuinfo_buf[128];
/*
* Set mask to halt GFRC if any online core in SMP cluster is halted.
* Only works for ARC HS v3.0+, on earlier versions has no effect.
*/
static void mcip_update_gfrc_halt_mask(int cpu)
{
struct bcr_generic gfrc;
unsigned long flags;
u32 gfrc_halt_mask;
READ_BCR(ARC_REG_GFRC_BUILD, gfrc);
/*
* CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
* GFRC 0x3 version.
*/
if (gfrc.ver < 0x3)
return;
raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd(CMD_GFRC_READ_CORE, 0);
gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
gfrc_halt_mask |= BIT(cpu);
__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_update_debug_halt_mask(int cpu)
{
u32 mcip_mask = 0;
unsigned long flags;
raw_spin_lock_irqsave(&mcip_lock, flags);
/*
* mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
* commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
* and CMD_DEBUG_READ_SELECT.
*/
__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
mcip_mask |= BIT(cpu);
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
/*
* Parameter specified halt cause:
* STATUS32[H]/actionpoint/breakpoint/self-halt
* We choose all of them (0xF).
*/
__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_setup_per_cpu(int cpu)
{
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
smp_ipi_irq_setup(cpu, IPI_IRQ);
smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
/* Update GFRC halt mask as new CPU came online */
if (mp.gfrc)
mcip_update_gfrc_halt_mask(cpu);
/* Update MCIP debug mask as new CPU came online */
if (mp.dbg)
mcip_update_debug_halt_mask(cpu);
}
static void mcip_ipi_send(int cpu)
{
unsigned long flags;
int ipi_was_pending;
/* ARConnect can only send IPI to others */
if (unlikely(cpu == raw_smp_processor_id())) {
arc_softirq_trigger(SOFTIRQ_IRQ);
return;
}
raw_spin_lock_irqsave(&mcip_lock, flags);
/*
* If receiver already has a pending interrupt, elide sending this one.
* Linux cross core calling works well with concurrent IPIs
* coalesced into one
* see arch/arc/kernel/smp.c: ipi_send_msg_one()
*/
__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
if (!ipi_was_pending)
__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_ipi_clear(int irq)
{
unsigned int cpu, c;
unsigned long flags;
if (unlikely(irq == SOFTIRQ_IRQ)) {
arc_softirq_clear(irq);
return;
}
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
/*
	 * In rare cases, multiple concurrent IPIs sent to the same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit
*/
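	/*
	 * Illustration (editor's note): a READBACK of 0x6 means cores 1 and 2
	 * both have an IPI pending; the loop below then ACKs core 1 first,
	 * followed by core 2.
	 */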
do {
c = __ffs(cpu); /* 0,1,2,3 */
__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
cpu &= ~(1U << c);
} while (cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void mcip_probe_n_setup(void)
{
struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
}
struct plat_smp_ops plat_smp_ops = {
.info = smp_cpuinfo_buf,
.init_early_smp = mcip_probe_n_setup,
.init_per_cpu = mcip_setup_per_cpu,
.ipi_send = mcip_ipi_send,
.ipi_clear = mcip_ipi_clear,
};
#endif
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
* Connects external "COMMON" IRQs to core intc, providing:
* -dynamic routing (IRQ affinity)
* -load balancing (Round Robin interrupt distribution)
* -1:N distribution
*
* It physically resides in the MCIP hw block
*/
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>
/*
* Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
*/
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
bool set_distr, unsigned int distr)
{
union {
unsigned int word;
struct {
unsigned int distr:2, pad:2, lvl:1, pad2:27;
};
} data;
data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
if (set_distr)
data.distr = distr;
if (set_lvl)
data.lvl = lvl;
__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
unsigned long flags;
raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void idu_irq_mask(struct irq_data *data)
{
idu_irq_mask_raw(data->hwirq);
}
static void idu_irq_unmask(struct irq_data *data)
{
unsigned long flags;
raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void idu_irq_ack(struct irq_data *data)
{
unsigned long flags;
raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static void idu_irq_mask_ack(struct irq_data *data)
{
unsigned long flags;
raw_spin_lock_irqsave(&mcip_lock, flags);
__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)
{
unsigned long flags;
cpumask_t online;
unsigned int destination_bits;
unsigned int distribution_mode;
	/* error out if no online cpu in @cpumask */
if (!cpumask_and(&online, cpumask, cpu_online_mask))
return -EINVAL;
raw_spin_lock_irqsave(&mcip_lock, flags);
destination_bits = cpumask_bits(&online)[0];
idu_set_dest(data->hwirq, destination_bits);
if (ffs(destination_bits) == fls(destination_bits))
distribution_mode = IDU_M_DISTRI_DEST;
else
distribution_mode = IDU_M_DISTRI_RR;
idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
return IRQ_SET_MASK_OK;
}
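/*
 * Editor's gloss on the ffs()/fls() check above: for a single-CPU mask such
 * as 0x4 both return 3, so the IRQ is pinned to that core (IDU_M_DISTRI_DEST);
 * for 0x6 they differ (2 vs 3) and the IRQ is round-robined across the cores
 * (IDU_M_DISTRI_RR).
 */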
static int idu_irq_set_type(struct irq_data *data, u32 type)
{
unsigned long flags;
/*
* ARCv2 IDU HW does not support inverse polarity, so these are the
* only interrupt types supported.
*/
if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
return -EINVAL;
raw_spin_lock_irqsave(&mcip_lock, flags);
idu_set_mode(data->hwirq, true,
type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
IDU_M_TRIG_LEVEL,
false, 0);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
return 0;
}
static void idu_irq_enable(struct irq_data *data)
{
/*
* By default send all common interrupts to all available online CPUs.
* The affinity of common interrupts in IDU must be set manually since
* in some cases the kernel will not call irq_set_affinity() by itself:
* 1. When the kernel is not configured with support of SMP.
	 * 2. When the kernel is configured with support of SMP but the upper
	 *    interrupt controller does not support setting the affinity
	 *    and cannot propagate it to IDU.
*/
idu_irq_set_affinity(data, cpu_online_mask, false);
idu_irq_unmask(data);
}
static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
.irq_ack = idu_irq_ack,
.irq_mask_ack = idu_irq_mask_ack,
.irq_enable = idu_irq_enable,
.irq_set_type = idu_irq_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif
};
static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
struct irq_chip *core_chip = irq_desc_get_chip(desc);
irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;
chained_irq_enter(core_chip, desc);
generic_handle_domain_irq(idu_domain, idu_hwirq);
chained_irq_exit(core_chip, desc);
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
return 0;
}
static const struct irq_domain_ops idu_irq_ops = {
.xlate = irq_domain_xlate_onetwocell,
.map = idu_irq_map,
};
/*
* [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
* [24, 23+C]: If C > 0 then "C" common IRQs
* [24+C, N]: Not statically assigned, private-per-core
*/
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
struct irq_domain *domain;
int nr_irqs;
int i, virq;
struct mcip_bcr mp;
struct mcip_idu_bcr idu_bcr;
READ_BCR(ARC_REG_MCIP_BCR, mp);
if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);
domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);
/* Parent interrupts (core-intc) are already mapped */
for (i = 0; i < nr_irqs; i++) {
/* Mask all common interrupts by default */
idu_irq_mask_raw(i);
/*
		 * Map the parent uplink IRQs (towards core intc) 24,25,...
		 * This mapping has been done before already, however we need it
		 * here to get the parent virq so the IDU cascade handler can be
		 * installed as the first level isr
*/
virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
BUG_ON(!virq);
irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
}
__mcip_cmd(CMD_IDU_ENABLE, 0);
return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);
| linux-master | arch/arc/kernel/mcip.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/ptrace.h>
#include <linux/sched/task_stack.h>
#include <linux/regset.h>
#include <linux/unistd.h>
#include <linux/elf.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#ifdef CONFIG_ISA_ARCOMPACT
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(bta),
REG_OFFSET_NAME(lp_start),
REG_OFFSET_NAME(lp_end),
REG_OFFSET_NAME(lp_count),
REG_OFFSET_NAME(status32),
REG_OFFSET_NAME(ret),
REG_OFFSET_NAME(blink),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(r26),
REG_OFFSET_NAME(r12),
REG_OFFSET_NAME(r11),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r5),
REG_OFFSET_NAME(r4),
REG_OFFSET_NAME(r3),
REG_OFFSET_NAME(r2),
REG_OFFSET_NAME(r1),
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(ecr),
REG_OFFSET_END,
};
#else
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(orig_r0),
REG_OFFSET_NAME(ecr),
REG_OFFSET_NAME(bta),
REG_OFFSET_NAME(r26),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(r12),
REG_OFFSET_NAME(r30),
#ifdef CONFIG_ARC_HAS_ACCL_REGS
REG_OFFSET_NAME(r58),
REG_OFFSET_NAME(r59),
#endif
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
REG_OFFSET_NAME(DSP_CTRL),
#endif
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(r1),
REG_OFFSET_NAME(r2),
REG_OFFSET_NAME(r3),
REG_OFFSET_NAME(r4),
REG_OFFSET_NAME(r5),
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(r11),
REG_OFFSET_NAME(blink),
REG_OFFSET_NAME(lp_end),
REG_OFFSET_NAME(lp_start),
REG_OFFSET_NAME(lp_count),
REG_OFFSET_NAME(ei),
REG_OFFSET_NAME(ldi),
REG_OFFSET_NAME(jli),
REG_OFFSET_NAME(ret),
REG_OFFSET_NAME(status32),
REG_OFFSET_END,
};
#endif
static struct callee_regs *task_callee_regs(struct task_struct *tsk)
{
struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
return tmp;
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *ptregs = task_pt_regs(target);
const struct callee_regs *cregs = task_callee_regs(target);
unsigned int stop_pc_val;
membuf_zero(&to, 4); // pad
membuf_store(&to, ptregs->bta);
membuf_store(&to, ptregs->lp_start);
membuf_store(&to, ptregs->lp_end);
membuf_store(&to, ptregs->lp_count);
membuf_store(&to, ptregs->status32);
membuf_store(&to, ptregs->ret);
membuf_store(&to, ptregs->blink);
membuf_store(&to, ptregs->fp);
membuf_store(&to, ptregs->r26); // gp
membuf_store(&to, ptregs->r12);
membuf_store(&to, ptregs->r11);
membuf_store(&to, ptregs->r10);
membuf_store(&to, ptregs->r9);
membuf_store(&to, ptregs->r8);
membuf_store(&to, ptregs->r7);
membuf_store(&to, ptregs->r6);
membuf_store(&to, ptregs->r5);
membuf_store(&to, ptregs->r4);
membuf_store(&to, ptregs->r3);
membuf_store(&to, ptregs->r2);
membuf_store(&to, ptregs->r1);
membuf_store(&to, ptregs->r0);
membuf_store(&to, ptregs->sp);
membuf_zero(&to, 4); // pad2
membuf_store(&to, cregs->r25);
membuf_store(&to, cregs->r24);
membuf_store(&to, cregs->r23);
membuf_store(&to, cregs->r22);
membuf_store(&to, cregs->r21);
membuf_store(&to, cregs->r20);
membuf_store(&to, cregs->r19);
membuf_store(&to, cregs->r18);
membuf_store(&to, cregs->r17);
membuf_store(&to, cregs->r16);
membuf_store(&to, cregs->r15);
membuf_store(&to, cregs->r14);
membuf_store(&to, cregs->r13);
membuf_store(&to, target->thread.fault_address); // efa
if (in_brkpt_trap(ptregs)) {
stop_pc_val = target->thread.fault_address;
pr_debug("\t\tstop_pc (brk-pt)\n");
} else {
stop_pc_val = ptregs->ret;
pr_debug("\t\tstop_pc (others)\n");
}
return membuf_store(&to, stop_pc_val); // stop_pc
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const struct pt_regs *ptregs = task_pt_regs(target);
const struct callee_regs *cregs = task_callee_regs(target);
int ret = 0;
#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
(void *)(PTR), \
offsetof(struct user_regs_struct, FIRST), \
offsetof(struct user_regs_struct, NEXT));
#define REG_IN_ONE(LOC, PTR) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
(void *)(PTR), \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
#define REG_IGNORE_ONE(LOC) \
if (!ret) \
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
offsetof(struct user_regs_struct, LOC), \
offsetof(struct user_regs_struct, LOC) + 4);
REG_IGNORE_ONE(pad);
REG_IN_ONE(scratch.bta, &ptregs->bta);
REG_IN_ONE(scratch.lp_start, &ptregs->lp_start);
REG_IN_ONE(scratch.lp_end, &ptregs->lp_end);
REG_IN_ONE(scratch.lp_count, &ptregs->lp_count);
REG_IGNORE_ONE(scratch.status32);
REG_IN_ONE(scratch.ret, &ptregs->ret);
REG_IN_ONE(scratch.blink, &ptregs->blink);
REG_IN_ONE(scratch.fp, &ptregs->fp);
REG_IN_ONE(scratch.gp, &ptregs->r26);
REG_IN_ONE(scratch.r12, &ptregs->r12);
REG_IN_ONE(scratch.r11, &ptregs->r11);
REG_IN_ONE(scratch.r10, &ptregs->r10);
REG_IN_ONE(scratch.r9, &ptregs->r9);
REG_IN_ONE(scratch.r8, &ptregs->r8);
REG_IN_ONE(scratch.r7, &ptregs->r7);
REG_IN_ONE(scratch.r6, &ptregs->r6);
REG_IN_ONE(scratch.r5, &ptregs->r5);
REG_IN_ONE(scratch.r4, &ptregs->r4);
REG_IN_ONE(scratch.r3, &ptregs->r3);
REG_IN_ONE(scratch.r2, &ptregs->r2);
REG_IN_ONE(scratch.r1, &ptregs->r1);
REG_IN_ONE(scratch.r0, &ptregs->r0);
REG_IN_ONE(scratch.sp, &ptregs->sp);
REG_IGNORE_ONE(pad2);
REG_IN_ONE(callee.r25, &cregs->r25);
REG_IN_ONE(callee.r24, &cregs->r24);
REG_IN_ONE(callee.r23, &cregs->r23);
REG_IN_ONE(callee.r22, &cregs->r22);
REG_IN_ONE(callee.r21, &cregs->r21);
REG_IN_ONE(callee.r20, &cregs->r20);
REG_IN_ONE(callee.r19, &cregs->r19);
REG_IN_ONE(callee.r18, &cregs->r18);
REG_IN_ONE(callee.r17, &cregs->r17);
REG_IN_ONE(callee.r16, &cregs->r16);
REG_IN_ONE(callee.r15, &cregs->r15);
REG_IN_ONE(callee.r14, &cregs->r14);
REG_IN_ONE(callee.r13, &cregs->r13);
REG_IGNORE_ONE(efa); /* efa update invalid */
REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
return ret;
}
#ifdef CONFIG_ISA_ARCV2
static int arcv2regs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
/*
* itemized copy not needed like above as layout of regs (r30,r58,r59)
* is exactly same in kernel (pt_regs) and userspace (user_regs_arcv2)
*/
		return membuf_write(&to, &regs->r30, sizeof(struct user_regs_arcv2));
	membuf_write(&to, &regs->r30, 4); /* r30 only */
return membuf_zero(&to, sizeof(struct user_regs_arcv2) - 4);
}
static int arcv2regs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret, copy_sz;
if (IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS))
copy_sz = sizeof(struct user_regs_arcv2);
else
copy_sz = 4; /* r30 only */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, (void *)&regs->r30,
0, copy_sz);
return ret;
}
#endif
enum arc_getset {
REGSET_CMN,
REGSET_ARCV2,
};
static const struct user_regset arc_regsets[] = {
[REGSET_CMN] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.regset_get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_ISA_ARCV2
[REGSET_ARCV2] = {
.core_note_type = NT_ARC_V2,
.n = ELF_ARCV2REG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.regset_get = arcv2regs_get,
.set = arcv2regs_set,
},
#endif
};
static const struct user_regset_view user_arc_view = {
.name = "arc",
.e_machine = EM_ARC_INUSE,
.regsets = arc_regsets,
.n = ARRAY_SIZE(arc_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_arc_view;
}
void ptrace_disable(struct task_struct *child)
{
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EIO;
pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
switch (request) {
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->thr_ptr,
(unsigned long __user *)data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (ptrace_report_syscall_entry(regs))
return ULONG_MAX;
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs));
#endif
return regs->r8;
}
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, 0);
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs_return_value(regs));
#endif
}
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return (addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
}
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
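/*
 * Usage sketch (editor's addition): the generic kprobe-event code reaches
 * here for "$stackN" fetch args, e.g. regs_get_kernel_stack_nth(regs, 2)
 * reads the third word above the kernel SP and returns 0 once the address
 * walks off the current THREAD_SIZE aligned stack.
 */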
| linux-master | arch/arc/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* several functions that help interpret ARC instructions
* used for unaligned accesses, kprobes and kgdb
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_EMUL_UNALIGNED) || \
defined(CONFIG_KPROBES)
/* disasm_instr: Analyses instruction at addr, stores
* findings in *state
*/
void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
int userspace, struct pt_regs *regs, struct callee_regs *cregs)
{
int fieldA = 0;
int fieldC = 0, fieldCisReg = 0;
uint16_t word1 = 0, word0 = 0;
int subopcode, is_linked, op_format;
uint16_t *ins_ptr;
uint16_t ins_buf[4];
int bytes_not_copied = 0;
memset(state, 0, sizeof(struct disasm_state));
/* This fetches the upper part of the 32 bit instruction
	 * in both Little Endian and Big Endian configurations. */
if (userspace) {
bytes_not_copied = copy_from_user(ins_buf,
(const void __user *) addr, 8);
if (bytes_not_copied > 6)
goto fault;
ins_ptr = ins_buf;
} else {
ins_ptr = (uint16_t *) addr;
}
word1 = *((uint16_t *)addr);
state->major_opcode = (word1 >> 11) & 0x1F;
/* Check if the instruction is 32 bit or 16 bit instruction */
if (state->major_opcode < 0x0B) {
if (bytes_not_copied > 4)
goto fault;
state->instr_len = 4;
word0 = *((uint16_t *)(addr+2));
state->words[0] = (word1 << 16) | word0;
} else {
state->instr_len = 2;
state->words[0] = word1;
}
/* Read the second word in case of limm */
word1 = *((uint16_t *)(addr + state->instr_len));
word0 = *((uint16_t *)(addr + state->instr_len + 2));
state->words[1] = (word1 << 16) | word0;
switch (state->major_opcode) {
case op_Bcc:
state->is_branch = 1;
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 16)) ?
FIELD_s25(state->words[0]) :
FIELD_s21(state->words[0]);
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->flow = direct_jump;
break;
case op_BLcc:
if (IS_BIT(state->words[0], 16)) {
/* Branch and Link*/
/* unconditional branch s25, conditional branch s21 */
fieldA = (IS_BIT(state->words[0], 17)) ?
(FIELD_s25(state->words[0]) & ~0x3) :
FIELD_s21(state->words[0]);
state->flow = direct_call;
} else {
/*Branch On Compare */
fieldA = FIELD_s9(state->words[0]) & ~0x3;
state->flow = direct_jump;
}
state->delay_slot = IS_BIT(state->words[0], 5);
state->target = fieldA + (addr & ~0x3);
state->is_branch = 1;
break;
case op_LD: /* LD<zz> a,[b,s9] */
state->write = 0;
state->di = BITS(state->words[0], 11, 11);
if (state->di)
break;
state->x = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 7, 8);
state->aa = BITS(state->words[0], 9, 10);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->aa = 0;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs, cregs);
}
state->src2 = FIELD_s9(state->words[0]);
state->dest = FIELD_A(state->words[0]);
state->pref = (state->dest == REG_LIMM);
break;
case op_ST:
state->write = 1;
state->di = BITS(state->words[0], 5, 5);
if (state->di)
break;
state->aa = BITS(state->words[0], 3, 4);
state->zz = BITS(state->words[0], 1, 2);
state->src1 = FIELD_C(state->words[0]);
if (state->src1 == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->src1, regs, cregs);
}
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->aa = 0;
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->wb_reg, regs, cregs);
}
state->src3 = FIELD_s9(state->words[0]);
break;
case op_MAJOR_4:
subopcode = MINOR_OPCODE(state->words[0]);
switch (subopcode) {
case 32: /* Jcc */
case 33: /* Jcc.D */
case 34: /* JLcc */
case 35: /* JLcc.D */
is_linked = 0;
if (subopcode == 33 || subopcode == 35)
state->delay_slot = 1;
if (subopcode == 34 || subopcode == 35)
is_linked = 1;
fieldCisReg = 0;
op_format = BITS(state->words[0], 22, 23);
if (op_format == 0 || ((op_format == 3) &&
(!IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
if (fieldC == REG_LIMM) {
fieldC = state->words[1];
state->instr_len += 4;
} else {
fieldCisReg = 1;
}
} else if (op_format == 1 || ((op_format == 3)
&& (IS_BIT(state->words[0], 5)))) {
fieldC = FIELD_C(state->words[0]);
} else {
/* op_format == 2 */
fieldC = FIELD_s12(state->words[0]);
}
if (!fieldCisReg) {
state->target = fieldC;
state->flow = is_linked ?
direct_call : direct_jump;
} else {
state->target = get_reg(fieldC, regs, cregs);
state->flow = is_linked ?
indirect_call : indirect_jump;
}
state->is_branch = 1;
break;
case 40: /* LPcc */
if (BITS(state->words[0], 22, 23) == 3) {
/* Conditional LPcc u7 */
fieldC = FIELD_C(state->words[0]);
fieldC = fieldC << 1;
fieldC += (addr & ~0x03);
state->is_branch = 1;
state->flow = direct_jump;
state->target = fieldC;
}
/* For Unconditional lp, next pc is the fall through
* which is updated */
break;
case 48 ... 55: /* LD a,[b,c] */
state->di = BITS(state->words[0], 15, 15);
if (state->di)
break;
state->x = BITS(state->words[0], 16, 16);
state->zz = BITS(state->words[0], 17, 18);
state->aa = BITS(state->words[0], 22, 23);
state->wb_reg = FIELD_B(state->words[0]);
if (state->wb_reg == REG_LIMM) {
state->instr_len += 4;
state->src1 = state->words[1];
} else {
state->src1 = get_reg(state->wb_reg, regs,
cregs);
}
state->src2 = FIELD_C(state->words[0]);
if (state->src2 == REG_LIMM) {
state->instr_len += 4;
state->src2 = state->words[1];
} else {
state->src2 = get_reg(state->src2, regs,
cregs);
}
state->dest = FIELD_A(state->words[0]);
if (state->dest == REG_LIMM)
state->pref = 1;
break;
case 10: /* MOV */
/* still need to check for limm to extract instr len */
/* MOV is special case because it only takes 2 args */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if (FIELD_C(state->words[0]) == REG_LIMM)
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
}
break;
default:
/* Not a Load, Jump or Loop instruction */
/* still need to check for limm to extract instr len */
switch (BITS(state->words[0], 22, 23)) {
case 0: /* OP a,b,c */
if ((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case 1: /* OP a,b,u6 */
break;
case 2: /* OP b,b,s12 */
break;
case 3: /* OP.cc b,b,c/u6 */
if ((!IS_BIT(state->words[0], 5)) &&
((FIELD_B(state->words[0]) == REG_LIMM) ||
(FIELD_C(state->words[0]) == REG_LIMM)))
state->instr_len += 4;
break;
}
break;
}
break;
/* 16 Bit Instructions */
case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
state->zz = BITS(state->words[0], 3, 4);
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->dest = FIELD_S_A(state->words[0]);
break;
case op_ADD_MOV_CMP:
/* check for limm, ignore mov_s h,b (== mov_s 0,b) */
if ((BITS(state->words[0], 3, 4) < 3) &&
(FIELD_S_H(state->words[0]) == REG_LIMM))
state->instr_len += 4;
break;
case op_S:
subopcode = BITS(state->words[0], 5, 7);
switch (subopcode) {
case 0: /* j_s */
case 1: /* j_s.d */
case 2: /* jl_s */
case 3: /* jl_s.d */
state->target = get_reg(FIELD_S_B(state->words[0]),
regs, cregs);
state->delay_slot = subopcode & 1;
state->flow = (subopcode >= 2) ?
direct_call : indirect_jump;
break;
case 7:
switch (BITS(state->words[0], 8, 10)) {
case 4: /* jeq_s [blink] */
case 5: /* jne_s [blink] */
case 6: /* j_s [blink] */
case 7: /* j_s.d [blink] */
state->delay_slot = (subopcode == 7);
state->flow = indirect_jump;
state->target = get_reg(31, regs, cregs);
default:
break;
}
default:
break;
}
break;
case op_LD_S: /* LD_S c, [b, u7] */
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_LDB_S:
case op_STB_S:
/* no further handling required as byte accesses should not
* cause an unaligned access exception */
state->zz = 1;
break;
case op_LDWX_S: /* LDWX_S c, [b, u6] */
state->x = 1;
fallthrough;
case op_LDW_S: /* LDW_S c, [b, u6] */
state->zz = 2;
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src2 = FIELD_S_u6(state->words[0]);
state->dest = FIELD_S_C(state->words[0]);
break;
case op_ST_S: /* ST_S c, [b, u7] */
state->write = 1;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
break;
case op_STW_S: /* STW_S c,[b,u6] */
state->write = 1;
state->zz = 2;
state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
state->src3 = FIELD_S_u6(state->words[0]);
break;
case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
/* note: we are ignoring possibility of:
* ADD_S, SUB_S, PUSH_S, POP_S as these should not
* cause unaligned exception anyway */
state->write = BITS(state->words[0], 6, 6);
state->zz = BITS(state->words[0], 5, 5);
if (state->zz)
break; /* byte accesses should not come here */
if (!state->write) {
state->src1 = get_reg(28, regs, cregs);
state->src2 = FIELD_S_u7(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
} else {
state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
cregs);
state->src2 = get_reg(28, regs, cregs);
state->src3 = FIELD_S_u7(state->words[0]);
}
break;
case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
/* note: ADD_S r0, gp, s11 is ignored */
state->zz = BITS(state->words[0], 9, 10);
state->src1 = get_reg(26, regs, cregs);
state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
FIELD_S_s11(state->words[0]);
state->dest = 0;
break;
case op_Pcl: /* LD_S b,[pcl,u10] */
state->src1 = regs->ret & ~3;
state->src2 = FIELD_S_u10(state->words[0]);
state->dest = FIELD_S_B(state->words[0]);
break;
case op_BR_S:
state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_B_S:
fieldA = (BITS(state->words[0], 9, 10) == 3) ?
FIELD_S_s7(state->words[0]) :
FIELD_S_s10(state->words[0]);
state->target = fieldA + (addr & ~0x03);
state->flow = direct_jump;
state->is_branch = 1;
break;
case op_BL_S:
state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
state->flow = direct_call;
state->is_branch = 1;
break;
default:
break;
}
if (bytes_not_copied <= (8 - state->instr_len))
return;
fault: state->fault = 1;
}
long __kprobes get_reg(int reg, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
#if defined(CONFIG_ISA_ARCOMPACT)
if (reg <= 12) {
		p = &regs->r0;
return p[-reg];
}
#else /* CONFIG_ISA_ARCV2 */
if (reg <= 11) {
		p = &regs->r0;
return p[reg];
}
if (reg == 12)
return regs->r12;
if (reg == 30)
return regs->r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
if (reg == 58)
return regs->r58;
if (reg == 59)
return regs->r59;
#endif
#endif
if (cregs && (reg <= 25)) {
p = &cregs->r13;
return p[13 - reg];
}
if (reg == 26)
return regs->r26;
if (reg == 27)
return regs->fp;
if (reg == 28)
return regs->sp;
if (reg == 31)
return regs->blink;
return 0;
}
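/*
 * Editor's gloss on the indexing above: ARCompact pt_regs declares r12 down
 * to r0, so scratch register N lives at (&regs->r0)[-N]; ARCv2 declares them
 * the other way round, hence p[reg]. callee_regs likewise declares r25 down
 * to r13, which is what p[13 - reg] relies on.
 */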
void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
struct callee_regs *cregs)
{
long *p;
#if defined(CONFIG_ISA_ARCOMPACT)
switch (reg) {
case 0 ... 12:
		p = &regs->r0;
p[-reg] = val;
break;
case 13 ... 25:
if (cregs) {
p = &cregs->r13;
p[13 - reg] = val;
}
break;
case 26:
regs->r26 = val;
break;
case 27:
regs->fp = val;
break;
case 28:
regs->sp = val;
break;
case 31:
regs->blink = val;
break;
default:
break;
}
#else /* CONFIG_ISA_ARCV2 */
switch (reg) {
case 0 ... 11:
		p = &regs->r0;
p[reg] = val;
break;
case 12:
regs->r12 = val;
break;
case 13 ... 25:
if (cregs) {
p = &cregs->r13;
p[13 - reg] = val;
}
break;
case 26:
regs->r26 = val;
break;
case 27:
regs->fp = val;
break;
case 28:
regs->sp = val;
break;
case 30:
regs->r30 = val;
break;
case 31:
regs->blink = val;
break;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
case 58:
regs->r58 = val;
break;
case 59:
regs->r59 = val;
break;
#endif
default:
break;
}
#endif
}
/*
 * Disassembles the insn at @pc and sets @next_pc to the next PC (which could
 * be @pc + 2/4/6: the ARCompact ISA allows free intermixing of 16/32 bit insns).
*
* If @pc is a branch
* -@tgt_if_br is set to branch target.
* -If branch has delay slot, @next_pc updated with actual next PC.
*/
int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
struct callee_regs *cregs,
unsigned long *next_pc, unsigned long *tgt_if_br)
{
struct disasm_state instr;
disasm_instr(pc, &instr, 0, regs, cregs);
*next_pc = pc + instr.instr_len;
/* Instruction with possible two targets branch, jump and loop */
if (instr.is_branch)
*tgt_if_br = instr.target;
/* For the instructions with delay slots, the fall through is the
* instruction following the instruction in delay slot.
*/
if (instr.delay_slot) {
struct disasm_state instr_d;
disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
*next_pc += instr_d.instr_len;
}
/* Zero Overhead Loop - end of the loop */
if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
&& (regs->lp_count > 1)) {
*next_pc = regs->lp_start;
}
return instr.is_branch;
}
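/*
 * Hedged usage note (editor's addition): single-step style callers guarded
 * at the top of this file (kprobes/kgdb/unaligned emulation) can use
 * disasm_next_pc() to learn both possible successors of an instruction,
 * e.g. a conditional branch yields the fall-through in *next_pc and the
 * branch target in *tgt_if_br, so a breakpoint can be planted on each.
 */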
#endif /* CONFIG_KGDB || CONFIG_ARC_EMUL_UNALIGNED || CONFIG_KPROBES */
| linux-master | arch/arc/kernel/disasm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Traps/Non-MMU Exception handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* vineetg: May 2011
* -user-space unaligned access emulation
*
* Rahul Trivedi: Codito Technologies 2004
*/
#include <linux/sched/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <asm/entry.h>
#include <asm/setup.h>
#include <asm/unaligned.h>
#include <asm/kprobes.h>
void die(const char *str, struct pt_regs *regs, unsigned long address)
{
show_kernel_fault_diag(str, regs, address);
/* DEAD END */
__asm__("flag 1");
}
/*
* Helper called for bulk of exceptions NOT needing specific handling
* -for user faults enqueues requested signal
* -for kernel, chk if due to copy_(to|from)_user, otherwise die()
*/
static noinline int
unhandled_exception(const char *str, struct pt_regs *regs,
int signo, int si_code, void __user *addr)
{
if (user_mode(regs)) {
struct task_struct *tsk = current;
tsk->thread.fault_address = (__force unsigned int)addr;
force_sig_fault(signo, si_code, addr);
} else {
/* If not due to copy_(to|from)_user, we are doomed */
if (fixup_exception(regs))
return 0;
die(str, regs, (unsigned long)addr);
}
return 1;
}
#define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long address, struct pt_regs *regs) \
{ \
return unhandled_exception(str, regs, signr, sicode, \
(void __user *)address); \
}
/*
* Entry points for exceptions NOT needing specific handling
*/
DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)
/*
* Entry Point for Misaligned Data access Exception, for emulating in software
*/
int do_misaligned_access(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
/* If emulation not enabled, or failed, kill the task */
if (misaligned_fixup(address, regs, cregs) != 0)
return do_misaligned_error(address, regs);
return 0;
}
/*
 * Entry point for miscellaneous errors such as Nested Exceptions
 * -Duplicate TLB entry is handled separately though
*/
void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
{
die("Unhandled Machine Check Exception", regs, address);
}
/*
* Entry point for traps induced by ARCompact TRAP_S <n> insn
* This is same family as TRAP0/SWI insn (use the same vector).
* The only difference being SWI insn take no operand, while TRAP_S does
* which reflects in ECR Reg as 8 bit param.
* Thus TRAP_S <n> can be used for specific purpose
* -1 used for software breakpointing (gdb)
* -2 used by kprobes
 * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggles such as
* -fno-isolate-erroneous-paths-dereference
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
switch (regs->ecr.param) {
case 1:
trap_is_brkpt(address, regs);
break;
case 2:
trap_is_kprobe(address, regs);
break;
case 3:
case 4:
kgdb_trap(regs);
break;
case 5:
do_trap5_error(address, regs);
break;
default:
break;
}
}
/*
* Entry point for Instruction Error Exception
* -For a corner case, ARC kprobes implementation resorts to using
* this exception, hence the check
*/
void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)
{
int rc;
/* Check if this exception is caused by kprobes */
rc = notify_die(DIE_IERR, "kprobe_ierr", regs, address, 0, SIGILL);
if (rc == NOTIFY_STOP)
return;
insterror_is_error(address, regs);
}
/*
* abort() call generated by older gcc for __builtin_trap()
*/
void abort(void)
{
__asm__ __volatile__("trap_s 5\n");
}
| linux-master | arch/arc/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <linux/ptrace.h>
#include <asm/hardirq.h>
#include <asm/page.h>
int main(void)
{
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
BLANK();
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
DEFINE(THREAD_INFO_KSP, offsetof(struct thread_info, ksp));
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
BLANK();
DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
DEFINE(TASK_COMM, offsetof(struct task_struct, comm));
DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
DEFINE(PT_event, offsetof(struct pt_regs, ecr));
DEFINE(PT_bta, offsetof(struct pt_regs, bta));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
DEFINE(PT_r2, offsetof(struct pt_regs, r2));
DEFINE(PT_r3, offsetof(struct pt_regs, r3));
DEFINE(PT_r4, offsetof(struct pt_regs, r4));
DEFINE(PT_r5, offsetof(struct pt_regs, r5));
DEFINE(PT_r6, offsetof(struct pt_regs, r6));
DEFINE(PT_r7, offsetof(struct pt_regs, r7));
DEFINE(PT_r8, offsetof(struct pt_regs, r8));
DEFINE(PT_r10, offsetof(struct pt_regs, r10));
DEFINE(PT_r26, offsetof(struct pt_regs, r26));
DEFINE(PT_ret, offsetof(struct pt_regs, ret));
DEFINE(PT_blink, offsetof(struct pt_regs, blink));
OFFSET(PT_fp, pt_regs, fp);
DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
#ifdef CONFIG_ISA_ARCV2
OFFSET(PT_r12, pt_regs, r12);
OFFSET(PT_r30, pt_regs, r30);
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
OFFSET(PT_r58, pt_regs, r58);
OFFSET(PT_r59, pt_regs, r59);
#endif
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
#endif
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
return 0;
}
| linux-master | arch/arc/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/unwind.h>
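/*
 * ARC stores 32-bit literals in the instruction stream "middle-endian":
 * most significant half-word first. This helper patches such a literal,
 * as needed by the R_ARC_32_ME relocation handled below.
 */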
static inline void arc_write_me(unsigned short *addr, unsigned long value)
{
*addr = (value & 0xffff0000) >> 16;
*(addr + 1) = (value & 0xffff);
}
/*
* This gets called before relocation loop in generic loader
* Make a note of the section index of unwinding section
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
#endif
mod->arch.secstr = secstr;
return 0;
}
void module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
if (mod->arch.unw_info)
unwind_remove_table(mod->arch.unw_info, 0);
#endif
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex, /* sec index for sym tbl */
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
Elf32_Addr relocation, location, tgt_addr;
unsigned int tgtsec;
/*
* @relsec has relocations e.g. .rela.init.text
* @tgtsec is section to patch e.g. .init.text
*/
tgtsec = sechdrs[relsec].sh_info;
tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
pr_debug("\nSection to fixup %s @%x\n",
module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
const char *s;
/* This is where to make the change */
location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
relocation = sym_entry->st_value + rel_entry[i].r_addend;
if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
} else {
s = strtab + sym_entry->st_name;
}
pr_debug(" %x\t%x\t%x %x %x [%s]\n",
rel_entry[i].r_offset, rel_entry[i].r_addend,
sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
		 * so any branches/jumps are absolute 32 bit jmps and
		 * global data access again is abs 32 bit.
		 * Both of these are handled by the same relocation type
*/
relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
if (likely(R_ARC_32_ME == relo_type)) /* ME ( S + A ) */
arc_write_me((unsigned short *)location, relocation);
else if (R_ARC_32 == relo_type) /* ( S + A ) */
*((Elf32_Addr *) location) = relocation;
else if (R_ARC_32_PCREL == relo_type) /* ( S + A ) - PDATA ) */
*((Elf32_Addr *) location) = relocation - location;
else
goto relo_err;
}
#ifdef CONFIG_ARC_DW2_UNWIND
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
#endif
return 0;
relo_err:
pr_err("%s: unknown relocation: %u\n",
module->name, ELF32_R_TYPE(rel_entry[i].r_info));
return -ENOEXEC;
}
/* Just before lift off: After sections have been relocated, we add the
* dwarf section to unwinder table pool
* This couldn't be done in module_frob_arch_sections() because
* relocations had not been applied by then
*/
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
void *unw;
int unwsec = mod->arch.unw_sec_idx;
if (unwsec) {
unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
sechdrs[unwsec].sh_size);
mod->arch.unw_info = unw;
}
#endif
return 0;
}
| linux-master | arch/arc/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
*
* Allows Linux userland access to host in absence of any peripherals.
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/fs.h> /* file_operations */
#include <linux/miscdevice.h>
#include <linux/mm.h> /* VM_IO */
#include <linux/module.h>
#include <linux/uaccess.h>
static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
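/*
 * Map the hostlink buffer into the caller's address space as uncached
 * memory; userspace obtains the buffer address via the ioctl below and
 * passes it as the mmap offset.
 */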
static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot)) {
pr_warn("Hostlink buffer mmap ERROR\n");
return -EAGAIN;
}
return 0;
}
static long arc_hl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
	/* we only support returning the physical addr to mmap in user space */
put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
return 0;
}
static const struct file_operations arc_hl_fops = {
.unlocked_ioctl = arc_hl_ioctl,
.mmap = arc_hl_mmap,
};
static struct miscdevice arc_hl_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "hostlink",
.fops = &arc_hl_fops
};
static int __init arc_hl_init(void)
{
pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
return misc_register(&arc_hl_dev);
}
module_init(arc_hl_init);
| linux-master | arch/arc/kernel/arc_hostlink.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/reboot.h>
#include <linux/pm.h>
void machine_halt(void)
{
/* Halt the processor */
__asm__ __volatile__("flag 1\n");
}
void machine_restart(char *__unused)
{
/* Soft reset : jump to reset vector */
pr_info("Put your restart handler here\n");
machine_halt();
}
void machine_power_off(void)
{
/* FIXME :: power off ??? */
machine_halt();
}
void (*pm_power_off) (void) = NULL;
EXPORT_SYMBOL(pm_power_off);
| linux-master | arch/arc/kernel/reset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
* Copyright (C) 2002-2006 Novell, Inc.
* Jan Beulich <[email protected]>
*
* A simple API for unwinding kernel stacks. This is used for
* debugging and error reporting purposes. The kernel doesn't need
* full-blown stack unwinding with all the bells and whistles, so there
* is not much point in implementing the full Dwarf2 unwind API.
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/unwind.h>
extern char __start_unwind[], __end_unwind[];
/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
/* #define UNWIND_DEBUG */
#ifdef UNWIND_DEBUG
int dbg_unw;
#define unw_debug(fmt, ...) \
do { \
if (dbg_unw) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0);
#else
#define unw_debug(fmt, ...)
#endif
#define MAX_STACK_DEPTH 8
#define EXTRA_INFO(f) { \
BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
% sizeof_field(struct unwind_frame_info, f)) \
+ offsetof(struct unwind_frame_info, f) \
/ sizeof_field(struct unwind_frame_info, f), \
sizeof_field(struct unwind_frame_info, f) \
}
#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
static const struct {
unsigned offs:BITS_PER_LONG / 2;
unsigned width:BITS_PER_LONG / 2;
} reg_info[] = {
UNW_REGISTER_INFO};
#undef PTREGS_INFO
#undef EXTRA_INFO
#ifndef REG_INVALID
#define REG_INVALID(r) (reg_info[r].width == 0)
#endif
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_GNU_window_save 0x2d
#define DW_CFA_GNU_args_size 0x2e
#define DW_CFA_GNU_negative_offset_extended 0x2f
#define DW_CFA_hi_user 0x3f
#define DW_EH_PE_FORM 0x07
#define DW_EH_PE_native 0x00
#define DW_EH_PE_leb128 0x01
#define DW_EH_PE_data2 0x02
#define DW_EH_PE_data4 0x03
#define DW_EH_PE_data8 0x04
#define DW_EH_PE_signed 0x08
#define DW_EH_PE_ADJUST 0x70
#define DW_EH_PE_abs 0x00
#define DW_EH_PE_pcrel 0x10
#define DW_EH_PE_textrel 0x20
#define DW_EH_PE_datarel 0x30
#define DW_EH_PE_funcrel 0x40
#define DW_EH_PE_aligned 0x50
#define DW_EH_PE_indirect 0x80
#define DW_EH_PE_omit 0xff
#define CIE_ID 0
typedef unsigned long uleb128_t;
typedef signed long sleb128_t;
static struct unwind_table {
struct {
unsigned long pc;
unsigned long range;
} core, init;
const void *address;
unsigned long size;
const unsigned char *header;
unsigned long hdrsz;
struct unwind_table *link;
const char *name;
} root_table;
struct unwind_item {
enum item_location {
Nowhere,
Memory,
Register,
Value
} where;
uleb128_t value;
};
struct unwind_state {
uleb128_t loc, org;
const u8 *cieStart, *cieEnd;
uleb128_t codeAlign;
sleb128_t dataAlign;
struct cfa {
uleb128_t reg, offs;
} cfa;
struct unwind_item regs[ARRAY_SIZE(reg_info)];
unsigned stackDepth:8;
unsigned version:8;
const u8 *label;
const u8 *stack[MAX_STACK_DEPTH];
};
static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
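/* Return the unwind table whose core or init range covers @pc, NULL if none */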
static struct unwind_table *find_table(unsigned long pc)
{
struct unwind_table *table;
for (table = &root_table; table; table = table->link)
if ((pc >= table->core.pc
&& pc < table->core.pc + table->core.range)
|| (pc >= table->init.pc
&& pc < table->init.pc + table->init.range))
break;
return table;
}
static unsigned long read_pointer(const u8 **pLoc,
const void *end, signed ptrType);
static void init_unwind_hdr(struct unwind_table *table,
void *(*alloc) (unsigned long));
/*
 * wrappers for header alloc (instead of picking one vs. the other at each
 * call site) to elide section mismatch warnings
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
}
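/*
 * Record the code ranges and .eh_frame data for an unwind table; a linker
 * provided search header is sanity checked here and ignored if it looks
 * malformed.
 */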
static void init_unwind_table(struct unwind_table *table, const char *name,
const void *core_start, unsigned long core_size,
const void *init_start, unsigned long init_size,
const void *table_start, unsigned long table_size,
const u8 *header_start, unsigned long header_size)
{
table->core.pc = (unsigned long)core_start;
table->core.range = core_size;
table->init.pc = (unsigned long)init_start;
table->init.range = init_size;
table->address = table_start;
table->size = table_size;
	/* Avoid doing pointer arithmetic on a NULL header pointer. */
if (header_start != NULL) {
const u8 *ptr = header_start + 4;
const u8 *end = header_start + header_size;
/* See if the linker provided table looks valid. */
if (header_size <= 4
|| header_start[0] != 1
|| (void *)read_pointer(&ptr, end, header_start[1])
!= table_start
|| header_start[2] == DW_EH_PE_omit
|| read_pointer(&ptr, end, header_start[2]) <= 0
|| header_start[3] == DW_EH_PE_omit)
header_start = NULL;
}
table->hdrsz = header_size;
smp_wmb();
table->header = header_start;
table->link = NULL;
table->name = name;
}
void __init arc_unwind_init(void)
{
init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
__start_unwind, __end_unwind - __start_unwind,
NULL, 0);
/*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
init_unwind_hdr(&root_table, unw_hdr_alloc_early);
}
static const u32 bad_cie, not_fde;
static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
static const u32 *__cie_for_fde(const u32 *fde);
static signed fde_pointer_type(const u32 *cie);
struct eh_frame_hdr_table_entry {
unsigned long start, fde;
};
static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
{
const struct eh_frame_hdr_table_entry *e1 = p1;
const struct eh_frame_hdr_table_entry *e2 = p2;
return (e1->start > e2->start) - (e1->start < e2->start);
}
static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
{
struct eh_frame_hdr_table_entry *e1 = p1;
struct eh_frame_hdr_table_entry *e2 = p2;
swap(e1->start, e2->start);
swap(e1->fde, e2->fde);
}
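/*
 * Build a binary-search header for @table from its FDEs (encodings plus
 * (initial location, fde) pairs sorted by address), mirroring the
 * .eh_frame_hdr format a linker would have produced.
 */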
static void init_unwind_hdr(struct unwind_table *table,
void *(*alloc) (unsigned long))
{
const u8 *ptr;
unsigned long tableSize = table->size, hdrSize;
unsigned int n;
const u32 *fde;
struct {
u8 version;
u8 eh_frame_ptr_enc;
u8 fde_count_enc;
u8 table_enc;
unsigned long eh_frame_ptr;
unsigned int fde_count;
struct eh_frame_hdr_table_entry table[];
} __attribute__ ((__packed__)) *header;
if (table->header)
return;
if (table->hdrsz)
pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
table->name);
if (tableSize & (sizeof(*fde) - 1))
return;
for (fde = table->address, n = 0;
tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
const u32 *cie = cie_for_fde(fde, table);
signed ptrType;
if (cie == ¬_fde)
continue;
if (cie == NULL || cie == &bad_cie)
goto ret_err;
ptrType = fde_pointer_type(cie);
if (ptrType < 0)
goto ret_err;
ptr = (const u8 *)(fde + 2);
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
ptrType)) {
/* FIXME_Rajesh We have 4 instances of null addresses
* instead of the initial loc addr
* return;
*/
WARN(1, "unwinder: FDE->initial_location NULL %p\n",
(const u8 *)(fde + 1) + *fde);
}
++n;
}
if (tableSize || !n)
goto ret_err;
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ 2 * n * sizeof(unsigned long);
header = alloc(hdrSize);
if (!header)
goto ret_err;
header->version = 1;
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
% __alignof(typeof(header->fde_count)));
header->fde_count = n;
BUILD_BUG_ON(offsetof(typeof(*header), table)
% __alignof(typeof(*header->table)));
for (fde = table->address, tableSize = table->size, n = 0;
tableSize;
tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
const u32 *cie = __cie_for_fde(fde);
if (fde[1] == CIE_ID)
continue; /* this is a CIE */
ptr = (const u8 *)(fde + 2);
header->table[n].start = read_pointer(&ptr,
(const u8 *)(fde + 1) +
*fde,
fde_pointer_type(cie));
header->table[n].fde = (unsigned long)fde;
++n;
}
WARN_ON(n != header->fde_count);
sort(header->table,
n,
sizeof(*header->table),
cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
table->hdrsz = hdrSize;
smp_wmb();
table->header = (const void *)header;
return;
ret_err:
panic("Attention !!! Dwarf FDE parsing errors\n");
}
#ifdef CONFIG_MODULES
static void *unw_hdr_alloc(unsigned long sz)
{
return kmalloc(sz, GFP_KERNEL);
}
static struct unwind_table *last_table;
/* Must be called with module_mutex held. */
void *unwind_add_table(struct module *module, const void *table_start,
unsigned long table_size)
{
struct unwind_table *table;
struct module_memory *core_text;
struct module_memory *init_text;
if (table_size <= 0)
return NULL;
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
core_text = &module->mem[MOD_TEXT];
init_text = &module->mem[MOD_INIT_TEXT];
init_unwind_table(table, module->name, core_text->base, core_text->size,
init_text->base, init_text->size, table_start, table_size, NULL, 0);
init_unwind_hdr(table, unw_hdr_alloc);
#ifdef UNWIND_DEBUG
unw_debug("Table added for [%s] %lx %lx\n",
module->name, table->core.pc, table->core.range);
#endif
if (last_table)
last_table->link = table;
else
root_table.link = table;
last_table = table;
return table;
}
struct unlink_table_info {
struct unwind_table *table;
int init_only;
};
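/*
 * Unlink info->table from the table list, or merely forget its init range
 * when only the module's init text is going away.
 */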
static int unlink_table(void *arg)
{
struct unlink_table_info *info = arg;
struct unwind_table *table = info->table, *prev;
for (prev = &root_table; prev->link && prev->link != table;
prev = prev->link)
;
if (prev->link) {
if (info->init_only) {
table->init.pc = 0;
table->init.range = 0;
info->table = NULL;
} else {
prev->link = table->link;
if (!prev->link)
last_table = prev;
}
} else
info->table = NULL;
return 0;
}
/* Must be called with module_mutex held. */
void unwind_remove_table(void *handle, int init_only)
{
struct unwind_table *table = handle;
struct unlink_table_info info;
if (!table || table == &root_table)
return;
if (init_only && table == last_table) {
table->init.pc = 0;
table->init.range = 0;
return;
}
info.table = table;
info.init_only = init_only;
unlink_table(&info); /* XXX: SMP */
kfree(table->header);
kfree(table);
}
#endif /* CONFIG_MODULES */
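/* Decode a DWARF unsigned LEB128 value at *pcur (bounded by end), advancing *pcur */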
static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
{
const u8 *cur = *pcur;
uleb128_t value;
unsigned int shift;
for (shift = 0, value = 0; cur < end; shift += 7) {
if (shift + 7 > 8 * sizeof(value)
&& (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
cur = end + 1;
break;
}
value |= (uleb128_t) (*cur & 0x7f) << shift;
if (!(*cur++ & 0x80))
break;
}
*pcur = cur;
return value;
}
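/* Signed LEB128 variant: as above, but sign-extends from bit 6 of the last byte */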
static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
{
const u8 *cur = *pcur;
sleb128_t value;
unsigned int shift;
for (shift = 0, value = 0; cur < end; shift += 7) {
if (shift + 7 > 8 * sizeof(value)
&& (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
cur = end + 1;
break;
}
value |= (sleb128_t) (*cur & 0x7f) << shift;
if (!(*cur & 0x80)) {
value |= -(*cur++ & 0x40) << shift;
break;
}
}
*pcur = cur;
return value;
}
static const u32 *__cie_for_fde(const u32 *fde)
{
const u32 *cie;
cie = fde + 1 - fde[1] / sizeof(*fde);
return cie;
}
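/*
 * Validate @fde and return the CIE it refers to: &not_fde if the entry is
 * itself a CIE, &bad_cie or NULL if the FDE/CIE looks malformed.
 */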
static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
{
const u32 *cie;
if (!*fde || (*fde & (sizeof(*fde) - 1)))
return &bad_cie;
if (fde[1] == CIE_ID)
return ¬_fde; /* this is a CIE */
if ((fde[1] & (sizeof(*fde) - 1)))
/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
return NULL; /* this is not a valid FDE */
cie = __cie_for_fde(fde);
if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
|| (*cie & (sizeof(*cie) - 1))
|| (cie[1] != CIE_ID))
return NULL; /* this is not a (valid) CIE */
return cie;
}
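/*
 * Read a pointer encoded per the DW_EH_PE_* scheme given in @ptrType from
 * *pLoc (bounded by @end); returns 0 and leaves *pLoc untouched on failure.
 */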
static unsigned long read_pointer(const u8 **pLoc, const void *end,
signed ptrType)
{
unsigned long value = 0;
union {
const u8 *p8;
const u16 *p16u;
const s16 *p16s;
const u32 *p32u;
const s32 *p32s;
const unsigned long *pul;
} ptr;
if (ptrType < 0 || ptrType == DW_EH_PE_omit)
return 0;
ptr.p8 = *pLoc;
switch (ptrType & DW_EH_PE_FORM) {
case DW_EH_PE_data2:
if (end < (const void *)(ptr.p16u + 1))
return 0;
if (ptrType & DW_EH_PE_signed)
value = get_unaligned((u16 *) ptr.p16s++);
else
value = get_unaligned((u16 *) ptr.p16u++);
break;
case DW_EH_PE_data4:
#ifdef CONFIG_64BIT
if (end < (const void *)(ptr.p32u + 1))
return 0;
if (ptrType & DW_EH_PE_signed)
value = get_unaligned(ptr.p32s++);
else
value = get_unaligned(ptr.p32u++);
break;
case DW_EH_PE_data8:
BUILD_BUG_ON(sizeof(u64) != sizeof(value));
#else
BUILD_BUG_ON(sizeof(u32) != sizeof(value));
#endif
fallthrough;
case DW_EH_PE_native:
if (end < (const void *)(ptr.pul + 1))
return 0;
value = get_unaligned((unsigned long *)ptr.pul++);
break;
case DW_EH_PE_leb128:
BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
: get_uleb128(&ptr.p8, end);
if ((const void *)ptr.p8 > end)
return 0;
break;
default:
return 0;
}
switch (ptrType & DW_EH_PE_ADJUST) {
case DW_EH_PE_abs:
break;
case DW_EH_PE_pcrel:
value += (unsigned long)*pLoc;
break;
default:
return 0;
}
if ((ptrType & DW_EH_PE_indirect)
&& __get_user(value, (unsigned long __user *)value))
return 0;
*pLoc = ptr.p8;
return value;
}
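/*
 * Parse a CIE's augmentation data and return the pointer encoding ('R') its
 * FDEs use, or -1 if the augmentation string is unsupported.
 */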
static signed fde_pointer_type(const u32 *cie)
{
const u8 *ptr = (const u8 *)(cie + 2);
unsigned int version = *ptr;
if (*++ptr) {
const char *aug;
const u8 *end = (const u8 *)(cie + 1) + *cie;
uleb128_t len;
/* check if augmentation size is first (and thus present) */
if (*ptr != 'z')
return -1;
/* check if augmentation string is nul-terminated */
aug = (const void *)ptr;
ptr = memchr(aug, 0, end - ptr);
if (ptr == NULL)
return -1;
++ptr; /* skip terminator */
get_uleb128(&ptr, end); /* skip code alignment */
get_sleb128(&ptr, end); /* skip data alignment */
/* skip return address column */
version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
len = get_uleb128(&ptr, end); /* augmentation length */
if (ptr + len < ptr || ptr + len > end)
return -1;
end = ptr + len;
while (*++aug) {
if (ptr >= end)
return -1;
switch (*aug) {
case 'L':
++ptr;
break;
case 'P':{
signed ptrType = *ptr++;
if (!read_pointer(&ptr, end, ptrType)
|| ptr > end)
return -1;
}
break;
case 'R':
return *ptr;
default:
return -1;
}
}
}
return DW_EH_PE_native | DW_EH_PE_abs;
}
static int advance_loc(unsigned long delta, struct unwind_state *state)
{
state->loc += delta * state->codeAlign;
/* FIXME_Rajesh: Probably we are defining for the initial range as well;
return delta > 0;
*/
unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
return 1;
}
static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
struct unwind_state *state)
{
if (reg < ARRAY_SIZE(state->regs)) {
state->regs[reg].where = where;
state->regs[reg].value = value;
#ifdef UNWIND_DEBUG
unw_debug("r%lu: ", reg);
switch (where) {
case Nowhere:
unw_debug("s ");
break;
case Memory:
unw_debug("c(%lu) ", value);
break;
case Register:
unw_debug("r(%lu) ", value);
break;
case Value:
unw_debug("v(%lu) ", value);
break;
default:
break;
}
#endif
}
}
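/*
 * Interpret the CFI program in [start, end): replay the CIE's initial
 * instructions first, then apply the FDE's rules until @targetLoc is
 * covered, updating the register rules and CFA in @state.
 */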
static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
signed ptrType, struct unwind_state *state)
{
union {
const u8 *p8;
const u16 *p16;
const u32 *p32;
} ptr;
int result = 1;
u8 opcode;
if (start != state->cieStart) {
state->loc = state->org;
result =
processCFI(state->cieStart, state->cieEnd, 0, ptrType,
state);
if (targetLoc == 0 && state->label == NULL)
return result;
}
for (ptr.p8 = start; result && ptr.p8 < end;) {
switch (*ptr.p8 >> 6) {
uleb128_t value;
case 0:
opcode = *ptr.p8++;
switch (opcode) {
case DW_CFA_nop:
unw_debug("cfa nop ");
break;
case DW_CFA_set_loc:
state->loc = read_pointer(&ptr.p8, end,
ptrType);
if (state->loc == 0)
result = 0;
unw_debug("cfa_set_loc: 0x%lx ", state->loc);
break;
case DW_CFA_advance_loc1:
unw_debug("\ncfa advance loc1:");
result = ptr.p8 < end
&& advance_loc(*ptr.p8++, state);
break;
case DW_CFA_advance_loc2:
value = *ptr.p8++;
value += *ptr.p8++ << 8;
unw_debug("\ncfa advance loc2:");
result = ptr.p8 <= end + 2
/* && advance_loc(*ptr.p16++, state); */
&& advance_loc(value, state);
break;
case DW_CFA_advance_loc4:
unw_debug("\ncfa advance loc4:");
result = ptr.p8 <= end + 4
&& advance_loc(*ptr.p32++, state);
break;
case DW_CFA_offset_extended:
value = get_uleb128(&ptr.p8, end);
unw_debug("cfa_offset_extended: ");
set_rule(value, Memory,
get_uleb128(&ptr.p8, end), state);
break;
case DW_CFA_val_offset:
value = get_uleb128(&ptr.p8, end);
set_rule(value, Value,
get_uleb128(&ptr.p8, end), state);
break;
case DW_CFA_offset_extended_sf:
value = get_uleb128(&ptr.p8, end);
set_rule(value, Memory,
get_sleb128(&ptr.p8, end), state);
break;
case DW_CFA_val_offset_sf:
value = get_uleb128(&ptr.p8, end);
set_rule(value, Value,
get_sleb128(&ptr.p8, end), state);
break;
case DW_CFA_restore_extended:
unw_debug("cfa_restore_extended: ");
case DW_CFA_undefined:
unw_debug("cfa_undefined: ");
case DW_CFA_same_value:
unw_debug("cfa_same_value: ");
set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
state);
break;
case DW_CFA_register:
unw_debug("cfa_register: ");
value = get_uleb128(&ptr.p8, end);
set_rule(value,
Register,
get_uleb128(&ptr.p8, end), state);
break;
case DW_CFA_remember_state:
unw_debug("cfa_remember_state: ");
if (ptr.p8 == state->label) {
state->label = NULL;
return 1;
}
if (state->stackDepth >= MAX_STACK_DEPTH)
return 0;
state->stack[state->stackDepth++] = ptr.p8;
break;
case DW_CFA_restore_state:
unw_debug("cfa_restore_state: ");
if (state->stackDepth) {
const uleb128_t loc = state->loc;
const u8 *label = state->label;
state->label =
state->stack[state->stackDepth - 1];
memcpy(&state->cfa, &badCFA,
sizeof(state->cfa));
memset(state->regs, 0,
sizeof(state->regs));
state->stackDepth = 0;
result =
processCFI(start, end, 0, ptrType,
state);
state->loc = loc;
state->label = label;
} else
return 0;
break;
case DW_CFA_def_cfa:
state->cfa.reg = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
fallthrough;
case DW_CFA_def_cfa_offset:
state->cfa.offs = get_uleb128(&ptr.p8, end);
unw_debug("cfa_def_cfa_offset: 0x%lx ",
state->cfa.offs);
break;
case DW_CFA_def_cfa_sf:
state->cfa.reg = get_uleb128(&ptr.p8, end);
fallthrough;
case DW_CFA_def_cfa_offset_sf:
state->cfa.offs = get_sleb128(&ptr.p8, end)
* state->dataAlign;
break;
case DW_CFA_def_cfa_register:
unw_debug("cfa_def_cfa_register: ");
state->cfa.reg = get_uleb128(&ptr.p8, end);
break;
/*todo case DW_CFA_def_cfa_expression: */
/*todo case DW_CFA_expression: */
/*todo case DW_CFA_val_expression: */
case DW_CFA_GNU_args_size:
get_uleb128(&ptr.p8, end);
break;
case DW_CFA_GNU_negative_offset_extended:
value = get_uleb128(&ptr.p8, end);
set_rule(value,
Memory,
(uleb128_t) 0 - get_uleb128(&ptr.p8,
end),
state);
break;
case DW_CFA_GNU_window_save:
default:
unw_debug("UNKNOWN OPCODE 0x%x\n", opcode);
result = 0;
break;
}
break;
case 1:
unw_debug("\ncfa_adv_loc: ");
result = advance_loc(*ptr.p8++ & 0x3f, state);
break;
case 2:
unw_debug("cfa_offset: ");
value = *ptr.p8++ & 0x3f;
set_rule(value, Memory, get_uleb128(&ptr.p8, end),
state);
break;
case 3:
unw_debug("cfa_restore: ");
set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
break;
}
if (ptr.p8 > end)
result = 0;
if (result && targetLoc != 0 && targetLoc < state->loc)
return 1;
}
return result && ptr.p8 == end && (targetLoc == 0 || (
/*todo While in theory this should apply, gcc in practice omits
everything past the function prolog, and hence the location
never reaches the end of the function.
targetLoc < state->loc && */ state->label == NULL));
}
/* Unwind to the previous frame. Returns 0 if successful, negative
 * number in case of an error. */
int arc_unwind(struct unwind_frame_info *frame)
{
#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
const u32 *fde = NULL, *cie = NULL;
const u8 *ptr = NULL, *end = NULL;
unsigned long pc = UNW_PC(frame) - frame->call_frame;
unsigned long startLoc = 0, endLoc = 0, cfa;
unsigned int i;
signed ptrType = -1;
uleb128_t retAddrReg = 0;
const struct unwind_table *table;
struct unwind_state state;
unsigned long *fptr;
unsigned long addr;
unw_debug("\n\nUNWIND FRAME:\n");
unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
UNW_FP(frame));
if (UNW_PC(frame) == 0)
return -EINVAL;
#ifdef UNWIND_DEBUG
{
unsigned long *sptr = (unsigned long *)UNW_SP(frame);
unw_debug("\nStack Dump:\n");
for (i = 0; i < 20; i++, sptr++)
unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
unw_debug("\n");
}
#endif
table = find_table(pc);
if (table != NULL
&& !(table->size & (sizeof(*fde) - 1))) {
const u8 *hdr = table->header;
unsigned long tableSize;
smp_rmb();
if (hdr && hdr[0] == 1) {
switch (hdr[3] & DW_EH_PE_FORM) {
case DW_EH_PE_native:
tableSize = sizeof(unsigned long);
break;
case DW_EH_PE_data2:
tableSize = 2;
break;
case DW_EH_PE_data4:
tableSize = 4;
break;
case DW_EH_PE_data8:
tableSize = 8;
break;
default:
tableSize = 0;
break;
}
ptr = hdr + 4;
end = hdr + table->hdrsz;
if (tableSize && read_pointer(&ptr, end, hdr[1])
== (unsigned long)table->address
&& (i = read_pointer(&ptr, end, hdr[2])) > 0
&& i == (end - ptr) / (2 * tableSize)
&& !((end - ptr) % (2 * tableSize))) {
do {
const u8 *cur =
ptr + (i / 2) * (2 * tableSize);
startLoc = read_pointer(&cur,
cur + tableSize,
hdr[3]);
if (pc < startLoc)
i /= 2;
else {
ptr = cur - tableSize;
i = (i + 1) / 2;
}
} while (startLoc && i > 1);
if (i == 1
&& (startLoc = read_pointer(&ptr,
ptr + tableSize,
hdr[3])) != 0
&& pc >= startLoc)
fde = (void *)read_pointer(&ptr,
ptr +
tableSize,
hdr[3]);
}
}
if (fde != NULL) {
cie = cie_for_fde(fde, table);
ptr = (const u8 *)(fde + 2);
if (cie != NULL
&& cie != &bad_cie
&& cie != ¬_fde
&& (ptrType = fde_pointer_type(cie)) >= 0
&& read_pointer(&ptr,
(const u8 *)(fde + 1) + *fde,
ptrType) == startLoc) {
if (!(ptrType & DW_EH_PE_indirect))
ptrType &=
DW_EH_PE_FORM | DW_EH_PE_signed;
endLoc =
startLoc + read_pointer(&ptr,
(const u8 *)(fde +
1) +
*fde, ptrType);
if (pc >= endLoc) {
fde = NULL;
cie = NULL;
}
} else {
fde = NULL;
cie = NULL;
}
}
}
if (cie != NULL) {
memset(&state, 0, sizeof(state));
state.cieEnd = ptr; /* keep here temporarily */
ptr = (const u8 *)(cie + 2);
end = (const u8 *)(cie + 1) + *cie;
frame->call_frame = 1;
if (*++ptr) {
/* check if augmentation size is first (thus present) */
if (*ptr == 'z') {
while (++ptr < end && *ptr) {
switch (*ptr) {
/* chk for ignorable or already handled
* nul-terminated augmentation string */
case 'L':
case 'P':
case 'R':
continue;
case 'S':
frame->call_frame = 0;
continue;
default:
break;
}
break;
}
}
if (ptr >= end || *ptr)
cie = NULL;
}
++ptr;
}
if (cie != NULL) {
/* get code alignment factor */
state.codeAlign = get_uleb128(&ptr, end);
/* get data alignment factor */
state.dataAlign = get_sleb128(&ptr, end);
if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
cie = NULL;
else {
retAddrReg =
state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
end);
unw_debug("CIE Frame Info:\n");
unw_debug("return Address register 0x%lx\n",
retAddrReg);
unw_debug("data Align: %ld\n", state.dataAlign);
unw_debug("code Align: %lu\n", state.codeAlign);
/* skip augmentation */
if (((const char *)(cie + 2))[1] == 'z') {
uleb128_t augSize = get_uleb128(&ptr, end);
ptr += augSize;
}
if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
|| REG_INVALID(retAddrReg)
|| reg_info[retAddrReg].width !=
sizeof(unsigned long))
cie = NULL;
}
}
if (cie != NULL) {
state.cieStart = ptr;
ptr = state.cieEnd;
state.cieEnd = end;
end = (const u8 *)(fde + 1) + *fde;
/* skip augmentation */
if (((const char *)(cie + 2))[1] == 'z') {
uleb128_t augSize = get_uleb128(&ptr, end);
if ((ptr += augSize) > end)
fde = NULL;
}
}
if (cie == NULL || fde == NULL) {
#ifdef CONFIG_FRAME_POINTER
unsigned long top, bottom;
top = STACK_TOP_UNW(frame->task);
bottom = STACK_BOTTOM_UNW(frame->task);
#if FRAME_RETADDR_OFFSET < 0
if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
&& bottom < UNW_FP(frame)
#else
if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
&& bottom > UNW_FP(frame)
#endif
&& !((UNW_SP(frame) | UNW_FP(frame))
& (sizeof(unsigned long) - 1))) {
unsigned long link;
if (!__get_user(link, (unsigned long *)
(UNW_FP(frame) + FRAME_LINK_OFFSET))
#if FRAME_RETADDR_OFFSET < 0
&& link > bottom && link < UNW_FP(frame)
#else
&& link > UNW_FP(frame) && link < bottom
#endif
&& !(link & (sizeof(link) - 1))
&& !__get_user(UNW_PC(frame),
(unsigned long *)(UNW_FP(frame)
+ FRAME_RETADDR_OFFSET)))
{
UNW_SP(frame) =
UNW_FP(frame) + FRAME_RETADDR_OFFSET
#if FRAME_RETADDR_OFFSET < 0
-
#else
+
#endif
sizeof(UNW_PC(frame));
UNW_FP(frame) = link;
return 0;
}
}
#endif
return -ENXIO;
}
state.org = startLoc;
memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
unw_debug("\nProcess instructions\n");
/* process instructions
* For ARC, we optimize by having blink(retAddrReg) with
* the sameValue in the leaf function, so we should not check
* state.regs[retAddrReg].where == Nowhere
*/
if (!processCFI(ptr, end, pc, ptrType, &state)
|| state.loc > endLoc
/* || state.regs[retAddrReg].where == Nowhere */
|| state.cfa.reg >= ARRAY_SIZE(reg_info)
|| reg_info[state.cfa.reg].width != sizeof(unsigned long)
|| state.cfa.offs % sizeof(unsigned long))
return -EIO;
#ifdef UNWIND_DEBUG
unw_debug("\n");
unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
if (REG_INVALID(i))
continue;
switch (state.regs[i].where) {
case Nowhere:
break;
case Memory:
unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
break;
case Register:
unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
break;
case Value:
unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
break;
}
}
unw_debug("\n");
#endif
/* update frame */
if (frame->call_frame
&& !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
frame->call_frame = 0;
cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
startLoc = min(STACK_LIMIT(cfa), cfa);
endLoc = max(STACK_LIMIT(cfa), cfa);
}
unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
state.cfa.reg, state.cfa.offs, cfa);
for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
if (REG_INVALID(i)) {
if (state.regs[i].where == Nowhere)
continue;
return -EIO;
}
switch (state.regs[i].where) {
default:
break;
case Register:
if (state.regs[i].value >= ARRAY_SIZE(reg_info)
|| REG_INVALID(state.regs[i].value)
|| reg_info[i].width >
reg_info[state.regs[i].value].width)
return -EIO;
switch (reg_info[state.regs[i].value].width) {
case sizeof(u8):
state.regs[i].value =
FRAME_REG(state.regs[i].value, const u8);
break;
case sizeof(u16):
state.regs[i].value =
FRAME_REG(state.regs[i].value, const u16);
break;
case sizeof(u32):
state.regs[i].value =
FRAME_REG(state.regs[i].value, const u32);
break;
#ifdef CONFIG_64BIT
case sizeof(u64):
state.regs[i].value =
FRAME_REG(state.regs[i].value, const u64);
break;
#endif
default:
return -EIO;
}
break;
}
}
unw_debug("\nRegister state after evaluation with realtime Stack:\n");
fptr = (unsigned long *)(&frame->regs);
for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
if (REG_INVALID(i))
continue;
switch (state.regs[i].where) {
case Nowhere:
if (reg_info[i].width != sizeof(UNW_SP(frame))
|| &FRAME_REG(i, __typeof__(UNW_SP(frame)))
!= &UNW_SP(frame))
continue;
UNW_SP(frame) = cfa;
break;
case Register:
switch (reg_info[i].width) {
case sizeof(u8):
FRAME_REG(i, u8) = state.regs[i].value;
break;
case sizeof(u16):
FRAME_REG(i, u16) = state.regs[i].value;
break;
case sizeof(u32):
FRAME_REG(i, u32) = state.regs[i].value;
break;
#ifdef CONFIG_64BIT
case sizeof(u64):
FRAME_REG(i, u64) = state.regs[i].value;
break;
#endif
default:
return -EIO;
}
break;
case Value:
if (reg_info[i].width != sizeof(unsigned long))
return -EIO;
FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
* state.dataAlign;
break;
case Memory:
addr = cfa + state.regs[i].value * state.dataAlign;
if ((state.regs[i].value * state.dataAlign)
% sizeof(unsigned long)
|| addr < startLoc
|| addr + sizeof(unsigned long) < addr
|| addr + sizeof(unsigned long) > endLoc)
return -EIO;
switch (reg_info[i].width) {
case sizeof(u8):
__get_user(FRAME_REG(i, u8),
(u8 __user *)addr);
break;
case sizeof(u16):
__get_user(FRAME_REG(i, u16),
(u16 __user *)addr);
break;
case sizeof(u32):
__get_user(FRAME_REG(i, u32),
(u32 __user *)addr);
break;
#ifdef CONFIG_64BIT
case sizeof(u64):
__get_user(FRAME_REG(i, u64),
(u64 __user *)addr);
break;
#endif
default:
return -EIO;
}
break;
}
unw_debug("r%d: 0x%lx ", i, *fptr);
}
return 0;
#undef FRAME_REG
}
EXPORT_SYMBOL(arc_unwind);
| linux-master | arch/arc/kernel/unwind.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/cpu.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>
#include <uapi/linux/mount.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/asserts.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
#include <asm/dsp-impl.h>
#include <soc/arc/mcip.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
unsigned int intr_to_DE_cnt;
/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
int __initdata uboot_magic;
char __initdata *uboot_arg;
const struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc {
int arcver;
unsigned int t0:1, t1:1;
struct {
unsigned long base;
unsigned int sz;
} iccm, dccm;
};
#ifdef CONFIG_ISA_ARCV2
static const struct id_to_str arc_hs_rel[] = {
/* ID.ARCVER, Release */
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
};
static const struct id_to_str arc_hs_ver54_rel[] = {
/* UARCH.MAJOR, Release */
{ 0, "R3.10a"},
{ 1, "R3.50a"},
{ 2, "R3.60a"},
{ 3, "R4.00a"},
{ 0xFF, NULL }
};
#endif
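/*
 * Describe an ARCompact (ARC7xx) core: probe the relevant BCRs into @info
 * and append a human readable summary to @buf.
 */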
static int
arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCOMPACT
char *cpu_nm, *isa_nm = "ARCompact";
struct bcr_fp_arcompact fpu_sp, fpu_dp;
int atomic = 0, be, present;
int bpu_full, bpu_cache, bpu_pred;
struct bcr_bpu_arcompact bpu;
struct bcr_iccm_arcompact iccm;
struct bcr_dccm_arcompact dccm;
struct bcr_generic isa;
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
if (!isa.ver) /* ISA BCR absent, use Kconfig info */
atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
else {
/* ARC700_BUILD only has 2 bits of isa info */
atomic = isa.info & 1;
}
be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
if (info->arcver < 0x34)
cpu_nm = "ARC750";
else
cpu_nm = "ARC770";
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
c, cpu_nm, isa_nm,
IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL1(be, "[Big-Endian]"));
READ_BCR(ARC_REG_FP_BCR, fpu_sp);
READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);
if (fpu_sp.ver | fpu_dp.ver)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
IS_AVAIL1(fpu_sp.ver, "SP "),
IS_AVAIL1(fpu_dp.ver, "DP "));
READ_BCR(ARC_REG_BPU_BCR, bpu);
bpu_full = bpu.fam ? 1 : 0;
bpu_cache = 256 << (bpu.ent - 1);
bpu_pred = 256 << (bpu.ent - 1);
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
IS_AVAIL1(bpu_full, "full"),
IS_AVAIL1(!bpu_full, "partial"),
bpu_cache, bpu_pred);
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
info->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
info->iccm.base = iccm.base << 16;
}
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
unsigned long base;
info->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
info->dccm.base = base & ~0xF;
}
/* ARCompact ISA specific sanity checks */
present = fpu_dp.ver; /* SP has no arch visible regs */
CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
#endif
return n;
}
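/*
 * Describe an ARCv2 (HS38/HS48) core: release and dual-issue detection, ISA
 * extensions, BPU, CCMs and ECC, filling @info and appending to @buf.
 */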
static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
struct bcr_isa_arcv2 isa;
struct bcr_mpy mpy;
struct bcr_fp_arcv2 fpu;
struct bcr_bpu_arcv2 bpu;
struct bcr_lpb lpb;
struct bcr_iccm_arcv2 iccm;
struct bcr_dccm_arcv2 dccm;
struct bcr_erp erp;
/*
* Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
* ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
* releases only update it.
*/
cpu_nm = "HS38";
if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {
const struct id_to_str *tbl;
struct bcr_uarch_build uarch;
READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
if (uarch.maj == tbl->id) {
release = tbl->str;
break;
}
}
if (uarch.prod == 4) {
unsigned int exec_ctrl;
cpu_nm = "HS48";
dual_issue = 1;
/* if dual issue hardware, is it enabled ? */
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
dual_enb = !(exec_ctrl & 1);
}
}
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
c, cpu_nm, release, isa_nm,
IS_AVAIL1(isa.be, "[Big-Endian]"),
IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));
READ_BCR(ARC_REG_MPY_BCR, mpy);
mpy_opt = 2; /* stock MPY/MPYH */
if (mpy.dsp) /* OPT 7-9 */
mpy_opt = mpy.dsp + 6;
scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);
READ_BCR(ARC_REG_FP_V2_BCR, fpu);
n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
IS_AVAIL1(mpy.ver, mpy_nm),
IS_AVAIL1(isa.div_rem, "div_rem "),
IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
IS_AVAIL1(fpu.sp, " sp"),
IS_AVAIL1(fpu.dp, " dp"));
READ_BCR(ARC_REG_BPU_BCR, bpu);
bpu_full = bpu.ft;
bpu_cache = 256 << bpu.bce;
bpu_pred = 2048 << bpu.pte;
bpu_ret_stk = 4 << bpu.rse;
READ_BCR(ARC_REG_LPB_BUILD, lpb);
if (lpb.ver) {
unsigned int ctl;
ctl = read_aux_reg(ARC_REG_LPB_CTRL);
scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
lpb.entries, IS_DISABLED_RUN(!ctl));
}
n += scnprintf(buf + n, len - n,
"BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
IS_AVAIL1(bpu_full, "full"),
IS_AVAIL1(!bpu_full, "partial"),
bpu_cache, bpu_pred, bpu_ret_stk,
lpb_nm);
READ_BCR(ARC_REG_ICCM_BUILD, iccm);
if (iccm.ver) {
unsigned long base;
info->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
if (iccm.sz00 == 0xF && iccm.sz01 > 0)
info->iccm.sz <<= iccm.sz01;
base = read_aux_reg(ARC_REG_AUX_ICCM);
info->iccm.base = base & 0xF0000000;
}
READ_BCR(ARC_REG_DCCM_BUILD, dccm);
if (dccm.ver) {
unsigned long base;
info->dccm.sz = 256 << dccm.sz0;
if (dccm.sz0 == 0xF && dccm.sz1 > 0)
info->dccm.sz <<= dccm.sz1;
base = read_aux_reg(ARC_REG_AUX_DCCM);
info->dccm.base = base & 0xF0000000;
}
/* Error Protection: ECC/Parity */
READ_BCR(ARC_REG_ERP_BUILD, erp);
if (erp.ver) {
struct ctl_erp ctl;
READ_BCR(ARC_REG_ERP_CTRL, ctl);
/* inverted bits: 0 means enabled */
n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
}
/* ARCv2 ISA specific sanity checks */
present = fpu.sp | fpu.dp | mpy.dsp; /* DSP and/or FPU */
CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
dsp_config_check();
#endif
return n;
}
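/*
 * Top level CPU description: identity, ISA specific details, MMU/cache,
 * timers and debug facilities; fills @info and returns @buf for printing.
 */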
static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
struct bcr_identity ident;
struct bcr_timer timer;
struct bcr_generic bcr;
struct mcip_bcr mp;
struct bcr_actionpoint ap;
unsigned long vec_base;
int ap_num, ap_full, smart, rtt, n;
memset(info, 0, sizeof(struct cpuinfo_arc));
READ_BCR(AUX_IDENTITY, ident);
info->arcver = ident.family;
n = scnprintf(buf, len,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
ident.family, ident.cpu_id, ident.chip_id);
if (is_isa_arcompact()) {
n += arcompact_mumbojumbo(c, info, buf + n, len - n);
} else if (is_isa_arcv2()){
n += arcv2_mumbojumbo(c, info, buf + n, len - n);
}
n += arc_mmu_mumbojumbo(c, buf + n, len - n);
n += arc_cache_mumbojumbo(c, buf + n, len - n);
READ_BCR(ARC_REG_TIMERS_BCR, timer);
info->t0 = timer.t0;
info->t1 = timer.t1;
READ_BCR(ARC_REG_MCIP_BCR, mp);
vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
n += scnprintf(buf + n, len - n,
"Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
IS_AVAIL1(timer.t0, "Timer0 "),
IS_AVAIL1(timer.t1, "Timer1 "),
IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
vec_base);
READ_BCR(ARC_REG_AP_BCR, ap);
if (ap.ver) {
ap_num = 2 << ap.num;
ap_full = !ap.min;
}
READ_BCR(ARC_REG_SMART_BCR, bcr);
smart = bcr.ver ? 1 : 0;
READ_BCR(ARC_REG_RTT_BCR, bcr);
rtt = bcr.ver ? 1 : 0;
if (ap.ver | smart | rtt) {
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
IS_AVAIL1(smart, "smaRT "),
IS_AVAIL1(rtt, "RTT "));
if (ap.ver) {
n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
ap_num,
ap_full ? "full":"min");
}
n += scnprintf(buf + n, len - n, "\n");
}
if (info->dccm.sz || info->iccm.sz)
n += scnprintf(buf + n, len - n,
"Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
info->dccm.base, TO_KB(info->dccm.sz),
info->iccm.base, TO_KB(info->iccm.sz));
return buf;
}
void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
{
if (hw_exists && !opt_ena)
pr_warn(" ! Enable %s for working apps\n", opt_name);
else if (!hw_exists && opt_ena)
panic("Disable %s, hardware NOT present\n", opt_name);
}
void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
{
if (!hw_exists && opt_ena)
panic("Disable %s, hardware NOT present\n", opt_name);
}
/*
* ISA agnostic sanity checks
*/
static void arc_chk_core_config(struct cpuinfo_arc *info)
{
if (!info->t0)
panic("Timer0 is not present!\n");
if (!info->t1)
panic("Timer1 is not present!\n");
#ifdef CONFIG_ARC_HAS_DCCM
/*
	 * DCCM can be arbitrarily placed in hardware.
	 * Make sure its placement/sz matches what Linux is built with
*/
if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
panic("Linux built with incorrect DCCM Size\n");
#endif
#ifdef CONFIG_ARC_HAS_ICCM
if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
panic("Linux built with incorrect ICCM Size\n");
#endif
}
/*
* Initialize and setup the processor core
* This is called by all the CPUs thus should not do special case stuff
* such as only for boot CPU etc
*/
void setup_processor(void)
{
struct cpuinfo_arc info;
int c = smp_processor_id();
char str[512];
pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
pr_info("%s", arc_platform_smp_cpuinfo());
arc_chk_core_config(&info);
arc_init_IRQ();
arc_mmu_init();
arc_cache_init();
}
static inline bool uboot_arg_invalid(unsigned long addr)
{
/*
	 * Check that it is an untranslated address (although MMU is not enabled
* yet, it being a high address ensures this is not by fluke)
*/
if (addr < PAGE_OFFSET)
return true;
/* Check that address doesn't clobber resident kernel image */
return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}
#define IGNORE_ARGS "Ignore U-boot args: "
/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE 0
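/*
 * Validate the (tag, magic, arg) triple saved from U-boot registers in
 * head.S and act on it: pick up an external DTB or append to the kernel
 * cmdline, falling back to the embedded DTB otherwise.
 */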
void __init handle_uboot_args(void)
{
bool use_embedded_dtb = true;
bool append_cmdline = false;
/* check that we know this tag */
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_tag != UBOOT_TAG_CMDLINE &&
uboot_tag != UBOOT_TAG_DTB) {
pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
goto ignore_uboot_args;
}
if (uboot_magic != UBOOT_MAGIC_VALUE) {
pr_warn(IGNORE_ARGS "non zero uboot magic\n");
goto ignore_uboot_args;
}
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
goto ignore_uboot_args;
}
/* see if U-boot passed an external Device Tree blob */
if (uboot_tag == UBOOT_TAG_DTB) {
machine_desc = setup_machine_fdt((void *)uboot_arg);
/* external Device Tree blob is invalid - use embedded one */
use_embedded_dtb = !machine_desc;
}
if (uboot_tag == UBOOT_TAG_CMDLINE)
append_cmdline = true;
ignore_uboot_args:
if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
}
/*
* NOTE: @boot_command_line is populated by setup_machine_fdt() so this
* append processing can only happen after.
*/
if (append_cmdline) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
}
void __init setup_arch(char **cmdline_p)
{
handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
/* To force early parsing of things like mem=xxx */
parse_early_param();
/* Platform/board specific: e.g. early console registration */
if (machine_desc->init_early)
machine_desc->init_early();
smp_init_cpus();
setup_processor();
setup_arch_memory();
/* copy flat DT out of .init and then unflatten it */
unflatten_and_copy_device_tree();
	/* Can be an issue if someone passes cmd line arg "ro",
	 * but that is unlikely, so keeping it as is
*/
root_mountflags &= ~MS_RDONLY;
arc_unwind_init();
}
/*
* Called from start_kernel() - boot CPU only
*/
void __init time_init(void)
{
of_clk_init(NULL);
timer_probe();
}
static int __init customize_machine(void)
{
if (machine_desc->init_machine)
machine_desc->init_machine();
return 0;
}
arch_initcall(customize_machine);
static int __init init_late_machine(void)
{
if (machine_desc->init_late)
machine_desc->init_late();
return 0;
}
late_initcall(init_late_machine);
/*
* Get CPU information for use by the procfs.
*/
#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
static int show_cpuinfo(struct seq_file *m, void *v)
{
char *str;
int cpu_id = ptr_to_cpu(v);
struct device *cpu_dev = get_cpu_device(cpu_id);
struct cpuinfo_arc info;
struct clk *cpu_clk;
unsigned long freq = 0;
if (!cpu_online(cpu_id)) {
seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
goto done;
}
str = (char *)__get_free_page(GFP_KERNEL);
if (!str)
goto done;
seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
cpu_id);
} else {
freq = clk_get_rate(cpu_clk);
}
if (freq)
seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
freq / 1000000, (freq / 10000) % 100);
seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
seq_printf(m, arc_platform_smp_cpuinfo());
free_page((unsigned long)str);
done:
seq_printf(m, "\n");
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
/*
* Callback returns cpu-id to iterator for show routine, NULL to stop.
* However since NULL is also a valid cpu-id (0), we use a round-about
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo
};
static DEFINE_PER_CPU(struct cpu, cpu_topology);
static int __init topology_init(void)
{
int cpu;
for_each_present_cpu(cpu)
register_cpu(&per_cpu(cpu_topology, cpu), cpu);
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/arc/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
*
* vineetg : May 2011
* -Adapted (from .26 to .35)
* -original contribution by [email protected]
*/
#include <linux/types.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE 1
#define FIRST_BYTE_16 "swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32 "swape %1, %1\n"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
#define __get8_unaligned_check(val, addr, err) \
__asm__( \
"1: ldb.ab %1, [%2, 1]\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"3: mov %0, 1\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 3b\n" \
" .previous\n" \
: "=r" (err), "=&r" (val), "=r" (addr) \
: "0" (err), "2" (addr))
#define get16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(v, a, err); \
val = v << ((BE) ? 8 : 0); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 0 : 8); \
if (err) \
goto fault; \
} while (0)
#define get32_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v, a = addr; \
__get8_unaligned_check(v, a, err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 16 : 8); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(v, a, err); \
val |= v << ((BE) ? 0 : 24); \
if (err) \
goto fault; \
} while (0)
#define put16_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v = val, a = addr;\
\
__asm__( \
FIRST_BYTE_16 \
"1: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"2: stb %1, [%2]\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"4: mov %0, 1\n" \
" j 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
\
if (err) \
goto fault; \
} while (0)
#define put32_unaligned_check(val, addr) \
do { \
unsigned int err = 0, v = val, a = addr;\
\
__asm__( \
FIRST_BYTE_32 \
"1: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"2: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"3: stb.ab %1, [%2, 1]\n" \
" lsr %1, %1, 8\n" \
"4: stb %1, [%2]\n" \
"5:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
"6: mov %0, 1\n" \
" j 5b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b, 6b\n" \
" .long 2b, 6b\n" \
" .long 3b, 6b\n" \
" .long 4b, 6b\n" \
" .previous\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
\
if (err) \
goto fault; \
} while (0)
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1; /* Enabled by default */
int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
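/*
 * Emulate a misaligned load: perform any address register write-back, read
 * the 16/32-bit value a byte at a time and (unless it was a prefetch)
 * write it to the destination register.
 */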
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
struct callee_regs *cregs)
{
int val;
/* register write back */
if ((state->aa == 1) || (state->aa == 2)) {
set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
if (state->aa == 2)
state->src2 = 0;
}
if (state->zz == 0) {
get32_unaligned_check(val, state->src1 + state->src2);
} else {
get16_unaligned_check(val, state->src1 + state->src2);
if (state->x)
val = (val << 16) >> 16;
}
if (state->pref == 0)
set_reg(state->dest, val, regs, cregs);
return;
fault: state->fault = 1;
}
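/*
 * Emulate a misaligned store: handle the write-back addressing modes, then
 * store the 16/32-bit value a byte at a time.
 */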
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
struct callee_regs *cregs)
{
/* register write back */
if ((state->aa == 1) || (state->aa == 2)) {
set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
if (state->aa == 3)
state->src3 = 0;
} else if (state->aa == 3) {
if (state->zz == 2) {
set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
regs, cregs);
} else if (!state->zz) {
set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
regs, cregs);
} else {
goto fault;
}
}
/* write fix-up */
if (!state->zz)
put32_unaligned_check(state->src1, state->src2 + state->src3);
else
put16_unaligned_check(state->src1, state->src2 + state->src3);
return;
fault: state->fault = 1;
}
/*
* Handle an unaligned access
* Returns 0 if successfully handled, 1 if some error happened
*/
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
struct callee_regs *cregs)
{
struct disasm_state state;
char buf[TASK_COMM_LEN];
/* handle user mode only and only if enabled by sysadmin */
if (!user_mode(regs) || !unaligned_enabled)
return 1;
if (no_unaligned_warning) {
pr_warn_once("%s(%d) made unaligned access which was emulated"
" by kernel assist\n. This can degrade application"
" performance significantly\n. To enable further"
" logging of such instances, please \n"
" echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
get_task_comm(buf, current), task_pid_nr(current));
} else {
/* Add rate limiting if it gets down to it */
pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
get_task_comm(buf, current), task_pid_nr(current),
address, regs->ret);
}
disasm_instr(regs->ret, &state, 1, regs, cregs);
if (state.fault)
goto fault;
/* ldb/stb should not have unaligned exception */
if ((state.zz == 1) || (state.di))
goto fault;
if (!state.write)
fixup_load(&state, regs, cregs);
else
fixup_store(&state, regs, cregs);
if (state.fault)
goto fault;
/* clear any remnants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
/* handle zero-overhead-loop */
if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
regs->ret = regs->lp_start;
regs->lp_count--;
}
}
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
return 0;
fault:
pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
state.words[0], address);
return 1;
}
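/*
* Worked example of the zero-overhead-loop fix-up above (hypothetical
* values): suppose the emulated access was the last instruction of a
* hardware loop, so after "regs->ret += state.instr_len" we have
* ret == lp_end while lp_count is still 3. Simply returning would skip the
* loop-back the hardware normally performs at lp_end, so the code manually
* redirects ret to lp_start and decrements lp_count to 2 - exactly what the
* ZOL hardware would have done had the instruction not trapped.
*/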
| linux-master | arch/arc/kernel/unaligned.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>
/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN 9
/*
* Some ARC pct quirks:
*
* PERF_COUNT_HW_STALLED_CYCLES_BACKEND
* PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
* The ARC 700 can either measure stalls per pipeline stage, or all stalls
* combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
* and all pipeline flushes (e.g. caused by mispredicts, etc.) to
* STALLED_CYCLES_FRONTEND.
*
* We could start multiple performance counters and combine everything
* afterwards, but that makes it complicated.
*
* Note that I$ cache misses aren't counted by either of the two!
*/
/*
* ARC PCT has hardware conditions with fixed "names" but variable "indexes"
* (based on a specific RTL build)
* Below is the static map between perf generic/arc specific event_id and
* h/w condition names.
* At probe time, we loop through each index and find its name to
* complete the mapping of perf event_id to h/w index, as the latter is
* needed to actually program the counter
*/
static const char * const arc_pmu_ev_hw_map[] = {
/* count cycles */
[PERF_COUNT_HW_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
[PERF_COUNT_HW_BUS_CYCLES] = "crun",
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
/* All jump instructions that are taken */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
#else
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
#endif
[PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
[PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
[PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
[PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
[PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
[PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
[PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
[PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */
[PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */
};
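/*
* Example of how the map above is consumed at probe time (hypothetical RTL
* layout): if arc_pmu_device_probe() reads the condition name "crun" at h/w
* index 0 and "dclm" at h/w index 5, arc_pmu_map_hw_event() records
* ev_hw_idx[PERF_COUNT_HW_CPU_CYCLES] = ev_hw_idx[PERF_COUNT_HW_REF_CPU_CYCLES]
* = ev_hw_idx[PERF_COUNT_HW_BUS_CYCLES] = 0 and
* ev_hw_idx[PERF_COUNT_ARC_DCLM] = ev_hw_idx[PERF_COUNT_HW_CACHE_MISSES] = 5,
* so arc_pmu_event_init() can later program the counter with the h/w index.
*/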
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xffff
static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
[C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
[C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
},
/* DTLB LD/ST Miss not segregated by h/w */
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
[C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
enum arc_pmu_attr_groups {
ARCPMU_ATTR_GR_EVENTS,
ARCPMU_ATTR_GR_FORMATS,
ARCPMU_NR_ATTR_GR
};
struct arc_pmu_raw_event_entry {
char name[ARCPMU_EVENT_NAME_LEN];
};
struct arc_pmu {
struct pmu pmu;
unsigned int irq;
int n_counters;
int n_events;
u64 max_period;
int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
struct arc_pmu_raw_event_entry *raw_entry;
struct attribute **attrs;
struct perf_pmu_events_attr *attr;
const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};
struct arc_pmu_cpu {
/*
* A 1 bit for an index indicates that the counter is being used for
* an event. A 0 means that the counter can be used.
*/
unsigned long used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];
/*
* The events that are active on the PMU for the given index.
*/
struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};
struct arc_callchain_trace {
int depth;
void *perf_stuff;
};
static int callchain_trace(unsigned int addr, void *data)
{
struct arc_callchain_trace *ctrl = data;
struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
perf_callchain_store(entry, addr);
if (ctrl->depth++ < 3)
return 0;
return -1;
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct arc_callchain_trace ctrl = {
.depth = 0,
.perf_stuff = entry,
};
arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
/*
* User stack can't be unwound trivially with kernel dwarf unwinder
* So for now just record the user PC
*/
perf_callchain_store(entry, instruction_pointer(regs));
}
static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
/* read counter #idx; note that counter# != event# on ARC! */
static u64 arc_pmu_read_counter(int idx)
{
u32 tmp;
u64 result;
/*
* ARC supports making 'snapshots' of the counters, so we don't
* need to care about counters wrapping to 0 underneath our feet
*/
write_aux_reg(ARC_REG_PCT_INDEX, idx);
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
result |= read_aux_reg(ARC_REG_PCT_SNAPL);
return result;
}
static void arc_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
u64 prev_raw_count = local64_read(&hwc->prev_count);
u64 new_raw_count = arc_pmu_read_counter(idx);
s64 delta = new_raw_count - prev_raw_count;
/*
* We aren't afraid of hwc->prev_count changing beneath our feet
* because there's no way for us to re-enter this function anytime.
*/
local64_set(&hwc->prev_count, new_raw_count);
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
}
static void arc_pmu_read(struct perf_event *event)
{
arc_perf_event_update(event, &event->hw, event->hw.idx);
}
static int arc_pmu_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
int ret;
cache_type = (config >> 0) & 0xff;
cache_op = (config >> 8) & 0xff;
cache_result = (config >> 16) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
if (ret == CACHE_OP_UNSUPPORTED)
return -ENOENT;
pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d \'%s\'\n",
cache_type, cache_op, cache_result, ret,
arc_pmu_ev_hw_map[ret]);
return ret;
}
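/*
* Example decode (relying on the generic perf cache-event encoding, with the
* usual enum values L1D = 0, OP_READ = 0, RESULT_MISS = 1): a request for
* "L1-dcache-load-misses" arrives here as config = 0x10000, which splits into
* type 0 / op 0 / result 1 and maps via arc_pmu_cache_map to
* PERF_COUNT_ARC_DCLM, i.e. the h/w "dclm" condition.
*/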
/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int ret;
if (!is_sampling_event(event)) {
hwc->sample_period = arc_pmu->max_period;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
hwc->config = 0;
if (is_isa_arcv2()) {
/* "exclude user" means "count only kernel" */
if (event->attr.exclude_user)
hwc->config |= ARC_REG_PCT_CONFIG_KERN;
/* "exclude kernel" means "count only user" */
if (event->attr.exclude_kernel)
hwc->config |= ARC_REG_PCT_CONFIG_USER;
}
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
if (event->attr.config >= PERF_COUNT_HW_MAX)
return -ENOENT;
if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
return -ENOENT;
hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
pr_debug("init event %d with h/w %08x \'%s\'\n",
(int)event->attr.config, (int)hwc->config,
arc_pmu_ev_hw_map[event->attr.config]);
return 0;
case PERF_TYPE_HW_CACHE:
ret = arc_pmu_cache_event(event->attr.config);
if (ret < 0)
return ret;
hwc->config |= arc_pmu->ev_hw_idx[ret];
pr_debug("init cache event with h/w %08x \'%s\'\n",
(int)hwc->config, arc_pmu_ev_hw_map[ret]);
return 0;
case PERF_TYPE_RAW:
if (event->attr.config >= arc_pmu->n_events)
return -ENOENT;
hwc->config |= event->attr.config;
pr_debug("init raw event with idx %lld \'%s\'\n",
event->attr.config,
arc_pmu->raw_entry[event->attr.config].name);
return 0;
default:
return -ENOENT;
}
}
/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}
/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
u32 tmp;
tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}
static int arc_pmu_event_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int idx = hwc->idx;
int overflow = 0;
u64 value;
if (unlikely(left <= -period)) {
/* left underflowed by more than period. */
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
} else if (unlikely(left <= 0)) {
/* left underflowed by less than period. */
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
overflow = 1;
}
if (left > arc_pmu->max_period)
left = arc_pmu->max_period;
value = arc_pmu->max_period - left;
local64_set(&hwc->prev_count, value);
/* Select counter */
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* Write value */
write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
perf_event_update_userpage(event);
return overflow;
}
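/*
* Numeric sketch of the reload above (hypothetical 48-bit counters, i.e.
* max_period = 2^47 - 1): if period_left is 10000, the counter is seeded
* with max_period - 10000, so it must count exactly 10000 more events before
* hitting the overflow threshold that arc_pmu_add() programmed into
* ARC_REG_PCT_INT_CNTL/CNTH, at which point the overflow interrupt fires and
* the period is set up again.
*/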
/*
* Assigns hardware counter to hardware condition.
* Note that there is no separate start/stop mechanism;
* stopping is achieved by assigning the 'never' condition
*/
static void arc_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
arc_pmu_event_set_period(event);
/* Enable interrupt for this counter */
if (is_sampling_event(event))
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
/* enable ARC pmu here */
write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */
write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); /* condition */
}
static void arc_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
/* Disable interrupt for this counter */
if (is_sampling_event(event)) {
/*
* Reset the interrupt flag by writing 1. This is required
* to make sure no pending interrupt is left behind.
*/
write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
}
if (!(event->hw.state & PERF_HES_STOPPED)) {
/* stop hw counter here */
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* condition code #0 is always "never" */
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
event->hw.state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) &&
!(event->hw.state & PERF_HES_UPTODATE)) {
arc_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void arc_pmu_del(struct perf_event *event, int flags)
{
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
arc_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, pmu_cpu->used_mask);
pmu_cpu->act_counter[event->hw.idx] = 0;
perf_event_update_userpage(event);
}
/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
struct hw_perf_event *hwc = &event->hw;
int idx;
idx = ffz(pmu_cpu->used_mask[0]);
if (idx == arc_pmu->n_counters)
return -EAGAIN;
__set_bit(idx, pmu_cpu->used_mask);
hwc->idx = idx;
write_aux_reg(ARC_REG_PCT_INDEX, idx);
pmu_cpu->act_counter[idx] = event;
if (is_sampling_event(event)) {
/* Mimic full counter overflow as other arches do */
write_aux_reg(ARC_REG_PCT_INT_CNTL,
lower_32_bits(arc_pmu->max_period));
write_aux_reg(ARC_REG_PCT_INT_CNTH,
upper_32_bits(arc_pmu->max_period));
}
write_aux_reg(ARC_REG_PCT_CONFIG, 0);
write_aux_reg(ARC_REG_PCT_COUNTL, 0);
write_aux_reg(ARC_REG_PCT_COUNTH, 0);
local64_set(&hwc->prev_count, 0);
hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
arc_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
return 0;
}
#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
struct perf_sample_data data;
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
struct pt_regs *regs;
unsigned int active_ints;
int idx;
arc_pmu_disable(&arc_pmu->pmu);
active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
if (!active_ints)
goto done;
regs = get_irq_regs();
do {
struct perf_event *event;
struct hw_perf_event *hwc;
idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
/*
* When the "interrupt active" bit is reset, the corresponding
* "interrupt enable" bit gets automatically reset as well.
* Now we need to re-enable the interrupt for the counter.
*/
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
arc_perf_event_update(event, &event->hw, event->hw.idx);
perf_sample_data_init(&data, 0, hwc->last_period);
if (arc_pmu_event_set_period(event)) {
if (perf_event_overflow(event, &data, regs))
arc_pmu_stop(event, 0);
}
active_ints &= ~BIT(idx);
} while (active_ints);
done:
arc_pmu_enable(&arc_pmu->pmu);
return IRQ_HANDLED;
}
#else
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
return IRQ_NONE;
}
#endif /* CONFIG_ISA_ARCV2 */
static void arc_cpu_pmu_irq_init(void *data)
{
int irq = *(int *)data;
enable_percpu_irq(irq, IRQ_TYPE_NONE);
/* Clear all pending interrupt flags */
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}
/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
&format_attr_event.attr,
NULL,
};
static struct attribute_group arc_pmu_format_attr_gr = {
.name = "format",
.attrs = arc_pmu_format_attrs,
};
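/*
* With the "format" group above and the per-condition "events" group built
* in probe(), the perf tool can name events symbolically, along the lines of
* (illustrative only, condition names depend on the RTL build):
*
* perf stat -e arc_pct/dclm/,arc_pct/imemrdc/ -- ls
*
* The parser turns each of these into a PERF_TYPE_RAW event whose config
* carries the raw h/w condition index in bits 0-14, as declared by
* PMU_FORMAT_ATTR above.
*/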
static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct perf_pmu_events_attr *pmu_attr;
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}
/*
* We don't add attrs here as we don't have a pre-defined list of perf events.
* We will generate and add attrs dynamically in probe() after we read HW
* configuration.
*/
static struct attribute_group arc_pmu_events_attr_gr = {
.name = "events",
};
static void arc_pmu_add_raw_event_attr(int j, char *str)
{
memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
arc_pmu->attr[j].id = j;
arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}
static int arc_pmu_raw_alloc(struct device *dev)
{
arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attr)
return -ENOMEM;
arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attrs)
return -ENOMEM;
arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->raw_entry)
return -ENOMEM;
return 0;
}
static inline bool event_in_hw_event_map(int i, char *name)
{
if (!arc_pmu_ev_hw_map[i])
return false;
if (!strlen(arc_pmu_ev_hw_map[i]))
return false;
if (strcmp(arc_pmu_ev_hw_map[i], name))
return false;
return true;
}
static void arc_pmu_map_hw_event(int j, char *str)
{
int i;
/* See if HW condition has been mapped to a perf event_id */
for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
if (event_in_hw_event_map(i, str)) {
pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
i, str, j);
arc_pmu->ev_hw_idx[i] = j;
}
}
}
static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
int i, has_interrupts, irq = -1;
int counter_size; /* in bits */
union cc_name {
struct {
u32 word0, word1;
char sentinel;
} indiv;
char str[ARCPMU_EVENT_NAME_LEN];
} cc_name;
READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
if (!pct_bcr.v) {
pr_err("This core does not have performance counters!\n");
return -ENODEV;
}
BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
return -EINVAL;
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
return -EINVAL;
arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
return -ENOMEM;
arc_pmu->n_events = cc_bcr.c;
if (arc_pmu_raw_alloc(&pdev->dev))
return -ENOMEM;
has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
arc_pmu->n_counters = pct_bcr.c;
counter_size = 32 + (pct_bcr.s << 4);
arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
arc_pmu->n_counters, counter_size, cc_bcr.c,
has_interrupts ? ", [overflow IRQ support]" : "");
cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
arc_pmu->ev_hw_idx[i] = -1;
/* loop thru all available h/w condition indexes */
for (i = 0; i < cc_bcr.c; i++) {
write_aux_reg(ARC_REG_CC_INDEX, i);
cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
arc_pmu_map_hw_event(i, cc_name.str);
arc_pmu_add_raw_event_attr(i, cc_name.str);
}
arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
arc_pmu->pmu = (struct pmu) {
.pmu_enable = arc_pmu_enable,
.pmu_disable = arc_pmu_disable,
.event_init = arc_pmu_event_init,
.add = arc_pmu_add,
.del = arc_pmu_del,
.start = arc_pmu_start,
.stop = arc_pmu_stop,
.read = arc_pmu_read,
.attr_groups = arc_pmu->attr_groups,
};
if (has_interrupts) {
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
int ret;
arc_pmu->irq = irq;
/* intc map function ensures irq_set_percpu_devid() called */
ret = request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
this_cpu_ptr(&arc_pmu_cpu));
if (!ret)
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
else
irq = -1;
}
}
if (irq == -1)
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
/*
* The perf parser doesn't really like the '-' symbol in event names, so
* let's use '_' in the arc pct name as it becomes the kernel PMU event prefix.
*/
return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}
static const struct of_device_id arc_pmu_match[] = {
{ .compatible = "snps,arc700-pct" },
{ .compatible = "snps,archs-pct" },
{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);
static struct platform_driver arc_pmu_driver = {
.driver = {
.name = "arc-pct",
.of_match_table = of_match_ptr(arc_pmu_match),
},
.probe = arc_pmu_device_probe,
};
module_platform_driver(arc_pmu_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <[email protected]>");
MODULE_DESCRIPTION("ARC PMU driver");
| linux-master | arch/arc/kernel/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include "asm/cacheflush.h"
#define JUMPLABEL_ERR "ARC: jump_label: ERROR: "
/* Halt system on fatal error to make debug easier */
#define arc_jl_fatal(format...) \
({ \
pr_err(JUMPLABEL_ERR format); \
BUG(); \
})
static inline u32 arc_gen_nop(void)
{
/* 1x 32bit NOP in middle endian */
return 0x7000264a;
}
/*
* Atomic update of patched instruction is only available if this
* instruction doesn't cross an L1 cache line boundary. You can read about
* the way we achieve this in arc/include/asm/jump_label.h
*/
static inline void instruction_align_assert(void *addr, int len)
{
unsigned long a = (unsigned long)addr;
if ((a >> L1_CACHE_SHIFT) != ((a + len - 1) >> L1_CACHE_SHIFT))
arc_jl_fatal("instruction (addr %px) cross L1 cache line border",
addr);
}
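/*
* Worked example of the check above (hypothetical address, assuming a
* 64-byte L1 line, i.e. L1_CACHE_SHIFT == 6): patching 4 bytes at
* 0x8000103e touches bytes 0x8000103e..0x80001041, whose line numbers
* (addr >> 6) are 0x2000040 and 0x2000041. Two different lines means the
* store could not be observed atomically, so we bail out loudly.
*/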
/*
* ARCv2 'Branch unconditionally' instruction:
* 00000ssssssssss1SSSSSSSSSSNRtttt
* s S[n:0] lower bits signed immediate (number is bitfield size)
* S S[m:n+1] upper bits signed immediate (number is bitfield size)
* t S[24:21] upper bits signed immediate (branch unconditionally far)
* N N <.d> delay slot mode
* R R Reserved
*/
static inline u32 arc_gen_branch(jump_label_t pc, jump_label_t target)
{
u32 instruction_l, instruction_r;
u32 pcl = pc & GENMASK(31, 2);
u32 u_offset = target - pcl;
u32 s, S, t;
/*
* The offset in a 32-bit branch instruction must fit into s25.
* Something is terribly broken if we get such huge offset within one
* function.
*/
if ((s32)u_offset < -16777216 || (s32)u_offset > 16777214)
arc_jl_fatal("gen branch with offset (%d) not fit in s25",
(s32)u_offset);
/*
* All instructions are aligned to 2 bytes, so we should never get an
* offset here that is not 2-byte aligned.
*/
if (u_offset & 0x1)
arc_jl_fatal("gen branch with offset (%d) unaligned to 2 bytes",
(s32)u_offset);
s = (u_offset >> 1) & GENMASK(9, 0);
S = (u_offset >> 11) & GENMASK(9, 0);
t = (u_offset >> 21) & GENMASK(3, 0);
/* 00000ssssssssss1 */
instruction_l = (s << 1) | 0x1;
/* SSSSSSSSSSNRtttt */
instruction_r = (S << 6) | t;
return (instruction_r << 16) | (instruction_l & GENMASK(15, 0));
}
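/*
* Worked encoding example, taken from the self-test data further down
* ({0x90007514, 0x9000752c, 0x00000019}): pcl = 0x90007514, so
* u_offset = +24. Then s = (24 >> 1) & 0x3ff = 0xc, S = 0, t = 0, giving
* instruction_l = (0xc << 1) | 1 = 0x19 and instruction_r = 0, which packs
* into 0x00000019 - the expected_instr recorded in arcgenbr_test_data.
*/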
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
jump_label_t *instr_addr = (jump_label_t *)entry->code;
u32 instr;
instruction_align_assert(instr_addr, JUMP_LABEL_NOP_SIZE);
if (type == JUMP_LABEL_JMP)
instr = arc_gen_branch(entry->code, entry->target);
else
instr = arc_gen_nop();
WRITE_ONCE(*instr_addr, instr);
flush_icache_range(entry->code, entry->code + JUMP_LABEL_NOP_SIZE);
}
#ifdef CONFIG_ARC_DBG_JUMP_LABEL
#define SELFTEST_MSG "ARC: instruction generation self-test: "
struct arc_gen_branch_testdata {
jump_label_t pc;
jump_label_t target_address;
u32 expected_instr;
};
static __init int branch_gen_test(const struct arc_gen_branch_testdata *test)
{
u32 instr_got;
instr_got = arc_gen_branch(test->pc, test->target_address);
if (instr_got == test->expected_instr)
return 0;
pr_err(SELFTEST_MSG "FAIL:\n arc_gen_branch(0x%08x, 0x%08x) != 0x%08x, got 0x%08x\n",
test->pc, test->target_address,
test->expected_instr, instr_got);
return -EFAULT;
}
/*
* Offset field in branch instruction is not continuous. Test all
* available offset field and sign combinations. Test data is generated
* from real working code.
*/
static const struct arc_gen_branch_testdata arcgenbr_test_data[] __initconst = {
{0x90007548, 0x90007514, 0xffcf07cd}, /* tiny (-52) offs */
{0x9000c9c0, 0x9000c782, 0xffcf05c3}, /* tiny (-574) offs */
{0x9000cc1c, 0x9000c782, 0xffcf0367}, /* tiny (-1178) offs */
{0x9009dce0, 0x9009d106, 0xff8f0427}, /* small (-3034) offs */
{0x9000f5de, 0x90007d30, 0xfc0f0755}, /* big (-30892) offs */
{0x900a2444, 0x90035f64, 0xc9cf0321}, /* huge (-443616) offs */
{0x90007514, 0x9000752c, 0x00000019}, /* tiny (+24) offs */
{0x9001a578, 0x9001a77a, 0x00000203}, /* tiny (+514) offs */
{0x90031ed8, 0x90032634, 0x0000075d}, /* tiny (+1884) offs */
{0x9008c7f2, 0x9008d3f0, 0x00400401}, /* small (+3072) offs */
{0x9000bb38, 0x9003b340, 0x17c00009}, /* big (+194568) offs */
{0x90008f44, 0x90578d80, 0xb7c2063d} /* huge (+5701180) offs */
};
static __init int instr_gen_test(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(arcgenbr_test_data); i++)
if (branch_gen_test(&arcgenbr_test_data[i]))
return -EFAULT;
pr_info(SELFTEST_MSG "OK\n");
return 0;
}
early_initcall(instr_gen_test);
#endif /* CONFIG_ARC_DBG_JUMP_LABEL */
| linux-master | arch/arc/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <asm/irq.h>
#define NR_EXCEPTIONS 16
struct bcr_irq_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
#else
unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
#endif
};
/*
* Early Hardware specific Interrupt setup
* -Called very early (start_kernel -> setup_arch -> setup_processor)
* -Platform Independent (must for any ARC Core)
* -Needed for each CPU (hence not foldable into init_IRQ)
*/
void arc_init_IRQ(void)
{
unsigned int tmp, irq_prio, i;
struct bcr_irq_arcv2 irq_bcr;
struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res3:18, save_idx_regs:1, res2:1,
save_u_to_u:1, save_lp_regs:1, save_blink:1,
res:4, save_nr_gpr_pairs:5;
#else
unsigned int save_nr_gpr_pairs:5, res:4,
save_blink:1, save_lp_regs:1, save_u_to_u:1,
res2:1, save_idx_regs:1, res3:18;
#endif
} ictrl;
*(unsigned int *)&ictrl = 0;
#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE
ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */
ictrl.save_blink = 1;
ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */
ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */
ictrl.save_idx_regs = 1; /* JLI, LDI, EI */
#endif
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
/*
* ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
*/
READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
irq_bcr.firq ? " FIRQ (not used)":"");
/*
* Set a default priority for all available interrupts to prevent
* switching of register banks if Fast IRQ and multiple register banks
* are supported by CPU.
* Also disable private-per-core IRQ lines so faulty external HW won't
* trigger interrupt that kernel is not ready to handle.
*/
for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
write_aux_reg(AUX_IRQ_SELECT, i);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
* Only mask cpu private IRQs here.
* "common" interrupts are masked at IDU, otherwise it would
* need to be unmasked at each cpu, with IPIs
*/
if (i < FIRST_EXT_IRQ)
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
/* setup status32, don't enable intr yet as kernel doesn't want */
tmp = read_aux_reg(ARC_REG_STATUS32);
tmp |= ARCV2_IRQ_DEF_PRIO << 1;
tmp &= ~STATUS_IE_MASK;
asm volatile("kflag %0 \n"::"r"(tmp));
}
static void arcv2_irq_mask(struct irq_data *data)
{
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
static void arcv2_irq_unmask(struct irq_data *data)
{
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
static void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
* hw auto enables (linux unmask) all by default
* So no need to do IRQ_ENABLE here
* XXX: However OSCI LAN need it
*/
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
static struct irq_chip arcv2_irq_chip = {
.name = "ARCv2 core Intc",
.irq_mask = arcv2_irq_mask,
.irq_unmask = arcv2_irq_unmask,
.irq_enable = arcv2_irq_enable
};
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
/*
* core intc IRQs [16, 23]:
* Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
*/
if (hw < FIRST_EXT_IRQ) {
/*
* A subsequent request_percpu_irq() fails if percpu_devid is
* not set. That in turn sets NOAUTOEN, meaning each core needs
* to call enable_percpu_irq()
*/
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
} else {
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
}
return 0;
}
static const struct irq_domain_ops arcv2_irq_ops = {
.xlate = irq_domain_xlate_onecell,
.map = arcv2_irq_map,
};
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
struct irq_domain *root_domain;
struct bcr_irq_arcv2 irq_bcr;
unsigned int nr_cpu_irqs;
READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
nr_cpu_irqs = irq_bcr.irqs + NR_EXCEPTIONS;
if (parent)
panic("DeviceTree incore intc not a root irq controller\n");
root_domain = irq_domain_add_linear(intc, nr_cpu_irqs, &arcv2_irq_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
/*
* Needed for primary domain lookup to succeed
* This is a primary irqchip, and can never have a parent
*/
irq_set_default_host(root_domain);
#ifdef CONFIG_SMP
irq_create_mapping(root_domain, IPI_IRQ);
#endif
irq_create_mapping(root_domain, SOFTIRQ_IRQ);
return 0;
}
IRQCHIP_DECLARE(arc_intc, "snps,archs-intc", init_onchip_IRQ);
| linux-master | arch/arc/kernel/intc-arcv2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* stacktrace.c : stacktracing APIs needed by rest of kernel
* (wrappers over ARC dwarf based unwinder)
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* vineetg: aug 2009
* -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
* for displaying task's kernel mode call stack in /proc/<pid>/stack
* -Iterator based approach to have single copy of unwinding core and APIs
* needing unwinding, implement the logic in iterator regarding:
* = which frame onwards to start capture
* = which frame to stop capturing (wchan)
* = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
*
* vineetg: March 2009
* -Implemented correct versions of thread_saved_pc() and __get_wchan()
*
* rajeshwarr: 2008
* -Initial implementation
*/
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>
#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
/*-------------------------------------------------------------------------
* Unwinder Iterator
*-------------------------------------------------------------------------
*/
#ifdef CONFIG_ARC_DW2_UNWIND
static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
struct unwind_frame_info *frame_info)
{
if (regs) {
/*
* Asynchronous unwinding of intr/exception
* - Just uses the pt_regs passed
*/
frame_info->task = tsk;
frame_info->regs.r27 = regs->fp;
frame_info->regs.r28 = regs->sp;
frame_info->regs.r31 = regs->blink;
frame_info->regs.r63 = regs->ret;
frame_info->call_frame = 0;
} else if (tsk == NULL || tsk == current) {
/*
* synchronous unwinding (e.g. dump_stack)
* - uses current values of SP and friends
*/
unsigned long fp, sp, blink, ret;
frame_info->task = current;
__asm__ __volatile__(
"mov %0,r27\n\t"
"mov %1,r28\n\t"
"mov %2,r31\n\t"
"mov %3,r63\n\t"
: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
);
frame_info->regs.r27 = fp;
frame_info->regs.r28 = sp;
frame_info->regs.r31 = blink;
frame_info->regs.r63 = ret;
frame_info->call_frame = 0;
} else {
/*
* Asynchronous unwinding of a likely sleeping task
* - first ensure it is actually sleeping
* - if so, it will be in __switch_to, kernel mode SP of task
* is safe-kept and BLINK at a well known location in there
*/
if (task_is_running(tsk))
return -1;
frame_info->task = tsk;
frame_info->regs.r27 = TSK_K_FP(tsk);
frame_info->regs.r28 = TSK_K_ESP(tsk);
frame_info->regs.r31 = TSK_K_BLINK(tsk);
frame_info->regs.r63 = (unsigned int)__switch_to;
/* In the prologue of __switch_to, first FP is saved on stack
* and then SP is copied to FP. Dwarf assumes cfa as FP based
* but we didn't save FP. The value retrieved above is FP's
* state in previous frame.
* As a workaround for this, we unwind from the start of __switch_to
* and adjust SP accordingly. The other limitation is that dwarf rules
* are not generated for the inline assembly code in the __switch_to
* macro.
*/
frame_info->regs.r27 = 0;
frame_info->regs.r28 += 60;
frame_info->call_frame = 0;
}
return 0;
}
#endif
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
int ret = 0, cnt = 0;
unsigned int address;
struct unwind_frame_info frame_info;
if (seed_unwind_frame_info(tsk, regs, &frame_info))
return 0;
while (1) {
address = UNW_PC(&frame_info);
if (!address || !__kernel_text_address(address))
break;
if (consumer_fn(address, arg) == -1)
break;
ret = arc_unwind(&frame_info);
if (ret)
break;
frame_info.regs.r63 = frame_info.regs.r31;
if (cnt++ > 128) {
printk("unwinder looping too long, aborting !\n");
return 0;
}
}
return address; /* return the last address it saw */
#else
/* On ARC, only the dwarf based unwinder works. fp based backtracing is
* not possible (even with -fno-omit-frame-pointer) because of the way the
* function prologue is set up (callee regs saved and then fp set, not the
* other way around)
*/
pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
return 0;
#endif
}
/*-------------------------------------------------------------------------
* callbacks called by unwinder iterator to implement kernel APIs
*
* The callback can return -1 to force the iterator to stop, which by default
* keeps going till the bottom-most frame.
*-------------------------------------------------------------------------
*/
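/*
* Minimal sketch of such a consumer (hypothetical, not wired up anywhere in
* the kernel): count frames and ask the core to stop after 8 of them by
* returning -1, per the contract described above.
*/
static int __maybe_unused arc_count_eight_frames(unsigned int address, void *arg)
{
unsigned int *nr = arg;

return (++*nr >= 8) ? -1 : 0;
}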
/* Call-back which plugs into unwinding core to dump the stack in
* case of panic/OOPs/BUG etc
*/
static int __print_sym(unsigned int address, void *arg)
{
const char *loglvl = arg;
printk("%s %pS\n", loglvl, (void *)address);
return 0;
}
#ifdef CONFIG_STACKTRACE
/* Call-back which plugs into unwinding core to capture the
* traces needed by kernel on /proc/<pid>/stack
*/
static int __collect_all(unsigned int address, void *arg)
{
struct stack_trace *trace = arg;
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries)
return -1;
return 0;
}
static int __collect_all_but_sched(unsigned int address, void *arg)
{
struct stack_trace *trace = arg;
if (in_sched_functions(address))
return 0;
if (trace->skip > 0)
trace->skip--;
else
trace->entries[trace->nr_entries++] = address;
if (trace->nr_entries >= trace->max_entries)
return -1;
return 0;
}
#endif
static int __get_first_nonsched(unsigned int address, void *unused)
{
if (in_sched_functions(address))
return 0;
return -1;
}
/*-------------------------------------------------------------------------
* APIs expected by various kernel sub-systems
*-------------------------------------------------------------------------
*/
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
const char *loglvl)
{
printk("%s\nStack Trace:\n", loglvl);
arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
show_stacktrace(tsk, NULL, loglvl);
}
/* Another API expected by the scheduler, shows up in "ps" as Wait Channel
* Of course just returning schedule( ) would be pointless, so unwind until
* the function is not in scheduler code
*/
unsigned int __get_wchan(struct task_struct *tsk)
{
return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
#ifdef CONFIG_STACKTRACE
/*
* API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
* A typical use is when /proc/<pid>/stack is queried by userland
*/
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
/* Assumes @tsk is sleeping so unwinds from __switch_to */
arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
void save_stack_trace(struct stack_trace *trace)
{
/* Pass NULL for task so it unwinds the current call frame */
arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
| linux-master | arch/arc/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arcksyms.c - Exporting symbols not exportable from their own sources
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/module.h>
/* libgcc functions, not part of kernel sources */
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __divsf3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __cmpdi2(void);
extern void __fixunsdfsi(void);
extern void __muldf3(void);
extern void __divdf3(void);
extern void __floatunsidf(void);
extern void __floatunsisf(void);
extern void __udivdi3(void);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__divsf3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__cmpdi2);
EXPORT_SYMBOL(__fixunsdfsi);
EXPORT_SYMBOL(__muldf3);
EXPORT_SYMBOL(__divdf3);
EXPORT_SYMBOL(__floatunsidf);
EXPORT_SYMBOL(__floatunsisf);
EXPORT_SYMBOL(__udivdi3);
/* ARC optimised assembler routines */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strlen);
| linux-master | arch/arc/kernel/arcksyms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <asm/irq.h>
#define NR_CPU_IRQS 32 /* number of irq lines coming in */
#define TIMER0_IRQ 3 /* Fixed by ISA */
/*
* Early Hardware specific Interrupt setup
* -Platform independent, needed for each CPU (not foldable into init_IRQ)
* -Called very early (start_kernel -> setup_arch -> setup_processor)
*
* what it does ?
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
void arc_init_IRQ(void)
{
unsigned int level_mask = 0, i;
/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
/*
* Write to register, even if no LV2 IRQs configured to reset it
* in case bootloader had mucked with it
*/
write_aux_reg(AUX_IRQ_LEV, level_mask);
if (level_mask)
pr_info("Level-2 interrupts bitset %x\n", level_mask);
/*
* Disable all IRQ lines so faulty external hardware won't
* trigger interrupt that kernel is not ready to handle.
*/
for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb &= ~(1 << i);
write_aux_reg(AUX_IENABLE, ienb);
}
}
/*
* ARC700 core includes a simple on-chip intc supporting
* -per IRQ enable/disable
* -2 levels of interrupts (high/low)
* -all interrupts being level triggered
*
* To reduce platform code, we assume all IRQs directly hooked-up into intc.
* Platforms with external intc, hence cascaded IRQs, are free to over-ride
* below, per IRQ.
*/
static void arc_irq_mask(struct irq_data *data)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb &= ~(1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
static void arc_irq_unmask(struct irq_data *data)
{
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
ienb |= (1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
static struct irq_chip onchip_intc = {
.name = "ARC In-core Intc",
.irq_mask = arc_irq_mask,
.irq_unmask = arc_irq_unmask,
};
static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
switch (hw) {
case TIMER0_IRQ:
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
break;
default:
irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
}
return 0;
}
static const struct irq_domain_ops arc_intc_domain_ops = {
.xlate = irq_domain_xlate_onecell,
.map = arc_intc_domain_map,
};
static int __init
init_onchip_IRQ(struct device_node *intc, struct device_node *parent)
{
struct irq_domain *root_domain;
if (parent)
panic("DeviceTree incore intc not a root irq controller\n");
root_domain = irq_domain_add_linear(intc, NR_CPU_IRQS,
&arc_intc_domain_ops, NULL);
if (!root_domain)
panic("root irq domain not avail\n");
/*
* Needed for primary domain lookup to succeed
* This is a primary irqchip, and can never have a parent
*/
irq_set_default_host(root_domain);
return 0;
}
IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ);
/*
* arch_local_irq_enable - Enable interrupts.
*
* 1. Explicitly called to re-enable interrupts
* 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
* which maybe in hard ISR itself
*
* Semantics of this function change depending on where it is called from:
*
* -If called from hard-ISR, it must not invert interrupt priorities
* e.g. suppose TIMER is high priority (Level 2) IRQ
* In the timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
* Here local_irq_enable( ) should not re-enable lower priority interrupts
* -If called from soft-ISR, it must re-enable all interrupts
* soft ISRs are low priority jobs which can be very slow, thus all IRQs
* must be enabled while they run.
* Now hardware context wise we may still be in the L2 ISR (not done rtie)
* yet we must re-enable both L1 and L2 IRQs
* Another twist is the previous scenario with the flow being
* L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
* here we must not re-enable L1 as the prev L1 Interrupt's h/w context will get
* over-written (this is a deficiency in the ARC700 Interrupt mechanism)
*/
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
void arch_local_irq_enable(void)
{
unsigned long flags = arch_local_save_flags();
if (flags & STATUS_A2_MASK)
flags |= STATUS_E2_MASK;
else if (flags & STATUS_A1_MASK)
flags |= STATUS_E1_MASK;
arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(arch_local_irq_enable);
#endif
| linux-master | arch/arc/kernel/intc-compact.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Signal Handling for ARC
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* vineetg: Jan 2010 (Restarting of timer related syscalls)
*
* vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
* -do_signal() supports TIF_RESTORE_SIGMASK
* -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
* -sys_rt_sigsuspend() now comes from generic code, so discard arch implementation
* -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
* -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
* the job to do_signal()
*
* vineetg: July 2009
* -Modified Code to support the uClibc provided userland sigreturn stub
* to avoid kernel synthesizing it on user stack at runtime, costing TLB
* probes and Cache line flushes.
*
* vineetg: July 2009
* -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
* is done as a block copy rather than one word at a time.
* This saves around 2K of code and improves LMBench lat_sig <catch>
*
* rajeshwarr: Feb 2009
* - Support for Realtime Signals
*
* vineetg: Aug 11th 2008: Bug #94183
* -ViXS were still seeing crashes when using insmod to load drivers.
* It turned out that the code to change Execute permissions for TLB entries
* of user was not guarded for interrupts (mod_tlb_permission)
* This was causing TLB entries to be overwritten on unrelated indexes
*
* Vineetg: July 15th 2008: Bug #94183
* -Exception happens in Delay slot of a JMP, and before user space resumes,
* Signal is delivered (Ctrl + C) => SIGINT.
* setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
* to run, but doesn't clear the Delay slot bit from status32. As a result,
* on resuming user mode, signal handler branches off to BTA of orig JMP
* -FIX: clear the DE bit from status32 in setup_frame( )
*
* Rahul Trivedi, Kanika Nema: Codito Technologies 2004
*/
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/resume_user_mode.h>
#include <linux/sched/task_stack.h>
#include <asm/ucontext.h>
#include <asm/entry.h>
struct rt_sigframe {
struct siginfo info;
struct ucontext uc;
#define MAGIC_SIGALTSTK 0x07302004
unsigned int sigret_magic;
};
static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
struct user_regs_arcv2 v2abi;
v2abi.r30 = regs->r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
v2abi.r58 = regs->r58;
v2abi.r59 = regs->r59;
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
#endif
return err;
}
static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
struct user_regs_arcv2 v2abi;
err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
regs->r30 = v2abi.r30;
#ifdef CONFIG_ARC_HAS_ACCL_REGS
regs->r58 = v2abi.r58;
regs->r59 = v2abi.r59;
#endif
#endif
return err;
}
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
struct user_regs_struct uregs;
uregs.scratch.bta = regs->bta;
uregs.scratch.lp_start = regs->lp_start;
uregs.scratch.lp_end = regs->lp_end;
uregs.scratch.lp_count = regs->lp_count;
uregs.scratch.status32 = regs->status32;
uregs.scratch.ret = regs->ret;
uregs.scratch.blink = regs->blink;
uregs.scratch.fp = regs->fp;
uregs.scratch.gp = regs->r26;
uregs.scratch.r12 = regs->r12;
uregs.scratch.r11 = regs->r11;
uregs.scratch.r10 = regs->r10;
uregs.scratch.r9 = regs->r9;
uregs.scratch.r8 = regs->r8;
uregs.scratch.r7 = regs->r7;
uregs.scratch.r6 = regs->r6;
uregs.scratch.r5 = regs->r5;
uregs.scratch.r4 = regs->r4;
uregs.scratch.r3 = regs->r3;
uregs.scratch.r2 = regs->r2;
uregs.scratch.r1 = regs->r1;
uregs.scratch.r0 = regs->r0;
uregs.scratch.sp = regs->sp;
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (is_isa_arcv2())
err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
return err ? -EFAULT : 0;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
{
sigset_t set;
int err;
struct user_regs_struct uregs;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
if (is_isa_arcv2())
err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
if (err)
return -EFAULT;
set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
regs->lp_start = uregs.scratch.lp_start;
regs->lp_end = uregs.scratch.lp_end;
regs->lp_count = uregs.scratch.lp_count;
regs->status32 = uregs.scratch.status32;
regs->ret = uregs.scratch.ret;
regs->blink = uregs.scratch.blink;
regs->fp = uregs.scratch.fp;
regs->r26 = uregs.scratch.gp;
regs->r12 = uregs.scratch.r12;
regs->r11 = uregs.scratch.r11;
regs->r10 = uregs.scratch.r10;
regs->r9 = uregs.scratch.r9;
regs->r8 = uregs.scratch.r8;
regs->r7 = uregs.scratch.r7;
regs->r6 = uregs.scratch.r6;
regs->r5 = uregs.scratch.r5;
regs->r4 = uregs.scratch.r4;
regs->r3 = uregs.scratch.r3;
regs->r2 = uregs.scratch.r2;
regs->r1 = uregs.scratch.r1;
regs->r0 = uregs.scratch.r0;
regs->sp = uregs.scratch.sp;
return 0;
}
static inline int is_do_ss_needed(unsigned int magic)
{
if (MAGIC_SIGALTSTK == magic)
return 1;
else
return 0;
}
SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *sf;
unsigned int magic;
struct pt_regs *regs = current_pt_regs();
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
/* Since we stacked the signal on a word boundary,
* then 'sp' should be word aligned here. If it's
* not, then the user is trying to mess with us.
*/
if (regs->sp & 3)
goto badframe;
sf = (struct rt_sigframe __force __user *)(regs->sp);
if (!access_ok(sf, sizeof(*sf)))
goto badframe;
if (__get_user(magic, &sf->sigret_magic))
goto badframe;
if (unlikely(is_do_ss_needed(magic)))
if (restore_altstack(&sf->uc.uc_stack))
goto badframe;
if (restore_usr_regs(regs, sf))
goto badframe;
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
/*
* Ensure that sigreturn always returns to user mode (in case the
* regs saved on user stack got fudged between save and sigreturn)
* Otherwise it is easy to panic the kernel with a custom
* signal handler and/or restorer which clobbers the status32/ret
* to return to a bogus location in kernel mode.
*/
regs->status32 |= STATUS_U_MASK;
return regs->r0;
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Determine which stack to use..
*/
static inline void __user *get_sigframe(struct ksignal *ksig,
struct pt_regs *regs,
unsigned long framesize)
{
unsigned long sp = sigsp(regs->sp, ksig);
void __user *frame;
/* No matter what happens, 'sp' must be word
* aligned otherwise nasty things could happen
*/
/* ATPCS B01 mandates 8-byte alignment */
frame = (void __user *)((sp - framesize) & ~7);
/* Check that we can actually write to the signal frame */
if (!access_ok(frame, framesize))
frame = NULL;
return frame;
}
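/*
* Example of the rounding above (hypothetical numbers): with sp = 0x5ffff000
* and a frame of, say, 0x2c4 bytes, sp - framesize = 0x5fffed3c, which the
* "& ~7" rounds down to 0x5fffed38, so the signal frame always starts
* 8-byte aligned regardless of the frame's actual size.
*/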
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *sf;
unsigned int magic = 0;
int err = 0;
sf = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
if (!sf)
return 1;
/*
* w/o SA_SIGINFO, struct ucontext is partially populated (only
* uc_mcontext/uc_sigmask) for kernel's normal user state preservation
* during signal handler execution. This works for SA_SIGINFO as well
* although the semantics are now overloaded (the same reg state can be
* inspected by userland: but are they allowed to fiddle with it?)
*/
err |= stash_usr_regs(sf, regs, set);
/*
* SA_SIGINFO requires 3 args to signal handler:
* #1: sig-no (common to any handler)
* #2: struct siginfo
* #3: struct ucontext (completely populated)
*/
if (unlikely(ksig->ka.sa.sa_flags & SA_SIGINFO)) {
err |= copy_siginfo_to_user(&sf->info, &ksig->info);
err |= __put_user(0, &sf->uc.uc_flags);
err |= __put_user(NULL, &sf->uc.uc_link);
err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
/* setup args 2 and 3 for user mode handler */
regs->r1 = (unsigned long)&sf->info;
regs->r2 = (unsigned long)&sf->uc;
/*
* small optim to avoid unconditionally calling do_sigaltstack
* in sigreturn path, now that we only have rt_sigreturn
*/
magic = MAGIC_SIGALTSTK;
}
err |= __put_user(magic, &sf->sigret_magic);
if (err)
return err;
/* #1 arg to the user Signal handler */
regs->r0 = ksig->sig;
/* setup PC of user space signal handler */
regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
/*
* The handler returns using the sigreturn stub already provided by userspace.
* If not, nuke the process right away
*/
if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
return 1;
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
regs->sp = (unsigned long)sf;
/*
* Bug 94183, Clear the DE bit, so that when signal handler
* starts to run, it doesn't use BTA
*/
regs->status32 &= ~STATUS_DE_MASK;
regs->status32 |= STATUS_L_MASK;
return err;
}
static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
{
switch (regs->r0) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
/*
* ERESTARTNOHAND means that the syscall should
* only be restarted if there was no handler for
* the signal, and since we only get here if there
* is a handler, we don't restart
*/
regs->r0 = -EINTR; /* ERESTART_xxx is internal */
break;
case -ERESTARTSYS:
/*
* ERESTARTSYS means to restart the syscall if
* there is no handler or the handler was
* registered with SA_RESTART
*/
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->r0 = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
/*
* ERESTARTNOINTR means that the syscall should
* be called again after the signal handler returns.
* Set up reg state just as it was before doing the trap.
* r0 has been clobbered with the syscall return code, thus it
* needs to be reloaded with the original first arg to the syscall
* from orig_r0. Rest of the relevant reg-file:
* r8 (syscall num) and (r1 - r7) will be reset to
* their orig user space value when we ret from kernel
*/
regs->r0 = regs->orig_r0;
regs->ret -= is_isa_arcv2() ? 2 : 4;
break;
}
}
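/*
* Concrete view of the -ERESTARTNOINTR case above (hypothetical PC): if the
* syscall trap instruction sits at 0x20001000, regs->ret currently points
* just past it. Rewinding ret by 2 on ARCv2 or 4 on ARCompact (presumably
* the respective trap instruction sizes) makes the return-to-user path
* re-execute that very trap, re-issuing the syscall with r0 restored from
* orig_r0.
*/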
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int failed;
/* Set up the stack frame */
failed = setup_rt_frame(ksig, oldset, regs);
signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
int restart_scall;
restart_scall = in_syscall(regs) && syscall_restartable(regs);
if (test_thread_flag(TIF_SIGPENDING) && get_signal(&ksig)) {
if (restart_scall) {
arc_restart_syscall(&ksig.ka, regs);
syscall_wont_restart(regs); /* No more restarts */
}
handle_signal(&ksig, regs);
return;
}
if (restart_scall) {
/* No handler for syscall: restart it */
if (regs->r0 == -ERESTARTNOHAND ||
regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
regs->ret -= is_isa_arcv2() ? 2 : 4;
} else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
regs->r8 = __NR_restart_syscall;
regs->ret -= is_isa_arcv2() ? 2 : 4;
}
syscall_wont_restart(regs); /* No more restarts */
}
/* If there's no signal to deliver, restore the saved sigmask back */
restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs)
{
/*
* ASM glue guarantees that this is only called when returning to
* user mode
*/
if (test_thread_flag(TIF_NOTIFY_RESUME))
resume_user_mode_work(regs);
}
| linux-master | arch/arc/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kgdb support for ARC
*
* Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/kgdb.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>
static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
for (regno = 27; regno < GDB_MAX_REGS; regno++)
gdb_regs[regno] = 0;
gdb_regs[_FP] = kernel_regs->fp;
gdb_regs[__SP] = kernel_regs->sp;
gdb_regs[_BLINK] = kernel_regs->blink;
gdb_regs[_RET] = kernel_regs->ret;
gdb_regs[_STATUS32] = kernel_regs->status32;
gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
gdb_regs[_LP_END] = kernel_regs->lp_end;
gdb_regs[_LP_START] = kernel_regs->lp_start;
gdb_regs[_BTA] = kernel_regs->bta;
gdb_regs[_STOP_PC] = kernel_regs->ret;
}
static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
struct callee_regs *cregs)
{
int regno;
for (regno = 0; regno <= 26; regno++)
set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
kernel_regs->fp = gdb_regs[_FP];
kernel_regs->sp = gdb_regs[__SP];
kernel_regs->blink = gdb_regs[_BLINK];
kernel_regs->ret = gdb_regs[_RET];
kernel_regs->status32 = gdb_regs[_STATUS32];
kernel_regs->lp_count = gdb_regs[_LP_COUNT];
kernel_regs->lp_end = gdb_regs[_LP_END];
kernel_regs->lp_start = gdb_regs[_LP_START];
kernel_regs->bta = gdb_regs[_BTA];
}
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
{
from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
current->thread.callee_reg);
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
if (task)
to_gdb_regs(gdb_regs, task_pt_regs(task),
(struct callee_regs *) task->thread.callee_reg);
}
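/*
 * ARC has no hardware single-step, so kgdb single-steps in software: the
 * original opcode(s) at the next possible PC value(s) (two for a branch)
 * are saved here while breakpoint instructions are temporarily patched in,
 * and restored by undo_single_step() on the following exception.
 */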
struct single_step_data_t {
uint16_t opcode[2];
unsigned long address[2];
int is_branch;
int armed;
} single_step_data;
static void undo_single_step(struct pt_regs *regs)
{
if (single_step_data.armed) {
int i;
for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
memcpy((void *) single_step_data.address[i],
&single_step_data.opcode[i],
BREAK_INSTR_SIZE);
flush_icache_range(single_step_data.address[i],
single_step_data.address[i] +
BREAK_INSTR_SIZE);
}
single_step_data.armed = 0;
}
}
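/*
 * Save the original bytes at @address into @save, patch in the kgdb
 * breakpoint instruction and flush the icache so the core fetches it.
 */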
static void place_trap(unsigned long address, void *save)
{
memcpy(save, (void *) address, BREAK_INSTR_SIZE);
memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
BREAK_INSTR_SIZE);
flush_icache_range(address, address + BREAK_INSTR_SIZE);
}
static void do_single_step(struct pt_regs *regs)
{
single_step_data.is_branch = disasm_next_pc((unsigned long)
regs->ret, regs, (struct callee_regs *)
current->thread.callee_reg,
&single_step_data.address[0],
&single_step_data.address[1]);
place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
if (single_step_data.is_branch) {
place_trap(single_step_data.address[1],
&single_step_data.opcode[1]);
}
single_step_data.armed++;
}
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *regs)
{
unsigned long addr;
char *ptr;
undo_single_step(regs);
switch (remcomInBuffer[0]) {
case 's':
case 'c':
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcomInBuffer[0] == 's') {
do_single_step(regs);
atomic_set(&kgdb_cpu_doing_single_step,
smp_processor_id());
}
return 0;
}
return -1;
}
int kgdb_arch_init(void)
{
single_step_data.armed = 0;
return 0;
}
void kgdb_trap(struct pt_regs *regs)
{
/* trap_s 3 is used for breakpoints that overwrite existing
* instructions, while trap_s 4 is used for compiled breakpoints.
*
* with trap_s 3 breakpoints the original instruction needs to be
* restored and continuation needs to start at the location of the
* breakpoint.
*
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
if (regs->ecr.param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
}
void kgdb_arch_exit(void)
{
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
instruction_pointer(regs) = ip;
}
void kgdb_call_nmi_hook(void *ignored)
{
/* Default implementation passes get_irq_regs() but we don't */
kgdb_nmicallback(raw_smp_processor_id(), NULL);
}
const struct kgdb_arch arch_kgdb_ops = {
/* breakpoint instruction: TRAP_S 0x3 */
#ifdef CONFIG_CPU_BIG_ENDIAN
.gdb_bpt_instr = {0x78, 0x7e},
#else
.gdb_bpt_instr = {0x7e, 0x78},
#endif
};
| linux-master | arch/arc/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>
#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
(unsigned long)current_thread_info() + THREAD_SIZE - (addr))
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* Attempt to probe at unaligned address */
if ((unsigned long)p->addr & 0x01)
return -EINVAL;
/* Address should not be in exception handling code */
p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
p->opcode = *p->addr;
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = UNIMP_S_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
arch_disarm_kprobe(p);
/* Can we remove the kprobe in the middle of kprobe handling? */
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
struct pt_regs *regs)
{
/* Remove the trap instructions inserted for single step and
* restore the original instructions
*/
if (p->ainsn.t1_addr) {
*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t1_addr = NULL;
}
if (p->ainsn.t2_addr) {
*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
p->ainsn.t2_addr = NULL;
}
return;
}
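/*
 * Software single-step: put the original instruction back at the probe
 * point, then plant trap_s 2 at the next instruction (and at the branch
 * target if the probed instruction is a branch) so control returns to the
 * kprobe code after exactly one instruction has executed.
 */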
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long next_pc;
unsigned long tgt_if_br = 0;
int is_branch;
unsigned long bta;
/* Copy the opcode back to the kprobe location and execute the
* instruction. Because of this we will not be able to get into the
* same kprobe until this kprobe is done
*/
*(p->addr) = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
/* Now we insert the trap at the next location after this instruction to
* single step. If it is a branch we insert the trap at possible branch
* targets
*/
bta = regs->bta;
if (regs->status32 & 0x40) {
/* We are in a delay slot with the branch taken */
next_pc = bta & ~0x01;
if (!p->ainsn.is_short) {
if (bta & 0x01)
regs->blink += 2;
else {
/* Branch not taken */
next_pc += 2;
/* next pc is taken from bta after executing the
* delay slot instruction
*/
regs->bta += 2;
}
}
is_branch = 0;
} else
is_branch =
disasm_next_pc((unsigned long)p->addr, regs,
(struct callee_regs *) current->thread.callee_reg,
&next_pc, &tgt_if_br);
p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t1_addr,
(unsigned long)p->ainsn.t1_addr +
sizeof(kprobe_opcode_t));
if (is_branch) {
p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
flush_icache_range((unsigned long)p->ainsn.t2_addr,
(unsigned long)p->ainsn.t2_addr +
sizeof(kprobe_opcode_t));
}
}
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((unsigned long *)addr);
if (p) {
/*
* We have reentered the kprobe_handler, since another kprobe
* was hit while within the handler, we save the original
* kprobes and single step on the instruction of the new probe
* without calling any user handlers to avoid recursive
* kprobes.
*/
if (kprobe_running()) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
* non-zero - which means the user handler set up the registers to
* continue at another instruction - we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
} else {
reset_current_kprobe();
preempt_enable_no_resched();
}
return 1;
}
/* no_kprobe: */
preempt_enable_no_resched();
return 0;
}
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
resume_execution(cur, addr, regs);
/* Rearm the kprobe */
arch_arm_kprobe(cur);
/*
* When we return from the trap instruction we go to the next instruction.
* We restored the actual instruction in resume_execution() and we need to
* return to the same address and execute it
*/
regs->ret = addr;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
/*
* Fault can be for the instruction being single stepped or for the
* pre/post handlers in the module.
* This is applicable for applications like user probes, where we have the
* probe in user space and the handlers in the kernel
*/
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single stepped
* caused the fault. We reset the current kprobe and let the
* exception be handled as a regular exception. In our
* case it doesn't matter because the system will be halted
*/
resume_execution(cur, (unsigned long)cur->addr, regs);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We are here because the instructions in the pre/post handler
* caused the fault.
*/
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
*/
if (fixup_exception(regs))
return 1;
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
unsigned long addr = args->err;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_IERR:
if (arc_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
case DIE_TRAP:
if (arc_post_kprobe_handler(addr, args->regs))
return NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
static void __used kretprobe_trampoline_holder(void)
{
__asm__ __volatile__(".global __kretprobe_trampoline\n"
"__kretprobe_trampoline:\n"
"nop\n");
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->blink;
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->blink = (unsigned long)&__kretprobe_trampoline;
}
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
regs->ret = __kretprobe_trampoline_handler(regs, NULL);
/* By returning a non zero value, we are telling the kprobe handler
* that we don't want the post_handler to run
*/
return 1;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
/* Registering the trampoline code for the kret probe */
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline)
return 1;
return 0;
}
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
| linux-master | arch/arc/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <asm/arcregs.h>
#include <asm/irqflags.h>
#define ARC_PATH_MAX 256
static noinline void print_regs_scratch(struct pt_regs *regs)
{
pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
regs->bta, regs->sp, regs->fp, (void *)regs->blink);
pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
regs->lp_start, regs->lp_end, regs->lp_count);
pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
"r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
"r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
"r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
"r12: 0x%08lx\t",
regs->r0, regs->r1, regs->r2,
regs->r3, regs->r4, regs->r5,
regs->r6, regs->r7, regs->r8,
regs->r9, regs->r10, regs->r11,
regs->r12);
}
static void print_regs_callee(struct callee_regs *regs)
{
pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
"r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
"r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
"r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
"r24: 0x%08lx\tr25: 0x%08lx\n",
regs->r13, regs->r14,
regs->r15, regs->r16, regs->r17,
regs->r18, regs->r19, regs->r20,
regs->r21, regs->r22, regs->r23,
regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
{
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk);
if (!mm)
goto done;
exe_file = get_mm_exe_file(mm);
mmput(mm);
if (exe_file) {
path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file);
}
done:
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
static void show_faulting_vma(unsigned long address)
{
struct vm_area_struct *vma;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
mmap_read_lock(active_mm);
vma = vma_lookup(active_mm, address);
/* Lookup the vma at the address and report if the containing VMA is not
* found
*/
if (vma) {
char buf[ARC_PATH_MAX];
char *nm = "?";
if (vma->vm_file) {
nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
pr_info(" @off 0x%lx in [%s] VMA: 0x%08lx to 0x%08lx\n",
vma->vm_start < TASK_UNMAPPED_BASE ?
address : address - vma->vm_start,
nm, vma->vm_start, vma->vm_end);
} else
pr_info(" @No matching VMA found\n");
mmap_read_unlock(active_mm);
}
static void show_ecr_verbose(struct pt_regs *regs)
{
unsigned int vec, cause_code;
unsigned long address;
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
vec = regs->ecr.vec;
cause_code = regs->ecr.cause;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"),
address, (void *)regs->ret);
} else if (vec == ECR_V_ITLB_MISS) {
pr_cont("Insn could not be fetched\n");
} else if (vec == ECR_V_MACH_CHK) {
pr_cont("Machine Check (%s)\n", (cause_code == 0x0) ?
"Double Fault" : "Other Fatal Err");
} else if (vec == ECR_V_PROTV) {
if (cause_code == ECR_C_PROTV_INST_FETCH)
pr_cont("Execute from Non-exec Page\n");
else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
IS_ENABLED(CONFIG_ISA_ARCOMPACT))
pr_cont("Misaligned r/w from 0x%08lx\n", address);
else
pr_cont("%s access not allowed on page\n",
(cause_code == 0x01) ? "Read" :
((cause_code == 0x02) ? "Write" : "EX"));
} else if (vec == ECR_V_INSN_ERR) {
pr_cont("Illegal Insn\n");
#ifdef CONFIG_ISA_ARCV2
} else if (vec == ECR_V_MEM_ERR) {
if (cause_code == 0x00)
pr_cont("Bus Error from Insn Mem\n");
else if (cause_code == 0x10)
pr_cont("Bus Error from Data Mem\n");
else
pr_cont("Bus Error, check PRM\n");
} else if (vec == ECR_V_MISALIGN) {
pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
if (regs->ecr.param == 5)
pr_cont("gcc generated __builtin_trap\n");
} else {
pr_cont("Check Programmer's Manual\n");
}
}
/************************************************************************
* API called by rest of kernel
***********************************************************************/
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
* here could sleep, so re-enable to avoid lockdep splat
*/
preempt_enable();
print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
regs->ecr.full, current->thread.fault_address, regs->ret);
pr_info("STAT32: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
#ifdef CONFIG_ISA_ARCOMPACT
pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE),
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
print_regs_scratch(regs);
if (cregs)
print_regs_callee(cregs);
preempt_disable();
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address)
{
current->thread.fault_address = address;
/* Show fault description */
pr_info("\n%s\n", str);
/* Caller and Callee regs */
show_regs(regs);
/* Show stack trace if this Fatality happened in kernel mode */
if (!user_mode(regs))
show_stacktrace(current, regs, KERN_DEFAULT);
}
| linux-master | arch/arc/kernel/troubleshoot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* RajeshwarR: Dec 11, 2007
* -- Added support for Inter Processor Interrupts
*
* Vineetg: Nov 1st, 2007
* -- Initial Write (Borrowed heavily from ARM)
*/
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <asm/mach_desc.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/processor.h>
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
#endif
struct plat_smp_ops __weak plat_smp_ops;
/* XXX: per cpu ? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;
/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}
static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
unsigned long dt_root = of_get_flat_dt_root();
const char *buf;
buf = of_get_flat_dt_prop(dt_root, name, NULL);
if (!buf)
return -EINVAL;
if (cpulist_parse(buf, cpumask))
return -EINVAL;
return 0;
}
/*
* Read from DeviceTree and setup cpu possible mask. If there is no
* "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
*/
static void __init arc_init_cpu_possible(void)
{
struct cpumask cpumask;
if (arc_get_cpu_map("possible-cpus", &cpumask)) {
pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
NR_CPUS);
cpumask_setall(&cpumask);
}
if (!cpumask_test_cpu(0, &cpumask))
panic("Master cpu (cpu[0]) is missed in cpu possible mask!");
init_cpu_possible(&cpumask);
}
/*
* Called from setup_arch() before calling setup_processor()
*
* - Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
* - Call early smp init hook. This can initialize a specific multi-core
* IP which is, say, common to several platforms (hence not part of a
* platform specific init_early() hook)
*/
void __init smp_init_cpus(void)
{
arc_init_cpu_possible();
if (plat_smp_ops.init_early_smp)
plat_smp_ops.init_early_smp();
}
/* called from init ( ) => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/*
* if platform didn't set the present map already, do it now
* boot cpu is set to present already by init/main.c
*/
if (num_present_cpus() <= 1)
init_cpu_present(cpu_possible_mask);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
* Default smp boot helper for Run-on-reset case where all cores start off
* together. Non-masters need to wait for Master to start running.
* This is implemented using a flag in memory, which Non-masters spin-wait on.
* Master sets it to cpu-id of core to "ungate" it.
*/
static volatile int wake_flag;
#ifdef CONFIG_ISA_ARCOMPACT
#define __boot_read(f) f
#define __boot_write(f, v) f = v
#else
#define __boot_read(f) arc_read_uncached_32(&f)
#define __boot_write(f, v) arc_write_uncached_32(&f, v)
#endif
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
__boot_write(wake_flag, cpu);
}
void arc_platform_smp_wait_to_boot(int cpu)
{
/* for halt-on-reset, we've waited already */
if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
return;
while (__boot_read(wake_flag) != cpu)
;
__boot_write(wake_flag, 0);
}
const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
}
/*
* The very first "C" code executed by secondary
* Called from asm stub in head.S
* "current"/R25 already setup by low level boot code
*/
void start_kernel_secondary(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
/* MMU, Caches, Vector Table, Interrupts etc */
setup_processor();
mmget(mm);
mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
/* Some SMP H/w setup - for each cpu */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(cpu);
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(cpu);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
* Called from kernel_init( ) -> smp_init( ) - for each CPU
*
* At this point, Secondary Processor is "HALT"ed:
* -It booted, but was halted in head.S
* -It was configured to halt-on-reset
* So need to wake it up.
*
* Essential requirements being where to run from (PC) and stack (SP)
*/
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
unsigned long wait_till;
secondary_idle_tsk = idle;
pr_info("Idle Task [%d] %p", cpu, idle);
pr_info("Trying to bring up CPU%u ...\n", cpu);
if (plat_smp_ops.cpu_kick)
plat_smp_ops.cpu_kick(cpu,
(unsigned long)first_lines_of_secondary);
else
arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);
/* wait for 1 sec after kicking the secondary */
wait_till = jiffies + HZ;
while (time_before(jiffies, wait_till)) {
if (cpu_online(cpu))
break;
}
if (!cpu_online(cpu)) {
pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
return -1;
}
secondary_idle_tsk = NULL;
return 0;
}
/*****************************************************************************/
/* Inter Processor Interrupt Handling */
/*****************************************************************************/
enum ipi_msg_type {
IPI_EMPTY = 0,
IPI_RESCHEDULE = 1,
IPI_CALL_FUNC,
IPI_CPU_STOP,
};
/*
* In arches with IRQ for each msg type (above), receiver can use IRQ-id to
* figure out what msg was sent. For those which don't (ARC has dedicated IPI
* IRQ), the msg-type needs to be conveyed via per-cpu data
*/
static DEFINE_PER_CPU(unsigned long, ipi_data);
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
unsigned long old, new;
unsigned long flags;
pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);
local_irq_save(flags);
/*
* Atomically write new msg bit (in case others are writing too),
* and read back old value
*/
do {
new = old = *ipi_data_ptr;
new |= 1U << msg;
} while (cmpxchg(ipi_data_ptr, old, new) != old);
/*
* Call the platform specific IPI kick function, but avoid if possible:
* Only do so if there's no pending msg from other concurrent sender(s).
* Otherwise, receiver will see this msg as well when it takes the
* IPI corresponding to that msg. This is true even if the receiver is
* already in the IPI handler, because !@old means it has not yet dequeued
* the msg(s), so the @new msg can free-load on the pending IPI.
*/
if (plat_smp_ops.ipi_send && !old)
plat_smp_ops.ipi_send(cpu);
local_irq_restore(flags);
}
static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
unsigned int cpu;
for_each_cpu(cpu, callmap)
ipi_send_msg_one(cpu, msg);
}
void arch_smp_send_reschedule(int cpu)
{
ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
ipi_send_msg(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
ipi_send_msg(mask, IPI_CALL_FUNC);
}
/*
* ipi_cpu_stop - handle IPI from smp_send_stop()
*/
static void ipi_cpu_stop(void)
{
machine_halt();
}
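/* Dispatch one decoded IPI message; returns non-zero for an unknown message */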
static inline int __do_IPI(unsigned long msg)
{
int rc = 0;
switch (msg) {
case IPI_RESCHEDULE:
scheduler_ipi();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CPU_STOP:
ipi_cpu_stop();
break;
default:
rc = 1;
}
return rc;
}
/*
* arch-common ISR to handle for inter-processor interrupts
* Has hooks for platform specific IPI
*/
static irqreturn_t do_IPI(int irq, void *dev_id)
{
unsigned long pending;
unsigned long __maybe_unused copy;
pr_debug("IPI [%ld] received on cpu %d\n",
*this_cpu_ptr(&ipi_data), smp_processor_id());
if (plat_smp_ops.ipi_clear)
plat_smp_ops.ipi_clear(irq);
/*
* "dequeue" the msg corresponding to this IPI (and possibly other
* piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
*/
copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);
do {
unsigned long msg = __ffs(pending);
int rc;
rc = __do_IPI(msg);
if (rc)
pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
pending &= ~(1U << msg);
} while (pending);
return IRQ_HANDLED;
}
/*
* API called by platform code to hookup arch-common ISR to their IPI IRQ
*
* Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
* function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
* request_percpu_irq() below will fail
*/
static DEFINE_PER_CPU(int, ipi_dev);
int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
int *dev = per_cpu_ptr(&ipi_dev, cpu);
unsigned int virq = irq_find_mapping(NULL, hwirq);
if (!virq)
panic("Cannot find virq for root domain and hwirq=%lu", hwirq);
/* Boot cpu calls request, all call enable */
if (!cpu) {
int rc;
rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
if (rc)
panic("Percpu IRQ request failed for %u\n", virq);
}
enable_percpu_irq(virq, 0);
return 0;
}
| linux-master | arch/arc/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/* BPF JIT compiler for RV64G
*
* Copyright(c) 2019 Björn Töpel <[email protected]>
*
*/
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <asm/patch.h>
#include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
static const int regmap[] = {
[BPF_REG_0] = RV_REG_A5,
[BPF_REG_1] = RV_REG_A0,
[BPF_REG_2] = RV_REG_A1,
[BPF_REG_3] = RV_REG_A2,
[BPF_REG_4] = RV_REG_A3,
[BPF_REG_5] = RV_REG_A4,
[BPF_REG_6] = RV_REG_S1,
[BPF_REG_7] = RV_REG_S2,
[BPF_REG_8] = RV_REG_S3,
[BPF_REG_9] = RV_REG_S4,
[BPF_REG_FP] = RV_REG_S5,
[BPF_REG_AX] = RV_REG_T0,
};
static const int pt_regmap[] = {
[RV_REG_A0] = offsetof(struct pt_regs, a0),
[RV_REG_A1] = offsetof(struct pt_regs, a1),
[RV_REG_A2] = offsetof(struct pt_regs, a2),
[RV_REG_A3] = offsetof(struct pt_regs, a3),
[RV_REG_A4] = offsetof(struct pt_regs, a4),
[RV_REG_A5] = offsetof(struct pt_regs, a5),
[RV_REG_S1] = offsetof(struct pt_regs, s1),
[RV_REG_S2] = offsetof(struct pt_regs, s2),
[RV_REG_S3] = offsetof(struct pt_regs, s3),
[RV_REG_S4] = offsetof(struct pt_regs, s4),
[RV_REG_S5] = offsetof(struct pt_regs, s5),
[RV_REG_T0] = offsetof(struct pt_regs, t0),
};
enum {
RV_CTX_F_SEEN_TAIL_CALL = 0,
RV_CTX_F_SEEN_CALL = RV_REG_RA,
RV_CTX_F_SEEN_S1 = RV_REG_S1,
RV_CTX_F_SEEN_S2 = RV_REG_S2,
RV_CTX_F_SEEN_S3 = RV_REG_S3,
RV_CTX_F_SEEN_S4 = RV_REG_S4,
RV_CTX_F_SEEN_S5 = RV_REG_S5,
RV_CTX_F_SEEN_S6 = RV_REG_S6,
};
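/*
 * Map a BPF register to its RISC-V register and, for callee-saved registers
 * (s1-s6), mark it as seen in ctx->flags so the prologue/epilogue know to
 * save and restore it.
 */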
static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
{
u8 reg = regmap[bpf_reg];
switch (reg) {
case RV_CTX_F_SEEN_S1:
case RV_CTX_F_SEEN_S2:
case RV_CTX_F_SEEN_S3:
case RV_CTX_F_SEEN_S4:
case RV_CTX_F_SEEN_S5:
case RV_CTX_F_SEEN_S6:
__set_bit(reg, &ctx->flags);
}
return reg;
};
static bool seen_reg(int reg, struct rv_jit_context *ctx)
{
switch (reg) {
case RV_CTX_F_SEEN_CALL:
case RV_CTX_F_SEEN_S1:
case RV_CTX_F_SEEN_S2:
case RV_CTX_F_SEEN_S3:
case RV_CTX_F_SEEN_S4:
case RV_CTX_F_SEEN_S5:
case RV_CTX_F_SEEN_S6:
return test_bit(reg, &ctx->flags);
}
return false;
}
static void mark_fp(struct rv_jit_context *ctx)
{
__set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
}
static void mark_call(struct rv_jit_context *ctx)
{
__set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}
static bool seen_call(struct rv_jit_context *ctx)
{
return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
}
static void mark_tail_call(struct rv_jit_context *ctx)
{
__set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}
static bool seen_tail_call(struct rv_jit_context *ctx)
{
return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
}
static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
{
mark_tail_call(ctx);
if (seen_call(ctx)) {
__set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
return RV_REG_S6;
}
return RV_REG_A6;
}
static bool is_32b_int(s64 val)
{
return -(1L << 31) <= val && val < (1L << 31);
}
static bool in_auipc_jalr_range(s64 val)
{
/*
* auipc+jalr can reach any signed PC-relative offset in the range
* [-2^31 - 2^11, 2^31 - 2^11).
*/
return (-(1L << 31) - (1L << 11)) <= val &&
val < ((1L << 31) - (1L << 11));
}
/* Emit fixed-length instructions for address */
static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
{
/*
* Use the ro_insns(RX) to calculate the offset as the BPF program will
* finally run from this memory region.
*/
u64 ip = (u64)(ctx->ro_insns + ctx->ninsns);
s64 off = addr - ip;
s64 upper = (off + (1 << 11)) >> 12;
s64 lower = off & 0xfff;
if (extra_pass && !in_auipc_jalr_range(off)) {
pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
return -ERANGE;
}
emit(rv_auipc(rd, upper), ctx);
emit(rv_addi(rd, rd, lower), ctx);
return 0;
}
/* Emit variable-length instructions for 32-bit and 64-bit imm */
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
/* Note that the immediate from the add is sign-extended,
* which means that we need to compensate this by adding 2^12,
* when the 12th bit is set. A simpler way of doing this, and
* getting rid of the check, is to just add 2**11 before the
* shift. The "Loading a 32-Bit constant" example from the
* "Computer Organization and Design, RISC-V edition" book by
* Patterson/Hennessy highlights this fact.
*
* This also means that we need to process LSB to MSB.
*/
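/*
 * For example, val = 0x12345fff gives upper = (val + 0x800) >> 12 = 0x12346
 * and lower = -1, so "lui rd, 0x12346; addiw rd, rd, -1" materializes
 * 0x12345fff: the extra 2^11 cancels the sign-extension of the low 12 bits.
 */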
s64 upper = (val + (1 << 11)) >> 12;
/* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
* and addi are signed and RVC checks will perform signed comparisons.
*/
s64 lower = ((val & 0xfff) << 52) >> 52;
int shift;
if (is_32b_int(val)) {
if (upper)
emit_lui(rd, upper, ctx);
if (!upper) {
emit_li(rd, lower, ctx);
return;
}
emit_addiw(rd, rd, lower, ctx);
return;
}
shift = __ffs(upper);
upper >>= shift;
shift += 12;
emit_imm(rd, upper, ctx);
emit_slli(rd, rd, shift, ctx);
if (lower)
emit_addi(rd, rd, lower, ctx);
}
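/*
 * Pop the stack frame, restoring only the callee-saved registers the program
 * actually used. A normal exit moves the BPF return value from a5 to a0 and
 * returns via ra; a tail call instead jumps through t3, past the target
 * program's reserved fentry nops and TCC initialization.
 */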
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
if (seen_reg(RV_REG_RA, ctx)) {
emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
if (seen_reg(RV_REG_S1, ctx)) {
emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S2, ctx)) {
emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S3, ctx)) {
emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S4, ctx)) {
emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S5, ctx)) {
emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S6, ctx)) {
emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
ctx);
}
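/*
 * Emit a single RISC-V conditional branch for the BPF condition @cond;
 * @rvoff is a byte offset and is passed to the instruction encoders in
 * halfword units.
 */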
static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
struct rv_jit_context *ctx)
{
switch (cond) {
case BPF_JEQ:
emit(rv_beq(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JGT:
emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
return;
case BPF_JLT:
emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JGE:
emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JLE:
emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
return;
case BPF_JNE:
emit(rv_bne(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JSGT:
emit(rv_blt(rs, rd, rvoff >> 1), ctx);
return;
case BPF_JSLT:
emit(rv_blt(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JSGE:
emit(rv_bge(rd, rs, rvoff >> 1), ctx);
return;
case BPF_JSLE:
emit(rv_bge(rs, rd, rvoff >> 1), ctx);
}
}
static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
struct rv_jit_context *ctx)
{
s64 upper, lower;
if (is_13b_int(rvoff)) {
emit_bcc(cond, rd, rs, rvoff, ctx);
return;
}
/* Adjust for jal */
rvoff -= 4;
/* Transform, e.g.:
* bne rd,rs,foo
* to
* beq rd,rs,<.L1>
* (auipc foo)
* jal(r) foo
* .L1
*/
cond = invert_bpf_cond(cond);
if (is_21b_int(rvoff)) {
emit_bcc(cond, rd, rs, 8, ctx);
emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
return;
}
/* 32b case: no need for an additional rvoff adjustment, since we
* get that from the auipc at PC', where PC = PC' + 4.
*/
upper = (rvoff + (1 << 11)) >> 12;
lower = rvoff & 0xfff;
emit_bcc(cond, rd, rs, 12, ctx);
emit(rv_auipc(RV_REG_T1, upper), ctx);
emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
}
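/* Zero-extend the low 32 bits of @reg by shifting left then right by 32 */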
static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
{
emit_slli(reg, reg, 32, ctx);
emit_srli(reg, reg, 32, ctx);
}
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
int tc_ninsn, off, start_insn = ctx->ninsns;
u8 tcc = rv_tail_call_reg(ctx);
/* a0: &ctx
* a1: &array
* a2: index
*
* if (index >= array->map.max_entries)
* goto out;
*/
tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
ctx->offset[0];
emit_zext_32(RV_REG_A2, ctx);
off = offsetof(struct bpf_array, map.max_entries);
if (is_12b_check(off, insn))
return -1;
emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);
/* if (--TCC < 0)
* goto out;
*/
emit_addi(RV_REG_TCC, tcc, -1, ctx);
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
/* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
off = offsetof(struct bpf_array, ptrs);
if (is_12b_check(off, insn))
return -1;
emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);
/* goto *(prog->bpf_func + 4); */
off = offsetof(struct bpf_prog, bpf_func);
if (is_12b_check(off, insn))
return -1;
emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
__build_epilogue(true, ctx);
return 0;
}
static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
struct rv_jit_context *ctx)
{
u8 code = insn->code;
switch (code) {
case BPF_JMP | BPF_JA:
case BPF_JMP | BPF_CALL:
case BPF_JMP | BPF_EXIT:
case BPF_JMP | BPF_TAIL_CALL:
break;
default:
*rd = bpf_to_rv_reg(insn->dst_reg, ctx);
}
if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
code & BPF_LDX || code & BPF_STX)
*rs = bpf_to_rv_reg(insn->src_reg, ctx);
}
static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
emit_mv(RV_REG_T2, *rd, ctx);
emit_zext_32(RV_REG_T2, ctx);
emit_mv(RV_REG_T1, *rs, ctx);
emit_zext_32(RV_REG_T1, ctx);
*rd = RV_REG_T2;
*rs = RV_REG_T1;
}
static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
{
emit_addiw(RV_REG_T2, *rd, 0, ctx);
emit_addiw(RV_REG_T1, *rs, 0, ctx);
*rd = RV_REG_T2;
*rs = RV_REG_T1;
}
static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
{
emit_mv(RV_REG_T2, *rd, ctx);
emit_zext_32(RV_REG_T2, ctx);
emit_zext_32(RV_REG_T1, ctx);
*rd = RV_REG_T2;
}
static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
{
emit_addiw(RV_REG_T2, *rd, 0, ctx);
*rd = RV_REG_T2;
}
static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
s64 upper, lower;
if (rvoff && fixed_addr && is_21b_int(rvoff)) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
upper = (rvoff + (1 << 11)) >> 12;
lower = rvoff & 0xfff;
emit(rv_auipc(RV_REG_T1, upper), ctx);
emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
return 0;
}
pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
return -ERANGE;
}
static bool is_signed_bpf_cond(u8 cond)
{
return cond == BPF_JSGT || cond == BPF_JSLT ||
cond == BPF_JSGE || cond == BPF_JSLE;
}
static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
s64 off = 0;
u64 ip;
if (addr && ctx->insns && ctx->ro_insns) {
/*
* Use the ro_insns(RX) to calculate the offset as the BPF
* program will finally run from this memory region.
*/
ip = (u64)(long)(ctx->ro_insns + ctx->ninsns);
off = addr - ip;
}
return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}
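/*
 * Emit the AMO (or LR/SC for BPF_CMPXCHG) sequence for a BPF atomic
 * operation on *(u32/u64 *)(dst_reg + off); when the offset is non-zero
 * the effective address is first computed into t1.
 */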
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
struct rv_jit_context *ctx)
{
u8 r0;
int jmp_offset;
if (off) {
if (is_12b_int(off)) {
emit_addi(RV_REG_T1, rd, off, ctx);
} else {
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
}
rd = RV_REG_T1;
}
switch (imm) {
/* lock *(u32/u64 *)(dst_reg + off16) <op>= src_reg */
case BPF_ADD:
emit(is64 ? rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0) :
rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
break;
case BPF_AND:
emit(is64 ? rv_amoand_d(RV_REG_ZERO, rs, rd, 0, 0) :
rv_amoand_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
break;
case BPF_OR:
emit(is64 ? rv_amoor_d(RV_REG_ZERO, rs, rd, 0, 0) :
rv_amoor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
break;
case BPF_XOR:
emit(is64 ? rv_amoxor_d(RV_REG_ZERO, rs, rd, 0, 0) :
rv_amoxor_w(RV_REG_ZERO, rs, rd, 0, 0), ctx);
break;
/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
case BPF_ADD | BPF_FETCH:
emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
emit_zext_32(rs, ctx);
break;
case BPF_AND | BPF_FETCH:
emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
rv_amoand_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
emit_zext_32(rs, ctx);
break;
case BPF_OR | BPF_FETCH:
emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
rv_amoor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
emit_zext_32(rs, ctx);
break;
case BPF_XOR | BPF_FETCH:
emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
emit_zext_32(rs, ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
case BPF_XCHG:
emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
if (!is64)
emit_zext_32(rs, ctx);
break;
/* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */
case BPF_CMPXCHG:
r0 = bpf_to_rv_reg(BPF_REG_0, ctx);
emit(is64 ? rv_addi(RV_REG_T2, r0, 0) :
rv_addiw(RV_REG_T2, r0, 0), ctx);
emit(is64 ? rv_lr_d(r0, 0, rd, 0, 0) :
rv_lr_w(r0, 0, rd, 0, 0), ctx);
jmp_offset = ninsns_rvoff(8);
emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
jmp_offset = ninsns_rvoff(-6);
emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
emit(rv_fence(0x3, 0x3), ctx);
break;
}
}
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
struct rv_jit_context *ctx,
int dst_reg, int insn_len)
{
struct exception_table_entry *ex;
unsigned long pc;
off_t ins_offset;
off_t fixup_offset;
if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
(BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
return 0;
if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
return -EINVAL;
if (WARN_ON_ONCE(insn_len > ctx->ninsns))
return -EINVAL;
if (WARN_ON_ONCE(!rvc_enabled() && insn_len == 1))
return -EINVAL;
ex = &ctx->prog->aux->extable[ctx->nexentries];
pc = (unsigned long)&ctx->ro_insns[ctx->ninsns - insn_len];
/*
* This is the relative offset of the instruction that may fault from
* the exception table itself. This will be written to the exception
* table and if this instruction faults, the destination register will
* be set to '0' and the execution will jump to the next instruction.
*/
ins_offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(ins_offset >= 0 || ins_offset < INT_MIN))
return -ERANGE;
/*
* Since the extable follows the program, the fixup offset is always
* negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
* to keep things simple, and put the destination register in the upper
* bits. We don't need to worry about buildtime or runtime sort
* modifying the upper bits because the table is already sorted, and
* isn't part of the main exception table.
*
* The fixup_offset is set to the next instruction from the instruction
* that may fault. The execution will jump to this after handling the
* fault.
*/
fixup_offset = (long)&ex->fixup - (pc + insn_len * sizeof(u16));
if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, fixup_offset))
return -ERANGE;
/*
* The offsets above have been calculated using the RO buffer but we
* need to use the R/W buffer for writes.
* Switch ex to the RW buffer for writing.
*/
ex = (void *)ctx->insns + ((void *)ex - (void *)ctx->ro_insns);
ex->insn = ins_offset;
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, fixup_offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
ctx->nexentries++;
return 0;
}
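/*
 * Build the two-instruction patch site used by bpf_arch_text_poke(): two
 * nops when there is no target, otherwise an auipc+jalr pair jumping to
 * @target (linking through t0 for calls, x0 for plain jumps).
 */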
static int gen_jump_or_nops(void *target, void *ip, u32 *insns, bool is_call)
{
s64 rvoff;
struct rv_jit_context ctx;
ctx.ninsns = 0;
ctx.insns = (u16 *)insns;
if (!target) {
emit(rv_nop(), &ctx);
emit(rv_nop(), &ctx);
return 0;
}
rvoff = (s64)(target - ip);
return emit_jump_and_link(is_call ? RV_REG_T0 : RV_REG_ZERO, rvoff, false, &ctx);
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
void *old_addr, void *new_addr)
{
u32 old_insns[RV_FENTRY_NINSNS], new_insns[RV_FENTRY_NINSNS];
bool is_call = poke_type == BPF_MOD_CALL;
int ret;
if (!is_kernel_text((unsigned long)ip) &&
!is_bpf_text_address((unsigned long)ip))
return -ENOTSUPP;
ret = gen_jump_or_nops(old_addr, ip, old_insns, is_call);
if (ret)
return ret;
if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4))
return -EFAULT;
ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
if (ret)
return ret;
cpus_read_lock();
mutex_lock(&text_mutex);
if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4))
ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS);
mutex_unlock(&text_mutex);
cpus_read_unlock();
return ret;
}
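/* Spill the first @nregs argument registers (a0, a1, ...) to the stack, starting at FP - args_off */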
static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
{
int i;
for (i = 0; i < nregs; i++) {
emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
args_off -= 8;
}
}
static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
{
int i;
for (i = 0; i < nregs; i++) {
emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
args_off -= 8;
}
}
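/*
 * Emit the glue for one fentry/fexit/fmod_ret program: store the BPF cookie,
 * call bpf_trampoline_enter(), skip the program if that returns 0, run the
 * program on the saved arguments, optionally save its return value at
 * -retval_off, and finish with bpf_trampoline_exit().
 */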
static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
{
int ret, branch_off;
struct bpf_prog *p = l->link.prog;
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
if (l->cookie) {
emit_imm(RV_REG_T1, l->cookie, ctx);
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
} else {
emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
}
/* arg1: prog */
emit_imm(RV_REG_A0, (const s64)p, ctx);
/* arg2: &run_ctx */
emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
if (ret)
return ret;
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
branch_off = ctx->ninsns;
/* nop reserved for conditional jump */
emit(rv_nop(), ctx);
/* store prog start time */
emit_mv(RV_REG_S1, RV_REG_A0, ctx);
/* arg1: &args_off */
emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
if (!p->jited)
/* arg2: progs[i]->insnsi for interpreter */
emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
ret = emit_call((const u64)p->bpf_func, true, ctx);
if (ret)
return ret;
if (save_ret)
emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
/* update branch with beqz */
if (ctx->insns) {
int offset = ninsns_rvoff(ctx->ninsns - branch_off);
u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
*(u32 *)(ctx->insns + branch_off) = insn;
}
/* arg1: prog */
emit_imm(RV_REG_A0, (const s64)p, ctx);
/* arg2: prog start time */
emit_mv(RV_REG_A1, RV_REG_S1, ctx);
/* arg3: &run_ctx */
emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);
return ret;
}
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
const struct btf_func_model *m,
struct bpf_tramp_links *tlinks,
void *func_addr, u32 flags,
struct rv_jit_context *ctx)
{
int i, ret, offset;
int *branches_off = NULL;
int stack_size = 0, nregs = m->nr_args;
int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
void *orig_call = func_addr;
bool save_ret;
u32 insn;
/* Two types of generated trampoline stack layout:
*
* 1. trampoline called from function entry
* --------------------------------------
* FP + 8 [ RA to parent func ] return address to parent
* function
* FP + 0 [ FP of parent func ] frame pointer of parent
* function
* FP - 8 [ T0 to traced func ] return address of traced
* function
* FP - 16 [ FP of traced func ] frame pointer of traced
* function
* --------------------------------------
*
* 2. trampoline called directly
* --------------------------------------
* FP - 8 [ RA to caller func ] return address to caller
* function
* FP - 16 [ FP of caller func ] frame pointer of caller
* function
* --------------------------------------
*
* FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
* BPF_TRAMP_F_RET_FENTRY_RET
* [ argN ]
* [ ... ]
* FP - args_off [ arg1 ]
*
* FP - nregs_off [ regs count ]
*
* FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
*
* FP - run_ctx_off [ bpf_tramp_run_ctx ]
*
* FP - sreg_off [ callee saved reg ]
*
* [ pads ] pads for 16 bytes alignment
*/
if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
return -ENOTSUPP;
/* extra registers for struct arguments */
for (i = 0; i < m->nr_args; i++)
if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
nregs += round_up(m->arg_size[i], 8) / 8 - 1;
/* 8 arguments passed by registers */
if (nregs > 8)
return -ENOTSUPP;
/* room in the trampoline frame to store return address and frame pointer */
stack_size += 16;
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
if (save_ret) {
stack_size += 8;
retval_off = stack_size;
}
stack_size += nregs * 8;
args_off = stack_size;
stack_size += 8;
nregs_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG) {
stack_size += 8;
ip_off = stack_size;
}
stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
run_ctx_off = stack_size;
stack_size += 8;
sreg_off = stack_size;
stack_size = round_up(stack_size, 16);
if (func_addr) {
/* For the trampoline called from function entry,
* the frame of traced function and the frame of
* trampoline need to be considered.
*/
emit_addi(RV_REG_SP, RV_REG_SP, -16, ctx);
emit_sd(RV_REG_SP, 8, RV_REG_RA, ctx);
emit_sd(RV_REG_SP, 0, RV_REG_FP, ctx);
emit_addi(RV_REG_FP, RV_REG_SP, 16, ctx);
emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
emit_sd(RV_REG_SP, stack_size - 8, RV_REG_T0, ctx);
emit_sd(RV_REG_SP, stack_size - 16, RV_REG_FP, ctx);
emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
} else {
/* For the trampoline called directly, just handle
* the frame of trampoline.
*/
emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
emit_sd(RV_REG_SP, stack_size - 8, RV_REG_RA, ctx);
emit_sd(RV_REG_SP, stack_size - 16, RV_REG_FP, ctx);
emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
}
/* callee saved register S1 to pass start time */
emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
/* store ip address of the traced function */
if (flags & BPF_TRAMP_F_IP_ARG) {
emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
}
emit_li(RV_REG_T1, nregs, ctx);
emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
store_args(nregs, args_off, ctx);
/* skip to actual body of traced function */
if (flags & BPF_TRAMP_F_SKIP_FRAME)
orig_call += RV_FENTRY_NINSNS * 4;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_imm(RV_REG_A0, (const s64)im, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
if (ret)
return ret;
}
for (i = 0; i < fentry->nr_links; i++) {
ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
if (ret)
return ret;
}
if (fmod_ret->nr_links) {
branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
if (!branches_off)
return -ENOMEM;
/* cleanup to avoid garbage return value confusion */
emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
for (i = 0; i < fmod_ret->nr_links; i++) {
ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
run_ctx_off, true, ctx);
if (ret)
goto out;
emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
branches_off[i] = ctx->ninsns;
/* nop reserved for conditional jump */
emit(rv_nop(), ctx);
}
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
restore_args(nregs, args_off, ctx);
ret = emit_call((const u64)orig_call, true, ctx);
if (ret)
goto out;
emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
im->ip_after_call = ctx->insns + ctx->ninsns;
/* 2 nops reserved for auipc+jalr pair */
emit(rv_nop(), ctx);
emit(rv_nop(), ctx);
}
/* update branches saved in invoke_bpf_mod_ret with bnez */
for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
*(u32 *)(ctx->insns + branches_off[i]) = insn;
}
for (i = 0; i < fexit->nr_links; i++) {
ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
run_ctx_off, false, ctx);
if (ret)
goto out;
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->insns + ctx->ninsns;
emit_imm(RV_REG_A0, (const s64)im, ctx);
ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
if (ret)
goto out;
}
if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_args(nregs, args_off, ctx);
if (save_ret)
emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
if (func_addr) {
/* trampoline called from function entry */
emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
emit_ld(RV_REG_RA, 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, 0, RV_REG_SP, ctx);
emit_addi(RV_REG_SP, RV_REG_SP, 16, ctx);
if (flags & BPF_TRAMP_F_SKIP_FRAME)
/* return to parent function */
emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
else
/* return to traced function */
emit_jalr(RV_REG_ZERO, RV_REG_T0, 0, ctx);
} else {
/* trampoline called directly */
emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
}
ret = ctx->ninsns;
out:
kfree(branches_off);
return ret;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
void *image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
void *func_addr)
{
int ret;
struct rv_jit_context ctx;
ctx.ninsns = 0;
ctx.insns = NULL;
ctx.ro_insns = NULL;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
return ret;
if (ninsns_rvoff(ret) > (long)image_end - (long)image)
return -EFBIG;
ctx.ninsns = 0;
/*
* The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the
* JITed instructions and later copies it to a RX region (ctx.ro_insns).
* It also uses ctx.ro_insns to calculate offsets for jumps etc. As the
* trampoline image uses the same memory area for writing and execution,
* both ctx.insns and ctx.ro_insns can be set to image.
*/
ctx.insns = image;
ctx.ro_insns = image;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
return ret;
bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
return ninsns_rvoff(ret);
}
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool extra_pass)
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
struct bpf_prog_aux *aux = ctx->prog->aux;
u8 rd = -1, rs = -1, code = insn->code;
s16 off = insn->off;
s32 imm = insn->imm;
init_regs(&rd, &rs, insn, ctx);
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext */
emit_zext_32(rd, ctx);
break;
}
switch (insn->off) {
case 0:
emit_mv(rd, rs, ctx);
break;
case 8:
case 16:
emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx);
emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx);
break;
case 32:
emit_addiw(rd, rs, 0, ctx);
break;
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
emit_add(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
if (is64)
emit_sub(rd, rd, rs, ctx);
else
emit_subw(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit_and(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit_or(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit_xor(rd, rd, rs, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
if (off)
emit(is64 ? rv_div(rd, rd, rs) : rv_divw(rd, rd, rs), ctx);
else
emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
if (off)
emit(is64 ? rv_rem(rd, rd, rs) : rv_remw(rd, rd, rs), ctx);
else
emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = -dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
emit_sub(rd, RV_REG_ZERO, rd, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
emit_slli(rd, rd, 48, ctx);
emit_srli(rd, rd, 48, ctx);
break;
case 32:
if (!aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case 64:
/* Do nothing */
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
emit_li(RV_REG_T2, 0, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
if (imm == 16)
goto out_be;
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
if (imm == 32)
goto out_be;
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
emit_srli(rd, rd, 8, ctx);
out_be:
emit_andi(RV_REG_T1, rd, 0xff, ctx);
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
emit_mv(rd, RV_REG_T2, ctx);
break;
/* dst = imm */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_MOV | BPF_K:
emit_imm(rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
if (is_12b_int(imm)) {
emit_addi(rd, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_add(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
if (is_12b_int(-imm)) {
emit_addi(rd, rd, -imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_sub(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
if (is_12b_int(imm)) {
emit_andi(rd, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_and(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
if (is_12b_int(imm)) {
emit(rv_ori(rd, rd, imm), ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_or(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
if (is_12b_int(imm)) {
emit(rv_xori(rd, rd, imm), ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_xor(rd, rd, RV_REG_T1, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
emit_imm(RV_REG_T1, imm, ctx);
emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
rv_mulw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
emit_imm(RV_REG_T1, imm, ctx);
if (off)
emit(is64 ? rv_div(rd, rd, RV_REG_T1) :
rv_divw(rd, rd, RV_REG_T1), ctx);
else
emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
rv_divuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
emit_imm(RV_REG_T1, imm, ctx);
if (off)
emit(is64 ? rv_rem(rd, rd, RV_REG_T1) :
rv_remw(rd, rd, RV_REG_T1), ctx);
else
emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
rv_remuw(rd, rd, RV_REG_T1), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
emit_slli(rd, rd, imm, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
if (is64)
emit_srli(rd, rd, imm, ctx);
else
emit(rv_srliw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
if (is64)
emit_srai(rd, rd, imm, ctx);
else
emit(rv_sraiw(rd, rd, imm), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* JUMP off */
case BPF_JMP | BPF_JA:
case BPF_JMP32 | BPF_JA:
if (BPF_CLASS(code) == BPF_JMP)
rvoff = rv_offset(i, off, ctx);
else
rvoff = rv_offset(i, imm, ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
rvoff = rv_offset(i, off, ctx);
if (!is64) {
s = ctx->ninsns;
if (is_signed_bpf_cond(BPF_OP(code)))
emit_sext_32_rd_rs(&rd, &rs, ctx);
else
emit_zext_32_rd_rs(&rd, &rs, ctx);
e = ctx->ninsns;
/* Adjust for extra insns */
rvoff -= ninsns_rvoff(e - s);
}
if (BPF_OP(code) == BPF_JSET) {
/* Adjust for and */
rvoff -= 4;
emit_and(RV_REG_T1, rd, rs, ctx);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
ctx);
} else {
emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
}
break;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
if (imm) {
emit_imm(RV_REG_T1, imm, ctx);
rs = RV_REG_T1;
} else {
/* If imm is 0, simply use zero register. */
rs = RV_REG_ZERO;
}
if (!is64) {
if (is_signed_bpf_cond(BPF_OP(code)))
emit_sext_32_rd(&rd, ctx);
else
emit_zext_32_rd_t1(&rd, ctx);
}
e = ctx->ninsns;
/* Adjust for extra insns */
rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
break;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
rvoff = rv_offset(i, off, ctx);
s = ctx->ninsns;
if (is_12b_int(imm)) {
emit_andi(RV_REG_T1, rd, imm, ctx);
} else {
emit_imm(RV_REG_T1, imm, ctx);
emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
}
/* For jset32, we should clear the upper 32 bits of t1, but
* sign-extension is sufficient here and saves one instruction,
* as t1 is used only in comparison against zero.
*/
if (!is64 && imm < 0)
emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
e = ctx->ninsns;
rvoff -= ninsns_rvoff(e - s);
emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
break;
/* function call */
case BPF_JMP | BPF_CALL:
{
bool fixed_addr;
u64 addr;
mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&addr, &fixed_addr);
if (ret < 0)
return ret;
ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(i, ctx))
return -1;
break;
/* function return */
case BPF_JMP | BPF_EXIT:
if (i == ctx->prog->len - 1)
break;
rvoff = epilogue_offset(ctx);
ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
struct bpf_insn insn1 = insn[1];
u64 imm64;
imm64 = (u64)insn1.imm << 32 | (u32)imm;
if (bpf_pseudo_func(insn)) {
/* fixed-length insns for extra jit pass */
ret = emit_addr(rd, imm64, extra_pass, ctx);
if (ret)
return ret;
} else {
emit_imm(rd, imm64, ctx);
}
return 1;
}
/* LDX: dst = *(unsigned size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/* LDSX: dst = *(signed size *)(src + off) */
case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
{
int insn_len, insns_start;
bool sign_ext;
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lb(rd, off, rs), ctx);
else
emit(rv_lbu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lb(rd, 0, RV_REG_T1), ctx);
else
emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
break;
case BPF_H:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lh(rd, off, rs), ctx);
else
emit(rv_lhu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lh(rd, 0, RV_REG_T1), ctx);
else
emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
break;
case BPF_W:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lw(rd, off, rs), ctx);
else
emit(rv_lwu(rd, off, rs), ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
if (sign_ext)
emit(rv_lw(rd, 0, RV_REG_T1), ctx);
else
emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
insn_len = ctx->ninsns - insns_start;
break;
case BPF_DW:
if (is_12b_int(off)) {
insns_start = ctx->ninsns;
emit_ld(rd, off, rs, ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
insns_start = ctx->ninsns;
emit_ld(rd, 0, RV_REG_T1, ctx);
insn_len = ctx->ninsns - insns_start;
break;
}
ret = add_exception_handler(insn, ctx, rd, insn_len);
if (ret)
return ret;
if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1]))
return 1;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
emit(rv_sb(rd, off, RV_REG_T1), ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
break;
case BPF_ST | BPF_MEM | BPF_H:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
emit(rv_sh(rd, off, RV_REG_T1), ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
break;
case BPF_ST | BPF_MEM | BPF_W:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
emit_sw(rd, off, RV_REG_T1, ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
case BPF_ST | BPF_MEM | BPF_DW:
emit_imm(RV_REG_T1, imm, ctx);
if (is_12b_int(off)) {
emit_sd(rd, off, RV_REG_T1, ctx);
break;
}
emit_imm(RV_REG_T2, off, ctx);
emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
if (is_12b_int(off)) {
emit(rv_sb(rd, off, rs), ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit(rv_sb(RV_REG_T1, 0, rs), ctx);
break;
case BPF_STX | BPF_MEM | BPF_H:
if (is_12b_int(off)) {
emit(rv_sh(rd, off, rs), ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit(rv_sh(RV_REG_T1, 0, rs), ctx);
break;
case BPF_STX | BPF_MEM | BPF_W:
if (is_12b_int(off)) {
emit_sw(rd, off, rs, ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit_sw(RV_REG_T1, 0, rs, ctx);
break;
case BPF_STX | BPF_MEM | BPF_DW:
if (is_12b_int(off)) {
emit_sd(rd, off, rs, ctx);
break;
}
emit_imm(RV_REG_T1, off, ctx);
emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
emit_sd(RV_REG_T1, 0, rs, ctx);
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
emit_atomic(rd, rs, off, imm,
BPF_SIZE(code) == BPF_DW, ctx);
break;
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
}
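/*
 * Build the RV64 prologue: reserve the fentry nops (auipc+jalr patch site),
 * initialize the tail-call counter, allocate the stack frame, save the
 * callee-saved registers that the program uses, and set up the frame pointer
 * and, if needed, the BPF stack pointer.
 */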
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
if (bpf_stack_adjust)
mark_fp(ctx);
if (seen_reg(RV_REG_RA, ctx))
stack_adjust += 8;
stack_adjust += 8; /* RV_REG_FP */
if (seen_reg(RV_REG_S1, ctx))
stack_adjust += 8;
if (seen_reg(RV_REG_S2, ctx))
stack_adjust += 8;
if (seen_reg(RV_REG_S3, ctx))
stack_adjust += 8;
if (seen_reg(RV_REG_S4, ctx))
stack_adjust += 8;
if (seen_reg(RV_REG_S5, ctx))
stack_adjust += 8;
if (seen_reg(RV_REG_S6, ctx))
stack_adjust += 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
store_offset = stack_adjust - 8;
/* nops reserved for auipc+jalr pair */
for (i = 0; i < RV_FENTRY_NINSNS; i++)
emit(rv_nop(), ctx);
/* First instruction is always setting the tail-call-counter
* (TCC) register. This instruction is skipped for tail calls.
* Force using a 4-byte (non-compressed) instruction.
*/
emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);
if (seen_reg(RV_REG_RA, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
store_offset -= 8;
}
emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
store_offset -= 8;
if (seen_reg(RV_REG_S1, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S2, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S3, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S4, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S5, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
store_offset -= 8;
}
if (seen_reg(RV_REG_S6, ctx)) {
emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
store_offset -= 8;
}
emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
if (bpf_stack_adjust)
emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);
/* Program contains calls and tail calls, so RV_REG_TCC needs
* to be saved across calls.
*/
if (seen_tail_call(ctx) && seen_call(ctx))
emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
ctx->stack_size = stack_adjust;
}
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
| linux-master | arch/riscv/net/bpf_jit_comp64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* BPF JIT compiler for RV32G
*
* Copyright (c) 2020 Luke Nelson <[email protected]>
* Copyright (c) 2020 Xi Wang <[email protected]>
*
* The code is based on the BPF JIT compiler for RV64G by Björn Töpel and
* the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
*/
#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"
/*
* Stack layout during BPF program execution:
*
* high
* RV32 fp => +----------+
* | saved ra |
* | saved fp | RV32 callee-saved registers
* | ... |
* +----------+ <= (fp - 4 * NR_SAVED_REGISTERS)
* | hi(R6) |
* | lo(R6) |
* | hi(R7) | JIT scratch space for BPF registers
* | lo(R7) |
* | ... |
* BPF_REG_FP => +----------+ <= (fp - 4 * NR_SAVED_REGISTERS
* | | - 4 * BPF_JIT_SCRATCH_REGS)
* | |
* | ... | BPF program stack
* | |
* RV32 sp => +----------+
* | |
* | ... | Function call stack
* | |
* +----------+
* low
*/
enum {
/* Stack layout - these are offsets from top of JIT scratch space. */
BPF_R6_HI,
BPF_R6_LO,
BPF_R7_HI,
BPF_R7_LO,
BPF_R8_HI,
BPF_R8_LO,
BPF_R9_HI,
BPF_R9_LO,
BPF_AX_HI,
BPF_AX_LO,
/* Stack space for BPF_REG_6 through BPF_REG_9 and BPF_REG_AX. */
BPF_JIT_SCRATCH_REGS,
};
/* Number of callee-saved registers stored to stack: ra, fp, s1--s7. */
#define NR_SAVED_REGISTERS 9
/* Offset from fp for BPF registers stored on stack. */
#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define RV_REG_TCC RV_REG_T6
#define RV_REG_TCC_SAVED RV_REG_S7
static const s8 bpf2rv32[][2] = {
/* Return value from in-kernel function, and exit value from eBPF. */
[BPF_REG_0] = {RV_REG_S2, RV_REG_S1},
/* Arguments from eBPF program to in-kernel function. */
[BPF_REG_1] = {RV_REG_A1, RV_REG_A0},
[BPF_REG_2] = {RV_REG_A3, RV_REG_A2},
[BPF_REG_3] = {RV_REG_A5, RV_REG_A4},
[BPF_REG_4] = {RV_REG_A7, RV_REG_A6},
[BPF_REG_5] = {RV_REG_S4, RV_REG_S3},
/*
* Callee-saved registers that in-kernel function will preserve.
* Stored on the stack.
*/
[BPF_REG_6] = {STACK_OFFSET(BPF_R6_HI), STACK_OFFSET(BPF_R6_LO)},
[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
/* Read-only frame pointer to access BPF stack. */
[BPF_REG_FP] = {RV_REG_S6, RV_REG_S5},
/* Temporary register for blinding constants. Stored on the stack. */
[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
/*
* Temporary registers used by the JIT to operate on registers stored
* on the stack. Save t0 and t1 to be used as temporaries in generated
* code.
*/
[TMP_REG_1] = {RV_REG_T3, RV_REG_T2},
[TMP_REG_2] = {RV_REG_T5, RV_REG_T4},
};
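/* Return the high/low half of a BPF register mapping (register number or stack offset). */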
static s8 hi(const s8 *r)
{
return r[0];
}
static s8 lo(const s8 *r)
{
return r[1];
}
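/*
 * Load a 32-bit immediate into rd with a lui/addi pair; the upper part is
 * rounded so that the sign-extended 12-bit addi immediate produces the exact
 * value.
 */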
static void emit_imm(const s8 rd, s32 imm, struct rv_jit_context *ctx)
{
u32 upper = (imm + (1 << 11)) >> 12;
u32 lower = imm & 0xfff;
if (upper) {
emit(rv_lui(rd, upper), ctx);
emit(rv_addi(rd, rd, lower), ctx);
} else {
emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
}
}
static void emit_imm32(const s8 *rd, s32 imm, struct rv_jit_context *ctx)
{
/* Emit immediate into lower bits. */
emit_imm(lo(rd), imm, ctx);
/* Sign-extend into upper bits. */
if (imm >= 0)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
else
emit(rv_addi(hi(rd), RV_REG_ZERO, -1), ctx);
}
static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
struct rv_jit_context *ctx)
{
emit_imm(lo(rd), imm_lo, ctx);
emit_imm(hi(rd), imm_hi, ctx);
}
static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
{
int stack_adjust = ctx->stack_size;
const s8 *r0 = bpf2rv32[BPF_REG_0];
/* Set return value if not tail call. */
if (!is_tail_call) {
emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);
emit(rv_addi(RV_REG_A1, hi(r0), 0), ctx);
}
/* Restore callee-saved registers. */
emit(rv_lw(RV_REG_RA, stack_adjust - 4, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_FP, stack_adjust - 8, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S1, stack_adjust - 12, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S2, stack_adjust - 16, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S3, stack_adjust - 20, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S4, stack_adjust - 24, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S5, stack_adjust - 28, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S6, stack_adjust - 32, RV_REG_SP), ctx);
emit(rv_lw(RV_REG_S7, stack_adjust - 36, RV_REG_SP), ctx);
emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
if (is_tail_call) {
/*
* goto *(t0 + 4);
* Skips first instruction of prologue which initializes tail
* call counter. Assumes t0 contains address of target program,
* see emit_bpf_tail_call.
*/
emit(rv_jalr(RV_REG_ZERO, RV_REG_T0, 4), ctx);
} else {
emit(rv_jalr(RV_REG_ZERO, RV_REG_RA, 0), ctx);
}
}
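/*
 * BPF registers mapped to negative values in bpf2rv32 are spilled to the
 * stack: bpf_get_reg64()/bpf_get_reg32() load them into the given temporaries
 * and bpf_put_reg64()/bpf_put_reg32() write them back.
 */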
static bool is_stacked(s8 reg)
{
return reg < 0;
}
static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
struct rv_jit_context *ctx)
{
if (is_stacked(hi(reg))) {
emit(rv_lw(hi(tmp), hi(reg), RV_REG_FP), ctx);
emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
reg = tmp;
}
return reg;
}
static void bpf_put_reg64(const s8 *reg, const s8 *src,
struct rv_jit_context *ctx)
{
if (is_stacked(hi(reg))) {
emit(rv_sw(RV_REG_FP, hi(reg), hi(src)), ctx);
emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
}
}
static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
struct rv_jit_context *ctx)
{
if (is_stacked(lo(reg))) {
emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
reg = tmp;
}
return reg;
}
static void bpf_put_reg32(const s8 *reg, const s8 *src,
struct rv_jit_context *ctx)
{
if (is_stacked(lo(reg))) {
emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_sw(RV_REG_FP, hi(reg), RV_REG_ZERO), ctx);
} else if (!ctx->prog->aux->verifier_zext) {
emit(rv_addi(hi(reg), RV_REG_ZERO, 0), ctx);
}
}
static void emit_jump_and_link(u8 rd, s32 rvoff, bool force_jalr,
struct rv_jit_context *ctx)
{
s32 upper, lower;
if (rvoff && is_21b_int(rvoff) && !force_jalr) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return;
}
upper = (rvoff + (1 << 11)) >> 12;
lower = rvoff & 0xfff;
emit(rv_auipc(RV_REG_T1, upper), ctx);
emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
}
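/* ALU operation on a 64-bit register pair with a 32-bit immediate operand. */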
static void emit_alu_i64(const s8 *dst, s32 imm,
struct rv_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
switch (op) {
case BPF_MOV:
emit_imm32(rd, imm, ctx);
break;
case BPF_AND:
if (is_12b_int(imm)) {
emit(rv_andi(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
}
if (imm >= 0)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case BPF_OR:
if (is_12b_int(imm)) {
emit(rv_ori(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
}
if (imm < 0)
emit(rv_ori(hi(rd), RV_REG_ZERO, -1), ctx);
break;
case BPF_XOR:
if (is_12b_int(imm)) {
emit(rv_xori(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
}
if (imm < 0)
emit(rv_xori(hi(rd), hi(rd), -1), ctx);
break;
case BPF_LSH:
if (imm >= 32) {
emit(rv_slli(hi(rd), lo(rd), imm - 32), ctx);
emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
} else if (imm == 0) {
/* Do nothing. */
} else {
emit(rv_srli(RV_REG_T0, lo(rd), 32 - imm), ctx);
emit(rv_slli(hi(rd), hi(rd), imm), ctx);
emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
emit(rv_slli(lo(rd), lo(rd), imm), ctx);
}
break;
case BPF_RSH:
if (imm >= 32) {
emit(rv_srli(lo(rd), hi(rd), imm - 32), ctx);
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
} else if (imm == 0) {
/* Do nothing. */
} else {
emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
emit(rv_srli(lo(rd), lo(rd), imm), ctx);
emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
emit(rv_srli(hi(rd), hi(rd), imm), ctx);
}
break;
case BPF_ARSH:
if (imm >= 32) {
emit(rv_srai(lo(rd), hi(rd), imm - 32), ctx);
emit(rv_srai(hi(rd), hi(rd), 31), ctx);
} else if (imm == 0) {
/* Do nothing. */
} else {
emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
emit(rv_srli(lo(rd), lo(rd), imm), ctx);
emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
emit(rv_srai(hi(rd), hi(rd), imm), ctx);
}
break;
}
bpf_put_reg64(dst, rd, ctx);
}
static void emit_alu_i32(const s8 *dst, s32 imm,
struct rv_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
switch (op) {
case BPF_MOV:
emit_imm(lo(rd), imm, ctx);
break;
case BPF_ADD:
if (is_12b_int(imm)) {
emit(rv_addi(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_add(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_SUB:
if (is_12b_int(-imm)) {
emit(rv_addi(lo(rd), lo(rd), -imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_sub(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_AND:
if (is_12b_int(imm)) {
emit(rv_andi(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_OR:
if (is_12b_int(imm)) {
emit(rv_ori(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_XOR:
if (is_12b_int(imm)) {
emit(rv_xori(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_LSH:
if (is_12b_int(imm)) {
emit(rv_slli(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_sll(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_RSH:
if (is_12b_int(imm)) {
emit(rv_srli(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_srl(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
case BPF_ARSH:
if (is_12b_int(imm)) {
emit(rv_srai(lo(rd), lo(rd), imm), ctx);
} else {
emit_imm(RV_REG_T0, imm, ctx);
emit(rv_sra(lo(rd), lo(rd), RV_REG_T0), ctx);
}
break;
}
bpf_put_reg32(dst, rd, ctx);
}
static void emit_alu_r64(const s8 *dst, const s8 *src,
struct rv_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
switch (op) {
case BPF_MOV:
emit(rv_addi(lo(rd), lo(rs), 0), ctx);
emit(rv_addi(hi(rd), hi(rs), 0), ctx);
break;
case BPF_ADD:
if (rd == rs) {
emit(rv_srli(RV_REG_T0, lo(rd), 31), ctx);
emit(rv_slli(hi(rd), hi(rd), 1), ctx);
emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
emit(rv_slli(lo(rd), lo(rd), 1), ctx);
} else {
emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
emit(rv_add(hi(rd), hi(rd), hi(rs)), ctx);
emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
}
break;
case BPF_SUB:
emit(rv_sub(RV_REG_T1, hi(rd), hi(rs)), ctx);
emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
emit(rv_sub(hi(rd), RV_REG_T1, RV_REG_T0), ctx);
emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_AND:
emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_and(hi(rd), hi(rd), hi(rs)), ctx);
break;
case BPF_OR:
emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_or(hi(rd), hi(rd), hi(rs)), ctx);
break;
case BPF_XOR:
emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_xor(hi(rd), hi(rd), hi(rs)), ctx);
break;
case BPF_MUL:
emit(rv_mul(RV_REG_T0, hi(rs), lo(rd)), ctx);
emit(rv_mul(hi(rd), hi(rd), lo(rs)), ctx);
emit(rv_mulhu(RV_REG_T1, lo(rd), lo(rs)), ctx);
emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_add(hi(rd), hi(rd), RV_REG_T1), ctx);
break;
case BPF_LSH:
emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
emit(rv_sll(hi(rd), lo(rd), RV_REG_T0), ctx);
emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
emit(rv_jal(RV_REG_ZERO, 16), ctx);
emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
emit(rv_srli(RV_REG_T0, lo(rd), 1), ctx);
emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
emit(rv_srl(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
emit(rv_sll(hi(rd), hi(rd), lo(rs)), ctx);
emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_RSH:
emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
emit(rv_srl(lo(rd), hi(rd), RV_REG_T0), ctx);
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
emit(rv_jal(RV_REG_ZERO, 16), ctx);
emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
emit(rv_srl(hi(rd), hi(rd), lo(rs)), ctx);
break;
case BPF_ARSH:
emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
emit(rv_sra(lo(rd), hi(rd), RV_REG_T0), ctx);
emit(rv_srai(hi(rd), hi(rd), 31), ctx);
emit(rv_jal(RV_REG_ZERO, 16), ctx);
emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
emit(rv_sra(hi(rd), hi(rd), lo(rs)), ctx);
break;
case BPF_NEG:
emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
emit(rv_sltu(RV_REG_T0, RV_REG_ZERO, lo(rd)), ctx);
emit(rv_sub(hi(rd), RV_REG_ZERO, hi(rd)), ctx);
emit(rv_sub(hi(rd), hi(rd), RV_REG_T0), ctx);
break;
}
bpf_put_reg64(dst, rd, ctx);
}
static void emit_alu_r32(const s8 *dst, const s8 *src,
struct rv_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg32(src, tmp2, ctx);
switch (op) {
case BPF_MOV:
emit(rv_addi(lo(rd), lo(rs), 0), ctx);
break;
case BPF_ADD:
emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_SUB:
emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_AND:
emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_OR:
emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_XOR:
emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_MUL:
emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_DIV:
emit(rv_divu(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_MOD:
emit(rv_remu(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_LSH:
emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_RSH:
emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_ARSH:
emit(rv_sra(lo(rd), lo(rd), lo(rs)), ctx);
break;
case BPF_NEG:
emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
break;
}
bpf_put_reg32(dst, rd, ctx);
}
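/*
 * Emit a conditional branch on a 64-bit register pair: the upper halves are
 * compared first and the lower halves only decide when the upper halves are
 * equal (BPF_JSET instead tests each half with an AND).
 */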
static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
struct rv_jit_context *ctx, const u8 op)
{
int e, s = ctx->ninsns;
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);
/*
* NO_JUMP skips over the rest of the instructions and the
* emit_jump_and_link, meaning the BPF branch is not taken.
* JUMP skips directly to the emit_jump_and_link, meaning
* the BPF branch is taken.
*
* The fallthrough case results in the BPF branch being taken.
*/
#define NO_JUMP(idx) (6 + (2 * (idx)))
#define JUMP(idx) (2 + (2 * (idx)))
switch (op) {
case BPF_JEQ:
emit(rv_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JGT:
emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JLT:
emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JGE:
emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JLE:
emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JNE:
emit(rv_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
emit(rv_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSGT:
emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSLT:
emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSGE:
emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSLE:
emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSET:
emit(rv_and(RV_REG_T0, hi(rs1), hi(rs2)), ctx);
emit(rv_bne(RV_REG_T0, RV_REG_ZERO, JUMP(2)), ctx);
emit(rv_and(RV_REG_T0, lo(rs1), lo(rs2)), ctx);
emit(rv_beq(RV_REG_T0, RV_REG_ZERO, NO_JUMP(0)), ctx);
break;
}
#undef NO_JUMP
#undef JUMP
e = ctx->ninsns;
/* Adjust for extra insns. */
rvoff -= ninsns_rvoff(e - s);
emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
return 0;
}
static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
{
int e, s = ctx->ninsns;
bool far = false;
int off;
if (op == BPF_JSET) {
/*
* BPF_JSET is a special case: it has no inverse so we always
* treat it as a far branch.
*/
far = true;
} else if (!is_13b_int(rvoff)) {
op = invert_bpf_cond(op);
far = true;
}
/*
* For a far branch, the condition is negated and we jump over the
* branch itself, and the two instructions from emit_jump_and_link.
* For a near branch, just use rvoff.
*/
off = far ? 6 : (rvoff >> 1);
switch (op) {
case BPF_JEQ:
emit(rv_beq(rd, rs, off), ctx);
break;
case BPF_JGT:
emit(rv_bgtu(rd, rs, off), ctx);
break;
case BPF_JLT:
emit(rv_bltu(rd, rs, off), ctx);
break;
case BPF_JGE:
emit(rv_bgeu(rd, rs, off), ctx);
break;
case BPF_JLE:
emit(rv_bleu(rd, rs, off), ctx);
break;
case BPF_JNE:
emit(rv_bne(rd, rs, off), ctx);
break;
case BPF_JSGT:
emit(rv_bgt(rd, rs, off), ctx);
break;
case BPF_JSLT:
emit(rv_blt(rd, rs, off), ctx);
break;
case BPF_JSGE:
emit(rv_bge(rd, rs, off), ctx);
break;
case BPF_JSLE:
emit(rv_ble(rd, rs, off), ctx);
break;
case BPF_JSET:
emit(rv_and(RV_REG_T0, rd, rs), ctx);
emit(rv_beq(RV_REG_T0, RV_REG_ZERO, off), ctx);
break;
}
if (far) {
e = ctx->ninsns;
/* Adjust for extra insns. */
rvoff -= ninsns_rvoff(e - s);
emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
}
return 0;
}
static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
struct rv_jit_context *ctx, const u8 op)
{
int e, s = ctx->ninsns;
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);
e = ctx->ninsns;
/* Adjust for extra insns. */
rvoff -= ninsns_rvoff(e - s);
if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
return -1;
return 0;
}
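/*
 * Emit a helper call: BPF arguments R1-R4 are already in argument registers,
 * R5 is passed on the stack, and the tail-call counter is preserved around
 * the call.
 */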
static void emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
{
const s8 *r0 = bpf2rv32[BPF_REG_0];
const s8 *r5 = bpf2rv32[BPF_REG_5];
u32 upper = ((u32)addr + (1 << 11)) >> 12;
u32 lower = addr & 0xfff;
/* R1-R4 are already in the correct registers; R5 must be pushed to the stack. */
emit(rv_addi(RV_REG_SP, RV_REG_SP, -16), ctx);
emit(rv_sw(RV_REG_SP, 0, lo(r5)), ctx);
emit(rv_sw(RV_REG_SP, 4, hi(r5)), ctx);
/* Back up TCC. */
emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
/*
* Use lui/jalr pair to jump to absolute address. Don't use emit_imm as
* the number of emitted instructions should not depend on the value of
* addr.
*/
emit(rv_lui(RV_REG_T1, upper), ctx);
emit(rv_jalr(RV_REG_RA, RV_REG_T1, lower), ctx);
/* Restore TCC. */
emit(rv_addi(RV_REG_TCC, RV_REG_TCC_SAVED, 0), ctx);
/* Set return value and restore stack. */
emit(rv_addi(lo(r0), RV_REG_A0, 0), ctx);
emit(rv_addi(hi(r0), RV_REG_A1, 0), ctx);
emit(rv_addi(RV_REG_SP, RV_REG_SP, 16), ctx);
}
static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
{
/*
* R1 -> &ctx
* R2 -> &array
* R3 -> index
*/
int tc_ninsn, off, start_insn = ctx->ninsns;
const s8 *arr_reg = bpf2rv32[BPF_REG_2];
const s8 *idx_reg = bpf2rv32[BPF_REG_3];
tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
ctx->offset[0];
/* max_entries = array->map.max_entries; */
off = offsetof(struct bpf_array, map.max_entries);
if (is_12b_check(off, insn))
return -1;
emit(rv_lw(RV_REG_T1, off, lo(arr_reg)), ctx);
/*
* if (index >= max_entries)
* goto out;
*/
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
/*
* if (--tcc < 0)
* goto out;
*/
emit(rv_addi(RV_REG_TCC, RV_REG_TCC, -1), ctx);
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
/*
* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
off = offsetof(struct bpf_array, ptrs);
if (is_12b_check(off, insn))
return -1;
emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);
/*
* tcc = temp_tcc;
* goto *(prog->bpf_func + 4);
*/
off = offsetof(struct bpf_prog, bpf_func);
if (is_12b_check(off, insn))
return -1;
emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
/* Epilogue jumps to *(t0 + 4). */
__build_epilogue(true, ctx);
return 0;
}
static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
struct rv_jit_context *ctx, const u8 size)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
emit_imm(RV_REG_T0, off, ctx);
emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rs)), ctx);
switch (size) {
case BPF_B:
emit(rv_lbu(lo(rd), 0, RV_REG_T0), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case BPF_H:
emit(rv_lhu(lo(rd), 0, RV_REG_T0), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case BPF_W:
emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case BPF_DW:
emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
emit(rv_lw(hi(rd), 4, RV_REG_T0), ctx);
break;
}
bpf_put_reg64(dst, rd, ctx);
return 0;
}
static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
struct rv_jit_context *ctx, const u8 size,
const u8 mode)
{
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
if (mode == BPF_ATOMIC && size != BPF_W)
return -1;
emit_imm(RV_REG_T0, off, ctx);
emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rd)), ctx);
switch (size) {
case BPF_B:
emit(rv_sb(RV_REG_T0, 0, lo(rs)), ctx);
break;
case BPF_H:
emit(rv_sh(RV_REG_T0, 0, lo(rs)), ctx);
break;
case BPF_W:
switch (mode) {
case BPF_MEM:
emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
break;
case BPF_ATOMIC: /* Only BPF_ADD supported */
emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
ctx);
break;
}
break;
case BPF_DW:
emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
emit(rv_sw(RV_REG_T0, 4, hi(rs)), ctx);
break;
}
return 0;
}
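/* Byte-swap helpers for BPF_END: reverse the low 16 or 32 bits of rd. */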
static void emit_rev16(const s8 rd, struct rv_jit_context *ctx)
{
emit(rv_slli(rd, rd, 16), ctx);
emit(rv_slli(RV_REG_T1, rd, 8), ctx);
emit(rv_srli(rd, rd, 8), ctx);
emit(rv_add(RV_REG_T1, rd, RV_REG_T1), ctx);
emit(rv_srli(rd, RV_REG_T1, 16), ctx);
}
static void emit_rev32(const s8 rd, struct rv_jit_context *ctx)
{
emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 0), ctx);
emit(rv_andi(RV_REG_T0, rd, 255), ctx);
emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
emit(rv_srli(rd, rd, 8), ctx);
emit(rv_andi(RV_REG_T0, rd, 255), ctx);
emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
emit(rv_srli(rd, rd, 8), ctx);
emit(rv_andi(RV_REG_T0, rd, 255), ctx);
emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
emit(rv_srli(rd, rd, 8), ctx);
emit(rv_andi(RV_REG_T0, rd, 255), ctx);
emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
emit(rv_addi(rd, RV_REG_T1, 0), ctx);
}
static void emit_zext64(const s8 *dst, struct rv_jit_context *ctx)
{
const s8 *rd;
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
rd = bpf_get_reg64(dst, tmp1, ctx);
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
bpf_put_reg64(dst, rd, ctx);
}
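/*
 * Emit RV32 instructions for a single eBPF instruction. 64-bit division,
 * modulo and 64-bit atomics are not supported and fail with -EFAULT. Returns
 * 1 when the next eBPF instruction must be skipped (64-bit immediate loads).
 */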
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool extra_pass)
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
int s, e, rvoff, i = insn - ctx->prog->insnsi;
u8 code = insn->code;
s16 off = insn->off;
s32 imm = insn->imm;
const s8 *dst = bpf2rv32[insn->dst_reg];
const s8 *src = bpf2rv32[insn->src_reg];
const s8 *tmp1 = bpf2rv32[TMP_REG_1];
const s8 *tmp2 = bpf2rv32[TMP_REG_2];
switch (code) {
case BPF_ALU64 | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
if (BPF_SRC(code) == BPF_K) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
emit_alu_r64(dst, src, ctx, BPF_OP(code));
break;
case BPF_ALU64 | BPF_NEG:
emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
break;
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_K:
goto notsupported;
case BPF_ALU64 | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit_alu_i64(dst, imm, ctx, BPF_OP(code));
break;
case BPF_ALU | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext. */
emit_zext64(dst, ctx);
break;
}
fallthrough;
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_ARSH | BPF_X:
if (BPF_SRC(code) == BPF_K) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
emit_alu_r32(dst, src, ctx, BPF_OP(code));
break;
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_ARSH | BPF_K:
/*
* mul, div and mod are handled in the BPF_X case since there are
* no RISC-V I-type equivalents.
*/
emit_alu_i32(dst, imm, ctx, BPF_OP(code));
break;
case BPF_ALU | BPF_NEG:
/*
* src is ignored; choose tmp2 as a dummy register since it
* is not on the stack.
*/
emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
{
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
switch (imm) {
case 16:
emit(rv_slli(lo(rd), lo(rd), 16), ctx);
emit(rv_srli(lo(rd), lo(rd), 16), ctx);
fallthrough;
case 32:
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case 64:
/* Do nothing. */
break;
default:
pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
return -1;
}
bpf_put_reg64(dst, rd, ctx);
break;
}
case BPF_ALU | BPF_END | BPF_FROM_BE:
{
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
switch (imm) {
case 16:
emit_rev16(lo(rd), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case 32:
emit_rev32(lo(rd), ctx);
if (!ctx->prog->aux->verifier_zext)
emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
break;
case 64:
/* Swap upper and lower halves. */
emit(rv_addi(RV_REG_T0, lo(rd), 0), ctx);
emit(rv_addi(lo(rd), hi(rd), 0), ctx);
emit(rv_addi(hi(rd), RV_REG_T0, 0), ctx);
/* Swap each half. */
emit_rev32(lo(rd), ctx);
emit_rev32(hi(rd), ctx);
break;
default:
pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
return -1;
}
bpf_put_reg64(dst, rd, ctx);
break;
}
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
break;
case BPF_JMP | BPF_CALL:
{
bool fixed;
int ret;
u64 addr;
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
&fixed);
if (ret < 0)
return ret;
emit_call(fixed, addr, ctx);
break;
}
case BPF_JMP | BPF_TAIL_CALL:
if (emit_bpf_tail_call(i, ctx))
return -1;
break;
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_K:
rvoff = rv_offset(i, off, ctx);
if (BPF_SRC(code) == BPF_K) {
s = ctx->ninsns;
emit_imm32(tmp2, imm, ctx);
src = tmp2;
e = ctx->ninsns;
rvoff -= ninsns_rvoff(e - s);
}
if (is64)
emit_branch_r64(dst, src, rvoff, ctx, BPF_OP(code));
else
emit_branch_r32(dst, src, rvoff, ctx, BPF_OP(code));
break;
case BPF_JMP | BPF_EXIT:
if (i == ctx->prog->len - 1)
break;
rvoff = epilogue_offset(ctx);
emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
break;
case BPF_LD | BPF_IMM | BPF_DW:
{
struct bpf_insn insn1 = insn[1];
s32 imm_lo = imm;
s32 imm_hi = insn1.imm;
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
emit_imm64(rd, imm_hi, imm_lo, ctx);
bpf_put_reg64(dst, rd, ctx);
return 1;
}
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
return -1;
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
if (BPF_CLASS(code) == BPF_ST) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
BPF_MODE(code)))
return -1;
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
if (insn->imm != BPF_ADD) {
pr_info_once(
"bpf-jit: not supported: atomic operation %02x ***\n",
insn->imm);
return -EFAULT;
}
if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
BPF_MODE(code)))
return -1;
break;
/* No hardware support for 8-byte atomics in RV32. */
case BPF_STX | BPF_ATOMIC | BPF_DW:
/* Fallthrough. */
notsupported:
pr_info_once("bpf-jit: not supported: opcode %02x ***\n", code);
return -EFAULT;
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
}
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
const s8 *fp = bpf2rv32[BPF_REG_FP];
const s8 *r1 = bpf2rv32[BPF_REG_1];
int stack_adjust = 0;
int bpf_stack_adjust =
round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
/* Make space for callee-saved registers. */
stack_adjust += NR_SAVED_REGISTERS * sizeof(u32);
/* Make space for BPF registers on stack. */
stack_adjust += BPF_JIT_SCRATCH_REGS * sizeof(u32);
/* Make space for BPF stack. */
stack_adjust += bpf_stack_adjust;
/* Round up for stack alignment. */
stack_adjust = round_up(stack_adjust, STACK_ALIGN);
/*
* The first instruction sets the tail-call-counter (TCC) register.
* This instruction is skipped by tail calls.
*/
emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
/* Save callee-save registers. */
emit(rv_sw(RV_REG_SP, stack_adjust - 4, RV_REG_RA), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 8, RV_REG_FP), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 12, RV_REG_S1), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 16, RV_REG_S2), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 20, RV_REG_S3), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 24, RV_REG_S4), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 28, RV_REG_S5), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 32, RV_REG_S6), ctx);
emit(rv_sw(RV_REG_SP, stack_adjust - 36, RV_REG_S7), ctx);
/* Set fp: used as the base address for stacked BPF registers. */
emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
/* Set up BPF frame pointer. */
emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
/* Set up BPF context pointer. */
emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
ctx->stack_size = stack_adjust;
}
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
| linux-master | arch/riscv/net/bpf_jit_comp32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common functionality for RV32 and RV64 BPF JIT compilers
*
* Copyright (c) 2019 Björn Töpel <[email protected]>
*
*/
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/patch.h>
#include "bpf_jit.h"
/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS 32
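/*
 * Translate every eBPF instruction in the program, optionally recording the
 * emitted-instruction offset of each BPF instruction in @offset.
 */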
static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
const struct bpf_prog *prog = ctx->prog;
int i;
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
if (ret > 0)
i++;
if (offset)
offset[i] = ctx->ninsns;
if (ret < 0)
return ret;
}
return 0;
}
bool bpf_jit_needs_zext(void)
{
return true;
}
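/*
 * Main JIT entry point: offsets start from a pessimistic 32-instruction
 * estimate per eBPF instruction and the body is re-emitted for up to
 * NR_JIT_ITERATIONS passes until the image size stops changing, after which
 * the final image is packed into the read-only region.
 */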
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
unsigned int prog_size = 0, extable_size = 0;
bool tmp_blinded = false, extra_pass = false;
struct bpf_prog *tmp, *orig_prog = prog;
int pass = 0, prev_ninsns = 0, i;
struct rv_jit_data *jit_data;
struct rv_jit_context *ctx;
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
if (IS_ERR(tmp))
return orig_prog;
if (tmp != prog) {
tmp_blinded = true;
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
ctx = &jit_data->ctx;
if (ctx->offset) {
extra_pass = true;
prog_size = sizeof(*ctx->insns) * ctx->ninsns;
goto skip_init_ctx;
}
ctx->prog = prog;
ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (!ctx->offset) {
prog = orig_prog;
goto out_offset;
}
if (build_body(ctx, extra_pass, NULL)) {
prog = orig_prog;
goto out_offset;
}
for (i = 0; i < prog->len; i++) {
prev_ninsns += 32;
ctx->offset[i] = prev_ninsns;
}
for (i = 0; i < NR_JIT_ITERATIONS; i++) {
pass++;
ctx->ninsns = 0;
bpf_jit_build_prologue(ctx);
ctx->prologue_len = ctx->ninsns;
if (build_body(ctx, extra_pass, ctx->offset)) {
prog = orig_prog;
goto out_offset;
}
ctx->epilogue_offset = ctx->ninsns;
bpf_jit_build_epilogue(ctx);
if (ctx->ninsns == prev_ninsns) {
if (jit_data->header)
break;
/* obtain the actual image size */
extable_size = prog->aux->num_exentries *
sizeof(struct exception_table_entry);
prog_size = sizeof(*ctx->insns) * ctx->ninsns;
jit_data->ro_header =
bpf_jit_binary_pack_alloc(prog_size + extable_size,
&jit_data->ro_image, sizeof(u32),
&jit_data->header, &jit_data->image,
bpf_fill_ill_insns);
if (!jit_data->ro_header) {
prog = orig_prog;
goto out_offset;
}
/*
* Use the image (RW) for writing the JITed instructions, but also save
* the ro_image (RX) for calculating offsets in the image. The RW
* image is later copied to the RX image, from where the program
* will run; bpf_jit_binary_pack_finalize() performs this copy as the
* final step.
*/
ctx->ro_insns = (u16 *)jit_data->ro_image;
ctx->insns = (u16 *)jit_data->image;
/*
* Now that the image is allocated, it can
* potentially shrink further (auipc/jalr -> jal).
*/
}
prev_ninsns = ctx->ninsns;
}
if (i == NR_JIT_ITERATIONS) {
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
prog = orig_prog;
goto out_free_hdr;
}
if (extable_size)
prog->aux->extable = (void *)ctx->ro_insns + prog_size;
skip_init_ctx:
pass++;
ctx->ninsns = 0;
ctx->nexentries = 0;
bpf_jit_build_prologue(ctx);
if (build_body(ctx, extra_pass, NULL)) {
prog = orig_prog;
goto out_free_hdr;
}
bpf_jit_build_epilogue(ctx);
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
prog->bpf_func = (void *)ctx->ro_insns;
prog->jited = 1;
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header,
jit_data->header))) {
/* ro_header has been freed */
jit_data->ro_header = NULL;
prog = orig_prog;
goto out_offset;
}
/*
* The instructions have now been copied to the ROX region from
* where they will execute.
* Write any modified data cache blocks out to memory and
* invalidate the corresponding blocks in the instruction cache.
*/
bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
for (i = 0; i < prog->len; i++)
ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
kfree(ctx->offset);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
out_free_hdr:
if (jit_data->header) {
bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
sizeof(jit_data->header->size));
bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
}
goto out_offset;
}
u64 bpf_jit_alloc_exec_limit(void)
{
return BPF_JIT_REGION_SIZE;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
void bpf_jit_free_exec(void *addr)
{
return vfree(addr);
}
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
int ret;
mutex_lock(&text_mutex);
ret = patch_text_nosync(dst, src, len);
mutex_unlock(&text_mutex);
if (ret)
return ERR_PTR(-EINVAL);
return dst;
}
int bpf_arch_text_invalidate(void *dst, size_t len)
{
int ret;
mutex_lock(&text_mutex);
ret = patch_text_set_nosync(dst, 0, len);
mutex_unlock(&text_mutex);
return ret;
}
void bpf_jit_free(struct bpf_prog *prog)
{
if (prog->jited) {
struct rv_jit_data *jit_data = prog->aux->jit_data;
struct bpf_binary_header *hdr;
/*
* If we fail the final pass of JIT (from jit_subprogs),
* the program may not be finalized yet. Call finalize here
* before freeing it.
*/
if (jit_data) {
bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header);
kfree(jit_data);
}
hdr = bpf_jit_binary_pack_hdr(prog);
bpf_jit_binary_pack_free(hdr, NULL);
WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
}
bpf_prog_unlock_free(prog);
}
| linux-master | arch/riscv/net/bpf_jit_core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Heiko Stuebner <[email protected]>
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/errata_list.h>
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>
static bool errata_probe_pbmt(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PBMT))
return false;
if (arch_id != 0 || impid != 0)
return false;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT ||
stage == RISCV_ALTERNATIVES_MODULE)
return true;
return false;
}
static bool errata_probe_cmo(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
return false;
if (arch_id != 0 || impid != 0)
return false;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return false;
if (stage == RISCV_ALTERNATIVES_BOOT) {
riscv_cbom_block_size = L1_CACHE_BYTES;
riscv_noncoherent_supported();
}
return true;
}
static bool errata_probe_pmu(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
return false;
/* target-c9xx cores report arch_id and impid as 0 */
if (arch_id != 0 || impid != 0)
return false;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return false;
return true;
}
static u32 thead_errata_probe(unsigned int stage,
unsigned long archid, unsigned long impid)
{
u32 cpu_req_errata = 0;
if (errata_probe_pbmt(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
if (errata_probe_cmo(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
if (errata_probe_pmu(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
return cpu_req_errata;
}
void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
u32 tmp;
void *oldptr, *altptr;
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != THEAD_VENDOR_ID)
continue;
if (alt->patch_id >= ERRATA_THEAD_NUMBER)
continue;
tmp = (1U << alt->patch_id);
if (cpu_req_errata & tmp) {
oldptr = ALT_OLD_PTR(alt);
altptr = ALT_ALT_PTR(alt);
/* On vm-alternatives, the mmu isn't running yet */
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
memcpy(oldptr, altptr, alt->alt_len);
} else {
mutex_lock(&text_mutex);
patch_text_nosync(oldptr, altptr, alt->alt_len);
mutex_unlock(&text_mutex);
}
}
}
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
local_flush_icache_all();
}
| linux-master | arch/riscv/errata/thead/errata.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Sifive.
*/
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <asm/patch.h>
#include <asm/alternative.h>
#include <asm/vendorid_list.h>
#include <asm/errata_list.h>
struct errata_info_t {
char name[32];
bool (*check_func)(unsigned long arch_id, unsigned long impid);
};
static bool errata_cip_453_check_func(unsigned long arch_id, unsigned long impid)
{
/*
* Affected cores:
* Architecture ID: 0x8000000000000007
* Implementation ID: 0x20181004 <= impid <= 0x20191105
*/
if (arch_id != 0x8000000000000007 ||
(impid < 0x20181004 || impid > 0x20191105))
return false;
return true;
}
static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long impid)
{
/*
* Affected cores:
* Architecture ID: 0x8000000000000007 or 0x1
* Implementation ID: mimpid[23:0] <= 0x200630 and mimpid != 0x01200626
*/
if (arch_id != 0x8000000000000007 && arch_id != 0x1)
return false;
if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626)
return false;
return true;
}
static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = {
{
.name = "cip-453",
.check_func = errata_cip_453_check_func
},
{
.name = "cip-1200",
.check_func = errata_cip_1200_check_func
},
};
static u32 __init_or_module sifive_errata_probe(unsigned long archid,
unsigned long impid)
{
int idx;
u32 cpu_req_errata = 0;
for (idx = 0; idx < ERRATA_SIFIVE_NUMBER; idx++)
if (errata_list[idx].check_func(archid, impid))
cpu_req_errata |= (1U << idx);
return cpu_req_errata;
}
static void __init_or_module warn_miss_errata(u32 miss_errata)
{
int i;
pr_warn("----------------------------------------------------------------\n");
pr_warn("WARNING: Missing the following errata may cause potential issues\n");
for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++)
if (miss_errata & 0x1 << i)
pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name);
pr_warn("Please enable the corresponding Kconfig to apply them\n");
pr_warn("----------------------------------------------------------------\n");
}
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata;
u32 cpu_apply_errata = 0;
u32 tmp;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;
cpu_req_errata = sifive_errata_probe(archid, impid);
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != SIFIVE_VENDOR_ID)
continue;
if (alt->patch_id >= ERRATA_SIFIVE_NUMBER) {
WARN(1, "This errata id:%d is not in kernel errata list", alt->patch_id);
continue;
}
tmp = (1U << alt->patch_id);
if (cpu_req_errata & tmp) {
mutex_lock(&text_mutex);
patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt),
alt->alt_len);
mutex_unlock(&text_mutex);
cpu_apply_errata |= tmp;
}
}
if (stage != RISCV_ALTERNATIVES_MODULE &&
cpu_apply_errata != cpu_req_errata)
warn_miss_errata(cpu_req_errata - cpu_apply_errata);
}
| linux-master | arch/riscv/errata/sifive/errata.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Erratas to be applied for Andes CPU cores
*
* Copyright (C) 2023 Renesas Electronics Corporation.
*
* Author: Lad Prabhakar <[email protected]>
*/
#include <linux/memory.h>
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/errata_list.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sbi.h>
#include <asm/vendorid_list.h>
#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL
#define ANDESTECH_AX45MP_MIMPID 0x500UL
#define ANDESTECH_SBI_EXT_ANDES 0x0900031E
#define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1
static long ax45mp_iocp_sw_workaround(void)
{
struct sbiret ret;
/*
* The ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI call checks whether the IOCP is
* missing and the cache is controllable; only then is CMO applied to the
* platform.
*/
ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND,
0, 0, 0, 0, 0, 0);
return ret.error ? 0 : ret.value;
}
static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
{
if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
return false;
if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
return false;
if (!ax45mp_iocp_sw_workaround())
return false;
/* Set this just to make core cbo code happy */
riscv_cbom_block_size = 1;
riscv_noncoherent_supported();
return true;
}
void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
errata_probe_iocp(stage, archid, impid);
/* we have nothing to patch here at the moment, so just return */
}
| linux-master | arch/riscv/errata/andes/errata.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/kvm_vcpu_vector.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
STATS_DESC_COUNTER(VCPU, mmio_exit_user),
STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
STATS_DESC_COUNTER(VCPU, csr_exit_user),
STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
STATS_DESC_COUNTER(VCPU, signal_exits),
STATS_DESC_COUNTER(VCPU, exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vcpu_stats_desc),
};
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
bool loaded;
/*
* Preemption must be disabled here because it races with
* kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
* which also call vcpu_load()/vcpu_put().
*/
get_cpu();
loaded = (vcpu->cpu != -1);
if (loaded)
kvm_arch_vcpu_put(vcpu);
vcpu->arch.last_exit_cpu = -1;
memcpy(csr, reset_csr, sizeof(*csr));
memcpy(cntx, reset_cntx, sizeof(*cntx));
kvm_riscv_vcpu_fp_reset(vcpu);
kvm_riscv_vcpu_vector_reset(vcpu);
kvm_riscv_vcpu_timer_reset(vcpu);
kvm_riscv_vcpu_aia_reset(vcpu);
bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
kvm_riscv_vcpu_pmu_reset(vcpu);
vcpu->arch.hfence_head = 0;
vcpu->arch.hfence_tail = 0;
memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
/* Reset the guest CSRs for hotplug usecase */
if (loaded)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
kvm_riscv_vcpu_setup_isa(vcpu);
/* Setup vendor, arch, and implementation details */
vcpu->arch.mvendorid = sbi_get_mvendorid();
vcpu->arch.marchid = sbi_get_marchid();
vcpu->arch.mimpid = sbi_get_mimpid();
/* Setup VCPU hfence queue */
spin_lock_init(&vcpu->arch.hfence_lock);
/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
cntx = &vcpu->arch.guest_reset_context;
cntx->sstatus = SR_SPP | SR_SPIE;
cntx->hstatus = 0;
cntx->hstatus |= HSTATUS_VTW;
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
return -ENOMEM;
/* By default, make CY, TM, and IR counters accessible in VU mode */
reset_csr->scounteren = 0x7;
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
/* setup performance monitoring */
kvm_riscv_vcpu_pmu_init(vcpu);
/* Setup VCPU AIA */
rc = kvm_riscv_vcpu_aia_init(vcpu);
if (rc)
return rc;
/* Reset VCPU */
kvm_riscv_reset_vcpu(vcpu);
return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
/*
* The vcpu with id 0 is the designated boot cpu.
* Keep all vcpus with a non-zero id in the power-off state so that
* they can be brought up using the SBI HSM extension.
*/
if (vcpu->vcpu_idx != 0)
kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
/* Cleanup VCPU AIA context */
kvm_riscv_vcpu_aia_deinit(vcpu);
/* Cleanup VCPU timer */
kvm_riscv_vcpu_timer_deinit(vcpu);
kvm_riscv_vcpu_pmu_deinit(vcpu);
/* Free unused pages pre-allocated for G-stage page table mappings */
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
/* Free vector context space for host and guest kernel */
kvm_riscv_vcpu_free_vector_context(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
!vcpu->arch.power_off && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
if (ioctl == KVM_INTERRUPT) {
struct kvm_interrupt irq;
if (copy_from_user(&irq, argp, sizeof(irq)))
return -EFAULT;
if (irq.irq == KVM_INTERRUPT_SET)
return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
}
return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
long r = -EINVAL;
switch (ioctl) {
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
break;
if (ioctl == KVM_SET_ONE_REG)
r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
else
r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
break;
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned int n;
r = -EFAULT;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
break;
n = reg_list.n;
reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
break;
r = -E2BIG;
if (n < reg_list.n)
break;
r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
break;
}
default:
break;
}
return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
unsigned long mask, val;
if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
csr->hvip &= ~mask;
csr->hvip |= val;
}
/* Flush AIA high interrupts */
kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
unsigned long hvip;
struct kvm_vcpu_arch *v = &vcpu->arch;
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
/* Read current HVIP and VSIE CSRs */
csr->vsie = csr_read(CSR_VSIE);
/* Sync up HVIP.VSSIP bit changes done by the Guest */
hvip = csr_read(CSR_HVIP);
if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
if (hvip & (1UL << IRQ_VS_SOFT)) {
if (!test_and_set_bit(IRQ_VS_SOFT,
v->irqs_pending_mask))
set_bit(IRQ_VS_SOFT, v->irqs_pending);
} else {
if (!test_and_set_bit(IRQ_VS_SOFT,
v->irqs_pending_mask))
clear_bit(IRQ_VS_SOFT, v->irqs_pending);
}
}
/* Sync-up AIA high interrupts */
kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
/* Sync-up timer CSRs */
kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
/*
* We only allow VS-mode software, timer, and external
* interrupts when irq is one of the local interrupts
* defined by RISC-V privilege specification.
*/
if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
set_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
set_bit(irq, vcpu->arch.irqs_pending_mask);
kvm_vcpu_kick(vcpu);
return 0;
}
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
/*
* We only allow VS-mode software, timer, and external
* interrupts when irq is one of the local interrupts
* defined by RISC-V privilege specification.
*/
if (irq < IRQ_LOCAL_MAX &&
irq != IRQ_VS_SOFT &&
irq != IRQ_VS_TIMER &&
irq != IRQ_VS_EXT)
return -EINVAL;
clear_bit(irq, vcpu->arch.irqs_pending);
smp_mb__before_atomic();
set_bit(irq, vcpu->arch.irqs_pending_mask);
return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
unsigned long ie;
ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
(unsigned long)mask;
if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
return true;
/* Check AIA high interrupts */
return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.power_off = true;
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);
}
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
vcpu->arch.power_off = false;
kvm_vcpu_wake_up(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
if (vcpu->arch.power_off)
mp_state->mp_state = KVM_MP_STATE_STOPPED;
else
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
int ret = 0;
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.power_off = false;
break;
case KVM_MP_STATE_STOPPED:
kvm_riscv_vcpu_power_off(vcpu);
break;
default:
ret = -EINVAL;
}
return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
/* TODO: To be implemented later. */
return -EINVAL;
}
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
u64 henvcfg = 0;
if (riscv_isa_extension_available(isa, SVPBMT))
henvcfg |= ENVCFG_PBMTE;
if (riscv_isa_extension_available(isa, SSTC))
henvcfg |= ENVCFG_STCE;
if (riscv_isa_extension_available(isa, ZICBOM))
henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
if (riscv_isa_extension_available(isa, ZICBOZ))
henvcfg |= ENVCFG_CBZE;
csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
csr_write(CSR_VSSTATUS, csr->vsstatus);
csr_write(CSR_VSIE, csr->vsie);
csr_write(CSR_VSTVEC, csr->vstvec);
csr_write(CSR_VSSCRATCH, csr->vsscratch);
csr_write(CSR_VSEPC, csr->vsepc);
csr_write(CSR_VSCAUSE, csr->vscause);
csr_write(CSR_VSTVAL, csr->vstval);
csr_write(CSR_HVIP, csr->hvip);
csr_write(CSR_VSATP, csr->vsatp);
kvm_riscv_vcpu_update_config(vcpu->arch.isa);
kvm_riscv_gstage_update_hgatp(vcpu);
kvm_riscv_vcpu_timer_restore(vcpu);
kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_aia_load(vcpu, cpu);
vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
vcpu->cpu = -1;
kvm_riscv_vcpu_aia_put(vcpu);
kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
kvm_riscv_vcpu_timer_save(vcpu);
kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
vcpu->arch.isa);
kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);
csr->vsstatus = csr_read(CSR_VSSTATUS);
csr->vsie = csr_read(CSR_VSIE);
csr->vstvec = csr_read(CSR_VSTVEC);
csr->vsscratch = csr_read(CSR_VSSCRATCH);
csr->vsepc = csr_read(CSR_VSEPC);
csr->vscause = csr_read(CSR_VSCAUSE);
csr->vstval = csr_read(CSR_VSTVAL);
csr->hvip = csr_read(CSR_HVIP);
csr->vsatp = csr_read(CSR_VSATP);
}
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
rcuwait_wait_event(wait,
(!vcpu->arch.power_off) && (!vcpu->arch.pause),
TASK_INTERRUPTIBLE);
kvm_vcpu_srcu_read_lock(vcpu);
if (vcpu->arch.power_off || vcpu->arch.pause) {
/*
* We were awakened to handle a signal; request
* to sleep again later.
*/
kvm_make_request(KVM_REQ_SLEEP, vcpu);
}
}
if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
kvm_riscv_reset_vcpu(vcpu);
if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
kvm_riscv_gstage_update_hgatp(vcpu);
if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
kvm_riscv_fence_i_process(vcpu);
/*
* The generic KVM_REQ_TLB_FLUSH is the same as
* KVM_REQ_HFENCE_GVMA_VMID_ALL
*/
if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
kvm_riscv_hfence_vvma_all_process(vcpu);
if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
kvm_riscv_hfence_process(vcpu);
}
}
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
csr_write(CSR_HVIP, csr->hvip);
kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
/*
* Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
* the vCPU is running.
*
* This must be noinstr as instrumentation may make use of RCU, and this is not
* safe during the EQS.
*/
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
guest_state_enter_irqoff();
__kvm_riscv_switch_to(&vcpu->arch);
vcpu->arch.last_exit_cpu = vcpu->cpu;
guest_state_exit_irqoff();
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int ret;
struct kvm_cpu_trap trap;
struct kvm_run *run = vcpu->run;
/* Mark this VCPU ran at least once */
vcpu->arch.ran_atleast_once = true;
kvm_vcpu_srcu_read_lock(vcpu);
switch (run->exit_reason) {
case KVM_EXIT_MMIO:
/* Process MMIO value returned from user-space */
ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
break;
case KVM_EXIT_RISCV_SBI:
/* Process SBI value returned from user-space */
ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
break;
case KVM_EXIT_RISCV_CSR:
/* Process CSR value returned from user-space */
ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
break;
default:
ret = 0;
break;
}
if (ret) {
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
if (run->immediate_exit) {
kvm_vcpu_srcu_read_unlock(vcpu);
return -EINTR;
}
vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
/* Check conditions before entering the guest */
ret = xfer_to_guest_mode_handle_work(vcpu);
if (ret)
continue;
ret = 1;
kvm_riscv_gstage_vmid_update(vcpu);
kvm_riscv_check_vcpu_requests(vcpu);
preempt_disable();
/* Update AIA HW state before entering guest */
ret = kvm_riscv_vcpu_aia_update(vcpu);
if (ret <= 0) {
preempt_enable();
continue;
}
local_irq_disable();
/*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
* See the comment in kvm_vcpu_exiting_guest_mode() and
* Documentation/virt/kvm/vcpu-requests.rst
*/
vcpu->mode = IN_GUEST_MODE;
kvm_vcpu_srcu_read_unlock(vcpu);
smp_mb__after_srcu_read_unlock();
/*
* VCPU interrupts might have been updated asynchronously,
* so reflect them in HW.
*/
kvm_riscv_vcpu_flush_interrupts(vcpu);
/* Update HVIP CSR for current CPU */
kvm_riscv_update_hvip(vcpu);
if (ret <= 0 ||
kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
kvm_request_pending(vcpu) ||
xfer_to_guest_mode_work_pending()) {
vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
continue;
}
/*
* Clean up stale TLB entries
*
* Note: This should be done after G-stage VMID has been
* updated using kvm_riscv_gstage_vmid_ver_changed()
*/
kvm_riscv_local_tlb_sanitize(vcpu);
guest_timing_enter_irqoff();
kvm_riscv_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
/*
* Save SCAUSE, STVAL, HTVAL, and HTINST because we might
* get an interrupt between __kvm_riscv_switch_to() and
* local_irq_enable() which can potentially change CSRs.
*/
trap.sepc = vcpu->arch.guest_context.sepc;
trap.scause = csr_read(CSR_SCAUSE);
trap.stval = csr_read(CSR_STVAL);
trap.htval = csr_read(CSR_HTVAL);
trap.htinst = csr_read(CSR_HTINST);
/* Sync up interrupt state with HW */
kvm_riscv_vcpu_sync_interrupts(vcpu);
/*
* We must ensure that any pending interrupts are taken before
* we exit guest timing so that timer ticks are accounted as
* guest time. Transiently unmask interrupts so that any
* pending interrupts are taken.
*
* There's no barrier which ensures that pending interrupts are
* recognised, so we just hope that the CPU takes any pending
* interrupts between the enable and disable.
*/
local_irq_enable();
local_irq_disable();
guest_timing_exit_irqoff();
local_irq_enable();
preempt_enable();
kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
}
kvm_sigset_deactivate(vcpu);
vcpu_put(vcpu);
kvm_vcpu_srcu_read_unlock(vcpu);
return ret;
}
| linux-master | arch/riscv/kvm/vcpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 SiFive
*
* Authors:
* Vincent Chen <[email protected]>
* Greentime Hu <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
#ifdef CONFIG_RISCV_ISA_V
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
unsigned long *isa = vcpu->arch.isa;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
cntx->sstatus &= ~SR_VS;
if (riscv_isa_extension_available(isa, v)) {
cntx->sstatus |= SR_VS_INITIAL;
WARN_ON(!cntx->vector.datap);
memset(cntx->vector.datap, 0, riscv_v_vsize);
} else {
cntx->sstatus |= SR_VS_OFF;
}
}
static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
cntx->sstatus &= ~SR_VS;
cntx->sstatus |= SR_VS_CLEAN;
}
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
unsigned long *isa)
{
if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
if (riscv_isa_extension_available(isa, v))
__kvm_riscv_vector_save(cntx);
kvm_riscv_vcpu_vector_clean(cntx);
}
}
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
unsigned long *isa)
{
if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
if (riscv_isa_extension_available(isa, v))
__kvm_riscv_vector_restore(cntx);
kvm_riscv_vcpu_vector_clean(cntx);
}
}
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
/* No need to check host sstatus as it can be modified outside */
if (riscv_isa_extension_available(NULL, v))
__kvm_riscv_vector_save(cntx);
}
void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
if (riscv_isa_extension_available(NULL, v))
__kvm_riscv_vector_restore(cntx);
}
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *cntx)
{
cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
if (!cntx->vector.datap)
return -ENOMEM;
vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
if (!vcpu->arch.host_context.vector.datap)
return -ENOMEM;
return 0;
}
void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.guest_reset_context.vector.datap);
kfree(vcpu->arch.host_context.vector.datap);
}
#endif
static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
size_t reg_size,
void **reg_addr)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
size_t vlenb = riscv_v_vsize / 32;
if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
if (reg_size != sizeof(unsigned long))
return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
*reg_addr = &cntx->vector.vstart;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
*reg_addr = &cntx->vector.vl;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
*reg_addr = &cntx->vector.vtype;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
*reg_addr = &cntx->vector.vcsr;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
default:
return -ENOENT;
}
} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
if (reg_size != vlenb)
return -EINVAL;
*reg_addr = cntx->vector.datap +
(reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
} else {
return -ENOENT;
}
return 0;
}
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long *isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_VECTOR);
size_t reg_size = KVM_REG_SIZE(reg->id);
void *reg_addr;
int rc;
if (!riscv_isa_extension_available(isa, v))
return -ENOENT;
rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
if (rc)
return rc;
if (copy_to_user(uaddr, reg_addr, reg_size))
return -EFAULT;
return 0;
}
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long *isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_VECTOR);
size_t reg_size = KVM_REG_SIZE(reg->id);
void *reg_addr;
int rc;
if (!riscv_isa_extension_available(isa, v))
return -ENOENT;
rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
if (rc)
return rc;
if (copy_from_user(reg_addr, uaddr, reg_size))
return -EFAULT;
return 0;
}
| linux-master | arch/riscv/kvm/vcpu_vector.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/insn-def.h>
static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
struct kvm_memory_slot *memslot;
unsigned long hva, fault_addr;
bool writable;
gfn_t gfn;
int ret;
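/*
* Per the hypervisor extension, htval holds the faulting guest physical
* address shifted right by two; stval supplies the low two bits.
*/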
fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
gfn = fault_addr >> PAGE_SHIFT;
memslot = gfn_to_memslot(vcpu->kvm, gfn);
hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
if (kvm_is_error_hva(hva) ||
(trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
switch (trap->scause) {
case EXC_LOAD_GUEST_PAGE_FAULT:
return kvm_riscv_vcpu_mmio_load(vcpu, run,
fault_addr,
trap->htinst);
case EXC_STORE_GUEST_PAGE_FAULT:
return kvm_riscv_vcpu_mmio_store(vcpu, run,
fault_addr,
trap->htinst);
default:
return -EOPNOTSUPP;
};
}
ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
if (ret < 0)
return ret;
return 1;
}
/**
* kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
*
* @vcpu: The VCPU pointer
* @read_insn: Flag representing whether we are reading instruction
* @guest_addr: Guest address to read
* @trap: Output pointer to trap details
*/
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
bool read_insn,
unsigned long guest_addr,
struct kvm_cpu_trap *trap)
{
register unsigned long taddr asm("a0") = (unsigned long)trap;
register unsigned long ttmp asm("a1");
unsigned long flags, val, tmp, old_stvec, old_hstatus;
local_irq_save(flags);
old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);
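/*
* With the guest's HSTATUS in place and STVEC pointing at
* __kvm_riscv_unpriv_trap, the HLV/HLVX accesses below read guest
* memory; any fault is reported back through *trap, whose address is
* carried in a0/a1 for the trap handler.
*/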
if (read_insn) {
/*
* HLVX.HU instruction
* 0110010 00011 rs1 100 rd 1110011
*/
asm volatile ("\n"
".option push\n"
".option norvc\n"
"add %[ttmp], %[taddr], 0\n"
HLVX_HU(%[val], %[addr])
"andi %[tmp], %[val], 3\n"
"addi %[tmp], %[tmp], -3\n"
"bne %[tmp], zero, 2f\n"
"addi %[addr], %[addr], 2\n"
HLVX_HU(%[tmp], %[addr])
"sll %[tmp], %[tmp], 16\n"
"add %[val], %[val], %[tmp]\n"
"2:\n"
".option pop"
: [val] "=&r" (val), [tmp] "=&r" (tmp),
[taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
[addr] "+&r" (guest_addr) : : "memory");
if (trap->scause == EXC_LOAD_PAGE_FAULT)
trap->scause = EXC_INST_PAGE_FAULT;
} else {
/*
* HLV.D instruction
* 0110110 00000 rs1 100 rd 1110011
*
* HLV.W instruction
* 0110100 00000 rs1 100 rd 1110011
*/
asm volatile ("\n"
".option push\n"
".option norvc\n"
"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
HLV_D(%[val], %[addr])
#else
HLV_W(%[val], %[addr])
#endif
".option pop"
: [val] "=&r" (val),
[taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
: [addr] "r" (guest_addr) : "memory");
}
csr_write(CSR_STVEC, old_stvec);
csr_write(CSR_HSTATUS, old_hstatus);
local_irq_restore(flags);
return val;
}
/**
* kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
*
* @vcpu: The VCPU pointer
* @trap: Trap details
*/
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
struct kvm_cpu_trap *trap)
{
unsigned long vsstatus = csr_read(CSR_VSSTATUS);
/* Change Guest SSTATUS.SPP bit */
vsstatus &= ~SR_SPP;
if (vcpu->arch.guest_context.sstatus & SR_SPP)
vsstatus |= SR_SPP;
/* Change Guest SSTATUS.SPIE bit */
vsstatus &= ~SR_SPIE;
if (vsstatus & SR_SIE)
vsstatus |= SR_SPIE;
/* Clear Guest SSTATUS.SIE bit */
vsstatus &= ~SR_SIE;
/* Update Guest SSTATUS */
csr_write(CSR_VSSTATUS, vsstatus);
/* Update Guest SCAUSE, STVAL, and SEPC */
csr_write(CSR_VSCAUSE, trap->scause);
csr_write(CSR_VSTVAL, trap->stval);
csr_write(CSR_VSEPC, trap->sepc);
/* Set Guest PC to Guest exception vector */
vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
/* Set Guest privilege mode to supervisor */
vcpu->arch.guest_context.sstatus |= SR_SPP;
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
int ret;
/* If we got a host interrupt then do nothing */
if (trap->scause & CAUSE_IRQ_FLAG)
return 1;
/* Handle guest traps */
ret = -EFAULT;
run->exit_reason = KVM_EXIT_UNKNOWN;
switch (trap->scause) {
case EXC_INST_ILLEGAL:
case EXC_LOAD_MISALIGNED:
case EXC_STORE_MISALIGNED:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
kvm_riscv_vcpu_trap_redirect(vcpu, trap);
ret = 1;
}
break;
case EXC_VIRTUAL_INST_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
break;
case EXC_INST_GUEST_PAGE_FAULT:
case EXC_LOAD_GUEST_PAGE_FAULT:
case EXC_STORE_GUEST_PAGE_FAULT:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
ret = gstage_page_fault(vcpu, run, trap);
break;
case EXC_SUPERVISOR_SYSCALL:
if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
break;
default:
break;
}
/* Print details in case of error */
if (ret < 0) {
kvm_err("VCPU exit error %d\n", ret);
kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
vcpu->arch.guest_context.sepc,
vcpu->arch.guest_context.sstatus,
vcpu->arch.guest_context.hstatus);
kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
trap->scause, trap->stval, trap->htval, trap->htinst);
}
return ret;
}
| linux-master | arch/riscv/kvm/vcpu_exit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_aia_imsic.h>
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
struct kvm_vcpu *tmp_vcpu;
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
mutex_unlock(&tmp_vcpu->mutex);
}
}
static void unlock_all_vcpus(struct kvm *kvm)
{
unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}
static bool lock_all_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *tmp_vcpu;
unsigned long c;
kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
if (!mutex_trylock(&tmp_vcpu->mutex)) {
unlock_vcpus(kvm, c - 1);
return false;
}
}
return true;
}
static int aia_create(struct kvm_device *dev, u32 type)
{
int ret;
unsigned long i;
struct kvm *kvm = dev->kvm;
struct kvm_vcpu *vcpu;
if (irqchip_in_kernel(kvm))
return -EEXIST;
ret = -EBUSY;
if (!lock_all_vcpus(kvm))
return ret;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.ran_atleast_once)
goto out_unlock;
}
ret = 0;
kvm->arch.aia.in_kernel = true;
out_unlock:
unlock_all_vcpus(kvm);
return ret;
}
static void aia_destroy(struct kvm_device *dev)
{
kfree(dev);
}
static int aia_config(struct kvm *kvm, unsigned long type,
u32 *nr, bool write)
{
struct kvm_aia *aia = &kvm->arch.aia;
/* Writes can only be done before irqchip is initialized */
if (write && kvm_riscv_aia_initialized(kvm))
return -EBUSY;
switch (type) {
case KVM_DEV_RISCV_AIA_CONFIG_MODE:
if (write) {
switch (*nr) {
case KVM_DEV_RISCV_AIA_MODE_EMUL:
break;
case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
case KVM_DEV_RISCV_AIA_MODE_AUTO:
/*
* HW Acceleration and Auto modes are only
* supported on hosts with a non-zero number
* of guest external interrupts (i.e. non-zero
* VS-level IMSIC pages).
*/
if (!kvm_riscv_aia_nr_hgei)
return -EINVAL;
break;
default:
return -EINVAL;
}
aia->mode = *nr;
} else
*nr = aia->mode;
break;
case KVM_DEV_RISCV_AIA_CONFIG_IDS:
if (write) {
if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
(*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
KVM_DEV_RISCV_AIA_IDS_MIN) ||
(kvm_riscv_aia_max_ids <= *nr))
return -EINVAL;
aia->nr_ids = *nr;
} else
*nr = aia->nr_ids;
break;
case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
if (write) {
if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
(*nr >= kvm_riscv_aia_max_ids))
return -EINVAL;
aia->nr_sources = *nr;
} else
*nr = aia->nr_sources;
break;
case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
if (write) {
if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
return -EINVAL;
aia->nr_group_bits = *nr;
} else
*nr = aia->nr_group_bits;
break;
case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
if (write) {
if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
(*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
return -EINVAL;
aia->nr_group_shift = *nr;
} else
*nr = aia->nr_group_shift;
break;
case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
if (write) {
if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
return -EINVAL;
aia->nr_hart_bits = *nr;
} else
*nr = aia->nr_hart_bits;
break;
case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
if (write) {
if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
return -EINVAL;
aia->nr_guest_bits = *nr;
} else
*nr = aia->nr_guest_bits;
break;
default:
return -ENXIO;
}
return 0;
}
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
struct kvm_aia *aia = &kvm->arch.aia;
if (write) {
/* Writes can only be done before irqchip is initialized */
if (kvm_riscv_aia_initialized(kvm))
return -EBUSY;
if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
return -EINVAL;
aia->aplic_addr = *addr;
} else
*addr = aia->aplic_addr;
return 0;
}
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
unsigned long vcpu_idx, bool write)
{
struct kvm_vcpu *vcpu;
struct kvm_vcpu_aia *vcpu_aia;
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (!vcpu)
return -EINVAL;
vcpu_aia = &vcpu->arch.aia_context;
if (write) {
/* Writes can only be done before irqchip is initialized */
if (kvm_riscv_aia_initialized(kvm))
return -EBUSY;
if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
return -EINVAL;
}
mutex_lock(&vcpu->mutex);
if (write)
vcpu_aia->imsic_addr = *addr;
else
*addr = vcpu_aia->imsic_addr;
mutex_unlock(&vcpu->mutex);
return 0;
}
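/*
* Strip the group, hart, and guest index fields from an IMSIC address
* to obtain the base PPN shared by all IMSICs of the VM.
*/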
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
u32 h, l;
gpa_t mask = 0;
h = aia->nr_hart_bits + aia->nr_guest_bits +
IMSIC_MMIO_PAGE_SHIFT - 1;
mask = GENMASK_ULL(h, 0);
if (aia->nr_group_bits) {
h = aia->nr_group_bits + aia->nr_group_shift - 1;
l = aia->nr_group_shift;
mask |= GENMASK_ULL(h, l);
}
return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}
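/*
* Recover the hart index encoded in an IMSIC address from its hart
* field and (optional) group field.
*/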
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
u32 hart, group = 0;
hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);
return (group << aia->nr_hart_bits) | hart;
}
static int aia_init(struct kvm *kvm)
{
int ret, i;
unsigned long idx;
struct kvm_vcpu *vcpu;
struct kvm_vcpu_aia *vaia;
struct kvm_aia *aia = &kvm->arch.aia;
gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;
/* Irqchip can be initialized only once */
if (kvm_riscv_aia_initialized(kvm))
return -EBUSY;
/* Bail out if we are still in the middle of creating a VCPU */
if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
return -EBUSY;
/* Number of sources should be less than or equal to the number of IDs */
if (aia->nr_ids < aia->nr_sources)
return -EINVAL;
/* APLIC base is required for non-zero number of sources */
if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
return -EINVAL;
/* Initialize APLIC */
ret = kvm_riscv_aia_aplic_init(kvm);
if (ret)
return ret;
/* Iterate over each VCPU */
kvm_for_each_vcpu(idx, vcpu, kvm) {
vaia = &vcpu->arch.aia_context;
/* IMSIC base is required */
if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
ret = -EINVAL;
goto fail_cleanup_imsics;
}
/* All IMSICs should have matching base PPN */
if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
ret = -EINVAL;
goto fail_cleanup_imsics;
}
/* Update HART index of the IMSIC based on IMSIC base */
vaia->hart_index = aia_imsic_hart_index(aia,
vaia->imsic_addr);
/* Initialize IMSIC for this VCPU */
ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
if (ret)
goto fail_cleanup_imsics;
}
/* Set the initialized flag */
kvm->arch.aia.initialized = true;
return 0;
fail_cleanup_imsics:
for (i = idx - 1; i >= 0; i--) {
vcpu = kvm_get_vcpu(kvm, i);
if (!vcpu)
continue;
kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}
kvm_riscv_aia_aplic_cleanup(kvm);
return ret;
}
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
u32 nr;
u64 addr;
int nr_vcpus, r = -ENXIO;
unsigned long v, type = (unsigned long)attr->attr;
void __user *uaddr = (void __user *)(long)attr->addr;
switch (attr->group) {
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
if (copy_from_user(&nr, uaddr, sizeof(nr)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = aia_config(dev->kvm, type, &nr, true);
mutex_unlock(&dev->kvm->lock);
break;
case KVM_DEV_RISCV_AIA_GRP_ADDR:
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
mutex_lock(&dev->kvm->lock);
if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
r = aia_aplic_addr(dev->kvm, &addr, true);
else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
r = aia_imsic_addr(dev->kvm, &addr,
type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
mutex_unlock(&dev->kvm->lock);
break;
case KVM_DEV_RISCV_AIA_GRP_CTRL:
switch (type) {
case KVM_DEV_RISCV_AIA_CTRL_INIT:
mutex_lock(&dev->kvm->lock);
r = aia_init(dev->kvm);
mutex_unlock(&dev->kvm->lock);
break;
}
break;
case KVM_DEV_RISCV_AIA_GRP_APLIC:
if (copy_from_user(&nr, uaddr, sizeof(nr)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
mutex_unlock(&dev->kvm->lock);
break;
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
if (copy_from_user(&v, uaddr, sizeof(v)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
mutex_unlock(&dev->kvm->lock);
break;
}
return r;
}
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
u32 nr;
u64 addr;
int nr_vcpus, r = -ENXIO;
void __user *uaddr = (void __user *)(long)attr->addr;
unsigned long v, type = (unsigned long)attr->attr;
switch (attr->group) {
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
if (copy_from_user(&nr, uaddr, sizeof(nr)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = aia_config(dev->kvm, type, &nr, false);
mutex_unlock(&dev->kvm->lock);
if (r)
return r;
if (copy_to_user(uaddr, &nr, sizeof(nr)))
return -EFAULT;
break;
case KVM_DEV_RISCV_AIA_GRP_ADDR:
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
mutex_lock(&dev->kvm->lock);
if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
r = aia_aplic_addr(dev->kvm, &addr, false);
else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
r = aia_imsic_addr(dev->kvm, &addr,
type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
mutex_unlock(&dev->kvm->lock);
if (r)
return r;
if (copy_to_user(uaddr, &addr, sizeof(addr)))
return -EFAULT;
break;
case KVM_DEV_RISCV_AIA_GRP_APLIC:
if (copy_from_user(&nr, uaddr, sizeof(nr)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
mutex_unlock(&dev->kvm->lock);
if (r)
return r;
if (copy_to_user(uaddr, &nr, sizeof(nr)))
return -EFAULT;
break;
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
if (copy_from_user(&v, uaddr, sizeof(v)))
return -EFAULT;
mutex_lock(&dev->kvm->lock);
r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
mutex_unlock(&dev->kvm->lock);
if (r)
return r;
if (copy_to_user(uaddr, &v, sizeof(v)))
return -EFAULT;
break;
}
return r;
}
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int nr_vcpus;
switch (attr->group) {
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
switch (attr->attr) {
case KVM_DEV_RISCV_AIA_CONFIG_MODE:
case KVM_DEV_RISCV_AIA_CONFIG_IDS:
case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
return 0;
}
break;
case KVM_DEV_RISCV_AIA_GRP_ADDR:
nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
return 0;
else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
return 0;
break;
case KVM_DEV_RISCV_AIA_GRP_CTRL:
switch (attr->attr) {
case KVM_DEV_RISCV_AIA_CTRL_INIT:
return 0;
}
break;
case KVM_DEV_RISCV_AIA_GRP_APLIC:
return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
}
return -ENXIO;
}
struct kvm_device_ops kvm_riscv_aia_device_ops = {
.name = "kvm-riscv-aia",
.create = aia_create,
.destroy = aia_destroy,
.set_attr = aia_set_attr,
.get_attr = aia_get_attr,
.has_attr = aia_has_attr,
};
int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(vcpu->kvm))
return 1;
/* Update the IMSIC HW state before entering guest mode */
return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
struct kvm_vcpu_aia_csr *reset_csr =
&vcpu->arch.aia_context.guest_reset_csr;
if (!kvm_riscv_aia_available())
return;
memcpy(csr, reset_csr, sizeof(*csr));
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(vcpu->kvm))
return;
/* Reset the IMSIC context */
kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}
int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
if (!kvm_riscv_aia_available())
return 0;
/*
* We don't do any memory allocations here because these will be
* done after the AIA device is initialized by user space.
*
* Refer to the aia_init() implementation for more details.
*/
/* Initialize default values in AIA vcpu context */
vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
vaia->hart_index = vcpu->vcpu_idx;
return 0;
}
void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(vcpu->kvm))
return;
/* Cleanup IMSIC context */
kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
u32 guest_index, u32 iid)
{
unsigned long idx;
struct kvm_vcpu *vcpu;
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(kvm))
return -EBUSY;
/* Inject MSI to matching VCPU */
kvm_for_each_vcpu(idx, vcpu, kvm) {
if (vcpu->arch.aia_context.hart_index == hart_index)
return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
guest_index,
0, iid);
}
return 0;
}
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
gpa_t tppn, ippn;
unsigned long idx;
struct kvm_vcpu *vcpu;
u32 g, toff, iid = msi->data;
struct kvm_aia *aia = &kvm->arch.aia;
gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(kvm))
return -EBUSY;
/* Convert target address to target PPN */
tppn = target >> IMSIC_MMIO_PAGE_SHIFT;
/* Extract and clear Guest ID from target PPN */
g = tppn & (BIT(aia->nr_guest_bits) - 1);
tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));
/* Inject MSI to matching VCPU */
kvm_for_each_vcpu(idx, vcpu, kvm) {
ippn = vcpu->arch.aia_context.imsic_addr >>
IMSIC_MMIO_PAGE_SHIFT;
if (ippn == tppn) {
toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
toff, iid);
}
}
return 0;
}
int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(kvm))
return -EBUSY;
/* Inject interrupt level change in APLIC */
return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}
void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
struct kvm_aia *aia = &kvm->arch.aia;
if (!kvm_riscv_aia_available())
return;
/*
* We don't do any memory allocations here because these will be
* done after the AIA device is initialized by user space.
*
* Refer to the aia_init() implementation for more details.
*/
/* Initialize default values in AIA global context */
aia->mode = (kvm_riscv_aia_nr_hgei) ?
KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
aia->nr_ids = kvm_riscv_aia_max_ids - 1;
aia->nr_sources = 0;
aia->nr_group_bits = 0;
aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
aia->nr_hart_bits = 0;
aia->nr_guest_bits = 0;
aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}
void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
/* Proceed only if AIA was initialized successfully */
if (!kvm_riscv_aia_initialized(kvm))
return;
/* Cleanup APLIC context */
kvm_riscv_aia_aplic_cleanup(kvm);
}
| linux-master | arch/riscv/kvm/aia_device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
sizeof(struct kvm_vm_stat) / sizeof(u64));
const struct kvm_stats_header kvm_vm_stats_header = {
.name_size = KVM_STATS_NAME_SIZE,
.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
.id_offset = sizeof(struct kvm_stats_header),
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
sizeof(kvm_vm_stats_desc),
};
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int r;
r = kvm_riscv_gstage_alloc_pgd(kvm);
if (r)
return r;
r = kvm_riscv_gstage_vmid_init(kvm);
if (r) {
kvm_riscv_gstage_free_pgd(kvm);
return r;
}
kvm_riscv_aia_init_vm(kvm);
kvm_riscv_guest_timer_init(kvm);
return 0;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_destroy_vcpus(kvm);
kvm_riscv_aia_destroy_vm(kvm);
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql,
bool line_status)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
return kvm_riscv_aia_inject_irq(kvm, irql->irq, irql->level);
}
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
struct kvm_msi msi;
if (!level)
return -1;
msi.address_lo = e->msi.address_lo;
msi.address_hi = e->msi.address_hi;
msi.data = e->msi.data;
msi.flags = e->msi.flags;
msi.devid = e->msi.devid;
return kvm_riscv_aia_inject_msi(kvm, &msi);
}
static int kvm_riscv_set_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
return kvm_riscv_aia_inject_irq(kvm, e->irqchip.pin, level);
}
int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines)
{
struct kvm_irq_routing_entry *ents;
int i, rc;
ents = kcalloc(lines, sizeof(*ents), GFP_KERNEL);
if (!ents)
return -ENOMEM;
for (i = 0; i < lines; i++) {
ents[i].gsi = i;
ents[i].type = KVM_IRQ_ROUTING_IRQCHIP;
ents[i].u.irqchip.irqchip = 0;
ents[i].u.irqchip.pin = i;
}
rc = kvm_set_irq_routing(kvm, ents, lines, 0);
kfree(ents);
return rc;
}
bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
return irqchip_in_kernel(kvm);
}
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
{
int r = -EINVAL;
switch (ue->type) {
case KVM_IRQ_ROUTING_IRQCHIP:
e->set = kvm_riscv_set_irq;
e->irqchip.irqchip = ue->u.irqchip.irqchip;
e->irqchip.pin = ue->u.irqchip.pin;
if ((e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) ||
(e->irqchip.irqchip >= KVM_NR_IRQCHIPS))
goto out;
break;
case KVM_IRQ_ROUTING_MSI:
e->set = kvm_set_msi;
e->msi.address_lo = ue->u.msi.address_lo;
e->msi.address_hi = ue->u.msi.address_hi;
e->msi.data = ue->u.msi.data;
e->msi.flags = ue->flags;
e->msi.devid = ue->u.msi.devid;
break;
default:
goto out;
}
r = 0;
out:
return r;
}
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -EWOULDBLOCK;
switch (e->type) {
case KVM_IRQ_ROUTING_MSI:
return kvm_set_msi(e, kvm, irq_source_id, level, line_status);
case KVM_IRQ_ROUTING_IRQCHIP:
return kvm_riscv_set_irq(e, kvm, irq_source_id,
level, line_status);
}
return -EWOULDBLOCK;
}
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return irqchip_in_kernel(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
r = kvm_riscv_aia_available();
break;
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_ONE_REG:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
case KVM_CAP_IMMEDIATE_EXIT:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_VM_GPA_BITS:
r = kvm_riscv_gstage_gpa_bits();
break;
default:
r = 0;
break;
}
return r;
}
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
}
| linux-master | arch/riscv/kvm/vm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>
static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
ulong hmask;
int i, ret = 0;
u64 next_cycle;
struct kvm_vcpu *rvcpu;
struct kvm *kvm = vcpu->kvm;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_cpu_trap *utrap = retdata->utrap;
switch (cp->a7) {
case SBI_EXT_0_1_CONSOLE_GETCHAR:
case SBI_EXT_0_1_CONSOLE_PUTCHAR:
/*
* The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
* handled in kernel so we forward these to user-space
*/
kvm_riscv_vcpu_sbi_forward(vcpu, run);
retdata->uexit = true;
break;
case SBI_EXT_0_1_SET_TIMER:
#if __riscv_xlen == 32
next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
next_cycle = (u64)cp->a0;
#endif
ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
break;
case SBI_EXT_0_1_CLEAR_IPI:
ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
break;
case SBI_EXT_0_1_SEND_IPI:
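		/*
		 * In SBI v0.1, a0 holds the guest address of a hart mask; a
		 * NULL pointer means "target all VCPUs", which is why a full
		 * mask of online VCPUs is synthesized below.
		 */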
if (cp->a0)
hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
else
hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
if (utrap->scause)
break;
for_each_set_bit(i, &hmask, BITS_PER_LONG) {
rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
if (ret < 0)
break;
}
break;
case SBI_EXT_0_1_SHUTDOWN:
kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
KVM_SYSTEM_EVENT_SHUTDOWN, 0);
retdata->uexit = true;
break;
case SBI_EXT_0_1_REMOTE_FENCE_I:
case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
if (cp->a0)
hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0, utrap);
else
hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
if (utrap->scause)
break;
if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
kvm_riscv_fence_i(vcpu->kvm, 0, hmask);
else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA) {
if (cp->a1 == 0 && cp->a2 == 0)
kvm_riscv_hfence_vvma_all(vcpu->kvm,
0, hmask);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm,
0, hmask,
cp->a1, cp->a2,
PAGE_SHIFT);
} else {
if (cp->a1 == 0 && cp->a2 == 0)
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
0, hmask,
cp->a3);
else
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
0, hmask,
cp->a1, cp->a2,
PAGE_SHIFT,
cp->a3);
}
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
break;
}
return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
.extid_start = SBI_EXT_0_1_SET_TIMER,
.extid_end = SBI_EXT_0_1_SHUTDOWN,
.handler = kvm_sbi_ext_v01_handler,
};
| linux-master | arch/riscv/kvm/vcpu_sbi_v01.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
* Copyright (C) 2023 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>
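/*
 * ONE_REG interface for VCPU configuration, core GPRs, CSRs, timer, FP,
 * ISA extension and SBI extension registers. KVM_RISCV_BASE_ISA_MASK
 * below covers bits 0..25 of the ISA config register, i.e. the
 * single-letter extensions 'a'..'z'.
 */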
#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
#define KVM_ISA_EXT_ARR(ext) \
[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
/* Single letter extensions (alphabetically sorted) */
[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
/* Multi letter extensions (alphabetically sorted) */
KVM_ISA_EXT_ARR(SSAIA),
KVM_ISA_EXT_ARR(SSTC),
KVM_ISA_EXT_ARR(SVINVAL),
KVM_ISA_EXT_ARR(SVNAPOT),
KVM_ISA_EXT_ARR(SVPBMT),
KVM_ISA_EXT_ARR(ZBA),
KVM_ISA_EXT_ARR(ZBB),
KVM_ISA_EXT_ARR(ZBS),
KVM_ISA_EXT_ARR(ZICBOM),
KVM_ISA_EXT_ARR(ZICBOZ),
KVM_ISA_EXT_ARR(ZICNTR),
KVM_ISA_EXT_ARR(ZICSR),
KVM_ISA_EXT_ARR(ZIFENCEI),
KVM_ISA_EXT_ARR(ZIHINTPAUSE),
KVM_ISA_EXT_ARR(ZIHPM),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
unsigned long i;
for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
if (kvm_isa_ext_arr[i] == base_ext)
return i;
}
return KVM_RISCV_ISA_EXT_MAX;
}
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_H:
return false;
case KVM_RISCV_ISA_EXT_V:
return riscv_v_vstate_ctrl_user_allowed();
default:
break;
}
return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
switch (ext) {
case KVM_RISCV_ISA_EXT_A:
case KVM_RISCV_ISA_EXT_C:
case KVM_RISCV_ISA_EXT_I:
case KVM_RISCV_ISA_EXT_M:
case KVM_RISCV_ISA_EXT_SSAIA:
case KVM_RISCV_ISA_EXT_SSTC:
case KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_RISCV_ISA_EXT_ZBA:
case KVM_RISCV_ISA_EXT_ZBB:
case KVM_RISCV_ISA_EXT_ZBS:
case KVM_RISCV_ISA_EXT_ZICNTR:
case KVM_RISCV_ISA_EXT_ZICSR:
case KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
case KVM_RISCV_ISA_EXT_ZIHPM:
return false;
default:
break;
}
return true;
}
void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
{
unsigned long host_isa, i;
for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
host_isa = kvm_isa_ext_arr[i];
if (__riscv_isa_extension_available(NULL, host_isa) &&
kvm_riscv_vcpu_isa_enable_allowed(i))
set_bit(host_isa, vcpu->arch.isa);
}
}
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
return -ENOENT;
reg_val = riscv_cbom_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
return -ENOENT;
reg_val = riscv_cboz_block_size;
break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
reg_val = vcpu->arch.mvendorid;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
reg_val = vcpu->arch.marchid;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
reg_val = vcpu->arch.mimpid;
break;
case KVM_REG_RISCV_CONFIG_REG(satp_mode):
reg_val = satp_mode >> SATP_MODE_SHIFT;
break;
default:
return -ENOENT;
}
	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CONFIG);
unsigned long i, isa_ext, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_num) {
case KVM_REG_RISCV_CONFIG_REG(isa):
/*
* This ONE REG interface is only defined for
* single letter extensions.
*/
if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
return -EINVAL;
/*
* Return early (i.e. do nothing) if reg_val is the same
* value retrievable via kvm_riscv_vcpu_get_reg_config().
*/
if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
break;
if (!vcpu->arch.ran_atleast_once) {
/* Ignore the enable/disable request for certain extensions */
for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
reg_val &= ~BIT(i);
continue;
}
if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
if (reg_val & BIT(i))
reg_val &= ~BIT(i);
if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
if (!(reg_val & BIT(i)))
reg_val |= BIT(i);
}
reg_val &= riscv_isa_extension_base(NULL);
/* Do not modify anything beyond single letter extensions */
reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
(reg_val & KVM_RISCV_BASE_ISA_MASK);
vcpu->arch.isa[0] = reg_val;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EBUSY;
}
break;
case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
return -ENOENT;
if (reg_val != riscv_cbom_block_size)
return -EINVAL;
break;
case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
return -ENOENT;
if (reg_val != riscv_cboz_block_size)
return -EINVAL;
break;
case KVM_REG_RISCV_CONFIG_REG(mvendorid):
if (reg_val == vcpu->arch.mvendorid)
break;
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mvendorid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(marchid):
if (reg_val == vcpu->arch.marchid)
break;
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.marchid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(mimpid):
if (reg_val == vcpu->arch.mimpid)
break;
if (!vcpu->arch.ran_atleast_once)
vcpu->arch.mimpid = reg_val;
else
return -EBUSY;
break;
case KVM_REG_RISCV_CONFIG_REG(satp_mode):
if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
return -EINVAL;
break;
default:
return -ENOENT;
}
return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -ENOENT;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
reg_val = cntx->sepc;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
reg_val = ((unsigned long *)cntx)[reg_num];
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
reg_val = (cntx->sstatus & SR_SPP) ?
KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
else
return -ENOENT;
	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
return -ENOENT;
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
cntx->sepc = reg_val;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
((unsigned long *)cntx)[reg_num] = reg_val;
else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
if (reg_val == KVM_RISCV_MODE_S)
cntx->sstatus |= SR_SPP;
else
cntx->sstatus &= ~SR_SPP;
} else
return -ENOENT;
return 0;
}
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *out_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -ENOENT;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
kvm_riscv_vcpu_flush_interrupts(vcpu);
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
} else
*out_val = ((unsigned long *)csr)[reg_num];
return 0;
}
static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
return -ENOENT;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
reg_val &= VSIP_VALID_MASK;
reg_val <<= VSIP_TO_HVIP_SHIFT;
}
((unsigned long *)csr)[reg_num] = reg_val;
if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
default:
rc = -ENOENT;
break;
}
if (rc)
return rc;
	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CSR);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_CSR_GENERAL:
rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
break;
case KVM_REG_RISCV_CSR_AIA:
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
default:
rc = -ENOENT;
break;
}
if (rc)
return rc;
return 0;
}
static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
unsigned long host_isa_ext;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -ENOENT;
*reg_val = 0;
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
*reg_val = 1; /* Mark the given extension as available */
return 0;
}
static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
unsigned long host_isa_ext;
if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -ENOENT;
if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
return 0;
if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
if (reg_val == 1 &&
kvm_riscv_vcpu_isa_enable_allowed(reg_num))
set_bit(host_isa_ext, vcpu->arch.isa);
else if (!reg_val &&
kvm_riscv_vcpu_isa_disable_allowed(reg_num))
clear_bit(host_isa_ext, vcpu->arch.isa);
else
return -EINVAL;
kvm_riscv_vcpu_fp_reset(vcpu);
} else {
return -EBUSY;
}
return 0;
}
static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
unsigned long i, ext_id, ext_val;
if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
return -ENOENT;
for (i = 0; i < BITS_PER_LONG; i++) {
ext_id = i + reg_num * BITS_PER_LONG;
if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
break;
ext_val = 0;
riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
if (ext_val)
*reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
}
return 0;
}
static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val, bool enable)
{
unsigned long i, ext_id;
if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
return -ENOENT;
	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
ext_id = i + reg_num * BITS_PER_LONG;
if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
break;
riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
}
return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
reg_val = 0;
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
		rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
break;
case KVM_REG_RISCV_ISA_MULTI_EN:
case KVM_REG_RISCV_ISA_MULTI_DIS:
		rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
reg_val = ~reg_val;
break;
default:
rc = -ENOENT;
}
if (rc)
return rc;
	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_ISA_EXT);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_ISA_MULTI_EN:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_ISA_MULTI_DIS:
		return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;
}
return 0;
}
static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int n = 0;
for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
i++) {
u64 size;
u64 reg;
		/*
		 * Avoid reporting a config reg if the corresponding extension
		 * is not available.
		 */
if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
continue;
else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
continue;
size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
n++;
}
return n;
}
static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
{
return copy_config_reg_indices(vcpu, NULL);
}
static inline unsigned long num_core_regs(void)
{
return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
}
static int copy_core_reg_indices(u64 __user *uindices)
{
int n = num_core_regs();
for (int i = 0; i < n; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
return n;
}
static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
{
unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
return n;
}
static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
int n2 = 0;
/* copy general csr regs */
for (int i = 0; i < n1; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
KVM_REG_RISCV_CSR_GENERAL | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
/* copy AIA csr regs */
if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
for (int i = 0; i < n2; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
KVM_REG_RISCV_CSR_AIA | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
}
return n1 + n2;
}
static inline unsigned long num_timer_regs(void)
{
return sizeof(struct kvm_riscv_timer) / sizeof(u64);
}
static int copy_timer_reg_indices(u64 __user *uindices)
{
int n = num_timer_regs();
for (int i = 0; i < n; i++) {
u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
KVM_REG_RISCV_TIMER | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
return n;
}
static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
{
const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
if (riscv_isa_extension_available(vcpu->arch.isa, f))
return sizeof(cntx->fp.f) / sizeof(u32);
else
return 0;
}
static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int n = num_fp_f_regs(vcpu);
for (int i = 0; i < n; i++) {
u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
KVM_REG_RISCV_FP_F | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
return n;
}
static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
{
const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
if (riscv_isa_extension_available(vcpu->arch.isa, d))
return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
else
return 0;
}
static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int i;
int n = num_fp_d_regs(vcpu);
u64 reg;
/* copy fp.d.f indices */
for (i = 0; i < n-1; i++) {
reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
KVM_REG_RISCV_FP_D | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
/* copy fp.d.fcsr indices */
reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
return n;
}
static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
unsigned int n = 0;
unsigned long isa_ext;
for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
isa_ext = kvm_isa_ext_arr[i];
if (!__riscv_isa_extension_available(NULL, isa_ext))
continue;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
n++;
}
return n;
}
static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
{
	return copy_isa_ext_reg_indices(vcpu, NULL);
}
static inline unsigned long num_sbi_ext_regs(void)
{
/*
* number of KVM_REG_RISCV_SBI_SINGLE +
* 2 x (number of KVM_REG_RISCV_SBI_MULTI)
*/
return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
}
static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
int n;
/* copy KVM_REG_RISCV_SBI_SINGLE */
n = KVM_RISCV_SBI_EXT_MAX;
for (int i = 0; i < n; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_SINGLE | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
/* copy KVM_REG_RISCV_SBI_MULTI */
n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
for (int i = 0; i < n; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_MULTI_EN | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_MULTI_DIS | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
return num_sbi_ext_regs();
}
/*
* kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
{
unsigned long res = 0;
res += num_config_regs(vcpu);
res += num_core_regs();
res += num_csr_regs(vcpu);
res += num_timer_regs();
res += num_fp_f_regs(vcpu);
res += num_fp_d_regs(vcpu);
res += num_isa_ext_regs(vcpu);
res += num_sbi_ext_regs();
return res;
}
/*
* kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
*/
int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
int ret;
ret = copy_config_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_core_reg_indices(uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_csr_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_timer_reg_indices(uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_fp_f_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_fp_d_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_isa_ext_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_sbi_ext_reg_indices(uindices);
if (ret < 0)
return ret;
return 0;
}
int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
default:
break;
}
return -ENOENT;
}
int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
case KVM_REG_RISCV_CONFIG:
return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
case KVM_REG_RISCV_CORE:
return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
case KVM_REG_RISCV_CSR:
return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
case KVM_REG_RISCV_TIMER:
return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
case KVM_REG_RISCV_FP_F:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_F);
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
default:
break;
}
return -ENOENT;
}
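/*
 * Illustrative sketch only (not part of this file): user space reaches
 * the get/set handlers above via the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * ioctls, composing register IDs the same way copy_core_reg_indices()
 * does. For example, on a 64-bit host, setting the guest PC could look
 * like (vcpu_fd and pc_val are caller-provided):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *			KVM_REG_RISCV_CORE |
 *			KVM_REG_RISCV_CORE_REG(regs.pc),
 *		.addr = (unsigned long)&pc_val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */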
| linux-master | arch/riscv/kvm/vcpu_onereg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/csr.h>
#include <asm/kvm_aia_imsic.h>
#define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64))
struct imsic_mrif_eix {
unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
};
struct imsic_mrif {
struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
unsigned long eithreshold;
unsigned long eidelivery;
};
struct imsic {
struct kvm_io_device iodev;
u32 nr_msis;
u32 nr_eix;
u32 nr_hw_eix;
/*
* At any point in time, the register state is in
* one of the following places:
*
* 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
* 2) Software: IMSIC SW-file (vsfile_cpu < 0)
*/
/* IMSIC VS-file */
rwlock_t vsfile_lock;
int vsfile_cpu;
int vsfile_hgei;
void __iomem *vsfile_va;
phys_addr_t vsfile_pa;
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
};
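/*
 * Guest IMSIC registers are accessed indirectly: the register index is
 * programmed into CSR_VSISELECT and the data is then read, written or
 * swapped through CSR_VSIREG. The macro ladders below expand to one case
 * label per EIP/EIE register index for these indirect accesses.
 */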
#define imsic_vs_csr_read(__c) \
({ \
unsigned long __r; \
csr_write(CSR_VSISELECT, __c); \
__r = csr_read(CSR_VSIREG); \
__r; \
})
#define imsic_read_switchcase(__ireg) \
case __ireg: \
return imsic_vs_csr_read(__ireg);
#define imsic_read_switchcase_2(__ireg) \
imsic_read_switchcase(__ireg + 0) \
imsic_read_switchcase(__ireg + 1)
#define imsic_read_switchcase_4(__ireg) \
imsic_read_switchcase_2(__ireg + 0) \
imsic_read_switchcase_2(__ireg + 2)
#define imsic_read_switchcase_8(__ireg) \
imsic_read_switchcase_4(__ireg + 0) \
imsic_read_switchcase_4(__ireg + 4)
#define imsic_read_switchcase_16(__ireg) \
imsic_read_switchcase_8(__ireg + 0) \
imsic_read_switchcase_8(__ireg + 8)
#define imsic_read_switchcase_32(__ireg) \
imsic_read_switchcase_16(__ireg + 0) \
imsic_read_switchcase_16(__ireg + 16)
#define imsic_read_switchcase_64(__ireg) \
imsic_read_switchcase_32(__ireg + 0) \
imsic_read_switchcase_32(__ireg + 32)
static unsigned long imsic_eix_read(int ireg)
{
switch (ireg) {
imsic_read_switchcase_64(IMSIC_EIP0)
imsic_read_switchcase_64(IMSIC_EIE0)
}
return 0;
}
#define imsic_vs_csr_swap(__c, __v) \
({ \
unsigned long __r; \
csr_write(CSR_VSISELECT, __c); \
__r = csr_swap(CSR_VSIREG, __v); \
__r; \
})
#define imsic_swap_switchcase(__ireg, __v) \
case __ireg: \
return imsic_vs_csr_swap(__ireg, __v);
#define imsic_swap_switchcase_2(__ireg, __v) \
imsic_swap_switchcase(__ireg + 0, __v) \
imsic_swap_switchcase(__ireg + 1, __v)
#define imsic_swap_switchcase_4(__ireg, __v) \
imsic_swap_switchcase_2(__ireg + 0, __v) \
imsic_swap_switchcase_2(__ireg + 2, __v)
#define imsic_swap_switchcase_8(__ireg, __v) \
imsic_swap_switchcase_4(__ireg + 0, __v) \
imsic_swap_switchcase_4(__ireg + 4, __v)
#define imsic_swap_switchcase_16(__ireg, __v) \
imsic_swap_switchcase_8(__ireg + 0, __v) \
imsic_swap_switchcase_8(__ireg + 8, __v)
#define imsic_swap_switchcase_32(__ireg, __v) \
imsic_swap_switchcase_16(__ireg + 0, __v) \
imsic_swap_switchcase_16(__ireg + 16, __v)
#define imsic_swap_switchcase_64(__ireg, __v) \
imsic_swap_switchcase_32(__ireg + 0, __v) \
imsic_swap_switchcase_32(__ireg + 32, __v)
static unsigned long imsic_eix_swap(int ireg, unsigned long val)
{
switch (ireg) {
imsic_swap_switchcase_64(IMSIC_EIP0, val)
imsic_swap_switchcase_64(IMSIC_EIE0, val)
}
return 0;
}
#define imsic_vs_csr_write(__c, __v) \
do { \
csr_write(CSR_VSISELECT, __c); \
csr_write(CSR_VSIREG, __v); \
} while (0)
#define imsic_write_switchcase(__ireg, __v) \
case __ireg: \
imsic_vs_csr_write(__ireg, __v); \
break;
#define imsic_write_switchcase_2(__ireg, __v) \
imsic_write_switchcase(__ireg + 0, __v) \
imsic_write_switchcase(__ireg + 1, __v)
#define imsic_write_switchcase_4(__ireg, __v) \
imsic_write_switchcase_2(__ireg + 0, __v) \
imsic_write_switchcase_2(__ireg + 2, __v)
#define imsic_write_switchcase_8(__ireg, __v) \
imsic_write_switchcase_4(__ireg + 0, __v) \
imsic_write_switchcase_4(__ireg + 4, __v)
#define imsic_write_switchcase_16(__ireg, __v) \
imsic_write_switchcase_8(__ireg + 0, __v) \
imsic_write_switchcase_8(__ireg + 8, __v)
#define imsic_write_switchcase_32(__ireg, __v) \
imsic_write_switchcase_16(__ireg + 0, __v) \
imsic_write_switchcase_16(__ireg + 16, __v)
#define imsic_write_switchcase_64(__ireg, __v) \
imsic_write_switchcase_32(__ireg + 0, __v) \
imsic_write_switchcase_32(__ireg + 32, __v)
static void imsic_eix_write(int ireg, unsigned long val)
{
switch (ireg) {
imsic_write_switchcase_64(IMSIC_EIP0, val)
imsic_write_switchcase_64(IMSIC_EIE0, val)
}
}
#define imsic_vs_csr_set(__c, __v) \
do { \
csr_write(CSR_VSISELECT, __c); \
csr_set(CSR_VSIREG, __v); \
} while (0)
#define imsic_set_switchcase(__ireg, __v) \
case __ireg: \
imsic_vs_csr_set(__ireg, __v); \
break;
#define imsic_set_switchcase_2(__ireg, __v) \
imsic_set_switchcase(__ireg + 0, __v) \
imsic_set_switchcase(__ireg + 1, __v)
#define imsic_set_switchcase_4(__ireg, __v) \
imsic_set_switchcase_2(__ireg + 0, __v) \
imsic_set_switchcase_2(__ireg + 2, __v)
#define imsic_set_switchcase_8(__ireg, __v) \
imsic_set_switchcase_4(__ireg + 0, __v) \
imsic_set_switchcase_4(__ireg + 4, __v)
#define imsic_set_switchcase_16(__ireg, __v) \
imsic_set_switchcase_8(__ireg + 0, __v) \
imsic_set_switchcase_8(__ireg + 8, __v)
#define imsic_set_switchcase_32(__ireg, __v) \
imsic_set_switchcase_16(__ireg + 0, __v) \
imsic_set_switchcase_16(__ireg + 16, __v)
#define imsic_set_switchcase_64(__ireg, __v) \
imsic_set_switchcase_32(__ireg + 0, __v) \
imsic_set_switchcase_32(__ireg + 32, __v)
static void imsic_eix_set(int ireg, unsigned long val)
{
switch (ireg) {
imsic_set_switchcase_64(IMSIC_EIP0, val)
imsic_set_switchcase_64(IMSIC_EIE0, val)
}
}
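/*
 * Atomic read-modify-write on one MRIF word: the LR/SC retry loop keeps
 * the bits outside wr_mask, ORs in (new_val & wr_mask) and returns the
 * previous value. The imsic_mrif_atomic_write()/_read() macros below are
 * thin wrappers built on top of this and the atomic OR helper.
 */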
static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
unsigned long *ptr,
unsigned long new_val,
unsigned long wr_mask)
{
unsigned long old_val = 0, tmp = 0;
__asm__ __volatile__ (
"0: lr.w.aq %1, %0\n"
" and %2, %1, %3\n"
" or %2, %2, %4\n"
" sc.w.rl %2, %2, %0\n"
" bnez %2, 0b"
: "+A" (*ptr), "+r" (old_val), "+r" (tmp)
: "r" (~wr_mask), "r" (new_val & wr_mask)
: "memory");
return old_val;
}
static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
unsigned long *ptr,
unsigned long val)
{
return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
}
#define imsic_mrif_atomic_write(__mrif, __ptr, __new_val) \
imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
#define imsic_mrif_atomic_read(__mrif, __ptr) \
imsic_mrif_atomic_or(__mrif, __ptr, 0)
static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
{
struct imsic_mrif_eix *eix;
u32 i, imin, imax, ei, max_msi;
unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
&mrif->eithreshold);
max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
eithreshold : nr_msis;
for (ei = 0; ei < nr_eix; ei++) {
eix = &mrif->eix[ei];
eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
imsic_mrif_atomic_read(mrif, &eix->eip[0]);
#ifdef CONFIG_32BIT
eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
imsic_mrif_atomic_read(mrif, &eix->eip[1]);
if (!eipend[0] && !eipend[1])
#else
if (!eipend[0])
#endif
continue;
imin = ei * BITS_PER_TYPE(u64);
imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
imin + BITS_PER_TYPE(u64) : max_msi;
for (i = (!imin) ? 1 : imin; i < imax; i++) {
if (test_bit(i - imin, eipend))
return (i << TOPEI_ID_SHIFT) | i;
}
}
return 0;
}
static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
{
u32 num = 0;
switch (isel) {
case IMSIC_EIDELIVERY:
case IMSIC_EITHRESHOLD:
break;
case IMSIC_EIP0 ... IMSIC_EIP63:
num = isel - IMSIC_EIP0;
break;
case IMSIC_EIE0 ... IMSIC_EIE63:
num = isel - IMSIC_EIE0;
break;
default:
return -ENOENT;
}
#ifndef CONFIG_32BIT
if (num & 0x1)
return -EINVAL;
#endif
if ((num / 2) >= nr_eix)
return -EINVAL;
return 0;
}
static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
unsigned long isel, unsigned long *val,
unsigned long new_val, unsigned long wr_mask)
{
bool pend;
struct imsic_mrif_eix *eix;
unsigned long *ei, num, old_val = 0;
switch (isel) {
case IMSIC_EIDELIVERY:
old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
new_val, wr_mask & 0x1);
break;
case IMSIC_EITHRESHOLD:
old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
new_val, wr_mask & (IMSIC_MAX_ID - 1));
break;
case IMSIC_EIP0 ... IMSIC_EIP63:
case IMSIC_EIE0 ... IMSIC_EIE63:
if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
pend = true;
num = isel - IMSIC_EIP0;
} else {
pend = false;
num = isel - IMSIC_EIE0;
}
if ((num / 2) >= nr_eix)
return -EINVAL;
eix = &mrif->eix[num / 2];
#ifndef CONFIG_32BIT
if (num & 0x1)
return -EINVAL;
ei = (pend) ? &eix->eip[0] : &eix->eie[0];
#else
ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
#endif
/* Bit0 of EIP0 or EIE0 is read-only */
if (!num)
wr_mask &= ~BIT(0);
old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
break;
default:
return -ENOENT;
}
if (val)
*val = old_val;
return 0;
}
struct imsic_vsfile_read_data {
int hgei;
u32 nr_eix;
bool clear;
struct imsic_mrif *mrif;
};
static void imsic_vsfile_local_read(void *data)
{
u32 i;
struct imsic_mrif_eix *eix;
struct imsic_vsfile_read_data *idata = data;
struct imsic_mrif *mrif = idata->mrif;
unsigned long new_hstatus, old_hstatus, old_vsiselect;
old_vsiselect = csr_read(CSR_VSISELECT);
old_hstatus = csr_read(CSR_HSTATUS);
new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
csr_write(CSR_HSTATUS, new_hstatus);
	/*
	 * We don't use the imsic_mrif_atomic_xyz() functions to store
	 * values in the MRIF because imsic_vsfile_read() is always called
	 * with a pointer to a temporary MRIF on the stack.
	 */
if (idata->clear) {
mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
for (i = 0; i < idata->nr_eix; i++) {
eix = &mrif->eix[i];
eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
}
} else {
mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
for (i = 0; i < idata->nr_eix; i++) {
eix = &mrif->eix[i];
eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
#ifdef CONFIG_32BIT
eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
#endif
}
}
csr_write(CSR_HSTATUS, old_hstatus);
csr_write(CSR_VSISELECT, old_vsiselect);
}
static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
bool clear, struct imsic_mrif *mrif)
{
struct imsic_vsfile_read_data idata;
	/* We can only read and clear if we have an IMSIC VS-file */
if (vsfile_cpu < 0 || vsfile_hgei <= 0)
return;
	/* We can only read and clear on the local CPU */
idata.hgei = vsfile_hgei;
idata.nr_eix = nr_eix;
idata.clear = clear;
idata.mrif = mrif;
on_each_cpu_mask(cpumask_of(vsfile_cpu),
imsic_vsfile_local_read, &idata, 1);
}
struct imsic_vsfile_rw_data {
int hgei;
int isel;
bool write;
unsigned long val;
};
static void imsic_vsfile_local_rw(void *data)
{
struct imsic_vsfile_rw_data *idata = data;
unsigned long new_hstatus, old_hstatus, old_vsiselect;
old_vsiselect = csr_read(CSR_VSISELECT);
old_hstatus = csr_read(CSR_HSTATUS);
new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
csr_write(CSR_HSTATUS, new_hstatus);
switch (idata->isel) {
case IMSIC_EIDELIVERY:
if (idata->write)
imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
else
idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
break;
case IMSIC_EITHRESHOLD:
if (idata->write)
imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
else
idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
break;
case IMSIC_EIP0 ... IMSIC_EIP63:
case IMSIC_EIE0 ... IMSIC_EIE63:
#ifndef CONFIG_32BIT
if (idata->isel & 0x1)
break;
#endif
if (idata->write)
imsic_eix_write(idata->isel, idata->val);
else
idata->val = imsic_eix_read(idata->isel);
break;
default:
break;
}
csr_write(CSR_HSTATUS, old_hstatus);
csr_write(CSR_VSISELECT, old_vsiselect);
}
static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
unsigned long isel, bool write,
unsigned long *val)
{
int rc;
struct imsic_vsfile_rw_data rdata;
	/* We can only access registers if we have an IMSIC VS-file */
if (vsfile_cpu < 0 || vsfile_hgei <= 0)
return -EINVAL;
/* Check IMSIC register iselect */
rc = imsic_mrif_isel_check(nr_eix, isel);
if (rc)
return rc;
	/* We can only access the register on the local CPU */
rdata.hgei = vsfile_hgei;
rdata.isel = isel;
rdata.write = write;
rdata.val = (write) ? *val : 0;
on_each_cpu_mask(cpumask_of(vsfile_cpu),
imsic_vsfile_local_rw, &rdata, 1);
if (!write)
*val = rdata.val;
return 0;
}
static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
{
u32 i;
unsigned long new_hstatus, old_hstatus, old_vsiselect;
	/* We can only zero-out if we have an IMSIC VS-file */
if (vsfile_hgei <= 0)
return;
old_vsiselect = csr_read(CSR_VSISELECT);
old_hstatus = csr_read(CSR_HSTATUS);
new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
csr_write(CSR_HSTATUS, new_hstatus);
imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
for (i = 0; i < nr_eix; i++) {
imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
}
csr_write(CSR_HSTATUS, old_hstatus);
csr_write(CSR_VSISELECT, old_vsiselect);
}
static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
struct imsic_mrif *mrif)
{
u32 i;
struct imsic_mrif_eix *eix;
unsigned long new_hstatus, old_hstatus, old_vsiselect;
/* We can only update if we have a HW IMSIC context */
if (vsfile_hgei <= 0)
return;
	/*
	 * We don't use the imsic_mrif_atomic_xyz() functions to read values
	 * from the MRIF in this function because it is always called with
	 * a pointer to a temporary MRIF on the stack.
	 */
old_vsiselect = csr_read(CSR_VSISELECT);
old_hstatus = csr_read(CSR_HSTATUS);
new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
csr_write(CSR_HSTATUS, new_hstatus);
for (i = 0; i < nr_eix; i++) {
eix = &mrif->eix[i];
imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
#ifdef CONFIG_32BIT
imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
#endif
}
imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);
csr_write(CSR_HSTATUS, old_hstatus);
csr_write(CSR_VSISELECT, old_vsiselect);
}
static void imsic_vsfile_cleanup(struct imsic *imsic)
{
int old_vsfile_hgei, old_vsfile_cpu;
unsigned long flags;
/*
* We don't use imsic_mrif_atomic_xyz() functions to clear the
* SW-file in this function because it is always called when the
* VCPU is being destroyed.
*/
write_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
imsic->vsfile_va = NULL;
imsic->vsfile_pa = 0;
write_unlock_irqrestore(&imsic->vsfile_lock, flags);
memset(imsic->swfile, 0, sizeof(*imsic->swfile));
if (old_vsfile_cpu >= 0)
kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}
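/*
 * Assert the VS-level external interrupt whenever the SW-file has delivery
 * enabled and at least one interrupt ID that is both pending and enabled;
 * otherwise clear it.
 */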
static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *mrif = imsic->swfile;
if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
}
static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
struct imsic_mrif *mrif)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
	/*
	 * We don't use the imsic_mrif_atomic_xyz() functions to read and
	 * write the SW-file and the MRIF in this function because it is
	 * always called when the VCPU is not using the SW-file and the
	 * MRIF argument points to a temporary MRIF on the stack.
	 */
memcpy(mrif, imsic->swfile, sizeof(*mrif));
if (clear) {
memset(imsic->swfile, 0, sizeof(*imsic->swfile));
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
}
}
static void imsic_swfile_update(struct kvm_vcpu *vcpu,
struct imsic_mrif *mrif)
{
u32 i;
struct imsic_mrif_eix *seix, *eix;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *smrif = imsic->swfile;
imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
for (i = 0; i < imsic->nr_eix; i++) {
seix = &smrif->eix[i];
eix = &mrif->eix[i];
imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
#ifdef CONFIG_32BIT
imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
#endif
}
imsic_swfile_extirq_update(vcpu);
}
void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct imsic_mrif tmrif;
int old_vsfile_hgei, old_vsfile_cpu;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
/* Read and clear IMSIC VS-file details */
write_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
imsic->vsfile_va = NULL;
imsic->vsfile_pa = 0;
write_unlock_irqrestore(&imsic->vsfile_lock, flags);
/* Do nothing, if no IMSIC VS-file to release */
if (old_vsfile_cpu < 0)
return;
	/*
	 * At this point, all interrupt producers are still using
	 * the old IMSIC VS-file, so we first re-direct all interrupt
	 * producers.
	 */
/* Purge the G-stage mapping */
kvm_riscv_gstage_iounmap(vcpu->kvm,
vcpu->arch.aia_context.imsic_addr,
IMSIC_MMIO_PAGE_SZ);
/* TODO: Purge the IOMMU mapping ??? */
	/*
	 * At this point, all interrupt producers have been re-directed
	 * elsewhere, so we move the register state from the old IMSIC
	 * VS-file to the IMSIC SW-file.
	 */
/* Read and clear register state from old IMSIC VS-file */
memset(&tmrif, 0, sizeof(tmrif));
imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
true, &tmrif);
/* Update register state in IMSIC SW-file */
imsic_swfile_update(vcpu, &tmrif);
/* Free-up old IMSIC VS-file */
kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}
int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
{
unsigned long flags;
phys_addr_t new_vsfile_pa;
struct imsic_mrif tmrif;
void __iomem *new_vsfile_va;
struct kvm *kvm = vcpu->kvm;
struct kvm_run *run = vcpu->run;
struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
struct imsic *imsic = vaia->imsic_state;
int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;
/* Do nothing for emulation mode */
if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
return 1;
/* Read old IMSIC VS-file details */
read_lock_irqsave(&imsic->vsfile_lock, flags);
old_vsfile_hgei = imsic->vsfile_hgei;
old_vsfile_cpu = imsic->vsfile_cpu;
read_unlock_irqrestore(&imsic->vsfile_lock, flags);
/* Do nothing if we are continuing on same CPU */
if (old_vsfile_cpu == vcpu->cpu)
return 1;
/* Allocate new IMSIC VS-file */
ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
&new_vsfile_va, &new_vsfile_pa);
if (ret <= 0) {
/* For HW acceleration mode, we can't continue */
if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
run->fail_entry.hardware_entry_failure_reason =
CSR_HSTATUS;
run->fail_entry.cpu = vcpu->cpu;
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
}
/* Release old IMSIC VS-file */
if (old_vsfile_cpu >= 0)
kvm_riscv_vcpu_aia_imsic_release(vcpu);
/* For automatic mode, we continue */
goto done;
}
new_vsfile_hgei = ret;
	/*
	 * At this point, all interrupt producers are still using
	 * the old IMSIC VS-file, so we first move all interrupt
	 * producers to the new IMSIC VS-file.
	 */
/* Zero-out new IMSIC VS-file */
imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
/* Update G-stage mapping for the new IMSIC VS-file */
ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
true, true);
if (ret)
goto fail_free_vsfile_hgei;
/* TODO: Update the IOMMU mapping ??? */
/* Update new IMSIC VS-file details in IMSIC context */
write_lock_irqsave(&imsic->vsfile_lock, flags);
imsic->vsfile_hgei = new_vsfile_hgei;
imsic->vsfile_cpu = vcpu->cpu;
imsic->vsfile_va = new_vsfile_va;
imsic->vsfile_pa = new_vsfile_pa;
write_unlock_irqrestore(&imsic->vsfile_lock, flags);
/*
* At this point, all interrupt producers have been moved
* to the new IMSIC VS-file so we move register state from
* the old IMSIC VS/SW-file to the new IMSIC VS-file.
*/
memset(&tmrif, 0, sizeof(tmrif));
if (old_vsfile_cpu >= 0) {
/* Read and clear register state from old IMSIC VS-file */
imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
imsic->nr_hw_eix, true, &tmrif);
/* Free-up old IMSIC VS-file */
kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
} else {
/* Read and clear register state from IMSIC SW-file */
imsic_swfile_read(vcpu, true, &tmrif);
}
/* Restore register state in the new IMSIC VS-file */
imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);
done:
/* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */
vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
if (new_vsfile_hgei > 0)
vcpu->arch.guest_context.hstatus |=
((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
/* Continue run-loop */
return 1;
fail_free_vsfile_hgei:
kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
return ret;
}
int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
u32 topei;
struct imsic_mrif_eix *eix;
int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
/* Read pending and enabled interrupt with highest priority */
topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
imsic->nr_msis);
if (val)
*val = topei;
/* Writes ignore value and clear top pending interrupt */
if (topei && wr_mask) {
topei >>= TOPEI_ID_SHIFT;
if (topei) {
eix = &imsic->swfile->eix[topei /
BITS_PER_TYPE(u64)];
clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
eix->eip);
}
}
} else {
r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
val, new_val, wr_mask);
/* Forward unknown IMSIC register to user-space */
if (r)
rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
}
if (wr_mask)
imsic_swfile_extirq_update(vcpu);
return rc;
}
int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
bool write, unsigned long *val)
{
u32 isel, vcpu_id;
unsigned long flags;
struct imsic *imsic;
struct kvm_vcpu *vcpu;
int rc, vsfile_hgei, vsfile_cpu;
if (!kvm_riscv_aia_initialized(kvm))
return -ENODEV;
vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
imsic = vcpu->arch.aia_context.imsic_state;
read_lock_irqsave(&imsic->vsfile_lock, flags);
rc = 0;
vsfile_hgei = imsic->vsfile_hgei;
vsfile_cpu = imsic->vsfile_cpu;
if (vsfile_cpu < 0) {
if (write) {
rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
isel, NULL, *val, -1UL);
imsic_swfile_extirq_update(vcpu);
} else
rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
isel, val, 0, 0);
}
read_unlock_irqrestore(&imsic->vsfile_lock, flags);
if (!rc && vsfile_cpu >= 0)
rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
isel, write, val);
return rc;
}
int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
{
u32 isel, vcpu_id;
struct imsic *imsic;
struct kvm_vcpu *vcpu;
if (!kvm_riscv_aia_initialized(kvm))
return -ENODEV;
vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
if (!vcpu)
return -ENODEV;
isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
imsic = vcpu->arch.aia_context.imsic_state;
return imsic_mrif_isel_check(imsic->nr_eix, isel);
}
void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
if (!imsic)
return;
kvm_riscv_vcpu_aia_imsic_release(vcpu);
memset(imsic->swfile, 0, sizeof(*imsic->swfile));
}
int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
u32 guest_index, u32 offset, u32 iid)
{
unsigned long flags;
struct imsic_mrif_eix *eix;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
/* We only emulate one IMSIC MMIO page for each Guest VCPU */
if (!imsic || !iid || guest_index ||
(offset != IMSIC_MMIO_SETIPNUM_LE &&
offset != IMSIC_MMIO_SETIPNUM_BE))
return -ENODEV;
iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
if (imsic->nr_msis <= iid)
return -EINVAL;
read_lock_irqsave(&imsic->vsfile_lock, flags);
if (imsic->vsfile_cpu >= 0) {
writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
kvm_vcpu_kick(vcpu);
} else {
eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
imsic_swfile_extirq_update(vcpu);
}
read_unlock_irqrestore(&imsic->vsfile_lock, flags);
return 0;
}
static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
if (len != 4 || (addr & 0x3) != 0)
return -EOPNOTSUPP;
*((u32 *)val) = 0;
return 0;
}
static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
struct kvm_msi msi = { 0 };
if (len != 4 || (addr & 0x3) != 0)
return -EOPNOTSUPP;
msi.address_hi = addr >> 32;
msi.address_lo = (u32)addr;
msi.data = *((const u32 *)val);
kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);
return 0;
}
static struct kvm_io_device_ops imsic_iodoev_ops = {
.read = imsic_mmio_read,
.write = imsic_mmio_write,
};
int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
{
int ret = 0;
struct imsic *imsic;
struct page *swfile_page;
struct kvm *kvm = vcpu->kvm;
/* Fail if we have zero IDs */
if (!kvm->arch.aia.nr_ids)
return -EINVAL;
/* Allocate IMSIC context */
imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
if (!imsic)
return -ENOMEM;
vcpu->arch.aia_context.imsic_state = imsic;
/* Setup IMSIC context */
imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
rwlock_init(&imsic->vsfile_lock);
imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
/* Setup IMSIC SW-file */
swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
get_order(sizeof(*imsic->swfile)));
if (!swfile_page) {
ret = -ENOMEM;
goto fail_free_imsic;
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
vcpu->arch.aia_context.imsic_addr,
KVM_DEV_RISCV_IMSIC_SIZE,
&imsic->iodev);
mutex_unlock(&kvm->slots_lock);
if (ret)
goto fail_free_swfile;
return 0;
fail_free_swfile:
free_pages((unsigned long)imsic->swfile,
get_order(sizeof(*imsic->swfile)));
fail_free_imsic:
vcpu->arch.aia_context.imsic_state = NULL;
kfree(imsic);
return ret;
}
void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
if (!imsic)
return;
imsic_vsfile_cleanup(imsic);
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
mutex_unlock(&kvm->slots_lock);
free_pages((unsigned long)imsic->swfile,
get_order(sizeof(*imsic->swfile)));
vcpu->arch.aia_context.imsic_state = NULL;
kfree(imsic);
}
| linux-master | arch/riscv/kvm/aia_imsic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/bitops.h>
#include <linux/kvm_host.h>
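/*
 * In-kernel emulation of trapped guest instructions: WFI, CSR accesses,
 * and the guest load/store instructions (including their compressed
 * forms) decoded by the helpers below.
 */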
#define INSN_OPCODE_MASK 0x007c
#define INSN_OPCODE_SHIFT 2
#define INSN_OPCODE_SYSTEM 28
#define INSN_MASK_WFI 0xffffffff
#define INSN_MATCH_WFI 0x10500073
#define INSN_MATCH_CSRRW 0x1073
#define INSN_MASK_CSRRW 0x707f
#define INSN_MATCH_CSRRS 0x2073
#define INSN_MASK_CSRRS 0x707f
#define INSN_MATCH_CSRRC 0x3073
#define INSN_MASK_CSRRC 0x707f
#define INSN_MATCH_CSRRWI 0x5073
#define INSN_MASK_CSRRWI 0x707f
#define INSN_MATCH_CSRRSI 0x6073
#define INSN_MASK_CSRRSI 0x707f
#define INSN_MATCH_CSRRCI 0x7073
#define INSN_MASK_CSRRCI 0x707f
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
#define INSN_MASK_LH 0x707f
#define INSN_MATCH_LW 0x2003
#define INSN_MASK_LW 0x707f
#define INSN_MATCH_LD 0x3003
#define INSN_MASK_LD 0x707f
#define INSN_MATCH_LBU 0x4003
#define INSN_MASK_LBU 0x707f
#define INSN_MATCH_LHU 0x5003
#define INSN_MASK_LHU 0x707f
#define INSN_MATCH_LWU 0x6003
#define INSN_MASK_LWU 0x707f
#define INSN_MATCH_SB 0x23
#define INSN_MASK_SB 0x707f
#define INSN_MATCH_SH 0x1023
#define INSN_MASK_SH 0x707f
#define INSN_MATCH_SW 0x2023
#define INSN_MASK_SW 0x707f
#define INSN_MATCH_SD 0x3023
#define INSN_MASK_SD 0x707f
#define INSN_MATCH_C_LD 0x6000
#define INSN_MASK_C_LD 0xe003
#define INSN_MATCH_C_SD 0xe000
#define INSN_MASK_C_SD 0xe003
#define INSN_MATCH_C_LW 0x4000
#define INSN_MASK_C_LW 0xe003
#define INSN_MATCH_C_SW 0xc000
#define INSN_MASK_C_SW 0xe003
#define INSN_MATCH_C_LDSP 0x6002
#define INSN_MASK_C_LDSP 0xe003
#define INSN_MATCH_C_SDSP 0xe002
#define INSN_MASK_C_SDSP 0xe003
#define INSN_MATCH_C_LWSP 0x4002
#define INSN_MASK_C_LWSP 0xe003
#define INSN_MATCH_C_SWSP 0xc002
#define INSN_MASK_C_SWSP 0xe003
#define INSN_16BIT_MASK 0x3
#define INSN_IS_16BIT(insn) (((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)
#define INSN_LEN(insn) (INSN_IS_16BIT(insn) ? 2 : 4)
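/*
 * RISC-V encodes instruction length in the lowest two bits: anything
 * other than 0b11 is a 16-bit compressed instruction, which is what the
 * INSN_IS_16BIT()/INSN_LEN() checks above rely on.
 */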
#ifdef CONFIG_64BIT
#define LOG_REGBYTES 3
#else
#define LOG_REGBYTES 2
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
#define SH_RS2C 2
#define MASK_RX 0x1f
#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
(RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
(RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))
#define GET_FUNCT3(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
struct insn_func {
unsigned long mask;
unsigned long match;
/*
* Possible return values are as follows:
* 1) Returns < 0 for error case
* 2) Returns 0 for exit to user-space
* 3) Returns 1 to continue with next sepc
* 4) Returns 2 to continue with same sepc
* 5) Returns 3 to inject illegal instruction trap and continue
* 6) Returns 4 to inject virtual instruction trap and continue
*
* Use enum kvm_insn_return for return values
*/
int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};
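/*
 * Inject an illegal-instruction trap back into the guest when an
 * instruction cannot be emulated by the host kernel.
 */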
static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
struct kvm_cpu_trap utrap = { 0 };
/* Redirect trap to Guest VCPU */
utrap.sepc = vcpu->arch.guest_context.sepc;
utrap.scause = EXC_INST_ILLEGAL;
utrap.stval = insn;
utrap.htval = 0;
utrap.htinst = 0;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
struct kvm_cpu_trap utrap = { 0 };
/* Redirect trap to Guest VCPU */
utrap.sepc = vcpu->arch.guest_context.sepc;
utrap.scause = EXC_VIRTUAL_INST_FAULT;
utrap.stval = insn;
utrap.htval = 0;
utrap.htinst = 0;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
/**
* kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
*
* @vcpu: The VCPU pointer
*/
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
if (!kvm_arch_vcpu_runnable(vcpu)) {
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
}
}
static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
vcpu->stat.wfi_exit_stat++;
kvm_riscv_vcpu_wfi(vcpu);
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
struct csr_func {
unsigned int base;
unsigned int count;
/*
* Possible return values are the same as those of the "func" callback
* in "struct insn_func".
*/
int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask);
};
static const struct csr_func csr_funcs[] = {
KVM_RISCV_VCPU_AIA_CSR_FUNCS
KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
};
/**
* kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
* emulation or in-kernel emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the CSR data
*
* Returns > 0 upon failure and 0 upon success
*/
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
ulong insn;
if (vcpu->arch.csr_decode.return_handled)
return 0;
vcpu->arch.csr_decode.return_handled = 1;
/* Update destination register for CSR reads */
insn = vcpu->arch.csr_decode.insn;
if ((insn >> SH_RD) & MASK_RX)
SET_RD(insn, &vcpu->arch.guest_context,
run->riscv_csr.ret_value);
/* Move to next instruction */
vcpu->arch.guest_context.sepc += INSN_LEN(insn);
return 0;
}
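/*
 * Emulate a trapped CSR access: decode the access, try the in-kernel
 * csr_funcs[] handlers first, and fall back to a user-space exit
 * (KVM_EXIT_RISCV_CSR) when no in-kernel handler claims the CSR.
 */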
static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
int i, rc = KVM_INSN_ILLEGAL_TRAP;
unsigned int csr_num = insn >> SH_RS2;
unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
const struct csr_func *tcfn, *cfn = NULL;
ulong val = 0, wr_mask = 0, new_val = 0;
/* Decode the CSR instruction */
switch (GET_FUNCT3(insn)) {
case GET_FUNCT3(INSN_MATCH_CSRRW):
wr_mask = -1UL;
new_val = rs1_val;
break;
case GET_FUNCT3(INSN_MATCH_CSRRS):
wr_mask = rs1_val;
new_val = -1UL;
break;
case GET_FUNCT3(INSN_MATCH_CSRRC):
wr_mask = rs1_val;
new_val = 0;
break;
case GET_FUNCT3(INSN_MATCH_CSRRWI):
wr_mask = -1UL;
new_val = rs1_num;
break;
case GET_FUNCT3(INSN_MATCH_CSRRSI):
wr_mask = rs1_num;
new_val = -1UL;
break;
case GET_FUNCT3(INSN_MATCH_CSRRCI):
wr_mask = rs1_num;
new_val = 0;
break;
default:
return rc;
}
/* Save instruction decode info */
vcpu->arch.csr_decode.insn = insn;
vcpu->arch.csr_decode.return_handled = 0;
/* Update CSR details in kvm_run struct */
run->riscv_csr.csr_num = csr_num;
run->riscv_csr.new_value = new_val;
run->riscv_csr.write_mask = wr_mask;
run->riscv_csr.ret_value = 0;
/* Find in-kernel CSR function */
for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
tcfn = &csr_funcs[i];
if ((tcfn->base <= csr_num) &&
(csr_num < (tcfn->base + tcfn->count))) {
cfn = tcfn;
break;
}
}
/* First try in-kernel CSR emulation */
if (cfn && cfn->func) {
rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
run->riscv_csr.ret_value = val;
vcpu->stat.csr_exit_kernel++;
kvm_riscv_vcpu_csr_return(vcpu, run);
rc = KVM_INSN_CONTINUE_SAME_SEPC;
}
return rc;
}
}
/* Exit to user-space for CSR emulation */
if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
vcpu->stat.csr_exit_user++;
run->exit_reason = KVM_EXIT_RISCV_CSR;
}
return rc;
}
static const struct insn_func system_opcode_funcs[] = {
{
.mask = INSN_MASK_CSRRW,
.match = INSN_MATCH_CSRRW,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRS,
.match = INSN_MATCH_CSRRS,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRC,
.match = INSN_MATCH_CSRRC,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRWI,
.match = INSN_MATCH_CSRRWI,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRSI,
.match = INSN_MATCH_CSRRSI,
.func = csr_insn,
},
{
.mask = INSN_MASK_CSRRCI,
.match = INSN_MATCH_CSRRCI,
.func = csr_insn,
},
{
.mask = INSN_MASK_WFI,
.match = INSN_MATCH_WFI,
.func = wfi_insn,
},
};
static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
ulong insn)
{
int i, rc = KVM_INSN_ILLEGAL_TRAP;
const struct insn_func *ifn;
for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
ifn = &system_opcode_funcs[i];
if ((insn & ifn->mask) == ifn->match) {
rc = ifn->func(vcpu, run, insn);
break;
}
}
switch (rc) {
case KVM_INSN_ILLEGAL_TRAP:
return truly_illegal_insn(vcpu, run, insn);
case KVM_INSN_VIRTUAL_TRAP:
return truly_virtual_insn(vcpu, run, insn);
case KVM_INSN_CONTINUE_NEXT_SEPC:
vcpu->arch.guest_context.sepc += INSN_LEN(insn);
break;
default:
break;
}
return (rc <= 0) ? rc : 1;
}
/**
* kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @trap: Trap details
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_cpu_trap *trap)
{
unsigned long insn = trap->stval;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct;
if (unlikely(INSN_IS_16BIT(insn))) {
if (insn == 0) {
ct = &vcpu->arch.guest_context;
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
ct->sepc,
&utrap);
if (utrap.scause) {
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
}
if (INSN_IS_16BIT(insn))
return truly_illegal_insn(vcpu, run, insn);
}
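/* Dispatch on the major opcode; only SYSTEM instructions are emulated here */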
switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
case INSN_OPCODE_SYSTEM:
return system_opcode_insn(vcpu, run, insn);
default:
return truly_illegal_insn(vcpu, run, insn);
}
}
/**
* kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @fault_addr: Guest physical address to load
* @htinst: Transformed encoding of the load instruction
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst)
{
u8 data_buf[8];
unsigned long insn;
int shift = 0, len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
/* Decode length of MMIO and shift */
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
len = 1;
shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
len = 4;
#endif
} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
len = 2;
shift = 8 * (sizeof(ulong) - len);
} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
shift = 8 * (sizeof(ulong) - len);
#endif
} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
shift = 8 * (sizeof(ulong) - len);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = shift;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = false;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
/* Successfully handled MMIO access in the kernel so resume */
memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
/**
* kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
* @fault_addr: Guest physical address to store
* @htinst: Transformed encoding of the store instruction
*
* Returns > 0 to continue run-loop
* Returns 0 to exit run-loop and handle in user-space.
* Returns < 0 to report failure and exit run-loop
*/
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
unsigned long fault_addr,
unsigned long htinst)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong data;
unsigned long insn;
int len = 0, insn_len = 0;
struct kvm_cpu_trap utrap = { 0 };
struct kvm_cpu_context *ct = &vcpu->arch.guest_context;
/* Determine trapped instruction */
if (htinst & 0x1) {
/*
* Bit[0] == 1 implies trapped instruction value is
* transformed instruction or custom instruction.
*/
insn = htinst | INSN_16BIT_MASK;
insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
} else {
/*
* Bit[0] == 0 implies trapped instruction value is
* zero or special value.
*/
insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
&utrap);
if (utrap.scause) {
/* Redirect trap if we failed to read instruction */
utrap.sepc = ct->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
return 1;
}
insn_len = INSN_LEN(insn);
}
data = GET_RS2(insn, &vcpu->arch.guest_context);
data8 = data16 = data32 = data64 = data;
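/* Decode the store width and, for compressed encodings, re-fetch the data from the correct source register */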
if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
len = 4;
} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
len = 1;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#ifdef CONFIG_64BIT
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
} else {
return -EOPNOTSUPP;
}
/* Fault address should be aligned to length of MMIO */
if (fault_addr & (len - 1))
return -EIO;
/* Save instruction decode info */
vcpu->arch.mmio_decode.insn = insn;
vcpu->arch.mmio_decode.insn_len = insn_len;
vcpu->arch.mmio_decode.shift = 0;
vcpu->arch.mmio_decode.len = len;
vcpu->arch.mmio_decode.return_handled = 0;
/* Copy data to kvm_run instance */
switch (len) {
case 1:
*((u8 *)run->mmio.data) = data8;
break;
case 2:
*((u16 *)run->mmio.data) = data16;
break;
case 4:
*((u32 *)run->mmio.data) = data32;
break;
case 8:
*((u64 *)run->mmio.data) = data64;
break;
default:
return -EOPNOTSUPP;
}
/* Update MMIO details in kvm_run struct */
run->mmio.is_write = true;
run->mmio.phys_addr = fault_addr;
run->mmio.len = len;
/* Try to handle MMIO access in the kernel */
if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
fault_addr, len, run->mmio.data)) {
/* Successfully handled MMIO access in the kernel so resume */
vcpu->stat.mmio_exit_kernel++;
kvm_riscv_vcpu_mmio_return(vcpu, run);
return 1;
}
/* Exit to userspace for MMIO emulation */
vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
/**
* kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
*
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*/
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u8 data8;
u16 data16;
u32 data32;
u64 data64;
ulong insn;
int len, shift;
if (vcpu->arch.mmio_decode.return_handled)
return 0;
vcpu->arch.mmio_decode.return_handled = 1;
insn = vcpu->arch.mmio_decode.insn;
if (run->mmio.is_write)
goto done;
len = vcpu->arch.mmio_decode.len;
shift = vcpu->arch.mmio_decode.shift;
switch (len) {
case 1:
data8 = *((u8 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data8 << shift >> shift);
break;
case 2:
data16 = *((u16 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data16 << shift >> shift);
break;
case 4:
data32 = *((u32 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data32 << shift >> shift);
break;
case 8:
data64 = *((u64 *)run->mmio.data);
SET_RD(insn, &vcpu->arch.guest_context,
(ulong)data64 << shift >> shift);
break;
default:
return -EOPNOTSUPP;
}
done:
/* Move to next instruction */
vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;
return 0;
}
| linux-master | arch/riscv/kvm/vcpu_insn.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/version.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
const struct kvm_vcpu_sbi_extension *sbi_ext;
unsigned long *out_val = &retdata->out_val;
switch (cp->a6) {
case SBI_EXT_BASE_GET_SPEC_VERSION:
*out_val = (KVM_SBI_VERSION_MAJOR <<
SBI_SPEC_VERSION_MAJOR_SHIFT) |
KVM_SBI_VERSION_MINOR;
break;
case SBI_EXT_BASE_GET_IMP_ID:
*out_val = KVM_SBI_IMPID;
break;
case SBI_EXT_BASE_GET_IMP_VERSION:
*out_val = LINUX_VERSION_CODE;
break;
case SBI_EXT_BASE_PROBE_EXT:
if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
cp->a0 <= SBI_EXT_EXPERIMENTAL_END) ||
(cp->a0 >= SBI_EXT_VENDOR_START &&
cp->a0 <= SBI_EXT_VENDOR_END)) {
/*
* For experimental/vendor extensions,
* forward the probe to userspace.
*/
kvm_riscv_vcpu_sbi_forward(vcpu, run);
retdata->uexit = true;
} else {
sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a0);
*out_val = sbi_ext && sbi_ext->probe ?
sbi_ext->probe(vcpu) : !!sbi_ext;
}
break;
case SBI_EXT_BASE_GET_MVENDORID:
*out_val = vcpu->arch.mvendorid;
break;
case SBI_EXT_BASE_GET_MARCHID:
*out_val = vcpu->arch.marchid;
break;
case SBI_EXT_BASE_GET_MIMPID:
*out_val = vcpu->arch.mimpid;
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
break;
}
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = {
.extid_start = SBI_EXT_BASE,
.extid_end = SBI_EXT_BASE,
.handler = kvm_sbi_ext_base_handler,
};
static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu,
struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
/*
* Both SBI experimental and vendor extensions are
* unconditionally forwarded to userspace.
*/
kvm_riscv_vcpu_sbi_forward(vcpu, run);
retdata->uexit = true;
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
.extid_start = SBI_EXT_EXPERIMENTAL_START,
.extid_end = SBI_EXT_EXPERIMENTAL_END,
.handler = kvm_sbi_ext_forward_handler,
};
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
.extid_start = SBI_EXT_VENDOR_START,
.extid_end = SBI_EXT_VENDOR_END,
.handler = kvm_sbi_ext_forward_handler,
};
| linux-master | arch/riscv/kvm/vcpu_sbi_base.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
}
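/*
 * Per-hart hardware enable: delegate guest exceptions and interrupts to
 * VS-mode, restrict HCOUNTEREN to the time counter, and enable AIA.
 */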
int kvm_arch_hardware_enable(void)
{
unsigned long hideleg, hedeleg;
hedeleg = 0;
hedeleg |= (1UL << EXC_INST_MISALIGNED);
hedeleg |= (1UL << EXC_BREAKPOINT);
hedeleg |= (1UL << EXC_SYSCALL);
hedeleg |= (1UL << EXC_INST_PAGE_FAULT);
hedeleg |= (1UL << EXC_LOAD_PAGE_FAULT);
hedeleg |= (1UL << EXC_STORE_PAGE_FAULT);
csr_write(CSR_HEDELEG, hedeleg);
hideleg = 0;
hideleg |= (1UL << IRQ_VS_SOFT);
hideleg |= (1UL << IRQ_VS_TIMER);
hideleg |= (1UL << IRQ_VS_EXT);
csr_write(CSR_HIDELEG, hideleg);
/* VS should access only the time counter directly. Everything else should trap */
csr_write(CSR_HCOUNTEREN, 0x02);
csr_write(CSR_HVIP, 0);
kvm_riscv_aia_enable();
return 0;
}
void kvm_arch_hardware_disable(void)
{
kvm_riscv_aia_disable();
/*
* After clearing the hideleg CSR, the host kernel will receive
* spurious interrupts if hvip CSR has pending interrupts and the
* corresponding enable bits in vsie CSR are asserted. To avoid it,
* hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
*/
csr_write(CSR_VSIE, 0);
csr_write(CSR_HVIP, 0);
csr_write(CSR_HEDELEG, 0);
csr_write(CSR_HIDELEG, 0);
}
static int __init riscv_kvm_init(void)
{
int rc;
const char *str;
if (!riscv_isa_extension_available(NULL, h)) {
kvm_info("hypervisor extension not available\n");
return -ENODEV;
}
if (sbi_spec_is_0_1()) {
kvm_info("require SBI v0.2 or higher\n");
return -ENODEV;
}
if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
kvm_info("require SBI RFENCE extension\n");
return -ENODEV;
}
kvm_riscv_gstage_mode_detect();
kvm_riscv_gstage_vmid_detect();
rc = kvm_riscv_aia_init();
if (rc && rc != -ENODEV)
return rc;
kvm_info("hypervisor extension available\n");
switch (kvm_riscv_gstage_mode()) {
case HGATP_MODE_SV32X4:
str = "Sv32x4";
break;
case HGATP_MODE_SV39X4:
str = "Sv39x4";
break;
case HGATP_MODE_SV48X4:
str = "Sv48x4";
break;
case HGATP_MODE_SV57X4:
str = "Sv57x4";
break;
default:
return -ENODEV;
}
kvm_info("using %s G-stage page table format\n", str);
kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());
if (kvm_riscv_aia_available())
kvm_info("AIA available with %d guest external interrupts\n",
kvm_riscv_aia_nr_hgei);
rc = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (rc) {
kvm_riscv_aia_exit();
return rc;
}
return 0;
}
module_init(riscv_kvm_init);
static void __exit riscv_kvm_exit(void)
{
kvm_riscv_aia_exit();
kvm_exit();
}
module_exit(riscv_kvm_exit);
| linux-master | arch/riscv/kvm/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Rivos Inc
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
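/*
 * Dispatch SBI PMU extension calls to the in-kernel vCPU PMU; on RV32 the
 * 64-bit event data / initial value arguments are reassembled from two
 * argument registers.
 */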
static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
int ret = 0;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
unsigned long funcid = cp->a6;
u64 temp;
if (!kvpmu->init_done) {
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
return 0;
}
switch (funcid) {
case SBI_EXT_PMU_NUM_COUNTERS:
ret = kvm_riscv_vcpu_pmu_num_ctrs(vcpu, retdata);
break;
case SBI_EXT_PMU_COUNTER_GET_INFO:
ret = kvm_riscv_vcpu_pmu_ctr_info(vcpu, cp->a0, retdata);
break;
case SBI_EXT_PMU_COUNTER_CFG_MATCH:
#if defined(CONFIG_32BIT)
temp = ((uint64_t)cp->a5 << 32) | cp->a4;
#else
temp = cp->a4;
#endif
/*
* This can fail if perf core framework fails to create an event.
* Forward the error to userspace because it's an error which
* happened within the host kernel. The other option would be
* to convert to an SBI error and forward to the guest.
*/
ret = kvm_riscv_vcpu_pmu_ctr_cfg_match(vcpu, cp->a0, cp->a1,
cp->a2, cp->a3, temp, retdata);
break;
case SBI_EXT_PMU_COUNTER_START:
#if defined(CONFIG_32BIT)
temp = ((uint64_t)cp->a4 << 32) | cp->a3;
#else
temp = cp->a3;
#endif
ret = kvm_riscv_vcpu_pmu_ctr_start(vcpu, cp->a0, cp->a1, cp->a2,
temp, retdata);
break;
case SBI_EXT_PMU_COUNTER_STOP:
ret = kvm_riscv_vcpu_pmu_ctr_stop(vcpu, cp->a0, cp->a1, cp->a2, retdata);
break;
case SBI_EXT_PMU_COUNTER_FW_READ:
ret = kvm_riscv_vcpu_pmu_ctr_read(vcpu, cp->a0, retdata);
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
return ret;
}
static unsigned long kvm_sbi_ext_pmu_probe(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
return kvpmu->init_done;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
.extid_start = SBI_EXT_PMU,
.extid_end = SBI_EXT_PMU,
.handler = kvm_sbi_ext_pmu_handler,
.probe = kvm_sbi_ext_pmu_probe,
};
| linux-master | arch/riscv/kvm/vcpu_sbi_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
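/*
 * SBI HSM HART_START: validate the target VCPU, program its reset context
 * with the requested start address and opaque argument, then power it on.
 */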
static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *reset_cntx;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm_vcpu *target_vcpu;
unsigned long target_vcpuid = cp->a0;
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
if (!target_vcpu->arch.power_off)
return SBI_ERR_ALREADY_AVAILABLE;
reset_cntx = &target_vcpu->arch.guest_reset_context;
/* start address */
reset_cntx->sepc = cp->a1;
/* target vcpu id to start */
reset_cntx->a0 = target_vcpuid;
/* private data passed from kernel */
reset_cntx->a1 = cp->a2;
kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
kvm_riscv_vcpu_power_on(target_vcpu);
return 0;
}
static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.power_off)
return SBI_ERR_FAILURE;
kvm_riscv_vcpu_power_off(vcpu);
return 0;
}
static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long target_vcpuid = cp->a0;
struct kvm_vcpu *target_vcpu;
target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
if (!target_vcpu)
return SBI_ERR_INVALID_PARAM;
if (!target_vcpu->arch.power_off)
return SBI_HSM_STATE_STARTED;
else if (vcpu->stat.generic.blocking)
return SBI_HSM_STATE_SUSPENDED;
else
return SBI_HSM_STATE_STOPPED;
}
static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
int ret = 0;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
struct kvm *kvm = vcpu->kvm;
unsigned long funcid = cp->a6;
switch (funcid) {
case SBI_EXT_HSM_HART_START:
mutex_lock(&kvm->lock);
ret = kvm_sbi_hsm_vcpu_start(vcpu);
mutex_unlock(&kvm->lock);
break;
case SBI_EXT_HSM_HART_STOP:
ret = kvm_sbi_hsm_vcpu_stop(vcpu);
break;
case SBI_EXT_HSM_HART_STATUS:
ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
if (ret >= 0) {
retdata->out_val = ret;
retdata->err_val = 0;
}
return 0;
case SBI_EXT_HSM_HART_SUSPEND:
switch (cp->a0) {
case SBI_HSM_SUSPEND_RET_DEFAULT:
kvm_riscv_vcpu_wfi(vcpu);
break;
case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
ret = SBI_ERR_NOT_SUPPORTED;
break;
default:
ret = SBI_ERR_INVALID_PARAM;
}
break;
default:
ret = SBI_ERR_NOT_SUPPORTED;
}
retdata->err_val = ret;
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
.extid_start = SBI_EXT_HSM,
.extid_end = SBI_EXT_HSM,
.handler = kvm_sbi_ext_hsm_handler,
};
| linux-master | arch/riscv/kvm/vcpu_sbi_hsm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/hwcap.h>
#include <asm/kvm_aia_imsic.h>
struct aia_hgei_control {
raw_spinlock_t lock;
unsigned long free_bitmap;
struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;
unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
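/*
 * Find the guest external interrupt (HGEI) line assigned to @owner on the
 * current CPU; returns -1 if the VCPU owns no line here.
 */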
static int aia_find_hgei(struct kvm_vcpu *owner)
{
int i, hgei;
unsigned long flags;
struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
hgei = -1;
for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
if (hgctrl->owners[i] == owner) {
hgei = i;
break;
}
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
return hgei;
}
static void aia_set_hvictl(bool ext_irq_pending)
{
unsigned long hvictl;
/*
* HVICTL.IID == 9 and HVICTL.IPRIO == 0 represent
* "no interrupt" in HVICTL.
*/
hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
hvictl |= ext_irq_pending;
csr_write(CSR_HVICTL, hvictl);
}
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
unsigned long mask, val;
if (!kvm_riscv_aia_available())
return;
if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;
csr->hviph &= ~mask;
csr->hviph |= val;
}
}
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (kvm_riscv_aia_available())
csr->vsieh = csr_read(CSR_VSIEH);
}
#endif
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
int hgei;
unsigned long seip;
if (!kvm_riscv_aia_available())
return false;
#ifdef CONFIG_32BIT
if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
(vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
return true;
#endif
seip = vcpu->arch.guest_csr.vsie;
seip &= (unsigned long)mask;
seip &= BIT(IRQ_S_EXT);
if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
return false;
hgei = aia_find_hgei(vcpu);
if (hgei > 0)
return !!(csr_read(CSR_HGEIP) & BIT(hgei));
return false;
}
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
if (!kvm_riscv_aia_available())
return;
#ifdef CONFIG_32BIT
csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
}
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (!kvm_riscv_aia_available())
return;
csr_write(CSR_VSISELECT, csr->vsiselect);
csr_write(CSR_HVIPRIO1, csr->hviprio1);
csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
csr_write(CSR_VSIEH, csr->vsieh);
csr_write(CSR_HVIPH, csr->hviph);
csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (!kvm_riscv_aia_available())
return;
csr->vsiselect = csr_read(CSR_VSISELECT);
csr->hviprio1 = csr_read(CSR_HVIPRIO1);
csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
csr->vsieh = csr_read(CSR_VSIEH);
csr->hviph = csr_read(CSR_HVIPH);
csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
}
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *out_val)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
return -ENOENT;
*out_val = 0;
if (kvm_riscv_aia_available())
*out_val = ((unsigned long *)csr)[reg_num];
return 0;
}
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long val)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
return -ENOENT;
if (kvm_riscv_aia_available()) {
((unsigned long *)csr)[reg_num] = val;
#ifdef CONFIG_32BIT
if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
}
return 0;
}
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
unsigned int csr_num,
unsigned long *val,
unsigned long new_val,
unsigned long wr_mask)
{
/* If AIA not available then redirect trap */
if (!kvm_riscv_aia_available())
return KVM_INSN_ILLEGAL_TRAP;
/* If AIA not initialized then forward to user space */
if (!kvm_riscv_aia_initialized(vcpu->kvm))
return KVM_INSN_EXIT_TO_USER_SPACE;
return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
val, new_val, wr_mask);
}
/*
* External IRQ priority is always read-only zero. This means the default
* priority order is always preferred for external IRQs unless
* HVICTL.IID == 9 and HVICTL.IPRIO != 0.
*/
static int aia_irq2bitpos[] = {
0, 8, -1, -1, 16, 24, -1, -1, /* 0 - 7 */
32, -1, -1, -1, -1, 40, 48, 56, /* 8 - 15 */
64, 72, 80, 88, 96, 104, 112, 120, /* 16 - 23 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 24 - 31 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 32 - 39 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 40 - 47 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 48 - 55 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 56 - 63 */
};
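/*
 * Read the 8-bit priority of @irq from the appropriate HVIPRIO* CSR, using
 * aia_irq2bitpos[] to locate its bit position; unmapped IRQs read as zero.
 */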
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
unsigned long hviprio;
int bitpos = aia_irq2bitpos[irq];
if (bitpos < 0)
return 0;
switch (bitpos / BITS_PER_LONG) {
case 0:
hviprio = csr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
hviprio = csr_read(CSR_HVIPRIO2);
break;
#else
hviprio = csr_read(CSR_HVIPRIO1H);
break;
case 2:
hviprio = csr_read(CSR_HVIPRIO2);
break;
case 3:
hviprio = csr_read(CSR_HVIPRIO2H);
break;
#endif
default:
return 0;
}
return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
unsigned long hviprio;
int bitpos = aia_irq2bitpos[irq];
if (bitpos < 0)
return;
switch (bitpos / BITS_PER_LONG) {
case 0:
hviprio = csr_read(CSR_HVIPRIO1);
break;
case 1:
#ifndef CONFIG_32BIT
hviprio = csr_read(CSR_HVIPRIO2);
break;
#else
hviprio = csr_read(CSR_HVIPRIO1H);
break;
case 2:
hviprio = csr_read(CSR_HVIPRIO2);
break;
case 3:
hviprio = csr_read(CSR_HVIPRIO2H);
break;
#endif
default:
return;
}
hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);
switch (bitpos / BITS_PER_LONG) {
case 0:
csr_write(CSR_HVIPRIO1, hviprio);
break;
case 1:
#ifndef CONFIG_32BIT
csr_write(CSR_HVIPRIO2, hviprio);
break;
#else
csr_write(CSR_HVIPRIO1H, hviprio);
break;
case 2:
csr_write(CSR_HVIPRIO2, hviprio);
break;
case 3:
csr_write(CSR_HVIPRIO2H, hviprio);
break;
#endif
default:
return;
}
}
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
int i, first_irq, nirqs;
unsigned long old_val;
u8 prio;
#ifndef CONFIG_32BIT
if (isel & 0x1)
return KVM_INSN_ILLEGAL_TRAP;
#endif
nirqs = 4 * (BITS_PER_LONG / 32);
first_irq = (isel - ISELECT_IPRIO0) * 4;
old_val = 0;
for (i = 0; i < nirqs; i++) {
prio = aia_get_iprio8(vcpu, first_irq + i);
old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
}
if (val)
*val = old_val;
if (wr_mask) {
new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
for (i = 0; i < nirqs; i++) {
prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
TOPI_IPRIO_MASK;
aia_set_iprio8(vcpu, first_irq + i, prio);
}
}
return KVM_INSN_CONTINUE_NEXT_SEPC;
}
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
unsigned int isel;
/* If AIA not available then redirect trap */
if (!kvm_riscv_aia_available())
return KVM_INSN_ILLEGAL_TRAP;
/* First try to emulate in kernel space */
isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
kvm_riscv_aia_initialized(vcpu->kvm))
return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
wr_mask);
/* We can't handle it here so redirect to user space */
return KVM_INSN_EXIT_TO_USER_SPACE;
}
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
int ret = -ENOENT;
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
if (!kvm_riscv_aia_available() || !hgctrl)
return -ENODEV;
raw_spin_lock_irqsave(&hgctrl->lock, flags);
if (hgctrl->free_bitmap) {
ret = __ffs(hgctrl->free_bitmap);
hgctrl->free_bitmap &= ~BIT(ret);
hgctrl->owners[ret] = owner;
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
/* TODO: To be updated later by AIA IMSIC HW guest file support */
if (hgei_va)
*hgei_va = NULL;
if (hgei_pa)
*hgei_pa = 0;
return ret;
}
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
unsigned long flags;
struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);
if (!kvm_riscv_aia_available() || !hgctrl)
return;
raw_spin_lock_irqsave(&hgctrl->lock, flags);
if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
if (!(hgctrl->free_bitmap & BIT(hgei))) {
hgctrl->free_bitmap |= BIT(hgei);
hgctrl->owners[hgei] = NULL;
}
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
int hgei;
if (!kvm_riscv_aia_available())
return;
hgei = aia_find_hgei(owner);
if (hgei > 0) {
if (enable)
csr_set(CSR_HGEIE, BIT(hgei));
else
csr_clear(CSR_HGEIE, BIT(hgei));
}
}
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
int i;
unsigned long hgei_mask, flags;
struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
csr_clear(CSR_HGEIE, hgei_mask);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
if (hgctrl->owners[i])
kvm_vcpu_kick(hgctrl->owners[i]);
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
return IRQ_HANDLED;
}
static int aia_hgei_init(void)
{
int cpu, rc;
struct irq_domain *domain;
struct aia_hgei_control *hgctrl;
/* Initialize per-CPU guest external interrupt line management */
for_each_possible_cpu(cpu) {
hgctrl = per_cpu_ptr(&aia_hgei, cpu);
raw_spin_lock_init(&hgctrl->lock);
if (kvm_riscv_aia_nr_hgei) {
hgctrl->free_bitmap =
BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
hgctrl->free_bitmap &= ~BIT(0);
} else
hgctrl->free_bitmap = 0;
}
/* Find INTC irq domain */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
DOMAIN_BUS_ANY);
if (!domain) {
kvm_err("unable to find INTC domain\n");
return -ENOENT;
}
/* Map per-CPU SGEI interrupt from INTC domain */
hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
if (!hgei_parent_irq) {
kvm_err("unable to map SGEI IRQ\n");
return -ENOMEM;
}
/* Request per-CPU SGEI interrupt */
rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
"riscv-kvm", &aia_hgei);
if (rc) {
kvm_err("failed to request SGEI IRQ\n");
return rc;
}
return 0;
}
static void aia_hgei_exit(void)
{
/* Free per-CPU SGEI interrupt */
free_percpu_irq(hgei_parent_irq, &aia_hgei);
}
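/* Per-CPU AIA enable: reset HVICTL/HVIPRIO state and turn on the SGEI interrupt */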
void kvm_riscv_aia_enable(void)
{
if (!kvm_riscv_aia_available())
return;
aia_set_hvictl(false);
csr_write(CSR_HVIPRIO1, 0x0);
csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
csr_write(CSR_HVIPH, 0x0);
csr_write(CSR_HIDELEGH, 0x0);
csr_write(CSR_HVIPRIO1H, 0x0);
csr_write(CSR_HVIPRIO2H, 0x0);
#endif
/* Enable per-CPU SGEI interrupt */
enable_percpu_irq(hgei_parent_irq,
irq_get_trigger_type(hgei_parent_irq));
csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
}
void kvm_riscv_aia_disable(void)
{
int i;
unsigned long flags;
struct kvm_vcpu *vcpu;
struct aia_hgei_control *hgctrl;
if (!kvm_riscv_aia_available())
return;
hgctrl = get_cpu_ptr(&aia_hgei);
/* Disable per-CPU SGEI interrupt */
csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
disable_percpu_irq(hgei_parent_irq);
aia_set_hvictl(false);
raw_spin_lock_irqsave(&hgctrl->lock, flags);
for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
vcpu = hgctrl->owners[i];
if (!vcpu)
continue;
/*
* We release hgctrl->lock before notifying IMSIC
* so that we don't have lock ordering issues.
*/
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
/* Notify IMSIC */
kvm_riscv_vcpu_aia_imsic_release(vcpu);
/*
* Wake up the VCPU if it was blocked so that it can
* run on other HARTs.
*/
if (csr_read(CSR_HGEIE) & BIT(i)) {
csr_clear(CSR_HGEIE, BIT(i));
kvm_vcpu_kick(vcpu);
}
raw_spin_lock_irqsave(&hgctrl->lock, flags);
}
raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
put_cpu_ptr(&aia_hgei);
}
int kvm_riscv_aia_init(void)
{
int rc;
if (!riscv_isa_extension_available(NULL, SxAIA))
return -ENODEV;
/* Figure-out number of bits in HGEIE */
csr_write(CSR_HGEIE, -1UL);
kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
csr_write(CSR_HGEIE, 0);
if (kvm_riscv_aia_nr_hgei)
kvm_riscv_aia_nr_hgei--;
/*
* The number of usable HGEI lines should be the minimum of the number
* of per-HART IMSIC guest files and the number of bits in HGEIE.
*
* TODO: To be updated later by AIA IMSIC HW guest file support
*/
kvm_riscv_aia_nr_hgei = 0;
/*
* Find number of guest MSI IDs
*
* TODO: To be updated later by AIA IMSIC HW guest file support
*/
kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
/* Initialize guest external interrupt line management */
rc = aia_hgei_init();
if (rc)
return rc;
/* Register device operations */
rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
KVM_DEV_TYPE_RISCV_AIA);
if (rc) {
aia_hgei_exit();
return rc;
}
/* Enable KVM AIA support */
static_branch_enable(&kvm_riscv_aia_available);
return 0;
}
void kvm_riscv_aia_exit(void)
{
if (!kvm_riscv_aia_available())
return;
/* Unregister device operations */
kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);
/* Cleanup the HGEI state */
aia_hgei_exit();
}
| linux-master | arch/riscv/kvm/aia.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
.extid_start = -1UL,
.extid_end = -1UL,
.handler = NULL,
};
#endif
#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
.extid_start = -1UL,
.extid_end = -1UL,
.handler = NULL,
};
#endif
struct kvm_riscv_sbi_extension_entry {
enum KVM_RISCV_SBI_EXT_ID ext_idx;
const struct kvm_vcpu_sbi_extension *ext_ptr;
};
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
{
.ext_idx = KVM_RISCV_SBI_EXT_V01,
.ext_ptr = &vcpu_sbi_ext_v01,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
.ext_ptr = &vcpu_sbi_ext_base,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_TIME,
.ext_ptr = &vcpu_sbi_ext_time,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_IPI,
.ext_ptr = &vcpu_sbi_ext_ipi,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
.ext_ptr = &vcpu_sbi_ext_rfence,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_SRST,
.ext_ptr = &vcpu_sbi_ext_srst,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_HSM,
.ext_ptr = &vcpu_sbi_ext_hsm,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_PMU,
.ext_ptr = &vcpu_sbi_ext_pmu,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_ptr = &vcpu_sbi_ext_experimental,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
.ext_ptr = &vcpu_sbi_ext_vendor,
},
};
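/*
 * Forward an SBI call to userspace via KVM_EXIT_RISCV_SBI: a7/a6 become the
 * extension/function IDs and a0-a5 are copied out as arguments.
 */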
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
vcpu->arch.sbi_context.return_handled = 0;
vcpu->stat.ecall_exit_stat++;
run->exit_reason = KVM_EXIT_RISCV_SBI;
run->riscv_sbi.extension_id = cp->a7;
run->riscv_sbi.function_id = cp->a6;
run->riscv_sbi.args[0] = cp->a0;
run->riscv_sbi.args[1] = cp->a1;
run->riscv_sbi.args[2] = cp->a2;
run->riscv_sbi.args[3] = cp->a3;
run->riscv_sbi.args[4] = cp->a4;
run->riscv_sbi.args[5] = cp->a5;
run->riscv_sbi.ret[0] = cp->a0;
run->riscv_sbi.ret[1] = cp->a1;
}
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
struct kvm_run *run,
u32 type, u64 reason)
{
unsigned long i;
struct kvm_vcpu *tmp;
kvm_for_each_vcpu(i, tmp, vcpu->kvm)
tmp->arch.power_off = true;
kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&run->system_event, 0, sizeof(run->system_event));
run->system_event.type = type;
run->system_event.ndata = 1;
run->system_event.data[0] = reason;
run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
/* Handle SBI return only once */
if (vcpu->arch.sbi_context.return_handled)
return 0;
vcpu->arch.sbi_context.return_handled = 1;
/* Update return values */
cp->a0 = run->riscv_sbi.ret[0];
cp->a1 = run->riscv_sbi.ret[1];
/* Move to next instruction */
vcpu->arch.guest_context.sepc += 4;
return 0;
}
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
unsigned long i;
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
return -ENOENT;
if (reg_val != 1 && reg_val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
}
if (!sext)
return -ENOENT;
/*
* We can't set the extension status to available here, since it may
* have a probe() function which needs to confirm availability first,
* but it may be too early to call that here. We can set the status to
* unavailable, though.
*/
if (!reg_val)
scontext->ext_status[sext->ext_idx] =
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
unsigned long i;
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
return -ENOENT;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
}
if (!sext)
return -ENOENT;
/*
* If the extension status is still uninitialized, then we should probe
* to determine if it's available, but it may be too early to do that
* here. The best we can do is report that the extension has not been
* disabled, i.e. we return 1 when the extension is available and also
* when it only may be available.
*/
*reg_val = scontext->ext_status[sext->ext_idx] !=
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return 0;
}
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val, bool enable)
{
unsigned long i, ext_id;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
return -ENOENT;
for_each_set_bit(i, ®_val, BITS_PER_LONG) {
ext_id = i + reg_num * BITS_PER_LONG;
if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
break;
riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
}
return 0;
}
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
unsigned long i, ext_id, ext_val;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
return -ENOENT;
for (i = 0; i < BITS_PER_LONG; i++) {
ext_id = i + reg_num * BITS_PER_LONG;
if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
break;
ext_val = 0;
riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
if (ext_val)
*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
}
return 0;
}
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_EXT);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (vcpu->arch.ran_atleast_once)
return -EBUSY;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_SINGLE:
return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
case KVM_REG_RISCV_SBI_MULTI_EN:
return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
case KVM_REG_RISCV_SBI_MULTI_DIS:
return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;
}
return 0;
}
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
int rc;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_EXT);
unsigned long reg_val, reg_subtype;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
reg_val = 0;
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_SINGLE:
rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, ®_val);
break;
case KVM_REG_RISCV_SBI_MULTI_EN:
case KVM_REG_RISCV_SBI_MULTI_DIS:
rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, ®_val);
if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
reg_val = ~reg_val;
break;
default:
rc = -ENOENT;
}
if (rc)
return rc;
if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid)
{
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
const struct kvm_riscv_sbi_extension_entry *entry;
const struct kvm_vcpu_sbi_extension *ext;
int i;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
entry = &sbi_ext[i];
ext = entry->ext_ptr;
if (ext->extid_start <= extid && ext->extid_end >= extid) {
if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
scontext->ext_status[entry->ext_idx] ==
KVM_RISCV_SBI_EXT_AVAILABLE)
return ext;
if (scontext->ext_status[entry->ext_idx] ==
KVM_RISCV_SBI_EXT_UNAVAILABLE)
return NULL;
if (ext->probe && !ext->probe(vcpu)) {
scontext->ext_status[entry->ext_idx] =
KVM_RISCV_SBI_EXT_UNAVAILABLE;
return NULL;
}
scontext->ext_status[entry->ext_idx] =
KVM_RISCV_SBI_EXT_AVAILABLE;
return ext;
}
}
return NULL;
}
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret = 1;
bool next_sepc = true;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
const struct kvm_vcpu_sbi_extension *sbi_ext;
struct kvm_cpu_trap utrap = {0};
struct kvm_vcpu_sbi_return sbi_ret = {
.out_val = 0,
.err_val = 0,
.utrap = &utrap,
};
bool ext_is_v01 = false;
sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
ext_is_v01 = true;
#endif
ret = sbi_ext->handler(vcpu, run, &sbi_ret);
} else {
/* Return error for unsupported SBI calls */
cp->a0 = SBI_ERR_NOT_SUPPORTED;
goto ecall_done;
}
/*
* When the SBI extension returns a Linux error code, it exits the ioctl
* loop and forwards the error to userspace.
*/
if (ret < 0) {
next_sepc = false;
goto ecall_done;
}
/* Handle special error cases, i.e. trap, exit or userspace forward */
if (sbi_ret.utrap->scause) {
/* No need to increment sepc or exit ioctl loop */
ret = 1;
sbi_ret.utrap->sepc = cp->sepc;
kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
next_sepc = false;
goto ecall_done;
}
/* Exit the ioctl loop or propagate the error code to the guest */
if (sbi_ret.uexit) {
next_sepc = false;
ret = 0;
} else {
cp->a0 = sbi_ret.err_val;
ret = 1;
}
ecall_done:
if (next_sepc)
cp->sepc += 4;
/* a1 should only be updated when we continue the ioctl loop */
if (!ext_is_v01 && ret == 1)
cp->a1 = sbi_ret.out_val;
return ret;
}
| linux-master | arch/riscv/kvm/vcpu_sbi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_pmu.h>
#include <asm/kvm_vcpu_sbi.h>
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
u64 next_cycle;
if (cp->a6 != SBI_EXT_TIME_SET_TIMER) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
#if __riscv_xlen == 32
next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
next_cycle = (u64)cp->a0;
#endif
kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
.extid_start = SBI_EXT_TIME,
.extid_end = SBI_EXT_TIME,
.handler = kvm_sbi_ext_time_handler,
};
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
int ret = 0;
unsigned long i;
struct kvm_vcpu *tmp;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
if (cp->a6 != SBI_EXT_IPI_SEND_IPI) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
if (hbase != -1UL) {
if (tmp->vcpu_id < hbase)
continue;
if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
continue;
}
ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
if (ret < 0)
break;
kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
}
return ret;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
.extid_start = SBI_EXT_IPI,
.extid_end = SBI_EXT_IPI,
.handler = kvm_sbi_ext_ipi_handler,
};
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long hmask = cp->a0;
unsigned long hbase = cp->a1;
unsigned long funcid = cp->a6;
switch (funcid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
if (cp->a2 == 0 && cp->a3 == 0)
kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
else
kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
cp->a2, cp->a3, PAGE_SHIFT);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
if (cp->a2 == 0 && cp->a3 == 0)
kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
hbase, hmask, cp->a4);
else
kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
hbase, hmask,
cp->a2, cp->a3,
PAGE_SHIFT, cp->a4);
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
/*
* Until nested virtualization is implemented, the
* SBI HFENCE calls should be treated as NOPs
*/
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
.extid_start = SBI_EXT_RFENCE,
.extid_end = SBI_EXT_RFENCE,
.handler = kvm_sbi_ext_rfence_handler,
};
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long funcid = cp->a6;
u32 reason = cp->a1;
u32 type = cp->a0;
switch (funcid) {
case SBI_EXT_SRST_RESET:
switch (type) {
case SBI_SRST_RESET_TYPE_SHUTDOWN:
kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
KVM_SYSTEM_EVENT_SHUTDOWN,
reason);
retdata->uexit = true;
break;
case SBI_SRST_RESET_TYPE_COLD_REBOOT:
case SBI_SRST_RESET_TYPE_WARM_REBOOT:
kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
KVM_SYSTEM_EVENT_RESET,
reason);
retdata->uexit = true;
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
return 0;
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
.extid_start = SBI_EXT_SRST,
.extid_end = SBI_EXT_SRST,
.handler = kvm_sbi_ext_srst_handler,
};
| linux-master | arch/riscv/kvm/vcpu_sbi_replace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
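/*
 * VMIDs are handed out from a single global counter; when it wraps,
 * vmid_version is bumped and every guest must pick up a fresh VMID
 * (see kvm_riscv_gstage_vmid_update() below).
 */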
static unsigned long vmid_version = 1;
static unsigned long vmid_next;
static unsigned long vmid_bits __ro_after_init;
static DEFINE_SPINLOCK(vmid_lock);
void __init kvm_riscv_gstage_vmid_detect(void)
{
unsigned long old;
/* Figure-out number of VMID bits in HW */
old = csr_read(CSR_HGATP);
csr_write(CSR_HGATP, old | HGATP_VMID);
vmid_bits = csr_read(CSR_HGATP);
vmid_bits = (vmid_bits & HGATP_VMID) >> HGATP_VMID_SHIFT;
vmid_bits = fls_long(vmid_bits);
csr_write(CSR_HGATP, old);
/* We polluted the local TLB so flush all guest TLBs */
kvm_riscv_local_hfence_gvma_all();
/* We don't use VMID bits if they are not sufficient */
if ((1UL << vmid_bits) < num_possible_cpus())
vmid_bits = 0;
}
unsigned long kvm_riscv_gstage_vmid_bits(void)
{
return vmid_bits;
}
int kvm_riscv_gstage_vmid_init(struct kvm *kvm)
{
/* Mark the initial VMID and VMID version invalid */
kvm->arch.vmid.vmid_version = 0;
kvm->arch.vmid.vmid = 0;
return 0;
}
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid)
{
if (!vmid_bits)
return false;
return unlikely(READ_ONCE(vmid->vmid_version) !=
READ_ONCE(vmid_version));
}
static void __local_hfence_gvma_all(void *info)
{
kvm_riscv_local_hfence_gvma_all();
}
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu)
{
unsigned long i;
struct kvm_vcpu *v;
struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;
if (!kvm_riscv_gstage_vmid_ver_changed(vmid))
return;
spin_lock(&vmid_lock);
/*
* We need to re-check the vmid_version here because another vcpu may
* already have allocated a valid vmid for this vm while we were
* waiting on the lock.
*/
if (!kvm_riscv_gstage_vmid_ver_changed(vmid)) {
spin_unlock(&vmid_lock);
return;
}
/* First user of a new VMID version? */
if (unlikely(vmid_next == 0)) {
WRITE_ONCE(vmid_version, READ_ONCE(vmid_version) + 1);
vmid_next = 1;
/*
* We ran out of VMIDs so we increment vmid_version and
* start assigning VMIDs from 1.
*
* This also means existing VMID assignments for all Guest
* instances are invalid and we have to force VMID re-assignment
* for all Guest instances. The Guest instances that were not
* running will automatically pick up new VMIDs because they
* call kvm_riscv_gstage_vmid_update() whenever they enter the
* in-kernel run loop. For Guest instances that are already
* running, we force VM exits on all host CPUs using IPI and
* flush all Guest TLBs.
*/
on_each_cpu_mask(cpu_online_mask, __local_hfence_gvma_all,
NULL, 1);
}
vmid->vmid = vmid_next;
vmid_next++;
vmid_next &= (1 << vmid_bits) - 1;
WRITE_ONCE(vmid->vmid_version, READ_ONCE(vmid_version));
spin_unlock(&vmid_lock);
/* Request G-stage page table update for all VCPUs */
kvm_for_each_vcpu(i, v, vcpu->kvm)
kvm_make_request(KVM_REQ_UPDATE_HGATP, v);
}
| linux-master | arch/riscv/kvm/vmid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>
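/* Current guest time: the host cycle counter plus the per-VM guest time delta. */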
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
return get_cycles64() + gt->time_delta;
}
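/*
 * Convert the distance between now and a target cycle value into
 * nanoseconds using the guest timer's clocksource mult/shift.
 */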
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
struct kvm_guest_timer *gt,
struct kvm_vcpu_timer *t)
{
unsigned long flags;
u64 cycles_now, cycles_delta, delta_ns;
local_irq_save(flags);
cycles_now = kvm_riscv_current_cycles(gt);
if (cycles_now < cycles)
cycles_delta = cycles - cycles_now;
else
cycles_delta = 0;
delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
local_irq_restore(flags);
return delta_ns;
}
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
u64 delta_ns;
struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
return HRTIMER_RESTART;
}
t->next_set = false;
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);
return HRTIMER_NORESTART;
}
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
if (!t->init_done || !t->next_set)
return -EINVAL;
hrtimer_cancel(&t->hrt);
t->next_set = false;
return 0;
}
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
csr_write(CSR_VSTIMECMP, ncycles);
#endif
return 0;
}
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
u64 delta_ns;
if (!t->init_done)
return -EINVAL;
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);
delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
t->next_cycles = ncycles;
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
return 0;
}
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
return t->timer_next_event(vcpu, ncycles);
}
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
u64 delta_ns;
struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
return HRTIMER_RESTART;
}
t->next_set = false;
kvm_vcpu_kick(vcpu);
return HRTIMER_NORESTART;
}
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
return true;
else
return false;
}
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
u64 delta_ns;
if (!t->init_done)
return;
delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
t->next_set = true;
}
static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_TIMER);
u64 reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
return -ENOENT;
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
reg_val = riscv_timebase;
break;
case KVM_REG_RISCV_TIMER_REG(time):
reg_val = kvm_riscv_current_cycles(gt);
break;
case KVM_REG_RISCV_TIMER_REG(compare):
reg_val = t->next_cycles;
break;
case KVM_REG_RISCV_TIMER_REG(state):
reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
KVM_RISCV_TIMER_STATE_OFF;
break;
default:
return -ENOENT;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_TIMER);
u64 reg_val;
int ret = 0;
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
return -ENOENT;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
if (reg_val != riscv_timebase)
return -EINVAL;
break;
case KVM_REG_RISCV_TIMER_REG(time):
gt->time_delta = reg_val - get_cycles64();
break;
case KVM_REG_RISCV_TIMER_REG(compare):
t->next_cycles = reg_val;
break;
case KVM_REG_RISCV_TIMER_REG(state):
if (reg_val == KVM_RISCV_TIMER_STATE_ON)
ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
else
ret = kvm_riscv_vcpu_timer_cancel(t);
break;
default:
ret = -ENOENT;
break;
}
return ret;
}
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
if (t->init_done)
return -EINVAL;
hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
t->init_done = true;
t->next_set = false;
/* Enable sstc for every vcpu if available in hardware */
if (riscv_isa_extension_available(NULL, SSTC)) {
t->sstc_enabled = true;
t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
} else {
t->sstc_enabled = false;
t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
}
return 0;
}
int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
int ret;
ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
vcpu->arch.timer.init_done = false;
return ret;
}
int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
t->next_cycles = -1ULL;
return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
#if defined(CONFIG_32BIT)
csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
kvm_riscv_vcpu_update_timedelta(vcpu);
if (!t->sstc_enabled)
return;
#if defined(CONFIG_32BIT)
csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif
/* timer should be enabled for the remaining operations */
if (unlikely(!t->init_done))
return;
kvm_riscv_vcpu_timer_unblocking(vcpu);
}
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
if (!t->sstc_enabled)
return;
#if defined(CONFIG_32BIT)
t->next_cycles = csr_read(CSR_VSTIMECMP);
t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
if (!t->sstc_enabled)
return;
/*
* The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
* upon every VM exit so no need to save here.
*/
/* timer should be enabled for the remaining operations */
if (unlikely(!t->init_done))
return;
if (kvm_vcpu_is_blocking(vcpu))
kvm_riscv_vcpu_timer_blocking(vcpu);
}
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
struct kvm_guest_timer *gt = &kvm->arch.timer;
riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
gt->time_delta = -get_cycles64();
}
| linux-master | arch/riscv/kvm/vcpu_timer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
* Copyright (C) 2022 Ventana Micro Systems Inc.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/kvm_aia_aplic.h>
struct aplic_irq {
raw_spinlock_t lock;
u32 sourcecfg;
u32 state;
#define APLIC_IRQ_STATE_PENDING BIT(0)
#define APLIC_IRQ_STATE_ENABLED BIT(1)
#define APLIC_IRQ_STATE_ENPEND (APLIC_IRQ_STATE_PENDING | \
APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT BIT(8)
u32 target;
};
struct aplic {
struct kvm_io_device iodev;
u32 domaincfg;
u32 genmsi;
u32 nr_irqs;
u32 nr_words;
struct aplic_irq *irqs;
};
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
u32 ret;
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return 0;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = irqd->sourcecfg;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
}
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return;
irqd = &aplic->irqs[irq];
if (val & APLIC_SOURCECFG_D)
val = 0;
else
val &= APLIC_SOURCECFG_SM_MASK;
raw_spin_lock_irqsave(&irqd->lock, flags);
irqd->sourcecfg = val;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
u32 ret;
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return 0;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = irqd->target;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
}
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return;
irqd = &aplic->irqs[irq];
val &= APLIC_TARGET_EIID_MASK |
(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
(APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);
raw_spin_lock_irqsave(&irqd->lock, flags);
irqd->target = val;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
static bool aplic_read_pending(struct aplic *aplic, u32 irq)
{
bool ret;
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return false;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
}
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
unsigned long flags, sm;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
if (!pending &&
((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
(sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
goto skip_write_pending;
if (pending)
irqd->state |= APLIC_IRQ_STATE_PENDING;
else
irqd->state &= ~APLIC_IRQ_STATE_PENDING;
skip_write_pending:
raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
{
bool ret;
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return false;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
}
static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
if (enabled)
irqd->state |= APLIC_IRQ_STATE_ENABLED;
else
irqd->state &= ~APLIC_IRQ_STATE_ENABLED;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
static bool aplic_read_input(struct aplic *aplic, u32 irq)
{
bool ret;
unsigned long flags;
struct aplic_irq *irqd;
if (!irq || aplic->nr_irqs <= irq)
return false;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
raw_spin_unlock_irqrestore(&irqd->lock, flags);
return ret;
}
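/* Decode hart index, guest index and EIID from the target register and forward the MSI. */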
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
u32 hart_idx, guest_idx, eiid;
hart_idx = target >> APLIC_TARGET_HART_IDX_SHIFT;
hart_idx &= APLIC_TARGET_HART_IDX_MASK;
guest_idx = target >> APLIC_TARGET_GUEST_IDX_SHIFT;
guest_idx &= APLIC_TARGET_GUEST_IDX_MASK;
eiid = target & APLIC_TARGET_EIID_MASK;
kvm_riscv_aia_inject_msi_by_id(kvm, hart_idx, guest_idx, eiid);
}
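/*
 * Inject an MSI for every source in [first, last] that is both enabled
 * and pending, consuming its pending state.
 */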
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
bool inject;
u32 irq, target;
unsigned long flags;
struct aplic_irq *irqd;
struct aplic *aplic = kvm->arch.aia.aplic_state;
if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
return;
for (irq = first; irq <= last; irq++) {
if (!irq || aplic->nr_irqs <= irq)
continue;
irqd = &aplic->irqs[irq];
raw_spin_lock_irqsave(&irqd->lock, flags);
inject = false;
target = irqd->target;
if ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
APLIC_IRQ_STATE_ENPEND) {
irqd->state &= ~APLIC_IRQ_STATE_PENDING;
inject = true;
}
raw_spin_unlock_irqrestore(&irqd->lock, flags);
if (inject)
aplic_inject_msi(kvm, irq, target);
}
}
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
u32 target;
bool inject = false, ie;
unsigned long flags;
struct aplic_irq *irqd;
struct aplic *aplic = kvm->arch.aia.aplic_state;
if (!aplic || !source || (aplic->nr_irqs <= source))
return -ENODEV;
irqd = &aplic->irqs[source];
ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
raw_spin_lock_irqsave(&irqd->lock, flags);
if (irqd->sourcecfg & APLIC_SOURCECFG_D)
goto skip_unlock;
switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
case APLIC_SOURCECFG_SM_EDGE_RISE:
if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
!(irqd->state & APLIC_IRQ_STATE_PENDING))
irqd->state |= APLIC_IRQ_STATE_PENDING;
break;
case APLIC_SOURCECFG_SM_EDGE_FALL:
if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
!(irqd->state & APLIC_IRQ_STATE_PENDING))
irqd->state |= APLIC_IRQ_STATE_PENDING;
break;
case APLIC_SOURCECFG_SM_LEVEL_HIGH:
if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
irqd->state |= APLIC_IRQ_STATE_PENDING;
break;
case APLIC_SOURCECFG_SM_LEVEL_LOW:
if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
irqd->state |= APLIC_IRQ_STATE_PENDING;
break;
}
if (level)
irqd->state |= APLIC_IRQ_STATE_INPUT;
else
irqd->state &= ~APLIC_IRQ_STATE_INPUT;
target = irqd->target;
if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
APLIC_IRQ_STATE_ENPEND)) {
irqd->state &= ~APLIC_IRQ_STATE_PENDING;
inject = true;
}
skip_unlock:
raw_spin_unlock_irqrestore(&irqd->lock, flags);
if (inject)
aplic_inject_msi(kvm, source, target);
return 0;
}
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
u32 i, ret = 0;
for (i = 0; i < 32; i++)
ret |= aplic_read_input(aplic, word * 32 + i) ? BIT(i) : 0;
return ret;
}
static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
u32 i, ret = 0;
for (i = 0; i < 32; i++)
ret |= aplic_read_pending(aplic, word * 32 + i) ? BIT(i) : 0;
return ret;
}
static void aplic_write_pending_word(struct aplic *aplic, u32 word,
u32 val, bool pending)
{
u32 i;
for (i = 0; i < 32; i++) {
if (val & BIT(i))
aplic_write_pending(aplic, word * 32 + i, pending);
}
}
static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
u32 i, ret = 0;
for (i = 0; i < 32; i++)
ret |= aplic_read_enabled(aplic, word * 32 + i) ? BIT(i) : 0;
return ret;
}
static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
u32 val, bool enabled)
{
u32 i;
for (i = 0; i < 32; i++) {
if (val & BIT(i))
aplic_write_enabled(aplic, word * 32 + i, enabled);
}
}
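/* Emulate a 32-bit aligned read of the APLIC register at byte offset @off. */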
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
u32 i;
struct aplic *aplic = kvm->arch.aia.aplic_state;
if ((off & 0x3) != 0)
return -EOPNOTSUPP;
if (off == APLIC_DOMAINCFG) {
*val32 = APLIC_DOMAINCFG_RDONLY |
aplic->domaincfg | APLIC_DOMAINCFG_DM;
} else if ((off >= APLIC_SOURCECFG_BASE) &&
(off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
*val32 = aplic_read_sourcecfg(aplic, i);
} else if ((off >= APLIC_SETIP_BASE) &&
(off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_SETIP_BASE) >> 2;
*val32 = aplic_read_pending_word(aplic, i);
} else if (off == APLIC_SETIPNUM) {
*val32 = 0;
} else if ((off >= APLIC_CLRIP_BASE) &&
(off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_CLRIP_BASE) >> 2;
*val32 = aplic_read_input_word(aplic, i);
} else if (off == APLIC_CLRIPNUM) {
*val32 = 0;
} else if ((off >= APLIC_SETIE_BASE) &&
(off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_SETIE_BASE) >> 2;
*val32 = aplic_read_enabled_word(aplic, i);
} else if (off == APLIC_SETIENUM) {
*val32 = 0;
} else if ((off >= APLIC_CLRIE_BASE) &&
(off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
*val32 = 0;
} else if (off == APLIC_CLRIENUM) {
*val32 = 0;
} else if (off == APLIC_SETIPNUM_LE) {
*val32 = 0;
} else if (off == APLIC_SETIPNUM_BE) {
*val32 = 0;
} else if (off == APLIC_GENMSI) {
*val32 = aplic->genmsi;
} else if ((off >= APLIC_TARGET_BASE) &&
(off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
*val32 = aplic_read_target(aplic, i);
} else
return -ENODEV;
return 0;
}
static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, void *val)
{
if (len != 4)
return -EOPNOTSUPP;
return aplic_mmio_read_offset(vcpu->kvm,
addr - vcpu->kvm->arch.aia.aplic_addr,
val);
}
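/*
 * Emulate a 32-bit aligned write to the APLIC register at byte offset
 * @off and then re-evaluate all interrupt sources.
 */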
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
u32 i;
struct aplic *aplic = kvm->arch.aia.aplic_state;
if ((off & 0x3) != 0)
return -EOPNOTSUPP;
if (off == APLIC_DOMAINCFG) {
/* Only the IE bit is writable */
aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
} else if ((off >= APLIC_SOURCECFG_BASE) &&
(off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
aplic_write_sourcecfg(aplic, i, val32);
} else if ((off >= APLIC_SETIP_BASE) &&
(off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_SETIP_BASE) >> 2;
aplic_write_pending_word(aplic, i, val32, true);
} else if (off == APLIC_SETIPNUM) {
aplic_write_pending(aplic, val32, true);
} else if ((off >= APLIC_CLRIP_BASE) &&
(off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_CLRIP_BASE) >> 2;
aplic_write_pending_word(aplic, i, val32, false);
} else if (off == APLIC_CLRIPNUM) {
aplic_write_pending(aplic, val32, false);
} else if ((off >= APLIC_SETIE_BASE) &&
(off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_SETIE_BASE) >> 2;
aplic_write_enabled_word(aplic, i, val32, true);
} else if (off == APLIC_SETIENUM) {
aplic_write_enabled(aplic, val32, true);
} else if ((off >= APLIC_CLRIE_BASE) &&
(off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
i = (off - APLIC_CLRIE_BASE) >> 2;
aplic_write_enabled_word(aplic, i, val32, false);
} else if (off == APLIC_CLRIENUM) {
aplic_write_enabled(aplic, val32, false);
} else if (off == APLIC_SETIPNUM_LE) {
aplic_write_pending(aplic, val32, true);
} else if (off == APLIC_SETIPNUM_BE) {
aplic_write_pending(aplic, __swab32(val32), true);
} else if (off == APLIC_GENMSI) {
aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
APLIC_TARGET_GUEST_IDX_SHIFT);
kvm_riscv_aia_inject_msi_by_id(kvm,
val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
val32 & APLIC_TARGET_EIID_MASK);
} else if ((off >= APLIC_TARGET_BASE) &&
(off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
aplic_write_target(aplic, i, val32);
} else
return -ENODEV;
aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);
return 0;
}
static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
gpa_t addr, int len, const void *val)
{
if (len != 4)
return -EOPNOTSUPP;
return aplic_mmio_write_offset(vcpu->kvm,
addr - vcpu->kvm->arch.aia.aplic_addr,
*((const u32 *)val));
}
static struct kvm_io_device_ops aplic_iodoev_ops = {
.read = aplic_mmio_read,
.write = aplic_mmio_write,
};
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
int rc;
if (!kvm->arch.aia.aplic_state)
return -ENODEV;
rc = aplic_mmio_write_offset(kvm, type, v);
if (rc)
return rc;
return 0;
}
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
int rc;
if (!kvm->arch.aia.aplic_state)
return -ENODEV;
rc = aplic_mmio_read_offset(kvm, type, v);
if (rc)
return rc;
return 0;
}
int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
{
int rc;
u32 val;
if (!kvm->arch.aia.aplic_state)
return -ENODEV;
rc = aplic_mmio_read_offset(kvm, type, &val);
if (rc)
return rc;
return 0;
}
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
int i, ret = 0;
struct aplic *aplic;
/* Do nothing if we have zero sources */
if (!kvm->arch.aia.nr_sources)
return 0;
/* Allocate APLIC global state */
aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
if (!aplic)
return -ENOMEM;
kvm->arch.aia.aplic_state = aplic;
/* Setup APLIC IRQs */
aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
aplic->irqs = kcalloc(aplic->nr_irqs,
sizeof(*aplic->irqs), GFP_KERNEL);
if (!aplic->irqs) {
ret = -ENOMEM;
goto fail_free_aplic;
}
for (i = 0; i < aplic->nr_irqs; i++)
raw_spin_lock_init(&aplic->irqs[i].lock);
/* Setup IO device */
kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
kvm->arch.aia.aplic_addr,
KVM_DEV_RISCV_APLIC_SIZE,
&aplic->iodev);
mutex_unlock(&kvm->slots_lock);
if (ret)
goto fail_free_aplic_irqs;
/* Setup default IRQ routing */
ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
if (ret)
goto fail_unreg_iodev;
return 0;
fail_unreg_iodev:
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
kfree(aplic->irqs);
fail_free_aplic:
kvm->arch.aia.aplic_state = NULL;
kfree(aplic);
return ret;
}
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
struct aplic *aplic = kvm->arch.aia.aplic_state;
if (!aplic)
return;
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
mutex_unlock(&kvm->slots_lock);
kfree(aplic->irqs);
kvm->arch.aia.aplic_state = NULL;
kfree(aplic);
}
| linux-master | arch/riscv/kvm/aia_aplic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>
#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
gpa_t gpa, gpa_t gpsz,
unsigned long order)
{
gpa_t pos;
if (PTRS_PER_PTE < (gpsz >> order)) {
kvm_riscv_local_hfence_gvma_vmid_all(vmid);
return;
}
if (has_svinval()) {
asm volatile (SFENCE_W_INVAL() ::: "memory");
for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
asm volatile (HINVAL_GVMA(%0, %1)
: : "r" (pos >> 2), "r" (vmid) : "memory");
asm volatile (SFENCE_INVAL_IR() ::: "memory");
} else {
for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
asm volatile (HFENCE_GVMA(%0, %1)
: : "r" (pos >> 2), "r" (vmid) : "memory");
}
}
void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}
void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
unsigned long order)
{
gpa_t pos;
if (PTRS_PER_PTE < (gpsz >> order)) {
kvm_riscv_local_hfence_gvma_all();
return;
}
if (has_svinval()) {
asm volatile (SFENCE_W_INVAL() ::: "memory");
for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
asm volatile(HINVAL_GVMA(%0, zero)
: : "r" (pos >> 2) : "memory");
asm volatile (SFENCE_INVAL_IR() ::: "memory");
} else {
for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
asm volatile(HFENCE_GVMA(%0, zero)
: : "r" (pos >> 2) : "memory");
}
}
void kvm_riscv_local_hfence_gvma_all(void)
{
asm volatile(HFENCE_GVMA(zero, zero) : : : "memory");
}
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
unsigned long asid,
unsigned long gva,
unsigned long gvsz,
unsigned long order)
{
unsigned long pos, hgatp;
if (PTRS_PER_PTE < (gvsz >> order)) {
kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
return;
}
hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
if (has_svinval()) {
asm volatile (SFENCE_W_INVAL() ::: "memory");
for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
asm volatile(HINVAL_VVMA(%0, %1)
: : "r" (pos), "r" (asid) : "memory");
asm volatile (SFENCE_INVAL_IR() ::: "memory");
} else {
for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
asm volatile(HFENCE_VVMA(%0, %1)
: : "r" (pos), "r" (asid) : "memory");
}
csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
unsigned long asid)
{
unsigned long hgatp;
hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");
csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
unsigned long gva, unsigned long gvsz,
unsigned long order)
{
unsigned long pos, hgatp;
if (PTRS_PER_PTE < (gvsz >> order)) {
kvm_riscv_local_hfence_vvma_all(vmid);
return;
}
hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
if (has_svinval()) {
asm volatile (SFENCE_W_INVAL() ::: "memory");
for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
asm volatile(HINVAL_VVMA(%0, zero)
: : "r" (pos) : "memory");
asm volatile (SFENCE_INVAL_IR() ::: "memory");
} else {
for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
asm volatile(HFENCE_VVMA(%0, zero)
: : "r" (pos) : "memory");
}
csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
unsigned long hgatp;
hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);
asm volatile(HFENCE_VVMA(zero, zero) : : : "memory");
csr_write(CSR_HGATP, hgatp);
}
void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
unsigned long vmid;
if (!kvm_riscv_gstage_vmid_bits() ||
vcpu->arch.last_exit_cpu == vcpu->cpu)
return;
/*
* On RISC-V platforms with hardware VMID support, we share the same
* VMID for all VCPUs of a particular Guest/VM. This means we might
* have stale G-stage TLB entries on the current Host CPU due to
* some other VCPU of the same Guest which ran previously on the
* current Host CPU.
*
* To clean up stale TLB entries, we simply flush all G-stage TLB
* entries by VMID whenever the underlying Host CPU changes for a VCPU.
*/
vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
local_flush_icache_all();
}
void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
struct kvm_vmid *vmid;
vmid = &vcpu->kvm->arch.vmid;
kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}
void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
struct kvm_vmid *vmid;
vmid = &vcpu->kvm->arch.vmid;
kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}
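/* Pop one entry from the VCPU's HFENCE queue; returns false when the queue is empty. */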
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
struct kvm_riscv_hfence *out_data)
{
bool ret = false;
struct kvm_vcpu_arch *varch = &vcpu->arch;
spin_lock(&varch->hfence_lock);
if (varch->hfence_queue[varch->hfence_head].type) {
memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
sizeof(*out_data));
varch->hfence_queue[varch->hfence_head].type = 0;
varch->hfence_head++;
if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
varch->hfence_head = 0;
ret = true;
}
spin_unlock(&varch->hfence_lock);
return ret;
}
static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
const struct kvm_riscv_hfence *data)
{
bool ret = false;
struct kvm_vcpu_arch *varch = &vcpu->arch;
spin_lock(&varch->hfence_lock);
if (!varch->hfence_queue[varch->hfence_tail].type) {
memcpy(&varch->hfence_queue[varch->hfence_tail],
data, sizeof(*data));
varch->hfence_tail++;
if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
varch->hfence_tail = 0;
ret = true;
}
spin_unlock(&varch->hfence_lock);
return ret;
}
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
struct kvm_riscv_hfence d = { 0 };
struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
while (vcpu_hfence_dequeue(vcpu, &d)) {
switch (d.type) {
case KVM_RISCV_HFENCE_UNKNOWN:
break;
case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
kvm_riscv_local_hfence_gvma_vmid_gpa(
READ_ONCE(v->vmid),
d.addr, d.size, d.order);
break;
case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
kvm_riscv_local_hfence_vvma_asid_gva(
READ_ONCE(v->vmid), d.asid,
d.addr, d.size, d.order);
break;
case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
kvm_riscv_local_hfence_vvma_asid_all(
READ_ONCE(v->vmid), d.asid);
break;
case KVM_RISCV_HFENCE_VVMA_GVA:
kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
kvm_riscv_local_hfence_vvma_gva(
READ_ONCE(v->vmid),
d.addr, d.size, d.order);
break;
default:
break;
}
}
}
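/*
 * Build a VCPU mask from hbase/hmask, queue the optional hfence data on
 * each selected VCPU, and raise the (possibly fallback) request on them.
 */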
static void make_xfence_request(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned int req, unsigned int fallback_req,
const struct kvm_riscv_hfence *data)
{
unsigned long i;
struct kvm_vcpu *vcpu;
unsigned int actual_req = req;
DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (hbase != -1UL) {
if (vcpu->vcpu_id < hbase)
continue;
if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
continue;
}
bitmap_set(vcpu_mask, i, 1);
if (!data || !data->type)
continue;
/*
* Enqueue hfence data to VCPU hfence queue. If we don't
* have space in the VCPU hfence queue then fall back to
* a more conservative hfence request.
*/
if (!vcpu_hfence_enqueue(vcpu, data))
actual_req = fallback_req;
}
kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}
void kvm_riscv_fence_i(struct kvm *kvm,
unsigned long hbase, unsigned long hmask)
{
make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
KVM_REQ_FENCE_I, NULL);
}
void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
gpa_t gpa, gpa_t gpsz,
unsigned long order)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
data.asid = 0;
data.addr = gpa;
data.size = gpsz;
data.order = order;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}
void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask)
{
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}
void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
unsigned long order, unsigned long asid)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
data.asid = asid;
data.addr = gva;
data.size = gvsz;
data.order = order;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long asid)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
data.asid = asid;
data.addr = data.size = data.order = 0;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
unsigned long hbase, unsigned long hmask,
unsigned long gva, unsigned long gvsz,
unsigned long order)
{
struct kvm_riscv_hfence data;
data.type = KVM_RISCV_HFENCE_VVMA_GVA;
data.asid = 0;
data.addr = gva;
data.size = gvsz;
data.order = order;
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
KVM_REQ_HFENCE_VVMA_ALL, &data);
}
void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
unsigned long hbase, unsigned long hmask)
{
make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
KVM_REQ_HFENCE_VVMA_ALL, NULL);
}
| linux-master | arch/riscv/kvm/tlb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*
* Authors:
* Atish Patra <[email protected]>
* Anup Patel <[email protected]>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>
#ifdef CONFIG_FPU
void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
cntx->sstatus &= ~SR_FS;
if (riscv_isa_extension_available(vcpu->arch.isa, f) ||
riscv_isa_extension_available(vcpu->arch.isa, d))
cntx->sstatus |= SR_FS_INITIAL;
else
cntx->sstatus |= SR_FS_OFF;
}
static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
cntx->sstatus &= ~SR_FS;
cntx->sstatus |= SR_FS_CLEAN;
}
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_save(cntx);
else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_save(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
}
void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
const unsigned long *isa)
{
if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
if (riscv_isa_extension_available(isa, d))
__kvm_riscv_fp_d_restore(cntx);
else if (riscv_isa_extension_available(isa, f))
__kvm_riscv_fp_f_restore(cntx);
kvm_riscv_vcpu_fp_clean(cntx);
}
}
void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
{
/* No need to check host sstatus as it can be modified outside */
if (riscv_isa_extension_available(NULL, d))
__kvm_riscv_fp_d_save(cntx);
else if (riscv_isa_extension_available(NULL, f))
__kvm_riscv_fp_f_save(cntx);
}
void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
{
if (riscv_isa_extension_available(NULL, d))
__kvm_riscv_fp_d_restore(cntx);
else if (riscv_isa_extension_available(NULL, f))
__kvm_riscv_fp_f_restore(cntx);
}
#endif
int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
rtype);
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
reg_val = &cntx->fp.f.fcsr;
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
reg_val = &cntx->fp.d.fcsr;
} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
reg_val = &cntx->fp.d.f[reg_num];
} else
return -ENOENT;
} else
return -ENOENT;
if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
unsigned long rtype)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
rtype);
void *reg_val;
if ((rtype == KVM_REG_RISCV_FP_F) &&
riscv_isa_extension_available(vcpu->arch.isa, f)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
reg_val = &cntx->fp.f.fcsr;
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
if (KVM_REG_SIZE(reg->id) != sizeof(u32))
return -EINVAL;
reg_val = &cntx->fp.d.fcsr;
} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
reg_val = &cntx->fp.d.f[reg_num];
} else
return -ENOENT;
} else
return -ENOENT;
if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
| linux-master | arch/riscv/kvm/vcpu_fp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Rivos Inc
*
* Authors:
* Atish Patra <[email protected]>
*/
#define pr_fmt(fmt) "riscv-kvm-pmu: " fmt
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/perf/riscv_pmu.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/kvm_vcpu_pmu.h>
#include <linux/bitops.h>
#define kvm_pmu_num_counters(pmu) ((pmu)->num_hw_ctrs + (pmu)->num_fw_ctrs)
#define get_event_type(x) (((x) & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16)
#define get_event_code(x) ((x) & SBI_PMU_EVENT_IDX_CODE_MASK)
static enum perf_hw_id hw_event_perf_map[SBI_PMU_HW_GENERAL_MAX] = {
[SBI_PMU_HW_CPU_CYCLES] = PERF_COUNT_HW_CPU_CYCLES,
[SBI_PMU_HW_INSTRUCTIONS] = PERF_COUNT_HW_INSTRUCTIONS,
[SBI_PMU_HW_CACHE_REFERENCES] = PERF_COUNT_HW_CACHE_REFERENCES,
[SBI_PMU_HW_CACHE_MISSES] = PERF_COUNT_HW_CACHE_MISSES,
[SBI_PMU_HW_BRANCH_INSTRUCTIONS] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
[SBI_PMU_HW_BRANCH_MISSES] = PERF_COUNT_HW_BRANCH_MISSES,
[SBI_PMU_HW_BUS_CYCLES] = PERF_COUNT_HW_BUS_CYCLES,
[SBI_PMU_HW_STALLED_CYCLES_FRONTEND] = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
[SBI_PMU_HW_STALLED_CYCLES_BACKEND] = PERF_COUNT_HW_STALLED_CYCLES_BACKEND,
[SBI_PMU_HW_REF_CPU_CYCLES] = PERF_COUNT_HW_REF_CPU_CYCLES,
};
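/* Number of events remaining until the counter overflows its programmed width. */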
static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
{
u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
u64 sample_period;
if (!pmc->counter_val)
sample_period = counter_val_mask + 1;
else
sample_period = (-pmc->counter_val) & counter_val_mask;
return sample_period;
}
static u32 kvm_pmu_get_perf_event_type(unsigned long eidx)
{
enum sbi_pmu_event_type etype = get_event_type(eidx);
u32 type = PERF_TYPE_MAX;
switch (etype) {
case SBI_PMU_EVENT_TYPE_HW:
type = PERF_TYPE_HARDWARE;
break;
case SBI_PMU_EVENT_TYPE_CACHE:
type = PERF_TYPE_HW_CACHE;
break;
case SBI_PMU_EVENT_TYPE_RAW:
case SBI_PMU_EVENT_TYPE_FW:
type = PERF_TYPE_RAW;
break;
default:
break;
}
return type;
}
static bool kvm_pmu_is_fw_event(unsigned long eidx)
{
return get_event_type(eidx) == SBI_PMU_EVENT_TYPE_FW;
}
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
perf_event_disable(pmc->perf_event);
perf_event_release_kernel(pmc->perf_event);
pmc->perf_event = NULL;
}
}
static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
{
return hw_event_perf_map[sbi_event_code];
}
static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
{
u64 config = U64_MAX;
unsigned int cache_type, cache_op, cache_result;
/* All the cache event masks lie within 0xFF. No separate masking is necessary */
cache_type = (sbi_event_code & SBI_PMU_EVENT_CACHE_ID_CODE_MASK) >>
SBI_PMU_EVENT_CACHE_ID_SHIFT;
cache_op = (sbi_event_code & SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK) >>
SBI_PMU_EVENT_CACHE_OP_SHIFT;
cache_result = sbi_event_code & SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX ||
cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return config;
config = cache_type | (cache_op << 8) | (cache_result << 16);
return config;
}
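/* Translate an SBI event index (plus raw event data) into a perf_event_attr config value. */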
static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data)
{
enum sbi_pmu_event_type etype = get_event_type(eidx);
u32 ecode = get_event_code(eidx);
u64 config = U64_MAX;
switch (etype) {
case SBI_PMU_EVENT_TYPE_HW:
if (ecode < SBI_PMU_HW_GENERAL_MAX)
config = kvm_pmu_get_perf_event_hw_config(ecode);
break;
case SBI_PMU_EVENT_TYPE_CACHE:
config = kvm_pmu_get_perf_event_cache_config(ecode);
break;
case SBI_PMU_EVENT_TYPE_RAW:
config = evt_data & RISCV_PMU_RAW_EVENT_MASK;
break;
case SBI_PMU_EVENT_TYPE_FW:
if (ecode < SBI_PMU_FW_MAX)
config = (1ULL << 63) | ecode;
break;
default:
break;
}
return config;
}
static int kvm_pmu_get_fixed_pmc_index(unsigned long eidx)
{
u32 etype = kvm_pmu_get_perf_event_type(eidx);
u32 ecode = get_event_code(eidx);
if (etype != SBI_PMU_EVENT_TYPE_HW)
return -EINVAL;
if (ecode == SBI_PMU_HW_CPU_CYCLES)
return 0;
else if (ecode == SBI_PMU_HW_INSTRUCTIONS)
return 2;
else
return -EINVAL;
}
static int kvm_pmu_get_programmable_pmc_index(struct kvm_pmu *kvpmu, unsigned long eidx,
unsigned long cbase, unsigned long cmask)
{
int ctr_idx = -1;
int i, pmc_idx;
int min, max;
if (kvm_pmu_is_fw_event(eidx)) {
/* Firmware counters are mapped 1:1 starting from num_hw_ctrs for simplicity */
min = kvpmu->num_hw_ctrs;
max = min + kvpmu->num_fw_ctrs;
} else {
/* First 3 counters are reserved for fixed counters */
min = 3;
max = kvpmu->num_hw_ctrs;
}
for_each_set_bit(i, &cmask, BITS_PER_LONG) {
pmc_idx = i + cbase;
if ((pmc_idx >= min && pmc_idx < max) &&
!test_bit(pmc_idx, kvpmu->pmc_in_use)) {
ctr_idx = pmc_idx;
break;
}
}
return ctr_idx;
}
static int pmu_get_pmc_index(struct kvm_pmu *pmu, unsigned long eidx,
unsigned long cbase, unsigned long cmask)
{
int ret;
/* Fixed counters need to have a fixed mapping as they have a different width */
ret = kvm_pmu_get_fixed_pmc_index(eidx);
if (ret >= 0)
return ret;
return kvm_pmu_get_programmable_pmc_index(pmu, eidx, cbase, cmask);
}
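/*
 * Read counter @cidx: firmware counters come from the fw_event table,
 * hardware counters from the backing perf event.
 */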
static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
unsigned long *out_val)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u64 enabled, running;
int fevent_code;
pmc = &kvpmu->pmc[cidx];
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
fevent_code = get_event_code(pmc->event_idx);
pmc->counter_val = kvpmu->fw_event[fevent_code].value;
} else if (pmc->perf_event) {
pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running);
} else {
return -EINVAL;
}
*out_val = pmc->counter_val;
return 0;
}
static int kvm_pmu_validate_counter_mask(struct kvm_pmu *kvpmu, unsigned long ctr_base,
unsigned long ctr_mask)
{
/* Make sure we have a valid counter mask requested from the caller */
if (!ctr_mask || (ctr_base + __fls(ctr_mask) >= kvm_pmu_num_counters(kvpmu)))
return -EINVAL;
return 0;
}
static int kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
unsigned long flags, unsigned long eidx, unsigned long evtdata)
{
struct perf_event *event;
kvm_pmu_release_perf_event(pmc);
attr->config = kvm_pmu_get_perf_event_config(eidx, evtdata);
if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE) {
//TODO: Do we really want to clear the value in hardware counter
pmc->counter_val = 0;
}
/*
* Set the default sample_period for now. The guest specified value
* will be updated in the start call.
*/
attr->sample_period = kvm_pmu_get_sample_period(pmc);
event = perf_event_create_kernel_counter(attr, -1, current, NULL, pmc);
if (IS_ERR(event)) {
pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
return PTR_ERR(event);
}
pmc->perf_event = event;
if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
perf_event_enable(pmc->perf_event);
return 0;
}
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_fw_event *fevent;
if (!kvpmu || fid >= SBI_PMU_FW_MAX)
return -EINVAL;
fevent = &kvpmu->fw_event[fid];
if (fevent->started)
fevent->value++;
return 0;
}
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
unsigned long *val, unsigned long new_val,
unsigned long wr_mask)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
int cidx, ret = KVM_INSN_CONTINUE_NEXT_SEPC;
if (!kvpmu || !kvpmu->init_done) {
/*
* In absence of sscofpmf in the platform, the guest OS may use
* the legacy PMU driver to read cycle/instret. In that case,
* just return 0 to avoid any illegal trap. However, any other
* hpmcounter access should result in illegal trap as they must
* be access through SBI PMU only.
*/
if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
*val = 0;
return ret;
} else {
return KVM_INSN_ILLEGAL_TRAP;
}
}
/* The counter CSRs are read-only. Thus, any write should result in an illegal trap */
if (wr_mask)
return KVM_INSN_ILLEGAL_TRAP;
cidx = csr_num - CSR_CYCLE;
if (pmu_ctr_read(vcpu, cidx, val) < 0)
return KVM_INSN_ILLEGAL_TRAP;
return ret;
}
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
retdata->out_val = kvm_pmu_num_counters(kvpmu);
return 0;
}
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
return 0;
}
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
unsigned long ctr_mask, unsigned long flags, u64 ival,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
int i, pmc_index, sbiret = 0;
struct kvm_pmc *pmc;
int fevent_code;
if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
/* Start the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
pmc_index = i + ctr_base;
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
pmc = &kvpmu->pmc[pmc_index];
if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)
pmc->counter_val = ival;
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
fevent_code = get_event_code(pmc->event_idx);
if (fevent_code >= SBI_PMU_FW_MAX) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
/* Check if the counter was already started for some reason */
if (kvpmu->fw_event[fevent_code].started) {
sbiret = SBI_ERR_ALREADY_STARTED;
continue;
}
kvpmu->fw_event[fevent_code].started = true;
kvpmu->fw_event[fevent_code].value = pmc->counter_val;
} else if (pmc->perf_event) {
if (unlikely(pmc->started)) {
sbiret = SBI_ERR_ALREADY_STARTED;
continue;
}
perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
perf_event_enable(pmc->perf_event);
pmc->started = true;
} else {
sbiret = SBI_ERR_INVALID_PARAM;
}
}
out:
retdata->err_val = sbiret;
return 0;
}
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
unsigned long ctr_mask, unsigned long flags,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
int i, pmc_index, sbiret = 0;
u64 enabled, running;
struct kvm_pmc *pmc;
int fevent_code;
if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
/* Stop the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
pmc_index = i + ctr_base;
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
pmc = &kvpmu->pmc[pmc_index];
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
fevent_code = get_event_code(pmc->event_idx);
if (fevent_code >= SBI_PMU_FW_MAX) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
if (!kvpmu->fw_event[fevent_code].started)
sbiret = SBI_ERR_ALREADY_STOPPED;
kvpmu->fw_event[fevent_code].started = false;
} else if (pmc->perf_event) {
if (pmc->started) {
/* Stop counting the counter */
perf_event_disable(pmc->perf_event);
pmc->started = false;
} else {
sbiret = SBI_ERR_ALREADY_STOPPED;
}
if (flags & SBI_PMU_STOP_FLAG_RESET) {
/* Release the counter if this is a reset request */
pmc->counter_val += perf_event_read_value(pmc->perf_event,
&enabled, &running);
kvm_pmu_release_perf_event(pmc);
}
} else {
sbiret = SBI_ERR_INVALID_PARAM;
}
if (flags & SBI_PMU_STOP_FLAG_RESET) {
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
clear_bit(pmc_index, kvpmu->pmc_in_use);
}
}
out:
retdata->err_val = sbiret;
return 0;
}
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
unsigned long ctr_mask, unsigned long flags,
unsigned long eidx, u64 evtdata,
struct kvm_vcpu_sbi_return *retdata)
{
int ctr_idx, ret, sbiret = 0;
bool is_fevent;
unsigned long event_code;
u32 etype = kvm_pmu_get_perf_event_type(eidx);
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc = NULL;
struct perf_event_attr attr = {
.type = etype,
.size = sizeof(struct perf_event_attr),
.pinned = true,
/*
* It should never reach here if the platform doesn't support the sscofpmf
* extension as mode filtering won't work without it.
*/
.exclude_host = true,
.exclude_hv = true,
.exclude_user = !!(flags & SBI_PMU_CFG_FLAG_SET_UINH),
.exclude_kernel = !!(flags & SBI_PMU_CFG_FLAG_SET_SINH),
.config1 = RISCV_PMU_CONFIG1_GUEST_EVENTS,
};
if (kvm_pmu_validate_counter_mask(kvpmu, ctr_base, ctr_mask) < 0) {
sbiret = SBI_ERR_INVALID_PARAM;
goto out;
}
event_code = get_event_code(eidx);
is_fevent = kvm_pmu_is_fw_event(eidx);
if (is_fevent && event_code >= SBI_PMU_FW_MAX) {
sbiret = SBI_ERR_NOT_SUPPORTED;
goto out;
}
/*
* SKIP_MATCH flag indicates the caller is aware of the assigned counter
* for this event. Just do a sanity check if it already marked used.
*/
if (flags & SBI_PMU_CFG_FLAG_SKIP_MATCH) {
if (!test_bit(ctr_base + __ffs(ctr_mask), kvpmu->pmc_in_use)) {
sbiret = SBI_ERR_FAILURE;
goto out;
}
ctr_idx = ctr_base + __ffs(ctr_mask);
} else {
ctr_idx = pmu_get_pmc_index(kvpmu, eidx, ctr_base, ctr_mask);
if (ctr_idx < 0) {
sbiret = SBI_ERR_NOT_SUPPORTED;
goto out;
}
}
pmc = &kvpmu->pmc[ctr_idx];
pmc->idx = ctr_idx;
if (is_fevent) {
if (flags & SBI_PMU_CFG_FLAG_AUTO_START)
kvpmu->fw_event[event_code].started = true;
} else {
ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
if (ret)
return ret;
}
set_bit(ctr_idx, kvpmu->pmc_in_use);
pmc->event_idx = eidx;
retdata->out_val = ctr_idx;
out:
retdata->err_val = sbiret;
return 0;
}
int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
struct kvm_vcpu_sbi_return *retdata)
{
int ret;
ret = pmu_ctr_read(vcpu, cidx, &retdata->out_val);
if (ret == -EINVAL)
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu)
{
int i = 0, ret, num_hw_ctrs = 0, hpm_width = 0;
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
/*
* PMU functionality should only be available to guests if privilege mode
* filtering is available in the host. Otherwise, the guest will always count
* events while the execution is in hypervisor mode.
*/
if (!riscv_isa_extension_available(NULL, SSCOFPMF))
return;
ret = riscv_pmu_get_hpm_info(&hpm_width, &num_hw_ctrs);
if (ret < 0 || !hpm_width || !num_hw_ctrs)
return;
/*
* Increase the number of hardware counters to offset the time counter.
*/
kvpmu->num_hw_ctrs = num_hw_ctrs + 1;
kvpmu->num_fw_ctrs = SBI_PMU_FW_MAX;
memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
if (kvpmu->num_hw_ctrs > RISCV_KVM_MAX_HW_CTRS) {
pr_warn_once("Limiting the hardware counters to 32 as specified by the ISA");
kvpmu->num_hw_ctrs = RISCV_KVM_MAX_HW_CTRS;
}
/*
* There is no correlation between the logical hardware counter and virtual counters.
* However, we need to encode a hpmcounter CSR in the counter info field so that
* KVM can trap and emulate the read. This works well in the migration use case as
* KVM doesn't care if the actual hpmcounter is available in the hardware or not.
*/
for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
/* TIME CSR shouldn't be read from perf interface */
if (i == 1)
continue;
pmc = &kvpmu->pmc[i];
pmc->idx = i;
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
if (i < kvpmu->num_hw_ctrs) {
pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
if (i < 3)
/* CY, IR counters */
pmc->cinfo.width = 63;
else
pmc->cinfo.width = hpm_width;
/*
* The CSR number doesn't have any relation with the logical
* hardware counters. The CSR numbers are encoded sequentially
* to avoid maintaining a map between the virtual counter
* and CSR number.
*/
pmc->cinfo.csr = CSR_CYCLE + i;
} else {
pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
pmc->cinfo.width = BITS_PER_LONG - 1;
}
}
kvpmu->init_done = true;
}
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
int i;
if (!kvpmu)
return;
for_each_set_bit(i, kvpmu->pmc_in_use, RISCV_MAX_COUNTERS) {
pmc = &kvpmu->pmc[i];
pmc->counter_val = 0;
kvm_pmu_release_perf_event(pmc);
pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
}
bitmap_zero(kvpmu->pmc_in_use, RISCV_MAX_COUNTERS);
memset(&kvpmu->fw_event, 0, SBI_PMU_FW_MAX * sizeof(struct kvm_fw_event));
}
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu)
{
kvm_riscv_vcpu_pmu_deinit(vcpu);
}
| linux-master | arch/riscv/kvm/vcpu_pmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
*
* Authors:
* Anup Patel <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#ifdef CONFIG_64BIT
static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels __ro_after_init = 3;
#define gstage_index_bits 9
#else
static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels __ro_after_init = 2;
#define gstage_index_bits 10
#endif
#define gstage_pgd_xbits 2
#define gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + gstage_pgd_xbits))
#define gstage_gpa_bits (HGATP_PAGE_SHIFT + \
(gstage_pgd_levels * gstage_index_bits) + \
gstage_pgd_xbits)
#define gstage_gpa_size ((gpa_t)(1ULL << gstage_gpa_bits))
#define gstage_pte_leaf(__ptep) \
(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))
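/* Index of the page-table entry for @addr at the given G-stage level. */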
static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
{
unsigned long mask;
unsigned long shift = HGATP_PAGE_SHIFT + (gstage_index_bits * level);
if (level == (gstage_pgd_levels - 1))
mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1;
else
mask = PTRS_PER_PTE - 1;
return (addr >> shift) & mask;
}
static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
{
return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
}
static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
{
u32 i;
unsigned long psz = 1UL << 12;
for (i = 0; i < gstage_pgd_levels; i++) {
if (page_size == (psz << (i * gstage_index_bits))) {
*out_level = i;
return 0;
}
}
return -EINVAL;
}
static int gstage_level_to_page_order(u32 level, unsigned long *out_pgorder)
{
if (gstage_pgd_levels < level)
return -EINVAL;
*out_pgorder = 12 + (level * gstage_index_bits);
return 0;
}
static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
int rc;
unsigned long page_order = PAGE_SHIFT;
rc = gstage_level_to_page_order(level, &page_order);
if (rc)
return rc;
*out_pgsize = BIT(page_order);
return 0;
}
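/*
 * Walk the G-stage page table from the root and, if a present leaf mapping
 * exists for @addr, return true with *ptepp and *ptep_level set to the leaf
 * PTE and its level; return false otherwise.
 */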
static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
pte_t **ptepp, u32 *ptep_level)
{
pte_t *ptep;
u32 current_level = gstage_pgd_levels - 1;
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
while (ptep && pte_val(*ptep)) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
return true;
}
if (current_level) {
current_level--;
*ptep_level = current_level;
ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
}
}
return false;
}
static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
unsigned long order = PAGE_SHIFT;
if (gstage_level_to_page_order(level, &order))
return;
addr &= ~(BIT(order) - 1);
kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
}
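/*
 * Install *new_pte for @addr at the requested @level, allocating any missing
 * intermediate page tables from @pcache, and flush remote TLBs whenever a
 * leaf entry is written.
 */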
static int gstage_set_pte(struct kvm *kvm, u32 level,
struct kvm_mmu_memory_cache *pcache,
gpa_t addr, const pte_t *new_pte)
{
u32 current_level = gstage_pgd_levels - 1;
pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
if (current_level < level)
return -EINVAL;
while (current_level != level) {
if (gstage_pte_leaf(ptep))
return -EEXIST;
if (!pte_val(*ptep)) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
__pgprot(_PAGE_TABLE));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
}
current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
*ptep = *new_pte;
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
return 0;
}
static int gstage_map_page(struct kvm *kvm,
struct kvm_mmu_memory_cache *pcache,
gpa_t gpa, phys_addr_t hpa,
unsigned long page_size,
bool page_rdonly, bool page_exec)
{
int ret;
u32 level = 0;
pte_t new_pte;
pgprot_t prot;
ret = gstage_page_size_to_level(page_size, &level);
if (ret)
return ret;
/*
* A RISC-V implementation can choose to either:
* 1) Update 'A' and 'D' PTE bits in hardware
	 * 2) Generate page fault when 'A' and/or 'D' bits are not set in the
	 *    PTE so that software can update these bits.
*
* We support both options mentioned above. To achieve this, we
* always set 'A' and 'D' PTE bits at time of creating G-stage
* mapping. To support KVM dirty page logging with both options
* mentioned above, we will write-protect G-stage PTEs to track
* dirty pages.
*/
if (page_exec) {
if (page_rdonly)
prot = PAGE_READ_EXEC;
else
prot = PAGE_WRITE_EXEC;
} else {
if (page_rdonly)
prot = PAGE_READ;
else
prot = PAGE_WRITE;
}
new_pte = pfn_pte(PFN_DOWN(hpa), prot);
new_pte = pte_mkdirty(new_pte);
return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
}
enum gstage_op {
GSTAGE_OP_NOP = 0, /* Nothing */
GSTAGE_OP_CLEAR, /* Clear/Unmap */
GSTAGE_OP_WP, /* Write-protect */
};
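/*
 * Apply @op to the mapping rooted at @ptep: recurse through intermediate
 * tables and either clear (unmap) or write-protect the leaf entries, flushing
 * remote TLBs for every leaf that is modified.
 */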
static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
pte_t *ptep, u32 ptep_level, enum gstage_op op)
{
int i, ret;
pte_t *next_ptep;
u32 next_ptep_level;
unsigned long next_page_size, page_size;
ret = gstage_level_to_page_size(ptep_level, &page_size);
if (ret)
return;
BUG_ON(addr & (page_size - 1));
if (!pte_val(*ptep))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
if (ret)
return;
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
for (i = 0; i < PTRS_PER_PTE; i++)
gstage_op_pte(kvm, addr + i * next_page_size,
&next_ptep[i], next_ptep_level, op);
if (op == GSTAGE_OP_CLEAR)
put_page(virt_to_page(next_ptep));
} else {
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
gpa_t size, bool may_block)
{
int ret;
pte_t *ptep;
u32 ptep_level;
bool found_leaf;
unsigned long page_size;
gpa_t addr = start, end = start + size;
while (addr < end) {
found_leaf = gstage_get_leaf_entry(kvm, addr,
&ptep, &ptep_level);
ret = gstage_level_to_page_size(ptep_level, &page_size);
if (ret)
break;
if (!found_leaf)
goto next;
if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
gstage_op_pte(kvm, addr, ptep,
ptep_level, GSTAGE_OP_CLEAR);
next:
addr += page_size;
/*
* If the range is too large, release the kvm->mmu_lock
* to prevent starvation and lockup detector warnings.
*/
if (may_block && addr < end)
cond_resched_lock(&kvm->mmu_lock);
}
}
static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
int ret;
pte_t *ptep;
u32 ptep_level;
bool found_leaf;
gpa_t addr = start;
unsigned long page_size;
while (addr < end) {
found_leaf = gstage_get_leaf_entry(kvm, addr,
&ptep, &ptep_level);
ret = gstage_level_to_page_size(ptep_level, &page_size);
if (ret)
break;
if (!found_leaf)
goto next;
if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
gstage_op_pte(kvm, addr, ptep,
ptep_level, GSTAGE_OP_WP);
next:
addr += page_size;
}
}
static void gstage_wp_memory_region(struct kvm *kvm, int slot)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
gstage_wp_range(kvm, start, end);
spin_unlock(&kvm->mmu_lock);
kvm_flush_remote_tlbs(kvm);
}
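/*
 * Create 4K G-stage mappings for [gpa, gpa + size) pointing at the physical
 * range starting at @hpa with PAGE_KERNEL_IO attributes, optionally
 * write-protected; typically used for device (I/O) regions.
 */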
int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
phys_addr_t hpa, unsigned long size,
bool writable, bool in_atomic)
{
pte_t pte;
int ret = 0;
unsigned long pfn;
phys_addr_t addr, end;
struct kvm_mmu_memory_cache pcache = {
.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
.gfp_zero = __GFP_ZERO,
};
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
for (addr = gpa; addr < end; addr += PAGE_SIZE) {
pte = pfn_pte(pfn, PAGE_KERNEL_IO);
if (!writable)
pte = pte_wrprotect(pte);
ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
spin_unlock(&kvm->mmu_lock);
if (ret)
goto out;
pfn++;
}
out:
kvm_mmu_free_memory_cache(&pcache);
return ret;
}
void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
spin_lock(&kvm->mmu_lock);
gstage_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
unsigned long mask)
{
phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
gstage_wp_range(kvm, start, end);
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
kvm_riscv_gstage_free_pgd(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
phys_addr_t size = slot->npages << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
gstage_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
/*
* At this point memslot has been committed and there is an
* allocated dirty_bitmap[], dirty pages will be tracked while
* the memory slot is write protected.
*/
if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
gstage_wp_memory_region(kvm, new->id);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
hva_t hva, reg_end, size;
gpa_t base_gpa;
bool writable;
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
change != KVM_MR_FLAGS_ONLY)
return 0;
/*
* Prevent userspace from creating a memory region outside of the GPA
* space addressable by the KVM guest GPA space.
*/
if ((new->base_gfn + new->npages) >=
(gstage_gpa_size >> PAGE_SHIFT))
return -EFAULT;
hva = new->userspace_addr;
size = new->npages << PAGE_SHIFT;
reg_end = hva + size;
base_gpa = new->base_gfn << PAGE_SHIFT;
writable = !(new->flags & KVM_MEM_READONLY);
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and
* any holes between them, so iterate over all of them to find
* out if we can map any of them right now.
*
* +--------------------------------------------+
* +---------------+----------------+ +----------------+
* | : VMA 1 | VMA 2 | | VMA 3 : |
* +---------------+----------------+ +----------------+
* | memory region |
* +--------------------------------------------+
*/
do {
struct vm_area_struct *vma = find_vma(current->mm, hva);
hva_t vm_start, vm_end;
if (!vma || vma->vm_start >= reg_end)
break;
/*
* Mapping a read-only VMA is only allowed if the
* memory region is configured as read-only.
*/
if (writable && !(vma->vm_flags & VM_WRITE)) {
ret = -EPERM;
break;
}
/* Take the intersection of this VMA with the memory region */
vm_start = max(hva, vma->vm_start);
vm_end = min(reg_end, vma->vm_end);
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = base_gpa + (vm_start - hva);
phys_addr_t pa;
pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
goto out;
}
ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
vm_end - vm_start,
writable, false);
if (ret)
break;
}
hva = vm_end;
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
goto out;
if (ret)
kvm_riscv_gstage_iounmap(kvm, base_gpa, size);
out:
mmap_read_unlock(current->mm);
return ret;
}
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.pgd)
return false;
gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
return false;
}
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
int ret;
kvm_pfn_t pfn = pte_pfn(range->arg.pte);
if (!kvm->arch.pgd)
return false;
WARN_ON(range->end - range->start != 1);
ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT,
__pfn_to_phys(pfn), PAGE_SIZE, true, true);
if (ret) {
kvm_debug("Failed to map G-stage page (error %d)\n", ret);
return true;
}
return false;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
pte_t *ptep;
u32 ptep_level = 0;
u64 size = (range->end - range->start) << PAGE_SHIFT;
if (!kvm->arch.pgd)
return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
return false;
return ptep_test_and_clear_young(NULL, 0, ptep);
}
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
pte_t *ptep;
u32 ptep_level = 0;
u64 size = (range->end - range->start) << PAGE_SHIFT;
if (!kvm->arch.pgd)
return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
&ptep, &ptep_level))
return false;
return pte_young(*ptep);
}
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write)
{
int ret;
kvm_pfn_t hfn;
bool writable;
short vma_pageshift;
gfn_t gfn = gpa >> PAGE_SHIFT;
struct vm_area_struct *vma;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
/* We need minimum second+third level pages */
ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
if (ret) {
kvm_err("Failed to topup G-stage cache\n");
return ret;
}
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, hva);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
if (is_vm_hugetlb_page(vma))
vma_pageshift = huge_page_shift(hstate_vma(vma));
else
vma_pageshift = PAGE_SHIFT;
vma_pagesize = 1ULL << vma_pageshift;
if (logging || (vma->vm_flags & VM_PFNMAP))
vma_pagesize = PAGE_SIZE;
if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
/*
* Read mmu_invalidate_seq so that KVM can detect if the results of
	 * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
* kvm->mmu_lock.
*
* Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
* with the smp_wmb() in kvm_mmu_invalidate_end().
*/
mmu_seq = kvm->mmu_invalidate_seq;
mmap_read_unlock(current->mm);
if (vma_pagesize != PUD_SIZE &&
vma_pagesize != PMD_SIZE &&
vma_pagesize != PAGE_SIZE) {
kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
return -EFAULT;
}
hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
vma_pageshift, current);
return 0;
}
if (is_error_noslot_pfn(hfn))
return -EFAULT;
/*
* If logging is active then we allow writable pages only
* for write faults.
*/
if (logging && !is_write)
writable = false;
spin_lock(&kvm->mmu_lock);
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
if (writable) {
kvm_set_pfn_dirty(hfn);
mark_page_dirty(kvm, gfn);
ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
vma_pagesize, false, true);
} else {
ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
vma_pagesize, true, true);
}
if (ret)
kvm_err("Failed to map in G-stage\n");
out_unlock:
spin_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(hfn);
kvm_release_pfn_clean(hfn);
return ret;
}
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
{
struct page *pgd_page;
if (kvm->arch.pgd != NULL) {
kvm_err("kvm_arch already initialized?\n");
return -EINVAL;
}
pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
get_order(gstage_pgd_size));
if (!pgd_page)
return -ENOMEM;
kvm->arch.pgd = page_to_virt(pgd_page);
kvm->arch.pgd_phys = page_to_phys(pgd_page);
return 0;
}
void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
{
void *pgd = NULL;
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
kvm->arch.pgd_phys = 0;
}
spin_unlock(&kvm->mmu_lock);
if (pgd)
free_pages((unsigned long)pgd, get_order(gstage_pgd_size));
}
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
{
unsigned long hgatp = gstage_mode;
struct kvm_arch *k = &vcpu->kvm->arch;
hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;
csr_write(CSR_HGATP, hgatp);
if (!kvm_riscv_gstage_vmid_bits())
kvm_riscv_local_hfence_gvma_all();
}
void __init kvm_riscv_gstage_mode_detect(void)
{
#ifdef CONFIG_64BIT
/* Try Sv57x4 G-stage mode */
csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
gstage_mode = (HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
gstage_pgd_levels = 5;
goto skip_sv48x4_test;
}
/* Try Sv48x4 G-stage mode */
csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
gstage_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
gstage_pgd_levels = 4;
}
skip_sv48x4_test:
csr_write(CSR_HGATP, 0);
kvm_riscv_local_hfence_gvma_all();
#endif
}
unsigned long __init kvm_riscv_gstage_mode(void)
{
return gstage_mode >> HGATP_MODE_SHIFT;
}
int kvm_riscv_gstage_gpa_bits(void)
{
return gstage_gpa_bits;
}
| linux-master | arch/riscv/kvm/mmu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Regents of the University of California
*/
#include <linux/delay.h>
#include <linux/math.h>
#include <linux/param.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/processor.h>
/*
 * This is copied from arch/arm/include/asm/delay.h
*
* Loop (or tick) based delay:
*
* loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
*
* where:
*
* jiffies_per_sec = HZ
* us_per_sec = 1000000
*
* Therefore the constant part is HZ / 1000000 which is a small
* fractional number. To make this usable with integer math, we
* scale up this constant by 2^31, perform the actual multiplication,
* and scale the result back down by 2^31 with a simple shift:
*
* loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
*
* where:
*
* UDELAY_MULT = 2^31 * HZ / 1000000
* = (2^31 / 1000000) * HZ
* = 2147.483648 * HZ
* = 2147 * HZ + 483648 * HZ / 1000000
*
* 31 is the biggest scale shift value that won't overflow 32 bits for
* delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
*/
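/*
 * Worked example (illustrative only, assuming HZ = 250):
 *
 *   UDELAY_MULT = 2147 * 250 + 483648 * 250 / 1000000
 *               = 536750 + 120
 *               = 536870
 *
 * which matches 2^31 * 250 / 1000000 = 536870.912 rounded down, so
 * udelay(100) spins for roughly (loops_per_jiffy * 100 * 536870) >> 31
 * loops.
 */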
#define MAX_UDELAY_US 2000
#define MAX_UDELAY_HZ 1000
#define UDELAY_MULT (2147UL * HZ + 483648UL * HZ / 1000000UL)
#define UDELAY_SHIFT 31
#if HZ > MAX_UDELAY_HZ
#error "HZ > MAX_UDELAY_HZ"
#endif
/*
* RISC-V supports both UDELAY and NDELAY. This is largely the same as above,
* but with different constants. I added 10 bits to the shift to get this, but
* the result is that I need a 64-bit multiply, which is slow on 32-bit
* platforms.
*
* NDELAY_MULT = 2^41 * HZ / 1000000000
* = (2^41 / 1000000000) * HZ
* = 2199.02325555 * HZ
* = 2199 * HZ + 23255550 * HZ / 1000000000
*
* The maximum here is to avoid 64-bit overflow, but it isn't checked as it
* won't happen.
*/
#define MAX_NDELAY_NS (1ULL << 42)
#define MAX_NDELAY_HZ MAX_UDELAY_HZ
#define NDELAY_MULT ((unsigned long long)(2199ULL * HZ + 23255550ULL * HZ / 1000000000ULL))
#define NDELAY_SHIFT 41
#if HZ > MAX_NDELAY_HZ
#error "HZ > MAX_NDELAY_HZ"
#endif
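/*
 * Busy-wait for @cycles timebase cycles by polling get_cycles() until the
 * requested count has elapsed.
 */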
void __delay(unsigned long cycles)
{
u64 t0 = get_cycles();
while ((unsigned long)(get_cycles() - t0) < cycles)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT;
u64 n;
if (unlikely(usecs > MAX_UDELAY_US)) {
n = (u64)usecs * riscv_timebase;
do_div(n, 1000000);
__delay(n);
return;
}
__delay(ucycles >> UDELAY_SHIFT);
}
EXPORT_SYMBOL(udelay);
void ndelay(unsigned long nsecs)
{
/*
	 * This doesn't bother checking for overflow, as it won't happen for
	 * anything shorter than roughly an hour of delay.
*/
unsigned long long ncycles = nsecs * lpj_fine * NDELAY_MULT;
__delay(ncycles >> NDELAY_SHIFT);
}
EXPORT_SYMBOL(ndelay);
| linux-master | arch/riscv/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/error-injection.h>
#include <linux/kprobes.h>
void override_function_with_return(struct pt_regs *regs)
{
instruction_pointer_set(regs, regs->ra);
}
NOKPROBE_SYMBOL(override_function_with_return);
| linux-master | arch/riscv/lib/error-inject.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 SiFive
*/
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>
struct pageattr_masks {
pgprot_t set_mask;
pgprot_t clear_mask;
};
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
struct pageattr_masks *masks = walk->private;
unsigned long new_val = val;
new_val &= ~(pgprot_val(masks->clear_mask));
new_val |= (pgprot_val(masks->set_mask));
return new_val;
}
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pgd_t val = READ_ONCE(*pgd);
if (pgd_leaf(val)) {
val = __pgd(set_pageattr_masks(pgd_val(val), walk));
set_pgd(pgd, val);
}
return 0;
}
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
p4d_t val = READ_ONCE(*p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
set_p4d(p4d, val);
}
return 0;
}
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pud_t val = READ_ONCE(*pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
set_pud(pud, val);
}
return 0;
}
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pmd_t val = READ_ONCE(*pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
set_pmd(pmd, val);
}
return 0;
}
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_t val = READ_ONCE(*pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
return 0;
}
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
int depth, struct mm_walk *walk)
{
/* Nothing to do here */
return 0;
}
static const struct mm_walk_ops pageattr_ops = {
.pgd_entry = pageattr_pgd_entry,
.p4d_entry = pageattr_p4d_entry,
.pud_entry = pageattr_pud_entry,
.pmd_entry = pageattr_pmd_entry,
.pte_entry = pageattr_pte_entry,
.pte_hole = pageattr_pte_hole,
.walk_lock = PGWALK_RDLOCK,
};
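/*
 * Walk [addr, addr + numpages * PAGE_SIZE) in init_mm, applying set_mask and
 * clearing clear_mask on every mapping level found, then flush the kernel
 * TLB for the range.
 */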
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
pgprot_t clear_mask)
{
int ret;
unsigned long start = addr;
unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = set_mask,
.clear_mask = clear_mask
};
if (!numpages)
return 0;
mmap_write_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
return ret;
}
int set_memory_rw_nx(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
__pgprot(_PAGE_EXEC));
}
int set_memory_ro(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
__pgprot(_PAGE_WRITE));
}
int set_memory_rw(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
__pgprot(0));
}
int set_memory_x(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}
int set_memory_nx(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
int set_direct_map_invalid_noflush(struct page *page)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
struct pageattr_masks masks = {
.set_mask = __pgprot(0),
.clear_mask = __pgprot(_PAGE_PRESENT)
};
mmap_read_lock(&init_mm);
ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
mmap_read_unlock(&init_mm);
return ret;
}
int set_direct_map_default_noflush(struct page *page)
{
int ret;
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + PAGE_SIZE;
struct pageattr_masks masks = {
.set_mask = PAGE_KERNEL,
.clear_mask = __pgprot(0)
};
mmap_read_lock(&init_mm);
ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
mmap_read_unlock(&init_mm);
return ret;
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!debug_pagealloc_enabled())
return;
if (enable)
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(_PAGE_PRESENT), __pgprot(0));
else
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif
bool kernel_page_present(struct page *page)
{
unsigned long addr = (unsigned long)page_address(page);
pgd_t *pgd;
pud_t *pud;
p4d_t *p4d;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset_k(addr);
if (!pgd_present(*pgd))
return false;
if (pgd_leaf(*pgd))
return true;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return false;
if (p4d_leaf(*p4d))
return true;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return false;
if (pud_leaf(*pud))
return true;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
return false;
if (pmd_leaf(*pmd))
return true;
pte = pte_offset_kernel(pmd, addr);
return pte_present(*pte);
}
| linux-master | arch/riscv/mm/pageattr.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/hugetlb.h>
#include <linux/err.h>
#ifdef CONFIG_RISCV_ISA_SVNAPOT
pte_t huge_ptep_get(pte_t *ptep)
{
unsigned long pte_num;
int i;
pte_t orig_pte = ptep_get(ptep);
if (!pte_present(orig_pte) || !pte_napot(orig_pte))
return orig_pte;
pte_num = napot_pte_num(napot_cont_order(orig_pte));
for (i = 0; i < pte_num; i++, ptep++) {
pte_t pte = ptep_get(ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
return orig_pte;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long addr,
unsigned long sz)
{
unsigned long order;
pte_t *pte = NULL;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
p4d = p4d_alloc(mm, pgd, addr);
if (!p4d)
return NULL;
pud = pud_alloc(mm, p4d, addr);
if (!pud)
return NULL;
if (sz == PUD_SIZE) {
pte = (pte_t *)pud;
goto out;
}
if (sz == PMD_SIZE) {
if (want_pmd_share(vma, addr) && pud_none(*pud))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
goto out;
}
pmd = pmd_alloc(mm, pud, addr);
if (!pmd)
return NULL;
for_each_napot_order(order) {
if (napot_cont_size(order) == sz) {
pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
break;
}
}
out:
if (pte) {
pte_t pteval = ptep_get_lockless(pte);
WARN_ON_ONCE(pte_present(pteval) && !pte_huge(pteval));
}
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr,
unsigned long sz)
{
unsigned long order;
pte_t *pte = NULL;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
if (!pgd_present(*pgd))
return NULL;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, addr);
if (sz == PUD_SIZE)
/* must be pud huge, non-present or none */
return (pte_t *)pud;
if (!pud_present(*pud))
return NULL;
pmd = pmd_offset(pud, addr);
if (sz == PMD_SIZE)
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;
if (!pmd_present(*pmd))
return NULL;
for_each_napot_order(order) {
if (napot_cont_size(order) == sz) {
pte = pte_offset_huge(pmd, addr & napot_cont_mask(order));
break;
}
}
return pte;
}
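/*
 * Clear a contiguous (NAPOT) range of @pte_num PTEs and fold any dirty and
 * young bits found in the individual entries into the returned PTE.
 */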
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pte_num)
{
pte_t orig_pte = ptep_get(ptep);
unsigned long i;
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
return orig_pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pte_num)
{
pte_t orig_pte = get_clear_contig(mm, addr, ptep, pte_num);
struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
bool valid = !pte_none(orig_pte);
if (valid)
flush_tlb_range(&vma, addr, addr + (PAGE_SIZE * pte_num));
return orig_pte;
}
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
unsigned long order;
for_each_napot_order(order) {
if (shift == napot_cont_shift(order)) {
entry = pte_mknapot(entry, order);
break;
}
}
if (order == NAPOT_ORDER_MAX)
entry = pte_mkhuge(entry);
return entry;
}
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
pte_t pte)
{
int i, pte_num;
if (!pte_napot(pte)) {
set_pte_at(mm, addr, ptep, pte);
return;
}
pte_num = napot_pte_num(napot_cont_order(pte));
for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
set_pte_at(mm, addr, ptep, pte);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep,
pte_t pte,
int dirty)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long order;
pte_t orig_pte;
int i, pte_num;
if (!pte_napot(pte))
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
order = napot_cont_order(pte);
pte_num = napot_pte_num(order);
ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
if (pte_dirty(orig_pte))
pte = pte_mkdirty(pte);
if (pte_young(orig_pte))
pte = pte_mkyoung(pte);
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
set_pte_at(mm, addr, ptep, pte);
return true;
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
pte_t orig_pte = ptep_get(ptep);
int pte_num;
if (!pte_napot(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
pte_num = napot_pte_num(napot_cont_order(orig_pte));
return get_clear_contig(mm, addr, ptep, pte_num);
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
unsigned long order;
pte_t orig_pte;
int i, pte_num;
if (!pte_napot(pte)) {
ptep_set_wrprotect(mm, addr, ptep);
return;
}
order = napot_cont_order(pte);
pte_num = napot_pte_num(order);
ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
orig_pte = pte_wrprotect(orig_pte);
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
set_pte_at(mm, addr, ptep, orig_pte);
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr,
pte_t *ptep)
{
pte_t pte = ptep_get(ptep);
int pte_num;
if (!pte_napot(pte))
return ptep_clear_flush(vma, addr, ptep);
pte_num = napot_pte_num(napot_cont_order(pte));
return get_clear_contig_flush(vma->vm_mm, addr, ptep, pte_num);
}
void huge_pte_clear(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long sz)
{
pte_t pte = READ_ONCE(*ptep);
int i, pte_num;
if (!pte_napot(pte)) {
pte_clear(mm, addr, ptep);
return;
}
pte_num = napot_pte_num(napot_cont_order(pte));
for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
pte_clear(mm, addr, ptep);
}
static __init bool is_napot_size(unsigned long size)
{
unsigned long order;
if (!has_svnapot())
return false;
for_each_napot_order(order) {
if (size == napot_cont_size(order))
return true;
}
return false;
}
static __init int napot_hugetlbpages_init(void)
{
if (has_svnapot()) {
unsigned long order;
for_each_napot_order(order)
hugetlb_add_hstate(order);
}
return 0;
}
arch_initcall(napot_hugetlbpages_init);
#else
static __init bool is_napot_size(unsigned long size)
{
return false;
}
#endif /*CONFIG_RISCV_ISA_SVNAPOT*/
int pud_huge(pud_t pud)
{
return pud_leaf(pud);
}
int pmd_huge(pmd_t pmd)
{
return pmd_leaf(pmd);
}
bool __init arch_hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
return true;
else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
return true;
else if (is_napot_size(size))
return true;
else
return false;
}
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
/* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
if (IS_ENABLED(CONFIG_64BIT))
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
return 0;
}
arch_initcall(gigantic_pages_init);
#endif
| linux-master | arch/riscv/mm/hugetlbpage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Ventana Micro Systems Inc.
*/
#include <linux/export.h>
#include <linux/libnvdimm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>
void arch_wb_cache_pmem(void *addr, size_t size)
{
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.wback)) {
noncoherent_cache_ops.wback(virt_to_phys(addr), size);
return;
}
#endif
ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.inv)) {
noncoherent_cache_ops.inv(virt_to_phys(addr), size);
return;
}
#endif
ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
| linux-master | arch/riscv/mm/pmem.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
/*
* Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
* which is right before the kernel.
*
* For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
* the page global directory with kasan_early_shadow_pmd.
*
 * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the
 * region end is not, so we have to go down to the PUD level.
*/
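/*
 * For reference, the generic kasan_mem_to_shadow() translation used
 * throughout this file is:
 *
 *   shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel
 * address space.
 */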
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pte_t *ptep, *p;
if (pmd_none(*pmd)) {
p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
ptep = pte_offset_kernel(pmd, vaddr);
do {
if (pte_none(*ptep)) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
}
} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
}
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pmd_t *pmdp, *p;
unsigned long next;
if (pud_none(*pud)) {
p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
pmdp = pmd_offset(pud, vaddr);
do {
next = pmd_addr_end(vaddr, end);
if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
if (phys_addr) {
set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
continue;
}
}
kasan_populate_pte(pmdp, vaddr, next);
} while (pmdp++, vaddr = next, vaddr != end);
}
static void __init kasan_populate_pud(p4d_t *p4d,
unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pud_t *pudp, *p;
unsigned long next;
if (p4d_none(*p4d)) {
p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
pudp = pud_offset(p4d, vaddr);
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (phys_addr) {
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
continue;
}
}
kasan_populate_pmd(pudp, vaddr, next);
} while (pudp++, vaddr = next, vaddr != end);
}
static void __init kasan_populate_p4d(pgd_t *pgd,
unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
p4d_t *p4dp, *p;
unsigned long next;
if (pgd_none(*pgd)) {
p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
p4dp = p4d_offset(pgd, vaddr);
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
if (phys_addr) {
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
continue;
}
}
kasan_populate_pud(p4dp, vaddr, next);
} while (p4dp++, vaddr = next, vaddr != end);
}
static void __init kasan_populate_pgd(pgd_t *pgdp,
unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
unsigned long next;
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
if (phys_addr) {
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
continue;
}
}
kasan_populate_p4d(pgdp, vaddr, next);
} while (pgdp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_clear_pud(p4d_t *p4dp,
unsigned long vaddr, unsigned long end)
{
pud_t *pudp, *base_pud;
unsigned long next;
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
pud_clear(pudp);
continue;
}
BUG();
} while (pudp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_clear_p4d(pgd_t *pgdp,
unsigned long vaddr, unsigned long end)
{
p4d_t *p4dp, *base_p4d;
unsigned long next;
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
p4d_clear(p4dp);
continue;
}
kasan_early_clear_pud(p4dp, vaddr, next);
} while (p4dp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_clear_pgd(pgd_t *pgdp,
unsigned long vaddr, unsigned long end)
{
unsigned long next;
do {
next = pgd_addr_end(vaddr, end);
if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
pgd_clear(pgdp);
continue;
}
kasan_early_clear_p4d(pgdp, vaddr, next);
} while (pgdp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_populate_pud(p4d_t *p4dp,
unsigned long vaddr,
unsigned long end)
{
pud_t *pudp, *base_pud;
phys_addr_t phys_addr;
unsigned long next;
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
continue;
}
BUG();
} while (pudp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_populate_p4d(pgd_t *pgdp,
unsigned long vaddr,
unsigned long end)
{
p4d_t *p4dp, *base_p4d;
phys_addr_t phys_addr;
unsigned long next;
/*
	 * We can't use pgd_page_vaddr() here as it would return a linear
	 * mapping address that is not mapped yet. When populating
	 * early_pg_dir we need the physical address, and when populating
	 * swapper_pg_dir we need the kernel virtual address, so use the
	 * pt_ops facility instead.
	 * Note that this is then completely equivalent to
	 * p4dp = p4d_offset(pgdp, vaddr)
*/
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
continue;
}
kasan_early_populate_pud(p4dp, vaddr, next);
} while (p4dp++, vaddr = next, vaddr != end);
}
static void __init kasan_early_populate_pgd(pgd_t *pgdp,
unsigned long vaddr,
unsigned long end)
{
phys_addr_t phys_addr;
unsigned long next;
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
continue;
}
kasan_early_populate_p4d(pgdp, vaddr, next);
} while (pgdp++, vaddr = next, vaddr != end);
}
asmlinkage void __init kasan_early_init(void)
{
uintptr_t i;
BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_early_shadow_pte + i,
pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
for (i = 0; i < PTRS_PER_PMD; ++i)
set_pmd(kasan_early_shadow_pmd + i,
pfn_pmd(PFN_DOWN
(__pa((uintptr_t)kasan_early_shadow_pte)),
PAGE_TABLE));
if (pgtable_l4_enabled) {
for (i = 0; i < PTRS_PER_PUD; ++i)
set_pud(kasan_early_shadow_pud + i,
pfn_pud(PFN_DOWN
(__pa(((uintptr_t)kasan_early_shadow_pmd))),
PAGE_TABLE));
}
if (pgtable_l5_enabled) {
for (i = 0; i < PTRS_PER_P4D; ++i)
set_p4d(kasan_early_shadow_p4d + i,
pfn_p4d(PFN_DOWN
(__pa(((uintptr_t)kasan_early_shadow_pud))),
PAGE_TABLE));
}
kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
KASAN_SHADOW_START, KASAN_SHADOW_END);
local_flush_tlb_all();
}
void __init kasan_swapper_init(void)
{
kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
KASAN_SHADOW_START, KASAN_SHADOW_END);
local_flush_tlb_all();
}
static void __init kasan_populate(void *start, void *end)
{
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);
kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}
static void __init kasan_shallow_populate_pud(p4d_t *p4d,
unsigned long vaddr, unsigned long end)
{
unsigned long next;
void *p;
pud_t *pud_k = pud_offset(p4d, vaddr);
do {
next = pud_addr_end(vaddr, end);
if (pud_none(*pud_k)) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
BUG();
} while (pud_k++, vaddr = next, vaddr != end);
}
static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
unsigned long vaddr, unsigned long end)
{
unsigned long next;
void *p;
p4d_t *p4d_k = p4d_offset(pgd, vaddr);
do {
next = p4d_addr_end(vaddr, end);
if (p4d_none(*p4d_k)) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
kasan_shallow_populate_pud(p4d_k, vaddr, end);
} while (p4d_k++, vaddr = next, vaddr != end);
}
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
unsigned long next;
void *p;
pgd_t *pgd_k = pgd_offset_k(vaddr);
do {
next = pgd_addr_end(vaddr, end);
if (pgd_none(*pgd_k)) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
}
kasan_shallow_populate_p4d(pgd_k, vaddr, next);
} while (pgd_k++, vaddr = next, vaddr != end);
}
static void __init kasan_shallow_populate(void *start, void *end)
{
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);
kasan_shallow_populate_pgd(vaddr, vend);
}
static void __init create_tmp_mapping(void)
{
void *ptr;
p4d_t *base_p4d;
/*
* We need to clean the early mapping: this is hard to achieve "in-place",
* so install a temporary mapping like arm64 and x86 do.
*/
memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);
/* Copy the last p4d since it is shared with the kernel mapping. */
if (pgtable_l5_enabled) {
ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
base_p4d = tmp_p4d;
} else {
base_p4d = (p4d_t *)tmp_pg_dir;
}
/* Copy the last pud since it is shared with the kernel mapping. */
if (pgtable_l4_enabled) {
ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
}
}
void __init kasan_init(void)
{
phys_addr_t p_start, p_end;
u64 i;
create_tmp_mapping();
csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);
kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
(void *)kasan_mem_to_shadow((void *)VMALLOC_START));
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
kasan_shallow_populate(
(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
/* Shallow populate modules and BPF which are vmalloc-allocated */
kasan_shallow_populate(
(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
(void *)kasan_mem_to_shadow((void *)MODULES_END));
} else {
kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
}
/* Populate the linear mapping */
for_each_mem_range(i, &p_start, &p_end) {
void *start = (void *)__va(p_start);
void *end = (void *)__va(p_end);
if (start >= end)
break;
kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
}
/* Populate kernel */
kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
mk_pte(virt_to_page(kasan_early_shadow_page),
__pgprot(_PAGE_PRESENT | _PAGE_READ |
_PAGE_ACCESSED)));
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
init_task.kasan_depth = 0;
csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
local_flush_tlb_all();
}
| linux-master | arch/riscv/mm/kasan_init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* RISC-V specific functions to support DMA for non-coherent devices
*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
*/
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>
static bool noncoherent_supported __ro_after_init;
int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
EXPORT_SYMBOL_GPL(dma_cache_alignment);
struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
.wback = NULL,
.inv = NULL,
.wback_inv = NULL,
};
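/*
 * Cache maintenance helpers: prefer a vendor-registered non-standard
 * callback when one exists, otherwise fall back to the ALT_CMO_OP()
 * cache-management instruction sequence on the virtual address.
 */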
static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.wback)) {
noncoherent_cache_ops.wback(paddr, size);
return;
}
#endif
ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.inv)) {
noncoherent_cache_ops.inv(paddr, size);
return;
}
#endif
ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
void *vaddr = phys_to_virt(paddr);
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.wback_inv)) {
noncoherent_cache_ops.wback_inv(paddr, size);
return;
}
#endif
ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}
static inline bool arch_sync_dma_clean_before_fromdevice(void)
{
return true;
}
static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
{
return true;
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
arch_dma_cache_wback(paddr, size);
break;
case DMA_FROM_DEVICE:
if (!arch_sync_dma_clean_before_fromdevice()) {
arch_dma_cache_inv(paddr, size);
break;
}
fallthrough;
case DMA_BIDIRECTIONAL:
/* Skip the invalidate here if it's done later */
if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
arch_sync_dma_cpu_needs_post_dma_flush())
arch_dma_cache_wback(paddr, size);
else
arch_dma_cache_wback_inv(paddr, size);
break;
default:
break;
}
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
break;
case DMA_FROM_DEVICE:
case DMA_BIDIRECTIONAL:
/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
if (arch_sync_dma_cpu_needs_post_dma_flush())
arch_dma_cache_inv(paddr, size);
break;
default:
break;
}
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
void *flush_addr = page_address(page);
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
if (unlikely(noncoherent_cache_ops.wback_inv)) {
noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
return;
}
#endif
ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
TAINT_CPU_OUT_OF_SPEC,
"%s %s: ARCH_DMA_MINALIGN smaller than riscv,cbom-block-size (%d < %d)",
dev_driver_string(dev), dev_name(dev),
ARCH_DMA_MINALIGN, riscv_cbom_block_size);
WARN_TAINT(!coherent && !noncoherent_supported, TAINT_CPU_OUT_OF_SPEC,
"%s %s: device non-coherent but no non-coherent operations supported",
dev_driver_string(dev), dev_name(dev));
dev->dma_coherent = coherent;
}
void riscv_noncoherent_supported(void)
{
WARN(!riscv_cbom_block_size,
"Non-coherent DMA support enabled without a block size\n");
noncoherent_supported = true;
}
void __init riscv_set_dma_cache_alignment(void)
{
if (!noncoherent_supported)
dma_cache_alignment = 1;
}
void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
if (!ops)
return;
noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
| linux-master | arch/riscv/mm/dma-noncoherent.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
* Copyright (C) 2020 FORTH-ICS/CARV
* Nick Kossifidis <[email protected]>
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#ifdef CONFIG_RELOCATABLE
#include <linux/elf.h>
#endif
#include <linux/kfence.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/ptdump.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif
#ifdef CONFIG_64BIT
u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
#else
u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);
bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
extern char _start[];
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;
static phys_addr_t dma32_phys_limit __initdata;
static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
}
#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
#define LOG2_SZ_1K ilog2(SZ_1K)
#define LOG2_SZ_1M ilog2(SZ_1M)
#define LOG2_SZ_1G ilog2(SZ_1G)
#define LOG2_SZ_1T ilog2(SZ_1T)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
(((t) - (b)) >> LOG2_SZ_1K));
}
static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
(((t) - (b)) >> LOG2_SZ_1M));
}
static inline void print_mlg(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld GB)\n", name, b, t,
(((t) - (b)) >> LOG2_SZ_1G));
}
#ifdef CONFIG_64BIT
static inline void print_mlt(char *name, unsigned long b, unsigned long t)
{
pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld TB)\n", name, b, t,
(((t) - (b)) >> LOG2_SZ_1T));
}
#else
#define print_mlt(n, b, t) do {} while (0)
#endif
static inline void print_ml(char *name, unsigned long b, unsigned long t)
{
unsigned long diff = t - b;
if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
print_mlt(name, b, t);
else if ((diff >> LOG2_SZ_1G) >= 10)
print_mlg(name, b, t);
else if ((diff >> LOG2_SZ_1M) >= 10)
print_mlm(name, b, t);
else
print_mlk(name, b, t);
}
static void __init print_vm_layout(void)
{
pr_notice("Virtual kernel memory layout:\n");
print_ml("fixmap", (unsigned long)FIXADDR_START,
(unsigned long)FIXADDR_TOP);
print_ml("pci io", (unsigned long)PCI_IO_START,
(unsigned long)PCI_IO_END);
print_ml("vmemmap", (unsigned long)VMEMMAP_START,
(unsigned long)VMEMMAP_END);
print_ml("vmalloc", (unsigned long)VMALLOC_START,
(unsigned long)VMALLOC_END);
#ifdef CONFIG_64BIT
print_ml("modules", (unsigned long)MODULES_VADDR,
(unsigned long)MODULES_END);
#endif
print_ml("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
if (IS_ENABLED(CONFIG_64BIT)) {
#ifdef CONFIG_KASAN
print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
print_ml("kernel", (unsigned long)kernel_map.virt_addr,
(unsigned long)ADDRESS_SPACE_END);
}
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */
swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
memblock_free_all();
print_vm_layout();
}
/* Limit the memory size via mem. */
static phys_addr_t memory_limit;
static int __init early_mem(char *p)
{
u64 size;
if (!p)
return 1;
size = memparse(p, &p) & PAGE_MASK;
memory_limit = min_t(u64, size, memory_limit);
pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);
return 0;
}
early_param("mem", early_mem);
static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t max_mapped_addr;
phys_addr_t phys_ram_end, vmlinux_start;
if (IS_ENABLED(CONFIG_XIP_KERNEL))
vmlinux_start = __pa_symbol(&_sdata);
else
vmlinux_start = __pa_symbol(&_start);
memblock_enforce_memory_limit(memory_limit);
/*
* Make sure we align the reservation on PMD_SIZE since we will
* map the kernel in the linear mapping as read-only: we do not want
* any allocation to happen between _end and the next pmd aligned page.
*/
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
/*
* Reserve from the start of the kernel to the end of the kernel
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
phys_ram_end = memblock_end_of_DRAM();
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
*/
if (!IS_ENABLED(CONFIG_XIP_KERNEL))
phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
/*
* In 64-bit, any use of __va/__pa before this point is wrong as we
* did not know the start of DRAM before.
*/
if (IS_ENABLED(CONFIG_64BIT))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory can not be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory. For
	 * 64-bit kernels this problem can't happen here as the end of the
	 * virtual address space is occupied by the kernel mapping; there,
	 * this check must be done as soon as the kernel mapping base address
	 * is determined.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
max_mapped_addr = __pa(~(ulong)0);
if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
}
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
reserve_initrd_mem();
/*
* No allocation should be done before reserving the memory as defined
* in the device tree, otherwise the allocation could end up in a
* reserved region.
*/
early_init_fdt_scan_reserved_mem();
/*
* If DTB is built in, no need to reserve its memblock.
* Otherwise, do reserve it but avoid using
* early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
dma_contiguous_reserve(dma32_phys_limit);
if (IS_ENABLED(CONFIG_64BIT))
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}
#ifdef CONFIG_MMU
struct pt_alloc_ops pt_ops __initdata;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */
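/*
* Translate VM_{READ,WRITE,EXEC,SHARED} combinations into page
* protections. Note that private writable mappings get PAGE_COPY so
* that the first write triggers copy-on-write.
*/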
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READ,
[VM_WRITE] = PAGE_COPY,
[VM_WRITE | VM_READ] = PAGE_COPY,
[VM_EXEC] = PAGE_EXEC,
[VM_EXEC | VM_READ] = PAGE_READ_EXEC,
[VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READ,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
[VM_SHARED | VM_EXEC] = PAGE_EXEC,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READ_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *ptep;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = &fixmap_pte[pte_index(addr)];
if (pgprot_val(prot))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
else
pte_clear(&init_mm, addr, ptep);
local_flush_tlb_page(addr);
}
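/*
* Each page-table level has three flavours of alloc/get helpers,
* selected through pt_ops as boot progresses: "early" (MMU off,
* physical addresses are used directly), "fixmap" (MMU on but no
* linear mapping yet, so pages are temporarily mapped through the
* fixmap) and "late" (normal allocators and __va() are available).
*/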
static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
return (pte_t *)((uintptr_t)pa);
}
static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
clear_fixmap(FIX_PTE);
return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}
static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
return (pte_t *) __va(pa);
}
static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
/*
* We only create PMD or PGD early mappings so we
* should never reach here with MMU disabled.
*/
BUG();
}
static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
BUG_ON(!ptdesc || !pagetable_pte_ctor(ptdesc));
return __pa((pte_t *)ptdesc_address(ptdesc));
}
static void __init create_pte_mapping(pte_t *ptep,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
uintptr_t pte_idx = pte_index(va);
BUG_ON(sz != PAGE_SIZE);
if (pte_none(ptep[pte_idx]))
ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
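/*
* When the PMD level is not folded (i.e. more than two page-table
* levels), we also need PMD/PUD/P4D variants of the tables and
* helpers above for the trampoline, fixmap and early mappings.
*/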
#ifndef __PAGETABLE_PMD_FOLDED
static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */
static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
#define fixmap_p4d ((p4d_t *)XIP_FIXUP(fixmap_p4d))
#define early_p4d ((p4d_t *)XIP_FIXUP(early_p4d))
#endif /* CONFIG_XIP_KERNEL */
static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
#define fixmap_pud ((pud_t *)XIP_FIXUP(fixmap_pud))
#define early_pud ((pud_t *)XIP_FIXUP(early_pud))
#endif /* CONFIG_XIP_KERNEL */
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
/* Before MMU is enabled */
return (pmd_t *)((uintptr_t)pa);
}
static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
clear_fixmap(FIX_PMD);
return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}
static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
return (pmd_t *) __va(pa);
}
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);
return (uintptr_t)early_pmd;
}
static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
BUG_ON(!ptdesc || !pagetable_pmd_ctor(ptdesc));
return __pa((pmd_t *)ptdesc_address(ptdesc));
}
static void __init create_pmd_mapping(pmd_t *pmdp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pte_t *ptep;
phys_addr_t pte_phys;
uintptr_t pmd_idx = pmd_index(va);
if (sz == PMD_SIZE) {
if (pmd_none(pmdp[pmd_idx]))
pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
return;
}
if (pmd_none(pmdp[pmd_idx])) {
pte_phys = pt_ops.alloc_pte(va);
pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
ptep = pt_ops.get_pte_virt(pte_phys);
memset(ptep, 0, PAGE_SIZE);
} else {
pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
ptep = pt_ops.get_pte_virt(pte_phys);
}
create_pte_mapping(ptep, va, pa, sz, prot);
}
static pud_t *__init get_pud_virt_early(phys_addr_t pa)
{
return (pud_t *)((uintptr_t)pa);
}
static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
{
clear_fixmap(FIX_PUD);
return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
}
static pud_t *__init get_pud_virt_late(phys_addr_t pa)
{
return (pud_t *)__va(pa);
}
static phys_addr_t __init alloc_pud_early(uintptr_t va)
{
/* Only one PUD is available for early mapping */
BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
return (uintptr_t)early_pud;
}
static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
{
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t alloc_pud_late(uintptr_t va)
{
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
BUG_ON(!vaddr);
return __pa(vaddr);
}
static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
{
return (p4d_t *)((uintptr_t)pa);
}
static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
{
clear_fixmap(FIX_P4D);
return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
}
static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
{
return (p4d_t *)__va(pa);
}
static phys_addr_t __init alloc_p4d_early(uintptr_t va)
{
/* Only one P4D is available for early mapping */
BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
return (uintptr_t)early_p4d;
}
static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
{
return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}
static phys_addr_t alloc_p4d_late(uintptr_t va)
{
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
BUG_ON(!vaddr);
return __pa(vaddr);
}
static void __init create_pud_mapping(pud_t *pudp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pmd_t *nextp;
phys_addr_t next_phys;
uintptr_t pud_index = pud_index(va);
if (sz == PUD_SIZE) {
if (pud_val(pudp[pud_index]) == 0)
pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
return;
}
if (pud_val(pudp[pud_index]) == 0) {
next_phys = pt_ops.alloc_pmd(va);
pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = pt_ops.get_pmd_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
nextp = pt_ops.get_pmd_virt(next_phys);
}
create_pmd_mapping(nextp, va, pa, sz, prot);
}
static void __init create_p4d_mapping(p4d_t *p4dp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pud_t *nextp;
phys_addr_t next_phys;
uintptr_t p4d_index = p4d_index(va);
if (sz == P4D_SIZE) {
if (p4d_val(p4dp[p4d_index]) == 0)
p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
return;
}
if (p4d_val(p4dp[p4d_index]) == 0) {
next_phys = pt_ops.alloc_pud(va);
p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = pt_ops.get_pud_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
nextp = pt_ops.get_pud_virt(next_phys);
}
create_pud_mapping(nextp, va, pa, sz, prot);
}
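/*
* The level directly below the PGD depends on how many page-table
* levels are enabled at runtime (sv57/sv48/sv39), so dispatch through
* pgtable_l5_enabled/pgtable_l4_enabled.
*/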
#define pgd_next_t p4d_t
#define alloc_pgd_next(__va) (pgtable_l5_enabled ? \
pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ? \
pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
#define get_pgd_next_virt(__pa) (pgtable_l5_enabled ? \
pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ? \
pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
(pgtable_l5_enabled ? \
create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
(pgtable_l4_enabled ? \
create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) : \
create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
#define fixmap_pgd_next (pgtable_l5_enabled ? \
(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ? \
(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
#define trampoline_pgd_next (pgtable_l5_enabled ? \
(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ? \
(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
#else
#define pgd_next_t pte_t
#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next ((uintptr_t)fixmap_pte)
#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
#endif /* __PAGETABLE_PMD_FOLDED */
void __init create_pgd_mapping(pgd_t *pgdp,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
pgd_next_t *nextp;
phys_addr_t next_phys;
uintptr_t pgd_idx = pgd_index(va);
if (sz == PGDIR_SIZE) {
if (pgd_val(pgdp[pgd_idx]) == 0)
pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
return;
}
if (pgd_val(pgdp[pgd_idx]) == 0) {
next_phys = alloc_pgd_next(va);
pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = get_pgd_next_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
nextp = get_pgd_next_virt(next_phys);
}
create_pgd_next_mapping(nextp, va, pa, sz, prot);
}
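/*
* Pick the largest leaf size that the alignment of both addresses and
* the remaining size allow.
*/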
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
phys_addr_t size)
{
if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
return PGDIR_SIZE;
if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE;
if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
return PUD_SIZE;
if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
return PMD_SIZE;
return PAGE_SIZE;
}
#ifdef CONFIG_XIP_KERNEL
#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
void *from = (void *)(&__data_loc);
void *to = (void *)CONFIG_PHYS_RAM_BASE;
size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));
memcpy(to, from, sz);
}
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
if (is_va_kernel_text(va))
return PAGE_KERNEL_READ_EXEC;
/*
* In a 64-bit kernel, the kernel mapping is outside the linear mapping,
* so we must protect its linear mapping alias from being executed and
* written.
* The rodata section is marked read-only in mark_rodata_ro().
*/
if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
return PAGE_KERNEL_READ;
return PAGE_KERNEL;
}
void mark_rodata_ro(void)
{
set_kernel_memory(__start_rodata, _data, set_memory_ro);
if (IS_ENABLED(CONFIG_64BIT))
set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
set_memory_ro);
debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
return PAGE_KERNEL;
return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
u64 __pi_set_satp_mode_from_cmdline(uintptr_t dtb_pa);
static void __init disable_pgtable_l5(void)
{
pgtable_l5_enabled = false;
kernel_map.page_offset = PAGE_OFFSET_L4;
satp_mode = SATP_MODE_48;
}
static void __init disable_pgtable_l4(void)
{
pgtable_l4_enabled = false;
kernel_map.page_offset = PAGE_OFFSET_L3;
satp_mode = SATP_MODE_39;
}
static int __init print_no4lvl(char *p)
{
pr_info("Disabled 4-level and 5-level paging");
return 0;
}
early_param("no4lvl", print_no4lvl);
static int __init print_no5lvl(char *p)
{
pr_info("Disabled 5-level paging");
return 0;
}
early_param("no5lvl", print_no5lvl);
/*
* There is a simple way to determine if 4-level paging is supported by
* the underlying hardware: establish a 1:1 mapping in 4-level page
* table mode, then read SATP to see whether the configuration was
* taken into account, meaning sv48 is supported.
*/
static __init void set_satp_mode(uintptr_t dtb_pa)
{
u64 identity_satp, hw_satp;
uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
u64 satp_mode_cmdline = __pi_set_satp_mode_from_cmdline(dtb_pa);
if (satp_mode_cmdline == SATP_MODE_57) {
disable_pgtable_l5();
} else if (satp_mode_cmdline == SATP_MODE_48) {
disable_pgtable_l5();
disable_pgtable_l4();
return;
}
create_p4d_mapping(early_p4d,
set_satp_mode_pmd, (uintptr_t)early_pud,
P4D_SIZE, PAGE_TABLE);
create_pud_mapping(early_pud,
set_satp_mode_pmd, (uintptr_t)early_pmd,
PUD_SIZE, PAGE_TABLE);
/* Handle the case where set_satp_mode straddles 2 PMDs */
create_pmd_mapping(early_pmd,
set_satp_mode_pmd, set_satp_mode_pmd,
PMD_SIZE, PAGE_KERNEL_EXEC);
create_pmd_mapping(early_pmd,
set_satp_mode_pmd + PMD_SIZE,
set_satp_mode_pmd + PMD_SIZE,
PMD_SIZE, PAGE_KERNEL_EXEC);
retry:
create_pgd_mapping(early_pg_dir,
set_satp_mode_pmd,
pgtable_l5_enabled ?
(uintptr_t)early_p4d : (uintptr_t)early_pud,
PGDIR_SIZE, PAGE_TABLE);
identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
local_flush_tlb_all();
csr_write(CSR_SATP, identity_satp);
hw_satp = csr_swap(CSR_SATP, 0ULL);
local_flush_tlb_all();
if (hw_satp != identity_satp) {
if (pgtable_l5_enabled) {
disable_pgtable_l5();
memset(early_pg_dir, 0, PAGE_SIZE);
goto retry;
}
disable_pgtable_l4();
}
memset(early_pg_dir, 0, PAGE_SIZE);
memset(early_p4d, 0, PAGE_SIZE);
memset(early_pud, 0, PAGE_SIZE);
memset(early_pmd, 0, PAGE_SIZE);
}
#endif
/*
* setup_vm() is called from head.S with MMU-off.
*
* Following requirements should be honoured for setup_vm() to work
* correctly:
* 1) It should use PC-relative addressing for accessing kernel symbols.
* To achieve this we always use GCC cmodel=medany.
* 2) The compiler instrumentation for FTRACE will not work for setup_vm()
* so disable compiler instrumentation when FTRACE is enabled.
*
* Currently, the above requirements are honoured by using custom CFLAGS
* for init.o in mm/Makefile.
*/
#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
#ifdef CONFIG_RELOCATABLE
extern unsigned long __rela_dyn_start, __rela_dyn_end;
static void __init relocate_kernel(void)
{
Elf64_Rela *rela = (Elf64_Rela *)&__rela_dyn_start;
/*
* This holds the offset between the linked virtual address and the
* relocated virtual address.
*/
uintptr_t reloc_offset = kernel_map.virt_addr - KERNEL_LINK_ADDR;
/*
* This holds the offset between kernel linked virtual address and
* physical address.
*/
uintptr_t va_kernel_link_pa_offset = KERNEL_LINK_ADDR - kernel_map.phys_addr;
for ( ; rela < (Elf64_Rela *)&__rela_dyn_end; rela++) {
Elf64_Addr addr = (rela->r_offset - va_kernel_link_pa_offset);
Elf64_Addr relocated_addr = rela->r_addend;
if (rela->r_info != R_RISCV_RELATIVE)
continue;
/*
* Make sure to not relocate vdso symbols like rt_sigreturn
* which are linked from the address 0 in vmlinux since
* vdso symbol addresses are actually used as an offset from
* mm->context.vdso in VDSO_OFFSET macro.
*/
if (relocated_addr >= KERNEL_LINK_ADDR)
relocated_addr += reloc_offset;
*(Elf64_Addr *)addr = relocated_addr;
}
}
#endif /* CONFIG_RELOCATABLE */
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
uintptr_t va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.xiprom + (va - kernel_map.virt_addr),
PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
PMD_SIZE, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
uintptr_t va, end_va;
end_va = kernel_map.virt_addr + kernel_map.size;
for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - kernel_map.virt_addr),
PMD_SIZE,
early ?
PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif
/*
* Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
* this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
* entry.
*/
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
uintptr_t dtb_pa)
{
#ifndef CONFIG_BUILTIN_DTB
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
/* Make sure the fdt fixmap address is always aligned on PMD size */
BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
/* In 32-bit only, the fdt lies in its own PGD */
if (!IS_ENABLED(CONFIG_64BIT)) {
create_pgd_mapping(early_pg_dir, fix_fdt_va,
pa, MAX_FDT_SIZE, PAGE_KERNEL);
} else {
create_pmd_mapping(fixmap_pmd, fix_fdt_va,
pa, PMD_SIZE, PAGE_KERNEL);
create_pmd_mapping(fixmap_pmd, fix_fdt_va + PMD_SIZE,
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
}
dtb_early_va = (void *)fix_fdt_va + (dtb_pa & (PMD_SIZE - 1));
#else
/*
* For 64-bit kernel, __va can't be used since it would return a linear
* mapping address whereas dtb_early_va will be used before
* setup_vm_final installs the linear mapping. For 32-bit kernel, as the
* kernel is mapped in the linear mapping, that makes no difference.
*/
dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif
dtb_early_pa = dtb_pa;
}
/*
* MMU is not enabled, the page tables are allocated directly using
* early_pmd/pud/p4d and the address returned is the physical one.
*/
static void __init pt_ops_set_early(void)
{
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
pt_ops.alloc_pmd = alloc_pmd_early;
pt_ops.get_pmd_virt = get_pmd_virt_early;
pt_ops.alloc_pud = alloc_pud_early;
pt_ops.get_pud_virt = get_pud_virt_early;
pt_ops.alloc_p4d = alloc_p4d_early;
pt_ops.get_p4d_virt = get_p4d_virt_early;
#endif
}
/*
* MMU is enabled but page table setup is not complete yet.
* fixmap page table alloc functions must be used as a means to temporarily
* map the allocated physical pages since the linear mapping does not exist yet.
*
* Note that this is called with the MMU disabled, hence the
* kernel_mapping_pa_to_va() translation of the helpers installed here,
* but they will be used as described above.
*/
static void __init pt_ops_set_fixmap(void)
{
pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
#ifndef __PAGETABLE_PMD_FOLDED
pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
#endif
}
/*
* MMU is enabled and page table setup is complete, so from now, we can use
* generic page allocation functions to setup page table.
*/
static void __init pt_ops_set_late(void)
{
pt_ops.alloc_pte = alloc_pte_late;
pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
pt_ops.alloc_pmd = alloc_pmd_late;
pt_ops.get_pmd_virt = get_pmd_virt_late;
pt_ops.alloc_pud = alloc_pud_late;
pt_ops.get_pud_virt = get_pud_virt_late;
pt_ops.alloc_p4d = alloc_p4d_late;
pt_ops.get_p4d_virt = get_p4d_virt_late;
#endif
}
#ifdef CONFIG_RANDOMIZE_BASE
extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
static int __init print_nokaslr(char *p)
{
pr_info("Disabled KASLR");
return 0;
}
early_param("nokaslr", print_nokaslr);
unsigned long kaslr_offset(void)
{
return kernel_map.virt_offset;
}
#endif
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
#ifdef CONFIG_RANDOMIZE_BASE
if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
u32 nr_pos;
/*
* Compute the number of positions available: we are limited
* by the early page table that only has one PUD and we must
* be aligned on PMD_SIZE.
*/
nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
}
#endif
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
phys_ram_base = CONFIG_PHYS_RAM_BASE;
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
set_satp_mode(dtb_pa);
#endif
/*
* In 64-bit, we defer the setup of va_pa_offset to setup_bootmem,
* where we have the system memory layout: this allows us to align
* the physical and virtual mappings and then make use of PUD/P4D/PGD
* for the linear mapping. This is only possible because the kernel
* mapping lies outside the linear mapping.
* In 32-bit however, as the kernel resides in the linear mapping,
* setup_vm_final can not change the mapping established here,
* otherwise the same kernel addresses would get mapped to different
* physical addresses (if the start of dram is different from the
* kernel physical address start).
*/
kernel_map.va_pa_offset = IS_ENABLED(CONFIG_64BIT) ?
0UL : PAGE_OFFSET - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
/*
* The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
* kernel, whereas for 64-bit kernel, the end of the virtual address
* space is occupied by the modules/BPF/kernel mappings which reduces
* the available size of the linear mapping.
*/
memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
#ifdef CONFIG_64BIT
/*
* The last 4K bytes of the addressable memory can not be mapped because
* of IS_ERR_VALUE macro.
*/
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
#ifdef CONFIG_RELOCATABLE
/*
* Early page table uses only one PUD, which makes it possible
* to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset
* makes the kernel cross over a PUD_SIZE boundary, raise a bug
* since a part of the kernel would not get mapped.
*/
BUG_ON(PUD_SIZE - (kernel_map.virt_addr & (PUD_SIZE - 1)) < kernel_map.size);
relocate_kernel();
#endif
apply_early_boot_alternatives();
pt_ops_set_early();
/* Setup early PGD for fixmap */
create_pgd_mapping(early_pg_dir, FIXADDR_START,
fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup fixmap P4D and PUD */
if (pgtable_l5_enabled)
create_p4d_mapping(fixmap_p4d, FIXADDR_START,
(uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
/* Setup fixmap PUD and PMD */
if (pgtable_l4_enabled)
create_pud_mapping(fixmap_pud, FIXADDR_START,
(uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
create_pmd_mapping(fixmap_pmd, FIXADDR_START,
(uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
/* Setup trampoline PGD and PMD */
create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
if (pgtable_l5_enabled)
create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
(uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
if (pgtable_l4_enabled)
create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
(uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
/* Setup trampoline PGD */
create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif
/*
* Setup early PGD covering entire kernel which will allow
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
create_kernel_page_table(early_pg_dir, true);
/* Setup early mapping for FDT early scan */
create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
/*
* The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
* boot-ioremap range cannot span multiple PMDs.
*/
BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#ifndef __PAGETABLE_PMD_FOLDED
/*
* The early ioremap fixmap is already created as it lies within the
* first 2MB of the fixmap region. We always map PMD_SIZE. Thus, both
* FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
* Verify that and warn the user if not.
*/
fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
WARN_ON(1);
pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
fix_to_virt(FIX_BTMAP_BEGIN));
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
fix_to_virt(FIX_BTMAP_END));
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
}
#endif
pt_ops_set_fixmap();
}
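/*
* Map [start, end) in the linear mapping, using the given fixed
* mapping size or, when it is zero, the best size each chunk allows.
*/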
static void __init create_linear_mapping_range(phys_addr_t start,
phys_addr_t end,
uintptr_t fixed_map_size)
{
phys_addr_t pa;
uintptr_t va, map_size;
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
map_size = fixed_map_size ? fixed_map_size :
best_map_size(pa, va, end - pa);
create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
pgprot_from_va(va));
}
}
static void __init create_linear_mapping_page_table(void)
{
phys_addr_t start, end;
phys_addr_t kfence_pool __maybe_unused;
u64 i;
#ifdef CONFIG_STRICT_KERNEL_RWX
phys_addr_t ktext_start = __pa_symbol(_start);
phys_addr_t ktext_size = __init_data_begin - _start;
phys_addr_t krodata_start = __pa_symbol(__start_rodata);
phys_addr_t krodata_size = _data - __start_rodata;
/* Isolate kernel text and rodata so they don't get mapped with a PUD */
memblock_mark_nomap(ktext_start, ktext_size);
memblock_mark_nomap(krodata_start, krodata_size);
#endif
#ifdef CONFIG_KFENCE
/*
* kfence pool must be backed by PAGE_SIZE mappings, so allocate it
* before we setup the linear mapping so that we avoid using hugepages
* for this region.
*/
kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
BUG_ON(!kfence_pool);
memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
__kfence_pool = __va(kfence_pool);
#endif
/* Map all memory banks in the linear mapping */
for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
create_linear_mapping_range(start, end, 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
create_linear_mapping_range(krodata_start,
krodata_start + krodata_size, 0);
memblock_clear_nomap(ktext_start, ktext_size);
memblock_clear_nomap(krodata_start, krodata_size);
#endif
#ifdef CONFIG_KFENCE
create_linear_mapping_range(kfence_pool,
kfence_pool + KFENCE_POOL_SIZE,
PAGE_SIZE);
memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
#endif
}
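/*
* setup_vm_final() switches from the early page table to
* swapper_pg_dir: it installs the fixmap, the full linear mapping and
* (on 64-bit) the kernel mapping, then writes SATP and moves pt_ops
* to the "late" allocators.
*/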
static void __init setup_vm_final(void)
{
/* Setup swapper PGD for fixmap */
#if !defined(CONFIG_64BIT)
/*
* In 32-bit, the device tree lies in a pgd entry, so it must be copied
* directly in swapper_pg_dir in addition to the pgd entry that points
* to fixmap_pte.
*/
unsigned long idx = pgd_index(__fix_to_virt(FIX_FDT));
set_pgd(&swapper_pg_dir[idx], early_pg_dir[idx]);
#endif
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
__pa_symbol(fixmap_pgd_next),
PGDIR_SIZE, PAGE_TABLE);
/* Map the linear mapping */
create_linear_mapping_page_table();
/* Map the kernel */
if (IS_ENABLED(CONFIG_64BIT))
create_kernel_page_table(swapper_pg_dir, false);
#ifdef CONFIG_KASAN
kasan_swapper_init();
#endif
/* Clear fixmap PTE and PMD mappings */
clear_fixmap(FIX_PTE);
clear_fixmap(FIX_PMD);
clear_fixmap(FIX_PUD);
clear_fixmap(FIX_P4D);
/* Move to swapper page table */
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
local_flush_tlb_all();
pt_ops_set_late();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
dtb_early_va = (void *)dtb_pa;
dtb_early_pa = dtb_pa;
}
static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */
/* Reserve 128M low memory by default for swiotlb buffer */
#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
unsigned long long low_base;
low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
if (!low_base) {
pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
return -ENOMEM;
}
pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
low_base, low_base + low_size, low_size >> 20);
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
return 0;
}
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
* This function reserves the memory area given by the "crashkernel="
* kernel command-line parameter. The reserved memory is used by the
* dump-capture kernel when the primary kernel crashes.
*/
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base = 0;
unsigned long long crash_size = 0;
unsigned long long crash_low_size = 0;
unsigned long search_start = memblock_start_of_DRAM();
unsigned long search_end = (unsigned long)dma32_phys_limit;
char *cmdline = boot_command_line;
bool fixed_base = false;
bool high = false;
int ret = 0;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
/*
* Don't reserve a region for a crash kernel on a crash kernel
* since it doesn't make much sense and we have limited memory
* resources.
*/
if (is_kdump_kernel()) {
pr_info("crashkernel: ignoring reservation request\n");
return;
}
ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
if (ret == -ENOENT) {
/* Fallback to crashkernel=X,[high,low] */
ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
if (ret || !crash_size)
return;
/*
* crashkernel=Y,low is valid only when crashkernel=X,high
* is passed.
*/
ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
if (ret == -ENOENT)
crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
else if (ret)
return;
search_start = (unsigned long)dma32_phys_limit;
search_end = memblock_end_of_DRAM();
high = true;
} else if (ret || !crash_size) {
/* Invalid argument value specified */
return;
}
crash_size = PAGE_ALIGN(crash_size);
if (crash_base) {
fixed_base = true;
search_start = crash_base;
search_end = crash_base + crash_size;
}
/*
* Current riscv boot protocol requires 2MB alignment for
* RV64 and 4MB alignment for RV32 (hugepage size)
*
* Try to allocate from 32-bit addressable physical memory so that
* swiotlb can work on the crash kernel.
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
search_start, search_end);
if (crash_base == 0) {
/*
* For crashkernel=size[KMG]@offset[KMG], print out failure
* message if can't reserve the specified region.
*/
if (fixed_base) {
pr_warn("crashkernel: allocating failed with given size@offset\n");
return;
}
if (high) {
/*
* For crashkernel=size[KMG],high, if the first attempt was
* for high memory, fall back to low memory.
*/
search_start = memblock_start_of_DRAM();
search_end = (unsigned long)dma32_phys_limit;
} else {
/*
* For crashkernel=size[KMG], if the first attempt was for
* low memory, fall back to high memory, the minimum required
* low memory will be reserved later.
*/
search_start = (unsigned long)dma32_phys_limit;
search_end = memblock_end_of_DRAM();
crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
}
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
search_start, search_end);
if (crash_base == 0) {
pr_warn("crashkernel: couldn't allocate %lldKB\n",
crash_size >> 10);
return;
}
}
if ((crash_base >= dma32_phys_limit) && crash_low_size &&
reserve_crashkernel_low(crash_low_size)) {
memblock_phys_free(crash_base, crash_size);
return;
}
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
void __init paging_init(void)
{
setup_bootmem();
setup_vm_final();
/* Depends on the linear mapping being ready */
memblock_allow_resize();
}
void __init misc_mem_init(void)
{
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
zone_sizes_init();
reserve_crashkernel();
memblock_dump_all();
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
/*
* Pre-allocates page-table pages for a specific area in the kernel
* page-table. Only the level which needs to be synchronized between
* all page-tables is allocated because the synchronization can be
* expensive.
*/
static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
const char *area)
{
unsigned long addr;
const char *lvl;
for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
lvl = "p4d";
p4d = p4d_alloc(&init_mm, pgd, addr);
if (!p4d)
goto failed;
if (pgtable_l5_enabled)
continue;
lvl = "pud";
pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
goto failed;
if (pgtable_l4_enabled)
continue;
lvl = "pmd";
pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
goto failed;
}
return;
failed:
/*
* The pages have to be there now or they will be missing in
* process page-tables later.
*/
panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
}
void __init pgtable_cache_init(void)
{
preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
if (IS_ENABLED(CONFIG_MODULES))
preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
}
#endif
| linux-master | arch/riscv/mm/init.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <[email protected]>
* Chen Liqin <[email protected]>
* Copyright (C) 2013 Regents of the University of California
*/
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/ptrace.h>
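/*
* Exception table entries store the fixup target as an offset
* relative to the entry itself, so compute the absolute address here.
*/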
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
return ((unsigned long)&ex->fixup + ex->fixup);
}
static bool ex_handler_fixup(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
regs->epc = get_ex_fixup(ex);
return true;
}
static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
unsigned long val)
{
if (unlikely(offset > MAX_REG_OFFSET))
return;
if (offset)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->epc = get_ex_fixup(ex);
return true;
}
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
ex = search_exception_tables(regs->epc);
if (!ex)
return false;
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
}
BUG();
}
| linux-master | arch/riscv/mm/extable.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/sections.h>
phys_addr_t __virt_to_phys(unsigned long x)
{
/*
* Boundary checking against the kernel linear mapping space.
*/
WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x, (void *)x);
return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
unsigned long kernel_start = kernel_map.virt_addr;
unsigned long kernel_end = kernel_start + kernel_map.size;
/*
* Boundary checking against the kernel image mapping.
* __pa_symbol should only be used on kernel symbol addresses.
*/
VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);
return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
phys_addr_t linear_mapping_va_to_pa(unsigned long x)
{
BUG_ON(!kernel_map.va_pa_offset);
return ((unsigned long)(x) - kernel_map.va_pa_offset);
}
EXPORT_SYMBOL(linear_mapping_va_to_pa);
void *linear_mapping_pa_to_va(unsigned long x)
{
BUG_ON(!kernel_map.va_pa_offset);
return ((void *)((unsigned long)(x) + kernel_map.va_pa_offset));
}
EXPORT_SYMBOL(linear_mapping_pa_to_va);
| linux-master | arch/riscv/mm/physaddr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 SiFive
*/
#include <linux/of.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
#include <asm/sbi.h>
static void ipi_remote_fence_i(void *info)
{
return local_flush_icache_all();
}
void flush_icache_all(void)
{
local_flush_icache_all();
if (IS_ENABLED(CONFIG_RISCV_SBI) && !riscv_use_ipi_for_rfence())
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);
/*
* Performs an icache flush for the given MM context. RISC-V has no direct
* mechanism for instruction cache shoot downs, so instead we send an IPI that
* informs the remote harts they need to flush their local instruction caches.
* To avoid pathologically slow behavior in a common case (a bunch of
* single-hart processes on a many-hart machine, ie 'make -j') we avoid the
* IPIs for harts that are not currently executing a MM context and instead
* schedule a deferred local instruction cache flush to be performed before
* execution resumes on each hart.
*/
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
cpumask_t others, *mask;
preempt_disable();
/* Mark every hart's icache as needing a flush for this MM. */
mask = &mm->context.icache_stale_mask;
cpumask_setall(mask);
/* Flush this hart's I$ now, and mark it as flushed. */
cpu = smp_processor_id();
cpumask_clear_cpu(cpu, mask);
local_flush_icache_all();
/*
* Flush the I$ of other harts concurrently executing, and mark them as
* flushed.
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
if (mm == current->active_mm && local) {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
* and scheduling this MM context on that hart. Sending an SBI
* remote message will do this, but in the case where no
* messages are sent we still need to order this hart's writes
* with flush_icache_deferred().
*/
smp_mb();
} else if (IS_ENABLED(CONFIG_RISCV_SBI) &&
!riscv_use_ipi_for_rfence()) {
sbi_remote_fence_i(&others);
} else {
on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
}
preempt_enable();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte)
{
struct folio *folio = page_folio(pte_page(pte));
if (!test_bit(PG_dcache_clean, &folio->flags)) {
flush_icache_all();
set_bit(PG_dcache_clean, &folio->flags);
}
}
#endif /* CONFIG_MMU */
unsigned int riscv_cbom_block_size;
EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
unsigned int riscv_cboz_block_size;
EXPORT_SYMBOL_GPL(riscv_cboz_block_size);
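/*
* Read the named CBO block-size property from a CPU node; record the
* first value seen and warn if another hart reports a different one.
*/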
static void __init cbo_get_block_size(struct device_node *node,
const char *name, u32 *block_size,
unsigned long *first_hartid)
{
unsigned long hartid;
u32 val;
if (riscv_of_processor_hartid(node, &hartid))
return;
if (of_property_read_u32(node, name, &val))
return;
if (!*block_size) {
*block_size = val;
*first_hartid = hartid;
} else if (*block_size != val) {
pr_warn("%s mismatched between harts %lu and %lu\n",
name, *first_hartid, hartid);
}
}
void __init riscv_init_cbo_blocksizes(void)
{
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
struct device_node *node;
for_each_of_cpu_node(node) {
/* set block-size for cbom and/or cboz extension if available */
cbo_get_block_size(node, "riscv,cbom-block-size",
&cbom_block_size, &cbom_hartid);
cbo_get_block_size(node, "riscv,cboz-block-size",
&cboz_block_size, &cboz_hartid);
}
if (cbom_block_size)
riscv_cbom_block_size = cbom_block_size;
if (cboz_block_size)
riscv_cboz_block_size = cboz_block_size;
}
| linux-master | arch/riscv/mm/cacheflush.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
void p4d_clear_huge(p4d_t *p4d)
{
}
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);
set_pud(pud, new_pud);
return 1;
}
int pud_clear_huge(pud_t *pud)
{
if (!pud_leaf(READ_ONCE(*pud)))
return 0;
pud_clear(pud);
return 1;
}
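/*
* Free the PMD table (and any PTE tables below it) that sat under this
* PUD entry, after clearing the entry and flushing the TLB range.
*/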
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
pmd_t *pmd = pud_pgtable(*pud);
int i;
pud_clear(pud);
flush_tlb_kernel_range(addr, addr + PUD_SIZE);
for (i = 0; i < PTRS_PER_PMD; i++) {
if (!pmd_none(pmd[i])) {
pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);
pte_free_kernel(NULL, pte);
}
}
pmd_free(NULL, pmd);
return 1;
}
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);
set_pmd(pmd, new_pmd);
return 1;
}
int pmd_clear_huge(pmd_t *pmd)
{
if (!pmd_leaf(READ_ONCE(*pmd)))
return 0;
pmd_clear(pmd);
return 1;
}
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
flush_tlb_kernel_range(addr, addr + PMD_SIZE);
pte_free_kernel(NULL, pte);
return 1;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a
* valid leaf PTE at the level 1 page table. Since the sfence.vma
* forms that specify an address only apply to leaf PTEs, we need a
* global flush here. collapse_huge_page() assumes these flushes are
* eager, so just do the fence here.
*/
flush_tlb_mm(vma->vm_mm);
return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
| linux-master | arch/riscv/mm/pgtable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_MMU
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
static unsigned long asid_bits;
static unsigned long num_asids;
unsigned long asid_mask;
static atomic_long_t current_version;
static DEFINE_RAW_SPINLOCK(context_lock);
static cpumask_t context_tlb_flush_pending;
static unsigned long *context_asid_map;
static DEFINE_PER_CPU(atomic_long_t, active_context);
static DEFINE_PER_CPU(unsigned long, reserved_context);
static bool check_update_reserved_context(unsigned long cntx,
unsigned long newcntx)
{
int cpu;
bool hit = false;
/*
* Iterate over the set of reserved CONTEXT looking for a match.
* If we find one, then we can update our mm to use new CONTEXT
* (i.e. the same CONTEXT in the current_version) but we can't
* exit the loop early, since we need to ensure that all copies
* of the old CONTEXT are updated to reflect the mm. Failure to do
* so could result in us missing the reserved CONTEXT in a future
* version.
*/
for_each_possible_cpu(cpu) {
if (per_cpu(reserved_context, cpu) == cntx) {
hit = true;
per_cpu(reserved_context, cpu) = newcntx;
}
}
return hit;
}
static void __flush_context(void)
{
int i;
unsigned long cntx;
/* Must be called with context_lock held */
lockdep_assert_held(&context_lock);
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_zero(context_asid_map, num_asids);
/* Mark already active ASIDs as used */
for_each_possible_cpu(i) {
cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
/*
* If this CPU has already been through a rollover, but
* hasn't run another task in the meantime, we must preserve
* its reserved CONTEXT, as this is the only trace we have of
* the process it is still running.
*/
if (cntx == 0)
cntx = per_cpu(reserved_context, i);
__set_bit(cntx & asid_mask, context_asid_map);
per_cpu(reserved_context, i) = cntx;
}
/* Mark ASID #0 as used because it is used at boot-time */
__set_bit(0, context_asid_map);
/* Queue a TLB invalidation for each CPU on next context-switch */
cpumask_setall(&context_tlb_flush_pending);
}
static unsigned long __new_context(struct mm_struct *mm)
{
static u32 cur_idx = 1;
unsigned long cntx = atomic_long_read(&mm->context.id);
unsigned long asid, ver = atomic_long_read(&current_version);
/* Must be called with context_lock held */
lockdep_assert_held(&context_lock);
if (cntx != 0) {
unsigned long newcntx = ver | (cntx & asid_mask);
/*
* If our current CONTEXT was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (check_update_reserved_context(cntx, newcntx))
return newcntx;
/*
* We had a valid CONTEXT in a previous life, so try to
* re-use it if possible.
*/
if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
return newcntx;
}
/*
* Allocate a free ASID. If we can't find one then increment
* current_version and flush all ASIDs.
*/
asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
if (asid != num_asids)
goto set_asid;
/* We're out of ASIDs, so increment current_version */
ver = atomic_long_add_return_relaxed(num_asids, &current_version);
/* Flush everything */
__flush_context();
/* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(context_asid_map, num_asids, 1);
set_asid:
__set_bit(asid, context_asid_map);
cur_idx = asid;
return asid | ver;
}
static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
{
unsigned long flags;
bool need_flush_tlb = false;
unsigned long cntx, old_active_cntx;
cntx = atomic_long_read(&mm->context.id);
/*
* If our active_context is non-zero and the context matches the
* current_version, then we update the active_context entry with a
* relaxed cmpxchg.
*
* Following is how we handle racing with a concurrent rollover:
*
* - We get a zero back from the cmpxchg and end up waiting on the
* lock. Taking the lock synchronises with the rollover and so
* we are forced to see the updated version.
*
* - We get a valid context back from the cmpxchg then we continue
* using old ASID because __flush_context() would have marked ASID
* of active_context as used and next context switch we will
* allocate new context.
*/
old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
if (old_active_cntx &&
((cntx & ~asid_mask) == atomic_long_read(¤t_version)) &&
atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
old_active_cntx, cntx))
goto switch_mm_fast;
raw_spin_lock_irqsave(&context_lock, flags);
/* Check that our ASID belongs to the current_version. */
cntx = atomic_long_read(&mm->context.id);
if ((cntx & ~asid_mask) != atomic_long_read(¤t_version)) {
cntx = __new_context(mm);
atomic_long_set(&mm->context.id, cntx);
}
if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
need_flush_tlb = true;
atomic_long_set(&per_cpu(active_context, cpu), cntx);
raw_spin_unlock_irqrestore(&context_lock, flags);
switch_mm_fast:
csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
((cntx & asid_mask) << SATP_ASID_SHIFT) |
satp_mode);
if (need_flush_tlb)
local_flush_tlb_all();
}
static void set_mm_noasid(struct mm_struct *mm)
{
/* Switch the page table and blindly nuke entire local TLB */
csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
local_flush_tlb_all();
}
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
{
/*
* The mm_cpumask indicates which harts' TLBs may contain virtual
* address mappings of the mm. Unlike the noasid case, the asid
* mechanism does not flush the TLB on every switch_mm (for
* performance), so it cannot guarantee that stale TLB entries are
* invalidated. When using asids, therefore, keep every CPU's
* footprint in the cpumask until the mm is torn down.
*/
cpumask_set_cpu(cpu, mm_cpumask(next));
if (static_branch_unlikely(&use_asid_allocator)) {
set_mm_asid(next, cpu);
} else {
cpumask_clear_cpu(cpu, mm_cpumask(prev));
set_mm_noasid(next);
}
}
static int __init asids_init(void)
{
unsigned long old;
/* Figure-out number of ASID bits in HW */
old = csr_read(CSR_SATP);
asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
csr_write(CSR_SATP, asid_bits);
asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
asid_bits = fls_long(asid_bits);
csr_write(CSR_SATP, old);
/*
* In the process of determining the number of ASID bits (above)
* we polluted the TLB of the current hart, so flush the TLB to
* remove the unwanted entries.
*/
local_flush_tlb_all();
/* Pre-compute ASID details */
if (asid_bits) {
num_asids = 1 << asid_bits;
asid_mask = num_asids - 1;
}
/*
* Use ASID allocator only if number of HW ASIDs are
* at-least twice more than CPUs
*/
if (num_asids > (2 * num_possible_cpus())) {
atomic_long_set(&current_version, num_asids);
context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
if (!context_asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
num_asids);
__set_bit(0, context_asid_map);
static_branch_enable(&use_asid_allocator);
pr_info("ASID allocator using %lu bits (%lu entries)\n",
asid_bits, num_asids);
} else {
pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
}
return 0;
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *prev,
struct mm_struct *next, unsigned int cpu)
{
/* Nothing to do here when there is no MMU */
}
#endif
/*
* When necessary, performs a deferred icache flush for the given MM context,
* on the local CPU. RISC-V has no direct mechanism for instruction cache
* shoot downs, so instead we send an IPI that informs the remote harts they
* need to flush their local instruction caches. To avoid pathologically slow
* behavior in a common case (a bunch of single-hart processes on a many-hart
* machine, ie 'make -j') we avoid the IPIs for harts that are not currently
* executing a MM context and instead schedule a deferred local instruction
* cache flush to be performed before execution resumes on each hart. This
* actually performs that local instruction cache flush, which implicitly only
* refers to the current hart.
*
* The "cpu" argument must be the current local CPU number.
*/
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
#ifdef CONFIG_SMP
cpumask_t *mask = &mm->context.icache_stale_mask;
if (cpumask_test_cpu(cpu, mask)) {
cpumask_clear_cpu(cpu, mask);
/*
* Ensure the remote hart's writes are visible to this hart.
* This pairs with a barrier in flush_icache_mm.
*/
smp_mb();
local_flush_icache_all();
}
#endif
}
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *task)
{
unsigned int cpu;
if (unlikely(prev == next))
return;
/*
* Mark the current MM context as inactive, and the next as
* active. This is at least used by the icache flushing
* routines in order to determine who should be flushed.
*/
cpu = smp_processor_id();
set_mm(prev, next, cpu);
flush_icache_deferred(next, cpu);
}
| linux-master | arch/riscv/mm/context.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
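/* Flush all TLB entries tagged with the given ASID on the local hart. */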
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma x0, %0"
:
: "r" (asid)
: "memory");
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
__asm__ __volatile__ ("sfence.vma %0, %1"
:
: "r" (addr), "r" (asid)
: "memory");
}
static inline void local_flush_tlb_range(unsigned long start,
unsigned long size, unsigned long stride)
{
if (size <= stride)
local_flush_tlb_page(start);
else
local_flush_tlb_all();
}
static inline void local_flush_tlb_range_asid(unsigned long start,
unsigned long size, unsigned long stride, unsigned long asid)
{
if (size <= stride)
local_flush_tlb_page_asid(start, asid);
else
local_flush_tlb_all_asid(asid);
}
static void __ipi_flush_tlb_all(void *info)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
if (riscv_use_ipi_for_rfence())
on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
else
sbi_remote_sfence_vma(NULL, 0, -1);
}
struct flush_tlb_range_data {
unsigned long asid;
unsigned long start;
unsigned long size;
unsigned long stride;
};
static void __ipi_flush_tlb_range_asid(void *info)
{
struct flush_tlb_range_data *d = info;
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
static void __ipi_flush_tlb_range(void *info)
{
struct flush_tlb_range_data *d = info;
local_flush_tlb_range(d->start, d->size, d->stride);
}
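/*
* Common range-flush helper: pick the ASID or non-ASID variant and,
* when other harts may hold stale entries, broadcast either via IPI or
* the SBI remote-fence extension; otherwise flush locally.
*/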
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct flush_tlb_range_data ftd;
struct cpumask *cmask = mm_cpumask(mm);
unsigned int cpuid;
bool broadcast;
if (cpumask_empty(cmask))
return;
cpuid = get_cpu();
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
if (static_branch_unlikely(&use_asid_allocator)) {
unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
if (broadcast) {
if (riscv_use_ipi_for_rfence()) {
ftd.asid = asid;
ftd.start = start;
ftd.size = size;
ftd.stride = stride;
on_each_cpu_mask(cmask,
__ipi_flush_tlb_range_asid,
&ftd, 1);
} else
sbi_remote_sfence_vma_asid(cmask,
start, size, asid);
} else {
local_flush_tlb_range_asid(start, size, stride, asid);
}
} else {
if (broadcast) {
if (riscv_use_ipi_for_rfence()) {
ftd.asid = 0;
ftd.start = start;
ftd.size = size;
ftd.stride = stride;
on_each_cpu_mask(cmask,
__ipi_flush_tlb_range,
&ftd, 1);
} else
sbi_remote_sfence_vma(cmask, start, size);
} else {
local_flush_tlb_range(start, size, stride);
}
}
put_cpu();
}
void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
__flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif
| linux-master | arch/riscv/mm/tlbflush.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Lennox Wu <[email protected]>
* Chen Liqin <[email protected]>
* Copyright (C) 2012 Regents of the University of California
*/
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/kfence.h>
#include <linux/entry-common.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>
#include "../kernel/head.h"
static void die_kernel_fault(const char *msg, unsigned long addr,
struct pt_regs *regs)
{
bust_spinlocks(1);
pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
addr);
bust_spinlocks(0);
die(regs, "Oops");
make_task_dead(SIGKILL);
}
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
const char *msg;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if (addr < PAGE_SIZE)
msg = "NULL pointer dereference";
else {
if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
return;
msg = "paging request";
}
die_kernel_fault(msg, addr, regs);
}
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
pagefault_out_of_memory();
return;
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
return;
}
BUG();
}
static inline void
bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
{
/*
* Something tried to access memory that isn't in our memory map.
* Fix it, but check if it's kernel or user first.
*/
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
return;
}
no_context(regs, addr);
}
static inline void
bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
unsigned long addr)
{
mmap_read_unlock(mm);
bad_area_nosemaphore(regs, code, addr);
}
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
pgd_t *pgd, *pgd_k;
pud_t *pud_k;
p4d_t *p4d_k;
pmd_t *pmd_k;
pte_t *pte_k;
int index;
unsigned long pfn;
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs))
return do_trap(regs, SIGSEGV, code, addr);
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk->active_mm->pgd" here.
* We might be inside an interrupt in the middle
* of a task switch.
*/
index = pgd_index(addr);
pfn = csr_read(CSR_SATP) & SATP_PPN;
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k)) {
no_context(regs, addr);
return;
}
set_pgd(pgd, *pgd_k);
p4d_k = p4d_offset(pgd_k, addr);
if (!p4d_present(*p4d_k)) {
no_context(regs, addr);
return;
}
pud_k = pud_offset(p4d_k, addr);
if (!pud_present(*pud_k)) {
no_context(regs, addr);
return;
}
if (pud_leaf(*pud_k))
goto flush_tlb;
/*
* Since the vmalloc area is global, it is unnecessary
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
if (!pmd_present(*pmd_k)) {
no_context(regs, addr);
return;
}
if (pmd_leaf(*pmd_k))
goto flush_tlb;
/*
* Make sure the actual PTE exists as well to
* catch kernel vmalloc-area accesses to non-mapped
* addresses. If we don't do this, this will just
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
if (!pte_present(*pte_k)) {
no_context(regs, addr);
return;
}
/*
* The kernel assumes that TLBs don't cache invalid
* entries, but in RISC-V, SFENCE.VMA specifies an
* ordering constraint, not a cache flush; it is
* necessary even after writing invalid entries.
*/
flush_tlb:
local_flush_tlb_page(addr);
}
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
switch (cause) {
case EXC_INST_PAGE_FAULT:
if (!(vma->vm_flags & VM_EXEC)) {
return true;
}
break;
case EXC_LOAD_PAGE_FAULT:
/* Write implies read */
if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
return true;
}
break;
case EXC_STORE_PAGE_FAULT:
if (!(vma->vm_flags & VM_WRITE)) {
return true;
}
break;
default:
panic("%s: unhandled cause %lu", __func__, cause);
}
return false;
}
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
*/
void handle_page_fault(struct pt_regs *regs)
{
struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long addr, cause;
unsigned int flags = FAULT_FLAG_DEFAULT;
int code = SEGV_MAPERR;
vm_fault_t fault;
cause = regs->cause;
addr = regs->badaddr;
tsk = current;
mm = tsk->mm;
if (kprobe_page_fault(regs, cause))
return;
/*
* Fault-in kernel-space virtual memory on-demand.
* The 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
vmalloc_fault(regs, code, addr);
return;
}
/* Enable interrupts if they were enabled in the parent context. */
if (!regs_irqs_disabled(regs))
local_irq_enable();
/*
* If we're in an interrupt, have no user context, or are running
* in an atomic region, then we must not take the fault.
*/
if (unlikely(faulthandler_disabled() || !mm)) {
tsk->thread.bad_cause = cause;
no_context(regs, addr);
return;
}
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
if (fixup_exception(regs))
return;
die_kernel_fault("access to user memory without uaccess routines", addr, regs);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (cause == EXC_STORE_PAGE_FAULT)
flags |= FAULT_FLAG_WRITE;
else if (cause == EXC_INST_PAGE_FAULT)
flags |= FAULT_FLAG_INSTRUCTION;
if (!(flags & FAULT_FLAG_USER))
goto lock_mmap;
vma = lock_vma_under_rcu(mm, addr);
if (!vma)
goto lock_mmap;
if (unlikely(access_error(cause, vma))) {
vma_end_read(vma);
goto lock_mmap;
}
fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
vma_end_read(vma);
if (!(fault & VM_FAULT_RETRY)) {
count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
no_context(regs, addr);
return;
}
lock_mmap:
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
tsk->thread.bad_cause = cause;
bad_area_nosemaphore(regs, code, addr);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it.
*/
code = SEGV_ACCERR;
if (unlikely(access_error(cause, vma))) {
tsk->thread.bad_cause = cause;
bad_area(regs, mm, code, addr);
return;
}
/*
* If for any reason at all we could not handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, addr, flags, regs);
/*
* If we need to retry but a fatal signal is pending, handle the
* signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
no_context(regs, addr);
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
/*
* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm);
done:
if (unlikely(fault & VM_FAULT_ERROR)) {
tsk->thread.bad_cause = cause;
mm_fault_error(regs, addr, fault);
return;
}
return;
}
| linux-master | arch/riscv/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 SiFive
*/
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ptdump.h>
#include <asm/ptdump.h>
#include <linux/pgtable.h>
#include <asm/kasan.h>
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
seq_printf(m, fmt, ##args); \
})
#define pt_dump_seq_puts(m, fmt) \
({ \
if (m) \
seq_printf(m, fmt); \
})
/*
* The page dumper groups page table entries of the same type into a single
* description. It uses pg_state to track the range information while
* iterating over the pte entries. When the continuity is broken it then
* dumps out a description of the range.
*/
struct pg_state {
struct ptdump_state ptdump;
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
unsigned long start_pa;
unsigned long last_pa;
int level;
u64 current_prot;
bool check_wx;
unsigned long wx_pages;
};
/* Address marker */
struct addr_marker {
unsigned long start_address;
const char *name;
};
/* Private information for debugfs */
struct ptd_mm_info {
struct mm_struct *mm;
const struct addr_marker *markers;
unsigned long base_addr;
unsigned long end;
};
enum address_markers_idx {
FIXMAP_START_NR,
FIXMAP_END_NR,
PCI_IO_START_NR,
PCI_IO_END_NR,
#ifdef CONFIG_SPARSEMEM_VMEMMAP
VMEMMAP_START_NR,
VMEMMAP_END_NR,
#endif
VMALLOC_START_NR,
VMALLOC_END_NR,
PAGE_OFFSET_NR,
#ifdef CONFIG_KASAN
KASAN_SHADOW_START_NR,
KASAN_SHADOW_END_NR,
#endif
#ifdef CONFIG_64BIT
MODULES_MAPPING_NR,
KERNEL_MAPPING_NR,
#endif
END_OF_SPACE_NR
};
static struct addr_marker address_markers[] = {
{0, "Fixmap start"},
{0, "Fixmap end"},
{0, "PCI I/O start"},
{0, "PCI I/O end"},
#ifdef CONFIG_SPARSEMEM_VMEMMAP
{0, "vmemmap start"},
{0, "vmemmap end"},
#endif
{0, "vmalloc() area"},
{0, "vmalloc() end"},
{0, "Linear mapping"},
#ifdef CONFIG_KASAN
{0, "Kasan shadow start"},
{0, "Kasan shadow end"},
#endif
#ifdef CONFIG_64BIT
{0, "Modules/BPF mapping"},
{0, "Kernel mapping"},
#endif
{-1, NULL},
};
static struct ptd_mm_info kernel_ptd_info = {
.mm = &init_mm,
.markers = address_markers,
.base_addr = 0,
.end = ULONG_MAX,
};
#ifdef CONFIG_EFI
static struct addr_marker efi_addr_markers[] = {
{ 0, "UEFI runtime start" },
{ SZ_1G, "UEFI runtime end" },
{ -1, NULL }
};
static struct ptd_mm_info efi_ptd_info = {
.mm = &efi_mm,
.markers = efi_addr_markers,
.base_addr = 0,
.end = SZ_2G,
};
#endif
/* Page Table Entry */
struct prot_bits {
u64 mask;
u64 val;
const char *set;
const char *clear;
};
static const struct prot_bits pte_bits[] = {
{
.mask = _PAGE_SOFT,
.val = _PAGE_SOFT,
.set = "RSW",
.clear = " ",
}, {
.mask = _PAGE_DIRTY,
.val = _PAGE_DIRTY,
.set = "D",
.clear = ".",
}, {
.mask = _PAGE_ACCESSED,
.val = _PAGE_ACCESSED,
.set = "A",
.clear = ".",
}, {
.mask = _PAGE_GLOBAL,
.val = _PAGE_GLOBAL,
.set = "G",
.clear = ".",
}, {
.mask = _PAGE_USER,
.val = _PAGE_USER,
.set = "U",
.clear = ".",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
.set = "X",
.clear = ".",
}, {
.mask = _PAGE_WRITE,
.val = _PAGE_WRITE,
.set = "W",
.clear = ".",
}, {
.mask = _PAGE_READ,
.val = _PAGE_READ,
.set = "R",
.clear = ".",
}, {
.mask = _PAGE_PRESENT,
.val = _PAGE_PRESENT,
.set = "V",
.clear = ".",
}
};
/* Page Level */
struct pg_level {
const char *name;
u64 mask;
};
static struct pg_level pg_level[] = {
{ /* pgd */
.name = "PGD",
}, { /* p4d */
.name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
}, { /* pud */
.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
}, { /* pmd */
.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
}, { /* pte */
.name = "PTE",
},
};
static void dump_prot(struct pg_state *st)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
const char *s;
if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
s = pte_bits[i].set;
else
s = pte_bits[i].clear;
if (s)
pt_dump_seq_printf(st->seq, " %s", s);
}
}
#ifdef CONFIG_64BIT
#define ADDR_FORMAT "0x%016lx"
#else
#define ADDR_FORMAT "0x%08lx"
#endif
static void dump_addr(struct pg_state *st, unsigned long addr)
{
static const char units[] = "KMGTPE";
const char *unit = units;
unsigned long delta;
pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT " ",
st->start_address, addr);
pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
delta = (addr - st->start_address) >> 10;
while (!(delta & 1023) && unit[1]) {
delta >>= 10;
unit++;
}
pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
pg_level[st->level].name);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
if (!st->check_wx)
return;
if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
(_PAGE_WRITE | _PAGE_EXEC))
return;
WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
(void *)st->start_address, (void *)st->start_address);
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
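/*
 * ptdump callback: a new output range starts whenever the protection
 * bits, page-table level or address marker change; the range that just
 * ended is printed (and checked for W+X) at that point.
 */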
static void note_page(struct ptdump_state *pt_st, unsigned long addr,
int level, u64 val)
{
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
u64 prot = 0;
if (level >= 0)
prot = val & pg_level[level].mask;
if (st->level == -1) {
st->level = level;
st->current_prot = prot;
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot ||
level != st->level || addr >= st->marker[1].start_address) {
if (st->current_prot) {
note_prot_wx(st, addr);
dump_addr(st, addr);
dump_prot(st);
pt_dump_seq_puts(st->seq, "\n");
}
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
st->marker->name);
}
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
st->current_prot = prot;
st->level = level;
} else {
st->last_pa = pa;
}
}
static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
{
struct pg_state st = {
.seq = s,
.marker = pinfo->markers,
.level = -1,
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]) {
{pinfo->base_addr, pinfo->end},
{0, 0}
}
}
};
ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
}
void ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
.marker = (struct addr_marker[]) {
{0, NULL},
{-1, NULL},
},
.level = -1,
.check_wx = true,
.ptdump = {
.note_page = note_page,
.range = (struct ptdump_range[]) {
{KERN_VIRT_START, ULONG_MAX},
{0, 0}
}
}
};
ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
if (st.wx_pages)
pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
st.wx_pages);
else
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
static int ptdump_show(struct seq_file *m, void *v)
{
ptdump_walk(m, m->private);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
static int __init ptdump_init(void)
{
unsigned int i, j;
address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
#endif
address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
#ifdef CONFIG_KASAN
address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#ifdef CONFIG_64BIT
address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;
#endif
kernel_ptd_info.base_addr = KERN_VIRT_START;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask;
debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
&ptdump_fops);
#ifdef CONFIG_EFI
if (efi_enabled(EFI_RUNTIME_SERVICES))
debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
&ptdump_fops);
#endif
return 0;
}
device_initcall(ptdump_init);
| linux-master | arch/riscv/mm/ptdump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Linaro Limited
* Author: AKASHI Takahiro <[email protected]>
* Copyright (C) 2017 Andes Technology Corporation
*/
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#ifdef CONFIG_DYNAMIC_FTRACE
void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
{
mutex_lock(&text_mutex);
/*
* The code sequences we use for ftrace can't be patched while the
* kernel is running, so we need to use stop_machine() to modify them
* for now. This doesn't play nice with text_mutex, we use this flag
* to elide the check.
*/
riscv_patch_in_stop_machine = true;
}
void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
{
riscv_patch_in_stop_machine = false;
mutex_unlock(&text_mutex);
}
static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
unsigned int replaced[2];
unsigned int nops[2] = {NOP4, NOP4};
/* we expect nops at the hook position */
if (!expected)
expected = nops;
/*
* Read the text we want to modify;
* return must be -EFAULT on read error
*/
if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
MCOUNT_INSN_SIZE))
return -EFAULT;
/*
* Make sure it is what we expect it to be;
* return must be -EINVAL on failed comparison
*/
if (memcmp(expected, replaced, sizeof(replaced))) {
pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
(void *)hook_pos, expected[0], expected[1], replaced[0],
replaced[1]);
return -EINVAL;
}
return 0;
}
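/*
 * Assemble the auipc/jalr pair that calls @target (clobbering t0 or ra,
 * as selected by @ra) and patch it in at @hook_pos, or restore the two
 * NOPs when @enable is false.
 */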
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
bool enable, bool ra)
{
unsigned int call[2];
unsigned int nops[2] = {NOP4, NOP4};
if (ra)
make_call_ra(hook_pos, target, call);
else
make_call_t0(hook_pos, target, call);
/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
if (patch_text_nosync
((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned int call[2];
make_call_t0(rec->ip, addr, call);
if (patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
unsigned int nops[2] = {NOP4, NOP4};
if (patch_text_nosync((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
/*
* This is called early on, and isn't wrapped by
* ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
* text_mutex, which triggers a lockdep failure. SMP isn't running so we could
* just directly poke the text, but it's simpler to just take the lock
* ourselves.
*/
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
int out;
mutex_lock(&text_mutex);
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
mutex_unlock(&text_mutex);
return out;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
(unsigned long)func, true, true);
if (!ret) {
ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
(unsigned long)func, true, true);
}
return ret;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned int call[2];
unsigned long caller = rec->ip;
int ret;
make_call_t0(caller, old_addr, call);
ret = ftrace_check_current_call(caller, call);
if (ret)
return ret;
return __ftrace_modify_call(caller, addr, true, false);
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Most of this function is copied from arm64.
*/
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
/*
* We don't suffer access faults, so no extra fault-recovery assembly
* is needed here.
*/
old = *parent;
if (!function_graph_enter(old, self_addr, frame_pointer, parent))
*parent = return_hooker;
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_regs_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
int ret;
ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, true, true);
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
(unsigned long)&prepare_ftrace_return, true, true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
int ret;
ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, false, true);
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call,
(unsigned long)&prepare_ftrace_return, false, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
| linux-master | arch/riscv/kernel/ftrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copied from arch/arm64/kernel/cpufeature.c
*
* Copyright (C) 2015 ARM Ltd.
* Copyright (C) 2017 SiFive
*/
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/vector.h>
#include "copy-unaligned.h"
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];
/* Performance information */
DEFINE_PER_CPU(long, misaligned_access_speed);
/**
* riscv_isa_extension_base() - Get base extension word
*
* @isa_bitmap: ISA bitmap to use
* Return: base extension word as unsigned long value
*
* NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
*/
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
{
if (!isa_bitmap)
return riscv_isa[0];
return isa_bitmap[0];
}
EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
/**
* __riscv_isa_extension_available() - Check whether given extension
* is available or not
*
* @isa_bitmap: ISA bitmap to use
* @bit: bit position of the desired extension
* Return: true or false
*
* NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
*/
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
{
const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
if (bit >= RISCV_ISA_EXT_MAX)
return false;
return test_bit(bit, bmap) ? true : false;
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
static bool riscv_isa_extension_check(int id)
{
switch (id) {
case RISCV_ISA_EXT_ZICBOM:
if (!riscv_cbom_block_size) {
pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
return false;
} else if (!is_power_of_2(riscv_cbom_block_size)) {
pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
return false;
}
return true;
case RISCV_ISA_EXT_ZICBOZ:
if (!riscv_cboz_block_size) {
pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
return false;
} else if (!is_power_of_2(riscv_cboz_block_size)) {
pr_err("cboz-block-size present, but is not a power-of-2\n");
return false;
}
return true;
}
return true;
}
#define __RISCV_ISA_EXT_DATA(_name, _id) { \
.name = #_name, \
.property = #_name, \
.id = _id, \
}
/*
* The canonical order of ISA extension names in the ISA string is defined in
* chapter 27 of the unprivileged specification.
*
* Ordinarily, for in-kernel data structures, this order is unimportant but
* riscv_isa_ext defines the order of the ISA string in /proc/cpuinfo.
*
* The specification uses vague wording, such as should, when it comes to
* ordering, so for our purposes the following rules apply:
*
* 1. All multi-letter extensions must be separated from other extensions by an
* underscore.
*
* 2. Additional standard extensions (starting with 'Z') must be sorted after
* single-letter extensions and before any higher-privileged extensions.
*
* 3. The first letter following the 'Z' conventionally indicates the most
* closely related alphabetical extension category, IMAFDQLCBKJTPVH.
* If multiple 'Z' extensions are named, they must be ordered first by
* category, then alphabetically within a category.
*
* 4. Standard supervisor-level extensions (starting with 'S') must be listed
* after standard unprivileged extensions. If multiple supervisor-level
* extensions are listed, they must be ordered alphabetically.
*
* 5. Standard machine-level extensions (starting with 'Zxm') must be listed
* after any lower-privileged, standard extensions. If multiple
* machine-level extensions are listed, they must be ordered
* alphabetically.
*
* 6. Non-standard extensions (starting with 'X') must be listed after all
* standard extensions. If multiple non-standard extensions are listed, they
* must be ordered alphabetically.
*
* An example string following the order is:
* rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
*
* New entries to this struct should follow the ordering rules described above.
*/
const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
__RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
__RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
__RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
__RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
};
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
unsigned long *isa2hwcap, const char *isa)
{
/*
* For all possible cpus, we have already validated in
* the boot process that they at least contain "rv" and
* whichever of "32"/"64" this kernel supports, and so this
* section can be skipped.
*/
isa += 4;
while (*isa) {
const char *ext = isa++;
const char *ext_end = isa;
bool ext_long = false, ext_err = false;
switch (*ext) {
case 's':
/*
* Workaround for invalid single-letter 's' & 'u' (QEMU).
* No need to set the bit in riscv_isa as 's' & 'u' are
* not valid ISA extensions. It works until a multi-letter
* extension starting with "Su" appears.
*/
if (ext[-1] != '_' && ext[1] == 'u') {
++isa;
ext_err = true;
break;
}
fallthrough;
case 'S':
case 'x':
case 'X':
case 'z':
case 'Z':
/*
* Before attempting to parse the extension itself, we find its end.
* As multi-letter extensions must be split from other multi-letter
* extensions with an "_", the end of a multi-letter extension will
* either be the null character or the "_" at the start of the next
* multi-letter extension.
*
* Next, as the extensions version is currently ignored, we
* eliminate that portion. This is done by parsing backwards from
* the end of the extension, removing any numbers. This may be a
* major or minor number however, so the process is repeated if a
* minor number was found.
*
* ext_end is intended to represent the first character *after* the
* name portion of an extension, but will be decremented to the last
* character itself while eliminating the extensions version number.
* A simple re-increment solves this problem.
*/
ext_long = true;
for (; *isa && *isa != '_'; ++isa)
if (unlikely(!isalnum(*isa)))
ext_err = true;
ext_end = isa;
if (unlikely(ext_err))
break;
if (!isdigit(ext_end[-1]))
break;
while (isdigit(*--ext_end))
;
if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
++ext_end;
break;
}
while (isdigit(*--ext_end))
;
++ext_end;
break;
default:
/*
* Things are a little easier for single-letter extensions, as they
* are parsed forwards.
*
* After checking that our starting position is valid, we need to
* ensure that, when isa was incremented at the start of the loop,
* that it arrived at the start of the next extension.
*
* If we are already on a non-digit, there is nothing to do. Either
* we have a multi-letter extension's _, or the start of an
* extension.
*
* Otherwise we have found the current extension's major version
* number. Parse past it, and a subsequent p/minor version number
* if present. The `p` extension must not appear immediately after
* a number, so there is no fear of missing it.
*
*/
if (unlikely(!isalpha(*ext))) {
ext_err = true;
break;
}
if (!isdigit(*isa))
break;
while (isdigit(*++isa))
;
if (tolower(*isa) != 'p')
break;
if (!isdigit(*++isa)) {
--isa;
break;
}
while (isdigit(*++isa))
;
break;
}
/*
* The parser expects that at the start of an iteration isa points to the
* first character of the next extension. As we stop parsing an extension
* on meeting a non-alphanumeric character, an extra increment is needed
* where the succeeding extension is a multi-letter prefixed with an "_".
*/
if (*isa == '_')
++isa;
#define SET_ISA_EXT_MAP(name, bit) \
do { \
if ((ext_end - ext == strlen(name)) && \
!strncasecmp(ext, name, strlen(name)) && \
riscv_isa_extension_check(bit)) \
set_bit(bit, isainfo->isa); \
} while (false) \
if (unlikely(ext_err))
continue;
if (!ext_long) {
int nr = tolower(*ext) - 'a';
if (riscv_isa_extension_check(nr)) {
*this_hwcap |= isa2hwcap[nr];
set_bit(nr, isainfo->isa);
}
} else {
for (int i = 0; i < riscv_isa_ext_count; i++)
SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
riscv_isa_ext[i].id);
}
#undef SET_ISA_EXT_MAP
}
}
static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
struct device_node *node;
const char *isa;
int rc;
struct acpi_table_header *rhct;
acpi_status status;
unsigned int cpu;
if (!acpi_disabled) {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
if (ACPI_FAILURE(status))
return;
}
for_each_possible_cpu(cpu) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
unsigned long this_hwcap = 0;
if (acpi_disabled) {
node = of_cpu_device_node_get(cpu);
if (!node) {
pr_warn("Unable to find cpu node\n");
continue;
}
rc = of_property_read_string(node, "riscv,isa", &isa);
of_node_put(node);
if (rc) {
pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
continue;
}
} else {
rc = acpi_get_riscv_isa(rhct, cpu, &isa);
if (rc < 0) {
pr_warn("Unable to get ISA for the hart - %d\n", cpu);
continue;
}
}
riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);
/*
* These extensions were part of the base ISA when the port &
* dt-bindings were upstreamed, and so can be set unconditionally
* wherever `i` appears in riscv,isa on DT systems.
*/
if (acpi_disabled) {
set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
}
/*
* All "okay" hart should have same isa. Set HWCAP based on
* common capabilities of every "okay" hart, in case they don't
* have.
*/
if (elf_hwcap)
elf_hwcap &= this_hwcap;
else
elf_hwcap = this_hwcap;
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
else
bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
}
if (!acpi_disabled && rhct)
acpi_put_table((struct acpi_table_header *)rhct);
}
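/*
 * Variant of the above that builds the ISA bitmaps from the newer
 * "riscv,isa-extensions" devicetree property instead of parsing the
 * legacy "riscv,isa" string.
 */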
static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
unsigned int cpu;
for_each_possible_cpu(cpu) {
unsigned long this_hwcap = 0;
struct device_node *cpu_node;
struct riscv_isainfo *isainfo = &hart_isa[cpu];
cpu_node = of_cpu_device_node_get(cpu);
if (!cpu_node) {
pr_warn("Unable to find cpu node\n");
continue;
}
if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
of_node_put(cpu_node);
continue;
}
for (int i = 0; i < riscv_isa_ext_count; i++) {
if (of_property_match_string(cpu_node, "riscv,isa-extensions",
riscv_isa_ext[i].property) < 0)
continue;
if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
continue;
/* Only single letter extensions get set in hwcap */
if (strnlen(riscv_isa_ext[i].name, 2) == 1)
this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
set_bit(riscv_isa_ext[i].id, isainfo->isa);
}
of_node_put(cpu_node);
/*
* All "okay" harts should have same isa. Set HWCAP based on
* common capabilities of every "okay" hart, in case they don't.
*/
if (elf_hwcap)
elf_hwcap &= this_hwcap;
else
elf_hwcap = this_hwcap;
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
else
bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
}
if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
return -ENOENT;
return 0;
}
#ifdef CONFIG_RISCV_ISA_FALLBACK
bool __initdata riscv_isa_fallback = true;
#else
bool __initdata riscv_isa_fallback;
static int __init riscv_isa_fallback_setup(char *__unused)
{
riscv_isa_fallback = true;
return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
#endif
void __init riscv_fill_hwcap(void)
{
char print_str[NUM_ALPHA_EXTS + 1];
unsigned long isa2hwcap[26] = {0};
int i, j;
isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
if (!acpi_disabled) {
riscv_fill_hwcap_from_isa_string(isa2hwcap);
} else {
int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
if (ret && riscv_isa_fallback) {
pr_info("Falling back to deprecated \"riscv,isa\"\n");
riscv_fill_hwcap_from_isa_string(isa2hwcap);
}
}
/*
* We don't support systems with F but without D, so mask those out
* here.
*/
if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
pr_info("This kernel does not support systems with F but not D\n");
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
}
if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
riscv_v_setup_vsize();
/*
* ISA string in device tree might have 'v' flag, but
* CONFIG_RISCV_ISA_V is disabled in kernel.
* Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
*/
if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
}
memset(print_str, 0, sizeof(print_str));
for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
if (riscv_isa[0] & BIT_MASK(i))
print_str[j++] = (char)('a' + i);
pr_info("riscv: base ISA extensions %s\n", print_str);
memset(print_str, 0, sizeof(print_str));
for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
if (elf_hwcap & BIT_MASK(i))
print_str[j++] = (char)('a' + i);
pr_info("riscv: ELF capabilities %s\n", print_str);
}
unsigned long riscv_get_elf_hwcap(void)
{
unsigned long hwcap;
hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
if (!riscv_v_vstate_ctrl_user_allowed())
hwcap &= ~COMPAT_HWCAP_ISA_V;
return hwcap;
}
void check_unaligned_access(int cpu)
{
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
struct page *page;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
if (!page) {
pr_warn("Can't alloc pages to measure memcpy performance");
return;
}
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
/* Unalign src as well, but differently (off by 1 + 2 = 3). */
src = dst + (MISALIGNED_BUFFER_SIZE / 2);
src += 2;
word_cycles = -1ULL;
/* Do a warmup. */
__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
preempt_disable();
start_jiffies = jiffies;
while ((now = jiffies) == start_jiffies)
cpu_relax();
/*
* For a fixed amount of time, repeatedly try the function, and take
* the best time in cycles as the measurement.
*/
while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
start_cycles = get_cycles64();
/* Ensure the CSR read can't reorder WRT to the copy. */
mb();
__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
/* Ensure the copy ends before the end time is snapped. */
mb();
end_cycles = get_cycles64();
if ((end_cycles - start_cycles) < word_cycles)
word_cycles = end_cycles - start_cycles;
}
byte_cycles = -1ULL;
__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
start_jiffies = jiffies;
while ((now = jiffies) == start_jiffies)
cpu_relax();
while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
start_cycles = get_cycles64();
mb();
__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
mb();
end_cycles = get_cycles64();
if ((end_cycles - start_cycles) < byte_cycles)
byte_cycles = end_cycles - start_cycles;
}
preempt_enable();
/* Don't divide by zero. */
if (!word_cycles || !byte_cycles) {
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);
goto out;
}
if (word_cycles < byte_cycles)
speed = RISCV_HWPROBE_MISALIGNED_FAST;
ratio = div_u64((byte_cycles * 100), word_cycles);
pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
cpu,
ratio / 100,
ratio % 100,
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
out:
__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
}
static int check_unaligned_access_boot_cpu(void)
{
check_unaligned_access(0);
return 0;
}
arch_initcall(check_unaligned_access_boot_cpu);
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
* Alternative patch sites consider 48 bits when determining when to patch
* the old instruction sequence with the new. These bits are broken into a
* 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
* patch site is for an erratum, identified by the 32-bit patch ID. When
* the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
* further break down patch ID into two 16-bit numbers. The lower 16 bits
* are the cpufeature ID and the upper 16 bits are used for a value specific
* to the cpufeature and patch site. If the upper 16 bits are zero, then it
* implies no specific value is specified. cpufeatures that want to control
* patching on a per-site basis will provide non-zero values and implement
* checks here. The checks return true when patching should be done, and
* false otherwise.
*/
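/*
 * Illustrative example: a Zicboz patch site carrying the value 6 is only
 * patched when riscv_cboz_block_size <= (1 << 6), i.e. when the detected
 * block size is at most 64 bytes.
 */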
static bool riscv_cpufeature_patch_check(u16 id, u16 value)
{
if (!value)
return true;
switch (id) {
case RISCV_ISA_EXT_ZICBOZ:
/*
* Zicboz alternative applications provide the maximum
* supported block size order, or zero when it doesn't
* matter. If the current block size exceeds the maximum,
* then the alternative cannot be applied.
*/
return riscv_cboz_block_size <= (1U << value);
}
return false;
}
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
struct alt_entry *end,
unsigned int stage)
{
struct alt_entry *alt;
void *oldptr, *altptr;
u16 id, value;
if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
return;
for (alt = begin; alt < end; alt++) {
if (alt->vendor_id != 0)
continue;
id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);
if (id >= RISCV_ISA_EXT_MAX) {
WARN(1, "This extension id:%d is not in ISA extension list", id);
continue;
}
if (!__riscv_isa_extension_available(NULL, id))
continue;
value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
if (!riscv_cpufeature_patch_check(id, value))
continue;
oldptr = ALT_OLD_PTR(alt);
altptr = ALT_ALT_PTR(alt);
mutex_lock(&text_mutex);
patch_text_nosync(oldptr, altptr, alt->alt_len);
riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
mutex_unlock(&text_mutex);
}
}
#endif
| linux-master | arch/riscv/kernel/cpufeature.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
#define INSN_MATCH_LH 0x1003
#define INSN_MASK_LH 0x707f
#define INSN_MATCH_LW 0x2003
#define INSN_MASK_LW 0x707f
#define INSN_MATCH_LD 0x3003
#define INSN_MASK_LD 0x707f
#define INSN_MATCH_LBU 0x4003
#define INSN_MASK_LBU 0x707f
#define INSN_MATCH_LHU 0x5003
#define INSN_MASK_LHU 0x707f
#define INSN_MATCH_LWU 0x6003
#define INSN_MASK_LWU 0x707f
#define INSN_MATCH_SB 0x23
#define INSN_MASK_SB 0x707f
#define INSN_MATCH_SH 0x1023
#define INSN_MASK_SH 0x707f
#define INSN_MATCH_SW 0x2023
#define INSN_MASK_SW 0x707f
#define INSN_MATCH_SD 0x3023
#define INSN_MASK_SD 0x707f
#define INSN_MATCH_FLW 0x2007
#define INSN_MASK_FLW 0x707f
#define INSN_MATCH_FLD 0x3007
#define INSN_MASK_FLD 0x707f
#define INSN_MATCH_FLQ 0x4007
#define INSN_MASK_FLQ 0x707f
#define INSN_MATCH_FSW 0x2027
#define INSN_MASK_FSW 0x707f
#define INSN_MATCH_FSD 0x3027
#define INSN_MASK_FSD 0x707f
#define INSN_MATCH_FSQ 0x4027
#define INSN_MASK_FSQ 0x707f
#define INSN_MATCH_C_LD 0x6000
#define INSN_MASK_C_LD 0xe003
#define INSN_MATCH_C_SD 0xe000
#define INSN_MASK_C_SD 0xe003
#define INSN_MATCH_C_LW 0x4000
#define INSN_MASK_C_LW 0xe003
#define INSN_MATCH_C_SW 0xc000
#define INSN_MASK_C_SW 0xe003
#define INSN_MATCH_C_LDSP 0x6002
#define INSN_MASK_C_LDSP 0xe003
#define INSN_MATCH_C_SDSP 0xe002
#define INSN_MASK_C_SDSP 0xe003
#define INSN_MATCH_C_LWSP 0x4002
#define INSN_MASK_C_LWSP 0xe003
#define INSN_MATCH_C_SWSP 0xc002
#define INSN_MASK_C_SWSP 0xe003
#define INSN_MATCH_C_FLD 0x2000
#define INSN_MASK_C_FLD 0xe003
#define INSN_MATCH_C_FLW 0x6000
#define INSN_MASK_C_FLW 0xe003
#define INSN_MATCH_C_FSD 0xa000
#define INSN_MASK_C_FSD 0xe003
#define INSN_MATCH_C_FSW 0xe000
#define INSN_MASK_C_FSW 0xe003
#define INSN_MATCH_C_FLDSP 0x2002
#define INSN_MASK_C_FLDSP 0xe003
#define INSN_MATCH_C_FSDSP 0xa002
#define INSN_MASK_C_FSDSP 0xe003
#define INSN_MATCH_C_FLWSP 0x6002
#define INSN_MASK_C_FLWSP 0xe003
#define INSN_MATCH_C_FSWSP 0xe002
#define INSN_MASK_C_FSWSP 0xe003
#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
#if defined(CONFIG_64BIT)
#define LOG_REGBYTES 3
#define XLEN 64
#else
#define LOG_REGBYTES 2
#define XLEN 32
#endif
#define REGBYTES (1 << LOG_REGBYTES)
#define XLEN_MINUS_16 ((XLEN) - 16)
#define SH_RD 7
#define SH_RS1 15
#define SH_RS2 20
#define SH_RS2C 2
#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
(RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
(RV_X(x, 12, 1) << 5) | \
(RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
(RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
(RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
#define SHIFT_RIGHT(x, y) \
((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
#define REG_MASK \
((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
#define REG_OFFSET(insn, pos) \
(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
#define REG_PTR(insn, pos, regs) \
(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
#define GET_RM(insn) (((insn) >> 12) & 7)
#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs) (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn) ((s32)(insn) >> 20)
#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
(s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3 0x7000
#define GET_PRECISION(insn) (((insn) >> 25) & 3)
#define GET_RM(insn) (((insn) >> 12) & 7)
#define PRECISION_S 0
#define PRECISION_D 1
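/*
 * Generate small load_<type>()/store_<type>() helpers; the misaligned
 * access itself is emulated one byte at a time via load_u8()/store_u8().
 */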
#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
static inline type load_##type(const type *addr) \
{ \
type val; \
asm (#insn " %0, %1" \
: "=&r" (val) : "m" (*addr)); \
return val; \
}
#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
static inline void store_##type(type *addr, type val) \
{ \
asm volatile (#insn " %0, %1\n" \
: : "r" (val), "m" (*addr)); \
}
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
#if defined(CONFIG_64BIT)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
#else
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
static inline u64 load_u64(const u64 *addr)
{
return load_u32((u32 *)addr)
+ ((u64)load_u32((u32 *)addr + 1) << 32);
}
static inline void store_u64(u64 *addr, u64 val)
{
store_u32((u32 *)addr, val);
store_u32((u32 *)addr + 1, val >> 32);
}
#endif
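/*
 * Fetch the (possibly compressed) instruction at @mepc. Halfword loads
 * are used when the address is only 16-bit aligned, so the fetch itself
 * never performs a misaligned access.
 */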
static inline ulong get_insn(ulong mepc)
{
register ulong __mepc asm ("a2") = mepc;
ulong val, rvc_mask = 3, tmp;
asm ("and %[tmp], %[addr], 2\n"
"bnez %[tmp], 1f\n"
#if defined(CONFIG_64BIT)
__stringify(LWU) " %[insn], (%[addr])\n"
#else
__stringify(LW) " %[insn], (%[addr])\n"
#endif
"and %[tmp], %[insn], %[rvc_mask]\n"
"beq %[tmp], %[rvc_mask], 2f\n"
"sll %[insn], %[insn], %[xlen_minus_16]\n"
"srl %[insn], %[insn], %[xlen_minus_16]\n"
"j 2f\n"
"1:\n"
"lhu %[insn], (%[addr])\n"
"and %[tmp], %[insn], %[rvc_mask]\n"
"bne %[tmp], %[rvc_mask], 2f\n"
"lhu %[tmp], 2(%[addr])\n"
"sll %[tmp], %[tmp], 16\n"
"add %[insn], %[insn], %[tmp]\n"
"2:"
: [insn] "=&r" (val), [tmp] "=&r" (tmp)
: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
[xlen_minus_16] "i" (XLEN_MINUS_16));
return val;
}
union reg_data {
u8 data_bytes[8];
ulong data_ulong;
u64 data_u64;
};
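/*
 * Emulate a misaligned load: decode the trapping instruction, assemble
 * the value one byte at a time, write it to the destination register and
 * advance epc past the instruction.
 */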
int handle_misaligned_load(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
unsigned long insn = get_insn(epc);
unsigned long addr = csr_read(mtval);
int i, fp = 0, shift = 0, len = 0;
regs->epc = 0;
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
len = 4;
shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
len = 8;
shift = 8 * (sizeof(unsigned long) - len);
} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
len = 4;
#endif
} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
fp = 1;
len = 8;
} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
fp = 1;
len = 4;
} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
len = 2;
shift = 8 * (sizeof(unsigned long) - len);
} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
len = 2;
#if defined(CONFIG_64BIT)
} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
len = 8;
shift = 8 * (sizeof(unsigned long) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
shift = 8 * (sizeof(unsigned long) - len);
#endif
} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
len = 4;
shift = 8 * (sizeof(unsigned long) - len);
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
shift = 8 * (sizeof(unsigned long) - len);
} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
fp = 1;
len = 8;
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
fp = 1;
len = 8;
#if defined(CONFIG_32BIT)
} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
fp = 1;
len = 4;
insn = RVC_RS2S(insn) << SH_RD;
} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
fp = 1;
len = 4;
#endif
} else {
regs->epc = epc;
return -1;
}
val.data_u64 = 0;
for (i = 0; i < len; i++)
val.data_bytes[i] = load_u8((void *)(addr + i));
if (fp)
return -1;
SET_RD(insn, regs, val.data_ulong << shift >> shift);
regs->epc = epc + INSN_LEN(insn);
return 0;
}
int handle_misaligned_store(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
unsigned long insn = get_insn(epc);
unsigned long addr = csr_read(mtval);
int i, len = 0;
regs->epc = 0;
val.data_ulong = GET_RS2(insn, regs);
if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
len = 4;
#if defined(CONFIG_64BIT)
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#if defined(CONFIG_64BIT)
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
val.data_ulong = GET_RS2S(insn, regs);
} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
((insn >> SH_RD) & 0x1f)) {
len = 8;
val.data_ulong = GET_RS2C(insn, regs);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
val.data_ulong = GET_RS2S(insn, regs);
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
((insn >> SH_RD) & 0x1f)) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
} else {
regs->epc = epc;
return -1;
}
for (i = 0; i < len; i++)
store_u8((void *)(addr + i), val.data_bytes[i]);
regs->epc = epc + INSN_LEN(insn);
return 0;
}
| linux-master | arch/riscv/kernel/traps_misaligned.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <[email protected]>
* Lennox Wu <[email protected]>
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/stacktrace.h>
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
register unsigned long gp_in_global __asm__("gp");
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
extern asmlinkage void ret_from_fork(void);
void arch_cpu_idle(void)
{
cpu_do_idle();
}
void __show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
if (!user_mode(regs)) {
pr_cont("epc : %pS\n", (void *)regs->epc);
pr_cont(" ra : %pS\n", (void *)regs->ra);
}
pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
regs->epc, regs->ra, regs->sp);
pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
regs->gp, regs->tp, regs->t0);
pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
regs->t1, regs->t2, regs->s0);
pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
regs->s1, regs->a0, regs->a1);
pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
regs->a2, regs->a3, regs->a4);
pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
regs->a5, regs->a6, regs->a7);
pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
regs->s2, regs->s3, regs->s4);
pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
regs->s5, regs->s6, regs->s7);
pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
regs->s8, regs->s9, regs->s10);
pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
regs->s11, regs->t3, regs->t4);
pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
regs->t5, regs->t6);
pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
regs->status, regs->badaddr, regs->cause);
}
void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
if (!user_mode(regs))
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;
bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
return compat_mode_supported &&
hdr->e_machine == EM_RISCV &&
hdr->e_ident[EI_CLASS] == ELFCLASS32;
}
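/*
 * Probe for 32-bit compat support by attempting to set UXL=32 in the
 * status CSR and checking whether the field sticks.
 */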
static int __init compat_mode_detect(void)
{
unsigned long tmp = csr_read(CSR_STATUS);
csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
compat_mode_supported =
(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
csr_write(CSR_STATUS, tmp);
pr_info("riscv: ELF compat mode %s",
compat_mode_supported ? "supported" : "unsupported");
return 0;
}
early_initcall(compat_mode_detect);
#endif
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
regs->status = SR_PIE;
if (has_fpu()) {
regs->status |= SR_FS_INITIAL;
/*
* Restore the initial value to the FP register
* before starting the user program.
*/
fstate_restore(current, regs);
}
regs->epc = pc;
regs->sp = sp;
#ifdef CONFIG_64BIT
regs->status &= ~SR_UXL;
if (is_compat_task())
regs->status |= SR_UXL_32;
else
regs->status |= SR_UXL_64;
#endif
}
void flush_thread(void)
{
#ifdef CONFIG_FPU
/*
* Reset FPU state and context
* frm: round to nearest, ties to even (IEEE default)
* fflags: accrued exceptions cleared
*/
fstate_off(current, task_pt_regs(current));
memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
/* Reset vector state */
riscv_v_vstate_ctrl_init(current);
riscv_v_vstate_off(task_pt_regs(current));
kfree(current->thread.vstate.datap);
memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
#endif
}
void arch_release_task_struct(struct task_struct *tsk)
{
/* Free the vector context of datap. */
if (has_vector())
kfree(tsk->thread.vstate.datap);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
fstate_save(src, task_pt_regs(src));
*dst = *src;
/* clear entire V context, including datap for a new task */
memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
return 0;
}
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *childregs = task_pt_regs(p);
memset(&p->thread.s, 0, sizeof(p->thread.s));
/* p->thread holds context to be restored by __switch_to() */
if (unlikely(args->fn)) {
/* Kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp_in_global;
/* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;
p->thread.s[0] = (unsigned long)args->fn;
p->thread.s[1] = (unsigned long)args->fn_arg;
} else {
*childregs = *(current_pt_regs());
/* Turn off status.VS */
riscv_v_vstate_off(childregs);
if (usp) /* User fork */
childregs->sp = usp;
if (clone_flags & CLONE_SETTLS)
childregs->tp = tls;
childregs->a0 = 0; /* Return value of fork() */
p->thread.s[0] = 0;
}
p->thread.ra = (unsigned long)ret_from_fork;
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
}
| linux-master | arch/riscv/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SMP initialisation and IPI support
* Based on arch/arm64/kernel/smp.c
*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
#include <asm/cpufeature.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <uapi/asm/hwcap.h>
#include <asm/vector.h>
#include "head.h"
static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
int ret;
unsigned int curr_cpuid;
init_cpu_topology();
curr_cpuid = smp_processor_id();
store_cpu_topology(curr_cpuid);
numa_store_cpu_info(curr_cpuid);
numa_add_cpu(curr_cpuid);
/* This covers non-smp usecase mandated by "nosmp" option */
if (max_cpus == 0)
return;
for_each_possible_cpu(cpuid) {
if (cpuid == curr_cpuid)
continue;
if (cpu_ops[cpuid]->cpu_prepare) {
ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
if (ret)
continue;
}
set_cpu_present(cpuid, true);
numa_store_cpu_info(cpuid);
}
}
#ifdef CONFIG_ACPI
static unsigned int cpu_count = 1;
static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
{
unsigned long hart;
static bool found_boot_cpu;
struct acpi_madt_rintc *processor = (struct acpi_madt_rintc *)header;
/*
* Each RINTC structure in MADT will have a flag. If ACPI_MADT_ENABLED
* bit in the flag is not enabled, it means OS should not try to enable
* the cpu to which RINTC belongs.
*/
if (!(processor->flags & ACPI_MADT_ENABLED))
return 0;
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(&header->common);
hart = processor->hart_id;
if (hart == INVALID_HARTID) {
pr_warn("Invalid hartid\n");
return 0;
}
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = true;
early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
return 0;
}
if (cpu_count >= NR_CPUS) {
pr_warn("NR_CPUS is too small for the number of ACPI tables.\n");
return 0;
}
cpuid_to_hartid_map(cpu_count) = hart;
early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
cpu_count++;
return 0;
}
static void __init acpi_parse_and_init_cpus(void)
{
int cpuid;
cpu_set_ops(0);
acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
cpu_set_ops(cpuid);
set_cpu_possible(cpuid, true);
}
}
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
#endif
static void __init of_parse_and_init_cpus(void)
{
struct device_node *dn;
unsigned long hart;
bool found_boot_cpu = false;
int cpuid = 1;
int rc;
cpu_set_ops(0);
for_each_of_cpu_node(dn) {
rc = riscv_early_of_processor_hartid(dn, &hart);
if (rc < 0)
continue;
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = true;
early_map_cpu_to_node(0, of_node_to_nid(dn));
continue;
}
if (cpuid >= NR_CPUS) {
pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
cpuid, hart);
continue;
}
cpuid_to_hartid_map(cpuid) = hart;
early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
cpuid++;
}
BUG_ON(!found_boot_cpu);
if (cpuid > nr_cpu_ids)
pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
cpuid, nr_cpu_ids);
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
cpu_set_ops(cpuid);
set_cpu_possible(cpuid, true);
}
}
}
void __init setup_smp(void)
{
if (acpi_disabled)
of_parse_and_init_cpus();
else
acpi_parse_and_init_cpus();
}
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
if (cpu_ops[cpu]->cpu_start)
return cpu_ops[cpu]->cpu_start(cpu, tidle);
return -EOPNOTSUPP;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int ret = 0;
tidle->thread_info.cpu = cpu;
ret = start_secondary_cpu(cpu, tidle);
if (!ret) {
wait_for_completion_timeout(&cpu_running,
msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
ret = -EIO;
}
} else {
pr_crit("CPU%u: failed to start\n", cpu);
}
return ret;
}
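/*
* Bring-up handshake: cpu_ops[cpu]->cpu_start() kicks the secondary hart,
* which eventually enters smp_callin() below, marks itself online and
* signals the cpu_running completion that __cpu_up() above waits on with
* a one-second timeout.
*/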
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
* C entry point for a secondary processor.
*/
asmlinkage __visible void smp_callin(void)
{
struct mm_struct *mm = &init_mm;
unsigned int curr_cpuid = smp_processor_id();
/* All kernel threads share the same mm context. */
mmgrab(mm);
current->active_mm = mm;
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
riscv_ipi_enable();
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
}
/*
* Remote TLB flushes are ignored while the CPU is offline, so emit
* a local TLB flush right now just in case.
*/
local_flush_tlb_all();
complete(&cpu_running);
/*
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
*/
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
| linux-master | arch/riscv/kernel/smpboot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 SiFive
*/
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
struct patch_insn {
void *addr;
u32 *insns;
int ninsns;
atomic_t cpu_count;
};
int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU
/*
* fix_to_virt(idx) needs a compile-time constant index (not a dynamic
* value held in a register), otherwise BUILD_BUG_ON fails with
* "idx >= __end_of_fixed_addresses".
* So use '__always_inline' and 'const unsigned int fixmap' here.
*/
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
if (core_kernel_text(uintaddr))
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else
return addr;
BUG_ON(!page);
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
}
static void patch_unmap(int fixmap)
{
clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
/*
* Only two pages can be mapped at a time for writing.
*/
if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
return -EINVAL;
/*
* The caller is expected to hold text_mutex before reaching here, so
* no additional locking is needed and the patch is safe across cores.
*/
lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
memset(waddr, c, len);
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
void *waddr = addr;
bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
int ret;
/*
* Only two pages can be mapped at a time for writing.
*/
if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
return -EINVAL;
/*
* The caller is expected to hold text_mutex before reaching here, so
* no additional locking is needed and the patch is safe across cores.
*
* We're currently using stop_machine() for ftrace & kprobes, and while
* that ensures text_mutex is held before installing the mappings it
* does not ensure text_mutex is held by the calling thread. That's
* safe but triggers a lockdep failure, so just elide it for that
* specific case.
*/
if (!riscv_patch_in_stop_machine)
lockdep_assert_held(&text_mutex);
if (across_pages)
patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = copy_to_kernel_nofault(waddr, insn, len);
patch_unmap(FIX_TEXT_POKE0);
if (across_pages)
patch_unmap(FIX_TEXT_POKE1);
return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
memset(addr, c, len);
return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */
static int patch_insn_set(void *addr, u8 c, size_t len)
{
size_t patched = 0;
size_t size;
int ret = 0;
/*
* __patch_insn_set() can only work on 2 pages at a time so call it in a
* loop with len <= 2 * PAGE_SIZE.
*/
while (patched < len && !ret) {
size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
ret = __patch_insn_set(addr + patched, c, size);
patched += size;
}
return ret;
}
NOKPROBE_SYMBOL(patch_insn_set);
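/*
* Worked example (illustrative, assuming 4 KiB pages): for an addr at
* page offset 0x800 and len = 3 * PAGE_SIZE, the first pass patches
* 2 * PAGE_SIZE - 0x800 = 0x1800 bytes; the second pass starts page
* aligned and patches the remaining 0x1800 bytes, so the whole range is
* covered in two calls to __patch_insn_set().
*/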
int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
u32 *tp = addr;
int ret;
ret = patch_insn_set(tp, c, len);
if (!ret)
flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);
return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
size_t patched = 0;
size_t size;
int ret = 0;
/*
* Copy the instructions to the destination address, two pages at a time
* because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
*/
while (patched < len && !ret) {
size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
ret = __patch_insn_write(addr + patched, insn + patched, size);
patched += size;
}
return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
u32 *tp = addr;
int ret;
ret = patch_insn_write(tp, insns, len);
if (!ret)
flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);
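/*
* Illustrative sketch (not part of the original file): a caller that
* already holds text_mutex could overwrite one 32-bit instruction with
* a NOP (0x00000013, "addi x0, x0, 0") without the cross-CPU
* stop_machine() synchronisation that patch_text() provides. The
* address below is a placeholder.
*
*	u32 nop = 0x00000013;
*
*	mutex_lock(&text_mutex);
*	ret = patch_text_nosync(some_text_addr, &nop, sizeof(nop));
*	mutex_unlock(&text_mutex);
*/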
static int patch_text_cb(void *data)
{
struct patch_insn *patch = data;
unsigned long len;
int i, ret = 0;
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < patch->ninsns; i++) {
len = GET_INSN_LENGTH(patch->insns[i]);
ret = patch_text_nosync(patch->addr + i * len,
&patch->insns[i], len);
}
atomic_inc(&patch->cpu_count);
} else {
while (atomic_read(&patch->cpu_count) <= num_online_cpus())
cpu_relax();
smp_mb();
}
return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);
int patch_text(void *addr, u32 *insns, int ninsns)
{
int ret;
struct patch_insn patch = {
.addr = addr,
.insns = insns,
.ninsns = ninsns,
.cpu_count = ATOMIC_INIT(0),
};
/*
* kprobes takes text_mutex before calling patch_text(), but since we
* then call stop_machine(), the lockdep assertion in patch_insn_write()
* gets confused by the context in which the lock is taken.
* Instead, ensure the lock is held before calling stop_machine(), and
* set riscv_patch_in_stop_machine to skip the check in
* patch_insn_write().
*/
lockdep_assert_held(&text_mutex);
riscv_patch_in_stop_machine = true;
ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
riscv_patch_in_stop_machine = false;
return ret;
}
NOKPROBE_SYMBOL(patch_text);
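/*
* Illustrative sketch (not part of the original file): a kprobes-style
* caller might plant a 32-bit EBREAK (0x00100073) through the
* stop_machine() path above. "probe_addr" is a placeholder; the real
* kprobes code derives the address from the probed symbol.
*
*	u32 brk = 0x00100073;
*
*	mutex_lock(&text_mutex);
*	ret = patch_text(probe_addr, &brk, 1);
*	mutex_unlock(&text_mutex);
*/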
| linux-master | arch/riscv/kernel/patch.c |