// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
*/
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/elf.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/io.h>
#include <linux/resume_user_mode.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
struct fdpic_func_descriptor {
unsigned long text;
unsigned long GOT;
};
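/*
 * Under FDPIC personalities, sa_handler does not point at code but at
 * one of these descriptors; setup_frame()/setup_rt_frame() below load
 * ->text into the PC and ->GOT into r12 when delivering a signal.
 */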
/*
* The following define adds a 64 byte gap between the signal
* stack frame and previous contents of the stack. This allows
* frame unwinding in a function epilogue but only if a frame
* pointer is used in the function. This is necessary because
* current gcc compilers (<4.3) do not generate unwind info on
* SH for function epilogues.
*/
#define UNWINDGUARD 64
/*
* Do a signal return; undo the signal stack.
*/
#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
#if defined(CONFIG_CPU_SH2)
#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */
#else
#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */
#endif
#define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */
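/*
 * A minimal sketch (illustration only, not compiled into the kernel):
 * how the trampoline written into retcode[] below decodes. MOVW(7)
 * assembles to 0x9305, i.e. "mov.w @(5,PC),r3"; on SH the effective
 * address is PC + 4 + disp*2, which from retcode[0] is byte offset 14,
 * i.e. retcode[7], where the syscall number is stored. TRAP_NOARG then
 * traps with the number in r3, and the OR_R0_R0 padding works around
 * the hardware bug noted above.
 */
#if 0
static void trampoline_layout_example(u16 *retcode, u16 nr)
{
	retcode[0] = MOVW(7);	 /* mov.w @(5,PC),r3: fetches retcode[7] */
	retcode[1] = TRAP_NOARG; /* trapa: issue the syscall, NR in r3   */
	/* retcode[2]..retcode[6] hold OR_R0_R0 padding */
	retcode[7] = nr;	 /* e.g. __NR_sigreturn (data, not code) */
}
#endif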
struct sigframe
{
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
u16 retcode[8];
};
struct rt_sigframe
{
struct siginfo info;
struct ucontext uc;
u16 retcode[8];
};
#ifdef CONFIG_SH_FPU
static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
struct task_struct *tsk = current;
if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
set_used_math();
return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
sizeof(long)*(16*2+2));
}
static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
struct pt_regs *regs)
{
struct task_struct *tsk = current;
if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
if (!used_math())
return __put_user(0, &sc->sc_ownedfp);
if (__put_user(1, &sc->sc_ownedfp))
return -EFAULT;
/* This will cause a "finit" to be triggered by the next
attempted FPU operation by the 'current' process.
*/
clear_used_math();
unlazy_fpu(tsk, regs);
return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
sizeof(long)*(16*2+2));
}
#endif /* CONFIG_SH_FPU */
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
{
unsigned int err = 0;
unsigned int sr = regs->sr & ~SR_USER_MASK;
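/* Only the user-visible SR bits may be restored from the (user
 * supplied) sigcontext; the privileged bits saved here are merged
 * back in after the COPY()s below. */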
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[1]);
COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]);
COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]);
COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]);
COPY(regs[14]); COPY(regs[15]);
COPY(gbr); COPY(mach);
COPY(macl); COPY(pr);
COPY(sr); COPY(pc);
#undef COPY
regs->sr = (regs->sr & SR_USER_MASK) | sr;
#ifdef CONFIG_SH_FPU
if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
struct task_struct *tsk = current;
regs->sr |= SR_FD; /* Release FPU */
clear_fpu(tsk, regs);
clear_used_math();
err |= __get_user (owned_fp, &sc->sc_ownedfp);
if (owned_fp)
err |= restore_sigcontext_fpu(sc);
}
#endif
regs->tra = -1; /* disable syscall checks */
err |= __get_user(*r0_p, &sc->sc_regs[0]);
return err;
}
asmlinkage int sys_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15];
sigset_t set;
int r0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &r0))
goto badframe;
return r0;
badframe:
force_sig(SIGSEGV);
return 0;
}
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
sigset_t set;
int r0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return r0;
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]);
COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]);
COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]);
COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]);
COPY(regs[14]); COPY(regs[15]);
COPY(gbr); COPY(mach);
COPY(macl); COPY(pr);
COPY(sr); COPY(pc);
#undef COPY
#ifdef CONFIG_SH_FPU
err |= save_sigcontext_fpu(sc, regs);
#endif
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
return err;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul);
}
/* These symbols are defined with the addresses in the vsyscall page.
See vsyscall-trapa.S. */
extern void __kernel_sigreturn(void);
extern void __kernel_rt_sigreturn(void);
static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
if (_NSIG_WORDS > 1)
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ksig->ka.sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
} else if (likely(current->mm->context.vdso)) {
regs->pr = VDSO_SYM(&__kernel_sigreturn);
#endif
} else {
/* Generate return code (system call to sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
err |= __put_user(OR_R0_R0, &frame->retcode[2]);
err |= __put_user(OR_R0_R0, &frame->retcode[3]);
err |= __put_user(OR_R0_R0, &frame->retcode[4]);
err |= __put_user(OR_R0_R0, &frame->retcode[5]);
err |= __put_user(OR_R0_R0, &frame->retcode[6]);
err |= __put_user((__NR_sigreturn), &frame->retcode[7]);
regs->pr = (unsigned long) frame->retcode;
flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
}
if (err)
return -EFAULT;
/* Set up registers for signal handler */
regs->regs[15] = (unsigned long) frame;
regs->regs[4] = sig; /* Arg for signal handler */
regs->regs[5] = 0;
regs->regs[6] = (unsigned long) &frame->sc;
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler;
err |= __get_user(regs->pc, &funcptr->text);
err |= __get_user(regs->regs[12], &funcptr->GOT);
} else
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
if (err)
return -EFAULT;
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->regs[15]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ksig->ka.sa.sa_restorer;
#ifdef CONFIG_VSYSCALL
} else if (likely(current->mm->context.vdso)) {
regs->pr = VDSO_SYM(&__kernel_rt_sigreturn);
#endif
} else {
/* Generate return code (system call to rt_sigreturn) */
err |= __put_user(MOVW(7), &frame->retcode[0]);
err |= __put_user(TRAP_NOARG, &frame->retcode[1]);
err |= __put_user(OR_R0_R0, &frame->retcode[2]);
err |= __put_user(OR_R0_R0, &frame->retcode[3]);
err |= __put_user(OR_R0_R0, &frame->retcode[4]);
err |= __put_user(OR_R0_R0, &frame->retcode[5]);
err |= __put_user(OR_R0_R0, &frame->retcode[6]);
err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
regs->pr = (unsigned long) frame->retcode;
flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
}
if (err)
return -EFAULT;
/* Set up registers for signal handler */
regs->regs[15] = (unsigned long) frame;
regs->regs[4] = sig; /* Arg for signal handler */
regs->regs[5] = (unsigned long) &frame->info;
regs->regs[6] = (unsigned long) &frame->uc;
if (current->personality & FDPIC_FUNCPTRS) {
struct fdpic_func_descriptor __user *funcptr =
(struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler;
err |= __get_user(regs->pc, &funcptr->text);
err |= __get_user(regs->regs[12], &funcptr->GOT);
} else
regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
if (err)
return -EFAULT;
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
return 0;
}
static inline void
handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
struct sigaction *sa)
{
/* If we're not from a syscall, bail out */
if (regs->tra < 0)
return;
/* check for system call restart.. */
switch (regs->regs[0]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
no_system_call_restart:
regs->regs[0] = -EINTR;
break;
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
break;
}
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs, unsigned int save_r0)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
static void do_signal(struct pt_regs *regs, unsigned int save_r0)
{
struct ksignal ksig;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_syscall_restart(save_r0, regs, &ksig.ka.sa);
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs, save_r0);
return;
}
/* Did we come from a system call? */
if (regs->tra >= 0) {
/* Restart the system call - no handlers present */
if (regs->regs[0] == -ERESTARTNOHAND ||
regs->regs[0] == -ERESTARTSYS ||
regs->regs[0] == -ERESTARTNOINTR) {
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
} else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) {
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
regs->regs[3] = __NR_restart_syscall;
}
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back.
*/
restore_saved_sigmask();
}
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
unsigned long thread_info_flags)
{
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs, save_r0);
if (thread_info_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
}
| linux-master | arch/sh/kernel/signal_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* C interface for trapping into the standard LinuxSH BIOS.
*
* Copyright (C) 2000 Greg Banks, Mitch Davis
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002 M. R. Brown
* Copyright (C) 2004 - 2010 Paul Mundt
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <asm/sh_bios.h>
#define BIOS_CALL_CONSOLE_WRITE 0
#define BIOS_CALL_ETH_NODE_ADDR 10
#define BIOS_CALL_SHUTDOWN 11
#define BIOS_CALL_GDB_DETACH 0xff
void *gdb_vbr_vector = NULL;
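/* Non-NULL once sh_bios_vbr_init() has located the BIOS-installed VBR;
 * it doubles as the "LinuxSH BIOS present" check in sh_bios_call(). */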
static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
long arg3)
{
register long r0 __asm__("r0") = func;
register long r4 __asm__("r4") = arg0;
register long r5 __asm__("r5") = arg1;
register long r6 __asm__("r6") = arg2;
register long r7 __asm__("r7") = arg3;
if (!gdb_vbr_vector)
return -ENOSYS;
__asm__ __volatile__("trapa #0x3f":"=z"(r0)
:"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
:"memory");
return r0;
}
void sh_bios_console_write(const char *buf, unsigned int len)
{
sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
}
void sh_bios_gdb_detach(void)
{
sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(sh_bios_gdb_detach);
void sh_bios_get_node_addr(unsigned char *node_addr)
{
sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0);
}
EXPORT_SYMBOL_GPL(sh_bios_get_node_addr);
void sh_bios_shutdown(unsigned int how)
{
sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
}
/*
* Read the old value of the VBR register to initialise the vector
* through which debug and BIOS traps are delegated by the Linux trap
* handler.
*/
void sh_bios_vbr_init(void)
{
unsigned long vbr;
if (unlikely(gdb_vbr_vector))
return;
__asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
if (vbr) {
gdb_vbr_vector = (void *)(vbr + 0x100);
printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
gdb_vbr_vector);
} else
printk(KERN_NOTICE "SH-BIOS not detected\n");
}
/**
* sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
*
* This can be used by save/restore code to reinitialize the system VBR
* from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
*/
void sh_bios_vbr_reload(void)
{
if (gdb_vbr_vector)
__asm__ __volatile__ (
"ldc %0, vbr"
:
: "r" (((unsigned long) gdb_vbr_vector) - 0x100)
: "memory"
);
}
#ifdef CONFIG_EARLY_PRINTK
/*
* Print a string through the BIOS
*/
static void sh_console_write(struct console *co, const char *s,
unsigned count)
{
sh_bios_console_write(s, count);
}
/*
* Setup initial baud/bits/parity. We do two things here:
* - construct a cflag setting for the first rs_open()
* - initialize the serial port
* Return non-zero if we didn't find a serial port.
*/
static int __init sh_console_setup(struct console *co, char *options)
{
int cflag = CREAD | HUPCL | CLOCAL;
/*
* Now construct a cflag setting.
* TODO: this is a totally bogus cflag, as we have
* no idea what serial settings the BIOS is using, or
* even if it's using the serial port at all.
*/
cflag |= B115200 | CS8 | /*no parity*/0;
co->cflag = cflag;
return 0;
}
static struct console bios_console = {
.name = "bios",
.write = sh_console_write,
.setup = sh_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
static int __init setup_early_printk(char *buf)
{
int keep_early = 0;
if (!buf)
return 0;
if (strstr(buf, "keep"))
keep_early = 1;
if (!strncmp(buf, "bios", 4))
early_console = &bios_console;
if (likely(early_console)) {
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
}
return 0;
}
early_param("earlyprintk", setup_early_printk);
#endif
| linux-master | arch/sh/kernel/sh_bios.c |
// SPDX-License-Identifier: GPL-2.0
/*
* swsusp.c - SuperH hibernation support
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <asm/suspend.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fpu.h>
struct swsusp_arch_regs swsusp_arch_regs_cpu0;
int pfn_is_nosave(unsigned long pfn)
{
unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
return (pfn >= begin_pfn) && (pfn < end_pfn);
}
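/*
 * Worked example (hypothetical addresses, illustration only): with
 * __nosave_begin at physical 0x0c100234 and __nosave_end at 0x0c101010
 * on 4 KiB pages, the nosave range covers pfns 0x0c100 and 0x0c101.
 */
#if 0
static void nosave_pfn_example(void)
{
	unsigned long b = 0x0c100234UL >> PAGE_SHIFT;		  /* 0x0c100 */
	unsigned long e = PAGE_ALIGN(0x0c101010UL) >> PAGE_SHIFT; /* 0x0c102 */
	/* pfn_is_nosave() returns true for b <= pfn < e */
}
#endif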
void save_processor_state(void)
{
init_fpu(current);
}
void restore_processor_state(void)
{
local_flush_tlb_all();
}
| linux-master | arch/sh/kernel/swsusp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
#ifdef CONFIG_STACKPROTECTOR
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
unlazy_fpu(src, task_pt_regs(src));
*dst = *src;
if (src->thread.xstate) {
dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
GFP_KERNEL);
if (!dst->thread.xstate)
return -ENOMEM;
memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
}
return 0;
}
void free_thread_xstate(struct task_struct *tsk)
{
if (tsk->thread.xstate) {
kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
tsk->thread.xstate = NULL;
}
}
void arch_release_task_struct(struct task_struct *tsk)
{
free_thread_xstate(tsk);
}
void arch_task_cache_init(void)
{
if (!xstate_size)
return;
task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
SLAB_PANIC, NULL);
}
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
void init_thread_xstate(void)
{
if (boot_cpu_data.flags & CPU_HAS_FPU)
xstate_size = sizeof(struct sh_fpu_hard_struct);
else if (HAVE_SOFTFP)
xstate_size = sizeof(struct sh_fpu_soft_struct);
else
xstate_size = 0;
}
| linux-master | arch/sh/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Disassemble SuperH instructions.
*
* Copyright (C) 1999 kaz Kojima
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
/*
* Format of an instruction in memory.
*/
typedef enum {
HEX_0, HEX_1, HEX_2, HEX_3, HEX_4, HEX_5, HEX_6, HEX_7,
HEX_8, HEX_9, HEX_A, HEX_B, HEX_C, HEX_D, HEX_E, HEX_F,
REG_N, REG_M, REG_NM, REG_B,
BRANCH_12, BRANCH_8,
DISP_8, DISP_4,
IMM_4, IMM_4BY2, IMM_4BY4, PCRELIMM_8BY2, PCRELIMM_8BY4,
IMM_8, IMM_8BY2, IMM_8BY4,
} sh_nibble_type;
typedef enum {
A_END, A_BDISP12, A_BDISP8,
A_DEC_M, A_DEC_N,
A_DISP_GBR, A_DISP_PC, A_DISP_REG_M, A_DISP_REG_N,
A_GBR,
A_IMM,
A_INC_M, A_INC_N,
A_IND_M, A_IND_N, A_IND_R0_REG_M, A_IND_R0_REG_N,
A_MACH, A_MACL,
A_PR, A_R0, A_R0_GBR, A_REG_M, A_REG_N, A_REG_B,
A_SR, A_VBR, A_SSR, A_SPC, A_SGR, A_DBR,
F_REG_N, F_REG_M, D_REG_N, D_REG_M,
X_REG_N, /* Only used for argument parsing */
X_REG_M, /* Only used for argument parsing */
DX_REG_N, DX_REG_M, V_REG_N, V_REG_M,
FD_REG_N,
XMTRX_M4,
F_FR0,
FPUL_N, FPUL_M, FPSCR_N, FPSCR_M,
} sh_arg_type;
static struct sh_opcode_info {
char *name;
sh_arg_type arg[7];
sh_nibble_type nibbles[4];
} sh_table[] = {
{"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM_8}},
{"add",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_C}},
{"addc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_E}},
{"addv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_F}},
{"and",{A_IMM,A_R0},{HEX_C,HEX_9,IMM_8}},
{"and",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_9}},
{"and.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_D,IMM_8}},
{"bra",{A_BDISP12},{HEX_A,BRANCH_12}},
{"bsr",{A_BDISP12},{HEX_B,BRANCH_12}},
{"bt",{A_BDISP8},{HEX_8,HEX_9,BRANCH_8}},
{"bf",{A_BDISP8},{HEX_8,HEX_B,BRANCH_8}},
{"bt.s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
{"bt/s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}},
{"bf.s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
{"bf/s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}},
{"clrmac",{0},{HEX_0,HEX_0,HEX_2,HEX_8}},
{"clrs",{0},{HEX_0,HEX_0,HEX_4,HEX_8}},
{"clrt",{0},{HEX_0,HEX_0,HEX_0,HEX_8}},
{"cmp/eq",{A_IMM,A_R0},{HEX_8,HEX_8,IMM_8}},
{"cmp/eq",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_0}},
{"cmp/ge",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_3}},
{"cmp/gt",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_7}},
{"cmp/hi",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_6}},
{"cmp/hs",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_2}},
{"cmp/pl",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_5}},
{"cmp/pz",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_1}},
{"cmp/str",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_C}},
{"div0s",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_7}},
{"div0u",{0},{HEX_0,HEX_0,HEX_1,HEX_9}},
{"div1",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_4}},
{"exts.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_E}},
{"exts.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_F}},
{"extu.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_C}},
{"extu.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_D}},
{"jmp",{A_IND_N},{HEX_4,REG_N,HEX_2,HEX_B}},
{"jsr",{A_IND_N},{HEX_4,REG_N,HEX_0,HEX_B}},
{"ldc",{A_REG_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_E}},
{"ldc",{A_REG_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_E}},
{"ldc",{A_REG_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_E}},
{"ldc",{A_REG_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_E}},
{"ldc",{A_REG_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_E}},
{"ldc",{A_REG_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_E}},
{"ldc",{A_REG_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_E}},
{"ldc.l",{A_INC_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_7}},
{"ldc.l",{A_INC_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_7}},
{"ldc.l",{A_INC_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_7}},
{"ldc.l",{A_INC_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_7}},
{"ldc.l",{A_INC_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_7}},
{"ldc.l",{A_INC_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_7}},
{"ldc.l",{A_INC_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_7}},
{"lds",{A_REG_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_A}},
{"lds",{A_REG_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_A}},
{"lds",{A_REG_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_A}},
{"lds",{A_REG_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_A}},
{"lds",{A_REG_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_A}},
{"lds.l",{A_INC_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_6}},
{"lds.l",{A_INC_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_6}},
{"lds.l",{A_INC_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_6}},
{"lds.l",{A_INC_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_6}},
{"lds.l",{A_INC_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_6}},
{"ldtlb",{0},{HEX_0,HEX_0,HEX_3,HEX_8}},
{"mac.w",{A_INC_M,A_INC_N},{HEX_4,REG_N,REG_M,HEX_F}},
{"mov",{A_IMM,A_REG_N},{HEX_E,REG_N,IMM_8}},
{"mov",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_3}},
{"mov.b",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_4}},
{"mov.b",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_4}},
{"mov.b",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_0}},
{"mov.b",{A_DISP_REG_M,A_R0},{HEX_8,HEX_4,REG_M,IMM_4}},
{"mov.b",{A_DISP_GBR,A_R0},{HEX_C,HEX_4,IMM_8}},
{"mov.b",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_C}},
{"mov.b",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_4}},
{"mov.b",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_0}},
{"mov.b",{A_R0,A_DISP_REG_M},{HEX_8,HEX_0,REG_M,IMM_4}},
{"mov.b",{A_R0,A_DISP_GBR},{HEX_C,HEX_0,IMM_8}},
{"mov.l",{ A_REG_M,A_DISP_REG_N},{HEX_1,REG_N,REG_M,IMM_4BY4}},
{"mov.l",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_6}},
{"mov.l",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_6}},
{"mov.l",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_2}},
{"mov.l",{A_DISP_REG_M,A_REG_N},{HEX_5,REG_N,REG_M,IMM_4BY4}},
{"mov.l",{A_DISP_GBR,A_R0},{HEX_C,HEX_6,IMM_8BY4}},
{"mov.l",{A_DISP_PC,A_REG_N},{HEX_D,REG_N,PCRELIMM_8BY4}},
{"mov.l",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_E}},
{"mov.l",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_6}},
{"mov.l",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_2}},
{"mov.l",{A_R0,A_DISP_GBR},{HEX_C,HEX_2,IMM_8BY4}},
{"mov.w",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_5}},
{"mov.w",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_5}},
{"mov.w",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_1}},
{"mov.w",{A_DISP_REG_M,A_R0},{HEX_8,HEX_5,REG_M,IMM_4BY2}},
{"mov.w",{A_DISP_GBR,A_R0},{HEX_C,HEX_5,IMM_8BY2}},
{"mov.w",{A_DISP_PC,A_REG_N},{HEX_9,REG_N,PCRELIMM_8BY2}},
{"mov.w",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_D}},
{"mov.w",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_5}},
{"mov.w",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_1}},
{"mov.w",{A_R0,A_DISP_REG_M},{HEX_8,HEX_1,REG_M,IMM_4BY2}},
{"mov.w",{A_R0,A_DISP_GBR},{HEX_C,HEX_1,IMM_8BY2}},
{"mova",{A_DISP_PC,A_R0},{HEX_C,HEX_7,PCRELIMM_8BY4}},
{"movca.l",{A_R0,A_IND_N},{HEX_0,REG_N,HEX_C,HEX_3}},
{"movt",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_9}},
{"muls",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_F}},
{"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
{"mulu",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_E}},
{"neg",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_B}},
{"negc",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_A}},
{"nop",{0},{HEX_0,HEX_0,HEX_0,HEX_9}},
{"not",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_7}},
{"ocbi",{A_IND_N},{HEX_0,REG_N,HEX_9,HEX_3}},
{"ocbp",{A_IND_N},{HEX_0,REG_N,HEX_A,HEX_3}},
{"ocbwb",{A_IND_N},{HEX_0,REG_N,HEX_B,HEX_3}},
{"or",{A_IMM,A_R0},{HEX_C,HEX_B,IMM_8}},
{"or",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_B}},
{"or.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_F,IMM_8}},
{"pref",{A_IND_N},{HEX_0,REG_N,HEX_8,HEX_3}},
{"rotcl",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_4}},
{"rotcr",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_5}},
{"rotl",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_4}},
{"rotr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_5}},
{"rte",{0},{HEX_0,HEX_0,HEX_2,HEX_B}},
{"rts",{0},{HEX_0,HEX_0,HEX_0,HEX_B}},
{"sets",{0},{HEX_0,HEX_0,HEX_5,HEX_8}},
{"sett",{0},{HEX_0,HEX_0,HEX_1,HEX_8}},
{"shad",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_C}},
{"shld",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_D}},
{"shal",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_0}},
{"shar",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_1}},
{"shll",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_0}},
{"shll16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_8}},
{"shll2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_8}},
{"shll8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_8}},
{"shlr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_1}},
{"shlr16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_9}},
{"shlr2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_9}},
{"shlr8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_9}},
{"sleep",{0},{HEX_0,HEX_0,HEX_1,HEX_B}},
{"stc",{A_SR,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_2}},
{"stc",{A_GBR,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_2}},
{"stc",{A_VBR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_2}},
{"stc",{A_SSR,A_REG_N},{HEX_0,REG_N,HEX_3,HEX_2}},
{"stc",{A_SPC,A_REG_N},{HEX_0,REG_N,HEX_4,HEX_2}},
{"stc",{A_SGR,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_2}},
{"stc",{A_DBR,A_REG_N},{HEX_0,REG_N,HEX_7,HEX_2}},
{"stc",{A_REG_B,A_REG_N},{HEX_0,REG_N,REG_B,HEX_2}},
{"stc.l",{A_SR,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_3}},
{"stc.l",{A_GBR,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_3}},
{"stc.l",{A_VBR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_3}},
{"stc.l",{A_SSR,A_DEC_N},{HEX_4,REG_N,HEX_3,HEX_3}},
{"stc.l",{A_SPC,A_DEC_N},{HEX_4,REG_N,HEX_4,HEX_3}},
{"stc.l",{A_SGR,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_3}},
{"stc.l",{A_DBR,A_DEC_N},{HEX_4,REG_N,HEX_7,HEX_3}},
{"stc.l",{A_REG_B,A_DEC_N},{HEX_4,REG_N,REG_B,HEX_3}},
{"sts",{A_MACH,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_A}},
{"sts",{A_MACL,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_A}},
{"sts",{A_PR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_A}},
{"sts",{FPUL_M,A_REG_N},{HEX_0,REG_N,HEX_5,HEX_A}},
{"sts",{FPSCR_M,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_A}},
{"sts.l",{A_MACH,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_2}},
{"sts.l",{A_MACL,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_2}},
{"sts.l",{A_PR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_2}},
{"sts.l",{FPUL_M,A_DEC_N},{HEX_4,REG_N,HEX_5,HEX_2}},
{"sts.l",{FPSCR_M,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_2}},
{"sub",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_8}},
{"subc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_A}},
{"subv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_B}},
{"swap.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_8}},
{"swap.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_9}},
{"tas.b",{A_IND_N},{HEX_4,REG_N,HEX_1,HEX_B}},
{"trapa",{A_IMM},{HEX_C,HEX_3,IMM_8}},
{"tst",{A_IMM,A_R0},{HEX_C,HEX_8,IMM_8}},
{"tst",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_8}},
{"tst.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_C,IMM_8}},
{"xor",{A_IMM,A_R0},{HEX_C,HEX_A,IMM_8}},
{"xor",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_A}},
{"xor.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_E,IMM_8}},
{"xtrct",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_D}},
{"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}},
{"dt",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_0}},
{"dmuls.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_D}},
{"dmulu.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_5}},
{"mac.l",{A_INC_M,A_INC_N},{HEX_0,REG_N,REG_M,HEX_F}},
{"braf",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_3}},
{"bsrf",{A_REG_N},{HEX_0,REG_N,HEX_0,HEX_3}},
{"fabs",{FD_REG_N},{HEX_F,REG_N,HEX_5,HEX_D}},
{"fadd",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
{"fadd",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_0}},
{"fcmp/eq",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
{"fcmp/eq",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_4}},
{"fcmp/gt",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
{"fcmp/gt",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_5}},
{"fcnvds",{D_REG_N,FPUL_M},{HEX_F,REG_N,HEX_B,HEX_D}},
{"fcnvsd",{FPUL_M,D_REG_N},{HEX_F,REG_N,HEX_A,HEX_D}},
{"fdiv",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
{"fdiv",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_3}},
{"fipr",{V_REG_M,V_REG_N},{HEX_F,REG_NM,HEX_E,HEX_D}},
{"fldi0",{F_REG_N},{HEX_F,REG_N,HEX_8,HEX_D}},
{"fldi1",{F_REG_N},{HEX_F,REG_N,HEX_9,HEX_D}},
{"flds",{F_REG_N,FPUL_M},{HEX_F,REG_N,HEX_1,HEX_D}},
{"float",{FPUL_M,FD_REG_N},{HEX_F,REG_N,HEX_2,HEX_D}},
{"fmac",{F_FR0,F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_E}},
{"fmov",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
{"fmov",{DX_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_C}},
{"fmov",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
{"fmov",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
{"fmov",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
{"fmov",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
{"fmov",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
{"fmov",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
{"fmov",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
{"fmov",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
{"fmov",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
{"fmov",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
{"fmov",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
{"fmov",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
{"fmov.d",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
{"fmov.d",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
{"fmov.d",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
{"fmov.d",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
{"fmov.d",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
{"fmov.d",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
{"fmov.s",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}},
{"fmov.s",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}},
{"fmov.s",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}},
{"fmov.s",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}},
{"fmov.s",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}},
{"fmov.s",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}},
{"fmul",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
{"fmul",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_2}},
{"fneg",{FD_REG_N},{HEX_F,REG_N,HEX_4,HEX_D}},
{"frchg",{0},{HEX_F,HEX_B,HEX_F,HEX_D}},
{"fschg",{0},{HEX_F,HEX_3,HEX_F,HEX_D}},
{"fsqrt",{FD_REG_N},{HEX_F,REG_N,HEX_6,HEX_D}},
{"fsts",{FPUL_M,F_REG_N},{HEX_F,REG_N,HEX_0,HEX_D}},
{"fsub",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
{"fsub",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_1}},
{"ftrc",{FD_REG_N,FPUL_M},{HEX_F,REG_N,HEX_3,HEX_D}},
{"ftrv",{XMTRX_M4,V_REG_N},{HEX_F,REG_NM,HEX_F,HEX_D}},
{ 0 },
};
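/*
 * A minimal sketch (illustration only): how print_sh_insn() below
 * matches an opcode against the nibble table. 0x312c splits into the
 * nibbles {3,1,2,c}; the "add" row {HEX_3,REG_N,REG_M,HEX_C} accepts
 * it with rn = 1 and rm = 2, so it disassembles as "add r2,r1".
 */
#if 0
static void nibble_match_example(void)
{
	u16 insn = 0x312c;
	int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf,
			(insn >> 4) & 0xf, insn & 0xf };

	/* HEX_3 == nibs[0] and HEX_C == nibs[3] select the "add" row;
	 * REG_N takes rn from nibs[1], REG_M takes rm from nibs[2]. */
}
#endif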
static void print_sh_insn(u32 memaddr, u16 insn)
{
int relmask = ~0;
int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf};
int lastsp;
struct sh_opcode_info *op = sh_table;
for (; op->name; op++) {
int n;
int imm = 0;
int rn = 0;
int rm = 0;
int rb = 0;
int disp_pc;
int disp_pc_addr = 0;
for (n = 0; n < 4; n++) {
int i = op->nibbles[n];
if (i < 16) {
if (nibs[n] == i)
continue;
goto fail;
}
switch (i) {
case BRANCH_8:
imm = (nibs[2] << 4) | (nibs[3]);
if (imm & 0x80)
imm |= ~0xff;
imm = ((char)imm) * 2 + 4 ;
goto ok;
case BRANCH_12:
imm = ((nibs[1]) << 8) | (nibs[2] << 4) | (nibs[3]);
if (imm & 0x800)
imm |= ~0xfff;
imm = imm * 2 + 4;
goto ok;
case IMM_4:
imm = nibs[3];
goto ok;
case IMM_4BY2:
imm = nibs[3] <<1;
goto ok;
case IMM_4BY4:
imm = nibs[3] <<2;
goto ok;
case IMM_8:
imm = (nibs[2] << 4) | nibs[3];
goto ok;
case PCRELIMM_8BY2:
imm = ((nibs[2] << 4) | nibs[3]) <<1;
relmask = ~1;
goto ok;
case PCRELIMM_8BY4:
imm = ((nibs[2] << 4) | nibs[3]) <<2;
relmask = ~3;
goto ok;
case IMM_8BY2:
imm = ((nibs[2] << 4) | nibs[3]) <<1;
goto ok;
case IMM_8BY4:
imm = ((nibs[2] << 4) | nibs[3]) <<2;
goto ok;
case DISP_8:
imm = (nibs[2] << 4) | (nibs[3]);
goto ok;
case DISP_4:
imm = nibs[3];
goto ok;
case REG_N:
rn = nibs[n];
break;
case REG_M:
rm = nibs[n];
break;
case REG_NM:
rn = (nibs[n] & 0xc) >> 2;
rm = (nibs[n] & 0x3);
break;
case REG_B:
rb = nibs[n] & 0x07;
break;
default:
return;
}
}
ok:
pr_cont("%-8s ", op->name);
lastsp = (op->arg[0] == A_END);
disp_pc = 0;
for (n = 0; n < 6 && op->arg[n] != A_END; n++) {
if (n && op->arg[1] != A_END)
pr_cont(", ");
switch (op->arg[n]) {
case A_IMM:
pr_cont("#%d", (char)(imm));
break;
case A_R0:
pr_cont("r0");
break;
case A_REG_N:
pr_cont("r%d", rn);
break;
case A_INC_N:
pr_cont("@r%d+", rn);
break;
case A_DEC_N:
pr_cont("@-r%d", rn);
break;
case A_IND_N:
pr_cont("@r%d", rn);
break;
case A_DISP_REG_N:
pr_cont("@(%d,r%d)", imm, rn);
break;
case A_REG_M:
pr_cont("r%d", rm);
break;
case A_INC_M:
pr_cont("@r%d+", rm);
break;
case A_DEC_M:
pr_cont("@-r%d", rm);
break;
case A_IND_M:
pr_cont("@r%d", rm);
break;
case A_DISP_REG_M:
pr_cont("@(%d,r%d)", imm, rm);
break;
case A_REG_B:
pr_cont("r%d_bank", rb);
break;
case A_DISP_PC:
disp_pc = 1;
disp_pc_addr = imm + 4 + (memaddr & relmask);
pr_cont("%08x <%pS>", disp_pc_addr,
(void *)disp_pc_addr);
break;
case A_IND_R0_REG_N:
pr_cont("@(r0,r%d)", rn);
break;
case A_IND_R0_REG_M:
pr_cont("@(r0,r%d)", rm);
break;
case A_DISP_GBR:
pr_cont("@(%d,gbr)", imm);
break;
case A_R0_GBR:
pr_cont("@(r0,gbr)");
break;
case A_BDISP12:
case A_BDISP8:
pr_cont("%08x", imm + memaddr);
break;
case A_SR:
pr_cont("sr");
break;
case A_GBR:
pr_cont("gbr");
break;
case A_VBR:
pr_cont("vbr");
break;
case A_SSR:
pr_cont("ssr");
break;
case A_SPC:
pr_cont("spc");
break;
case A_MACH:
pr_cont("mach");
break;
case A_MACL:
pr_cont("macl");
break;
case A_PR:
pr_cont("pr");
break;
case A_SGR:
pr_cont("sgr");
break;
case A_DBR:
pr_cont("dbr");
break;
case FD_REG_N:
case F_REG_N:
pr_cont("fr%d", rn);
break;
case F_REG_M:
pr_cont("fr%d", rm);
break;
case DX_REG_N:
if (rn & 1) {
pr_cont("xd%d", rn & ~1);
break;
}
fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
case DX_REG_M:
if (rm & 1) {
pr_cont("xd%d", rm & ~1);
break;
}
fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
case FPSCR_M:
case FPSCR_N:
pr_cont("fpscr");
break;
case FPUL_M:
case FPUL_N:
pr_cont("fpul");
break;
case F_FR0:
pr_cont("fr0");
break;
case V_REG_N:
pr_cont("fv%d", rn*4);
break;
case V_REG_M:
pr_cont("fv%d", rm*4);
break;
case XMTRX_M4:
pr_cont("xmtrx");
break;
default:
return;
}
}
if (disp_pc && strcmp(op->name, "mova") != 0) {
u32 val;
if (relmask == ~1)
__get_user(val, (u16 *)disp_pc_addr);
else
__get_user(val, (u32 *)disp_pc_addr);
pr_cont(" ! %08x <%pS>", val, (void *)val);
}
return;
fail:
;
}
pr_info(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]);
}
void show_code(struct pt_regs *regs)
{
unsigned short *pc = (unsigned short *)regs->pc;
long i;
if (regs->pc & 0x1)
return;
pr_info("Code:\n");
for (i = -3 ; i < 6 ; i++) {
unsigned short insn;
if (__get_user(insn, pc + i)) {
pr_err(" (Bad address in pc)\n");
break;
}
pr_info("%s%08lx: ", (i ? " " : "->"),
(unsigned long)(pc + i));
print_sh_insn((unsigned long)(pc + i), insn);
pr_cont("\n");
}
pr_info("\n");
}
| linux-master | arch/sh/kernel/disassemble.c |
// SPDX-License-Identifier: GPL-2.0
/*
* 'traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
* Copyright (C) 2002 - 2010 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC 12
# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
if ((count == 1) && dst[0] & 0x80) {
dst[1] = 0xff;
dst[2] = 0xff;
dst[3] = 0xff;
}
if ((count == 2) && dst[1] & 0x80) {
dst[2] = 0xff;
dst[3] = 0xff;
}
#else
if ((count == 1) && dst[3] & 0x80) {
dst[2] = 0xff;
dst[1] = 0xff;
dst[0] = 0xff;
}
if ((count == 2) && dst[2] & 0x80) {
dst[1] = 0xff;
dst[0] = 0xff;
}
#endif
}
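/*
 * Worked example (little-endian layout assumed, illustration only):
 * emulating "mov.b @rm,rn" first zeroes the 4-byte register image and
 * copies in one byte; sign_extend() then widens it, so a fetched 0x80
 * ends up as 0xffffff80 in the register.
 */
#if 0
static void sign_extend_example(void)
{
	unsigned long reg = 0;
	unsigned char *dst = (unsigned char *)&reg;

	dst[0] = 0x80;		/* byte fetched from memory   */
	sign_extend(1, dst);	/* reg now reads 0xffffff80   */
}
#endif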
static struct mem_access user_mem_access = {
copy_from_user,
copy_to_user,
};
static unsigned long copy_from_kernel_wrapper(void *dst, const void __user *src,
unsigned long cnt)
{
return copy_from_kernel_nofault(dst, (const void __force *)src, cnt);
}
static unsigned long copy_to_kernel_wrapper(void __user *dst, const void *src,
unsigned long cnt)
{
return copy_to_kernel_nofault((void __force *)dst, src, cnt);
}
static struct mem_access kernel_mem_access = {
copy_from_kernel_wrapper,
copy_to_kernel_wrapper,
};
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
* - note that PC _may not_ point to the faulting instruction
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
index = (instruction>>4)&15; /* 0x00F0 */
rm = &regs->regs[index];
count = 1<<(instruction&3);
switch (count) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
break;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
break;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
case 9: /* mov.w @(disp,PC),Rn */
srcu = (unsigned char __user *)regs->pc;
srcu += 4;
srcu += (instruction & 0x00FF) << 1;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
case 0xd: /* mov.l @(disp,PC),Rn */
srcu = (unsigned char __user *)(regs->pc & ~0x3);
srcu += 4;
srcu += (instruction & 0x00FF) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
}
return ret;
fetch_fault:
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
die_if_no_fixup("Fault in unaligned fixup", regs, 0);
return -EFAULT;
}
/*
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
insn_size_t old_instruction,
struct mem_access *ma)
{
insn_size_t instruction;
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
/* kernel */
die("delay-slot-insn faulting in handle_unaligned_delayslot",
regs, 0);
}
return handle_unaligned_ins(instruction, regs, ma);
}
/*
* handle an instruction that does an unaligned memory access
* - have to be careful of branch delay-slot instructions that fault
* SH3:
* - if the branch would be taken PC points to the branch
* - if the branch would not be taken, PC points to delay-slot
* SH4:
* - PC always points to delayed branch
* - return 0 if handled, -EFAULT if failed (may not return if in kernel)
*/
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
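/*
 * Worked example (hypothetical opcode, illustration only): for "bra"
 * 0xa800 the displacement field is 0x800, i.e. -2048 once sign
 * extended. (signed short)(0xa800 << 4) is -32768; an arithmetic >> 3
 * gives -4096 (disp * 2), and the + 4 accounts for the delay slot, so
 * the macro yields -4092.
 */
#if 0
static void branch_offset_example(struct pt_regs *regs)
{
	insn_size_t instr = 0xa800;	/* bra, disp field = 0x800 */

	regs->pc += SH_PC_12BIT_OFFSET(instr);	/* pc += -4092 */
}
#endif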
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int expected,
unsigned long address)
{
u_int rm;
int ret, index;
/*
* XXX: We can't handle mixed 16/32-bit instructions yet
*/
if (instruction_size(instruction) != 2)
return -EINVAL;
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
/*
* Log the unexpected fixups, and then pass them on to perf.
*
* We intentionally don't report the expected cases to perf as
* otherwise the trapped I/O case will skew the results too much
* to be useful.
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
regs, address);
}
ret = -EFAULT;
switch (instruction&0xF000) {
case 0x0000:
if (instruction==0x000B) {
/* rts */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x1000: /* mov.l Rm,@(disp,Rn) */
goto simple;
case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
goto simple;
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x5000: /* mov.l @(disp,Rm),Rn */
goto simple;
case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
goto simple;
case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
switch (instruction&0x0F00) {
case 0x0100: /* mov.w R0,@(disp,Rm) */
goto simple;
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
ret = 0;
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
case 0x0900: /* bt lab - no delayslot */
ret = 0;
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
}
break;
case 0x9000: /* mov.w @(disp,Rm),Rn */
goto simple;
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
case 0xD000: /* mov.l @(disp,Rm),Rn */
goto simple;
}
return ret;
/* handle non-delay-slot instruction */
simple:
ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
/*
* Handle various address error exceptions:
* - instruction address error:
* misaligned PC
* PC >= 0x80000000 in user mode
* - data address error (read and write)
* misaligned data access
* access to >= 0x80000000 in user mode
* Unfortunately we can't distinguish between instruction address error
* and data address errors caused by read accesses.
*/
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long error_code = 0;
insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
error_code = lookup_exception_vector();
#endif
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
unsigned int user_action;
local_irq_enable();
inc_unaligned_user_access();
if (copy_from_user(&instruction, (insn_size_t __user *)(regs->pc & ~1),
sizeof(instruction))) {
goto uspace_segv;
}
/* shout about userspace fixups */
unaligned_fixups_notify(current, instruction, regs);
user_action = unaligned_user_action();
if (user_action & UM_FIXUP)
goto fixup;
if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
regs->pc += instruction_size(instruction);
return;
}
fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
goto uspace_segv;
}
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access, 0,
address);
if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
regs->pr);
force_sig_fault(SIGBUS, si_code, (void __user *)address);
} else {
inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen on non-SMP.
*/
die("insn faulting in do_address_error", regs, 0);
}
unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs, &kernel_mem_access,
0, address);
}
}
#ifdef CONFIG_SH_DSP
/*
* SH-DSP support [email protected].
*/
int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
/*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
inst &= 0xf000;
/* Check for any type of DSP or support instruction */
if ((inst == 0xf000) || (inst == 0x4000))
return 1;
return 0;
}
#else
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4)
{
int code;
switch (r4) {
case TRAP_DIVZERO_ERROR:
code = FPE_INTDIV;
break;
case TRAP_DIVOVF_ERROR:
code = FPE_INTOVF;
break;
default:
/* Let gcc know unhandled cases don't make it past here */
return;
}
force_sig_fault(SIGFPE, code, NULL);
}
#endif
asmlinkage void do_reserved_inst(void)
{
struct pt_regs *regs = current_pt_regs();
unsigned long error_code;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;
int err;
get_user(inst, (unsigned short __user *)regs->pc);
err = do_fpu_inst(inst, regs);
if (!err) {
regs->pc += instruction_size(inst);
return;
}
/* not a FPU inst. */
#endif
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
/* Save DSP mode */
current->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
error_code = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
* bts: 8dxx: PC+=d*2+4;
* bra: axxx: PC+=D*2+4;
* bsr: bxxx: PC+=D*2+4 after PR=PC+4;
* braf:0x23: PC+=Rn*2+4;
* bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
* jmp: 4x2b: PC=Rn;
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
if (((inst & 0xf000) == 0xb000) || /* bsr */
((inst & 0xf0ff) == 0x0003) || /* bsrf */
((inst & 0xf0ff) == 0x400b)) /* jsr */
regs->pr = regs->pc + 4;
if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(void)
{
struct pt_regs *regs = current_pt_regs();
unsigned long inst;
if (kprobe_handle_illslot(regs->pc) == 0)
return;
#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short __user *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short __user *)regs->pc);
if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
inst = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL);
die_if_no_fixup("illegal slot instruction", regs, inst);
}
asmlinkage void do_exception_error(void)
{
long ex;
ex = lookup_exception_vector();
die_if_kernel("exception", current_pt_regs(), ex);
}
void per_cpu_trap_init(void)
{
extern void *vbr_base;
/* NOTE: The VBR value should be at P1
(or P2, virtual "fixed" address space).
It definitely should not be a physical address. */
asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
/* disable exception blocking now when the vbr has been setup */
clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
/*
* For SH-4 lacking an FPU, treat floating point instructions as
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
| linux-master | arch/sh/kernel/traps_32.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/checksum.h>
#include <asm/sections.h>
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_FLATMEM
/* need in pfn_valid macro */
EXPORT_SYMBOL(min_low_pfn);
EXPORT_SYMBOL(max_low_pfn);
#endif
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)
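/* These libgcc/compiler-emitted helpers have no C prototypes of their
 * own, so fabricate a dummy declaration purely so that EXPORT_SYMBOL()
 * has a symbol to attach to. */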
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__lshrsi3);
DECLARE_EXPORT(__ashrsi3);
DECLARE_EXPORT(__ashlsi3);
DECLARE_EXPORT(__lshrsi3_r0);
DECLARE_EXPORT(__ashrsi3_r0);
DECLARE_EXPORT(__ashlsi3_r0);
DECLARE_EXPORT(__ashiftrt_r4_0);
DECLARE_EXPORT(__ashiftrt_r4_1);
DECLARE_EXPORT(__ashiftrt_r4_2);
DECLARE_EXPORT(__ashiftrt_r4_3);
DECLARE_EXPORT(__ashiftrt_r4_4);
DECLARE_EXPORT(__ashiftrt_r4_5);
DECLARE_EXPORT(__ashiftrt_r4_6);
DECLARE_EXPORT(__ashiftrt_r4_7);
DECLARE_EXPORT(__ashiftrt_r4_8);
DECLARE_EXPORT(__ashiftrt_r4_9);
DECLARE_EXPORT(__ashiftrt_r4_10);
DECLARE_EXPORT(__ashiftrt_r4_11);
DECLARE_EXPORT(__ashiftrt_r4_12);
DECLARE_EXPORT(__ashiftrt_r4_13);
DECLARE_EXPORT(__ashiftrt_r4_14);
DECLARE_EXPORT(__ashiftrt_r4_15);
DECLARE_EXPORT(__ashiftrt_r4_16);
DECLARE_EXPORT(__ashiftrt_r4_17);
DECLARE_EXPORT(__ashiftrt_r4_18);
DECLARE_EXPORT(__ashiftrt_r4_19);
DECLARE_EXPORT(__ashiftrt_r4_20);
DECLARE_EXPORT(__ashiftrt_r4_21);
DECLARE_EXPORT(__ashiftrt_r4_22);
DECLARE_EXPORT(__ashiftrt_r4_23);
DECLARE_EXPORT(__ashiftrt_r4_24);
DECLARE_EXPORT(__ashiftrt_r4_25);
DECLARE_EXPORT(__ashiftrt_r4_26);
DECLARE_EXPORT(__ashiftrt_r4_27);
DECLARE_EXPORT(__ashiftrt_r4_28);
DECLARE_EXPORT(__ashiftrt_r4_29);
DECLARE_EXPORT(__ashiftrt_r4_30);
DECLARE_EXPORT(__ashiftrt_r4_31);
DECLARE_EXPORT(__ashiftrt_r4_32);
DECLARE_EXPORT(__movstr);
DECLARE_EXPORT(__movstrSI8);
DECLARE_EXPORT(__movstrSI12);
DECLARE_EXPORT(__movstrSI16);
DECLARE_EXPORT(__movstrSI20);
DECLARE_EXPORT(__movstrSI24);
DECLARE_EXPORT(__movstrSI28);
DECLARE_EXPORT(__movstrSI32);
DECLARE_EXPORT(__movstrSI36);
DECLARE_EXPORT(__movstrSI40);
DECLARE_EXPORT(__movstrSI44);
DECLARE_EXPORT(__movstrSI48);
DECLARE_EXPORT(__movstrSI52);
DECLARE_EXPORT(__movstrSI56);
DECLARE_EXPORT(__movstrSI60);
DECLARE_EXPORT(__movstr_i4_even);
DECLARE_EXPORT(__movstr_i4_odd);
DECLARE_EXPORT(__movstrSI12_i4);
DECLARE_EXPORT(__movmem);
DECLARE_EXPORT(__movmemSI8);
DECLARE_EXPORT(__movmemSI12);
DECLARE_EXPORT(__movmemSI16);
DECLARE_EXPORT(__movmemSI20);
DECLARE_EXPORT(__movmemSI24);
DECLARE_EXPORT(__movmemSI28);
DECLARE_EXPORT(__movmemSI32);
DECLARE_EXPORT(__movmemSI36);
DECLARE_EXPORT(__movmemSI40);
DECLARE_EXPORT(__movmemSI44);
DECLARE_EXPORT(__movmemSI48);
DECLARE_EXPORT(__movmemSI52);
DECLARE_EXPORT(__movmemSI56);
DECLARE_EXPORT(__movmemSI60);
DECLARE_EXPORT(__movmem_i4_even);
DECLARE_EXPORT(__movmem_i4_odd);
DECLARE_EXPORT(__movmemSI12_i4);
DECLARE_EXPORT(__udiv_qrnnd_16);
DECLARE_EXPORT(__sdivsi3_i4);
DECLARE_EXPORT(__udivsi3_i4);
DECLARE_EXPORT(__sdivsi3_i4i);
DECLARE_EXPORT(__udivsi3_i4i);
#ifdef CONFIG_MCOUNT
DECLARE_EXPORT(mcount);
#endif
| linux-master | arch/sh/kernel/sh_ksyms_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/kernel/irq.c
*
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
*/
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <linux/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>
#include <asm/softirq_stack.h>
atomic_t irq_err_count;
/*
* 'What should we do if we get a hw irq event on an illegal vector?'
* Each architecture has to answer this itself; it doesn't deserve
* a generic callback, I think.
*/
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk("unexpected IRQ trap at vector %02x\n", irq);
}
#if defined(CONFIG_PROC_FS)
/*
* /proc/interrupts printing for arch specific interrupts
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j));
seq_printf(p, " Non-maskable interrupts\n");
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
#endif
#ifdef CONFIG_IRQSTACKS
/*
* per-CPU IRQ handling contexts (thread information and stack)
*/
union irq_ctx {
struct thread_info tinfo;
u32 stack[THREAD_SIZE/sizeof(u32)];
};
static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static inline void handle_one_irq(unsigned int irq)
{
union irq_ctx *curctx, *irqctx;
curctx = (union irq_ctx *)current_thread_info();
irqctx = hardirq_ctx[smp_processor_id()];
/*
* this is where we switch to the IRQ stack. However, if we are
* already using the IRQ stack (because we interrupted a hardirq
* handler) we can't do that and just have to keep using the
* current stack (which is the irq stack already after all)
*/
if (curctx != irqctx) {
u32 *isp;
isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
irqctx->tinfo.task = curctx->tinfo.task;
irqctx->tinfo.previous_sp = current_stack_pointer;
/*
* Copy the softirq bits in preempt_count so that the
* softirq checks work in the hardirq context.
*/
irqctx->tinfo.preempt_count =
(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
__asm__ __volatile__ (
"mov %0, r4 \n"
"mov r15, r8 \n"
"jsr @%1 \n"
/* switch to the irq stack */
" mov %2, r15 \n"
/* restore the stack (ring zero) */
"mov r8, r15 \n"
: /* no outputs */
: "r" (irq), "r" (generic_handle_irq), "r" (isp)
: "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "t", "pr"
);
} else
generic_handle_irq(irq);
}
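/*
 * Note on the inline asm above: SH branch instructions have a delay
 * slot, so the "mov %2, r15" placed after "jsr @%1" executes *before*
 * the branch is taken. That is what makes the stack switch and the
 * call into generic_handle_irq() effectively one step. The same idiom
 * is used in do_softirq_own_stack() below.
 */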
/*
* allocate per-cpu stacks for hardirq and for softirq processing
*/
void irq_ctx_init(int cpu)
{
union irq_ctx *irqctx;
if (hardirq_ctx[cpu])
return;
irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
hardirq_ctx[cpu] = irqctx;
irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
irqctx->tinfo.task = NULL;
irqctx->tinfo.cpu = cpu;
irqctx->tinfo.preempt_count = 0;
irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
softirq_ctx[cpu] = irqctx;
printk("CPU %u irqstacks, hard=%p soft=%p\n",
cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}
void irq_ctx_exit(int cpu)
{
hardirq_ctx[cpu] = NULL;
}
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
union irq_ctx *irqctx;
u32 *isp;
curctx = current_thread_info();
irqctx = softirq_ctx[smp_processor_id()];
irqctx->tinfo.task = curctx->task;
irqctx->tinfo.previous_sp = current_stack_pointer;
/* build the stack frame on the softirq stack */
isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
__asm__ __volatile__ (
"mov r15, r9 \n"
"jsr @%0 \n"
/* switch to the softirq stack */
" mov %1, r15 \n"
/* restore the thread stack */
"mov r9, r15 \n"
: /* no outputs */
: "r" (__do_softirq), "r" (isp)
: "memory", "r0", "r1", "r2", "r3", "r4",
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
#endif
#else
static inline void handle_one_irq(unsigned int irq)
{
generic_handle_irq(irq);
}
#endif
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
irq = irq_demux(irq_lookup(irq));
if (irq != NO_IRQ_IGNORE) {
handle_one_irq(irq);
irq_finish(irq);
}
irq_exit();
set_irq_regs(old_regs);
return IRQ_HANDLED;
}
void __init init_IRQ(void)
{
plat_irq_setup();
/* Perform the machine specific initialisation */
if (sh_mv.mv_init_irq)
sh_mv.mv_init_irq();
intc_finalize();
irq_ctx_init(smp_processor_id());
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* The CPU has been marked offline. Migrate IRQs off this CPU. If
* the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*/
void migrate_irqs(void)
{
unsigned int irq, cpu = smp_processor_id();
for_each_active_irq(irq) {
struct irq_data *data = irq_get_irq_data(irq);
if (irq_data_get_node(data) == cpu) {
const struct cpumask *mask = irq_data_get_affinity_mask(data);
unsigned int newcpu = cpumask_any_and(mask,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
irq, cpu);
irq_set_affinity(irq, cpu_all_mask);
} else {
irq_set_affinity(irq, mask);
}
}
}
}
#endif
| linux-master | arch/sh/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/io.c - Machine independent I/O functions.
*
* Copyright (C) 2000 - 2009 Stuart Menefy
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/machvec.h>
#include <asm/io.h>
/*
* Copy data from IO memory space to "real" memory space.
*/
void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count)
{
/*
* Would it be worthwhile doing byte and long transfers first
* to try and get aligned?
*/
#ifdef CONFIG_CPU_SH4
if ((count >= 0x20) &&
(((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) {
int tmp2, tmp3, tmp4, tmp5, tmp6;
__asm__ __volatile__(
"1: \n\t"
"mov.l @%7+, r0 \n\t"
"mov.l @%7+, %2 \n\t"
"movca.l r0, @%0 \n\t"
"mov.l @%7+, %3 \n\t"
"mov.l @%7+, %4 \n\t"
"mov.l @%7+, %5 \n\t"
"mov.l @%7+, %6 \n\t"
"mov.l @%7+, r7 \n\t"
"mov.l @%7+, r0 \n\t"
"mov.l %2, @(0x04,%0) \n\t"
"mov #0x20, %2 \n\t"
"mov.l %3, @(0x08,%0) \n\t"
"sub %2, %1 \n\t"
"mov.l %4, @(0x0c,%0) \n\t"
"cmp/hi %1, %2 ! T if 32 > count \n\t"
"mov.l %5, @(0x10,%0) \n\t"
"mov.l %6, @(0x14,%0) \n\t"
"mov.l r7, @(0x18,%0) \n\t"
"mov.l r0, @(0x1c,%0) \n\t"
"bf.s 1b \n\t"
" add #0x20, %0 \n\t"
: "=&r" (to), "=&r" (count),
"=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4),
"=&r" (tmp5), "=&r" (tmp6), "=&r" (from)
: "7"(from), "0" (to), "1" (count)
: "r0", "r7", "t", "memory");
}
#endif
if ((((u32)to | (u32)from) & 0x3) == 0) {
for (; count > 3; count -= 4) {
*(u32 *)to = *(volatile u32 *)from;
to += 4;
from += 4;
}
}
for (; count > 0; count--) {
*(u8 *)to = *(volatile u8 *)from;
to++;
from++;
}
mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
*/
void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count)
{
if ((((u32)to | (u32)from) & 0x3) == 0) {
for ( ; count > 3; count -= 4) {
*(volatile u32 *)to = *(u32 *)from;
to += 4;
from += 4;
}
}
for (; count > 0; count--) {
*(volatile u8 *)to = *(u8 *)from;
to++;
from++;
}
mb();
}
EXPORT_SYMBOL(memcpy_toio);
/*
* "memset" on IO memory space.
* This needs to be optimized.
*/
void memset_io(volatile void __iomem *dst, int c, unsigned long count)
{
while (count) {
count--;
writeb(c, dst);
dst++;
}
}
EXPORT_SYMBOL(memset_io);
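/*
 * A possible (untested) optimization sketch: when dst is 4-byte aligned,
 * replicate c into a word and fill with writel, keeping the byte loop
 * for the unaligned head/tail. Roughly:
 *
 *	u32 pat = (u8)c * 0x01010101u;
 *	while (count >= 4 && !((unsigned long)dst & 3)) {
 *		writel(pat, dst);
 *		dst += 4;
 *		count -= 4;
 *	}
 *
 * This is only a sketch; byte-wide device windows may not tolerate
 * 32-bit accesses, which is presumably why the byte loop is kept.
 */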
| linux-master | arch/sh/kernel/io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Atmel Corporation
*/
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/hardirq.h>
enum nmi_action {
NMI_SHOW_STATE = 1 << 0,
NMI_SHOW_REGS = 1 << 1,
NMI_DIE = 1 << 2,
NMI_DEBOUNCE = 1 << 3,
};
static unsigned long nmi_actions;
static int nmi_debug_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = data;
if (likely(val != DIE_NMI))
return NOTIFY_DONE;
if (nmi_actions & NMI_SHOW_STATE)
show_state();
if (nmi_actions & NMI_SHOW_REGS)
show_regs(args->regs);
if (nmi_actions & NMI_DEBOUNCE)
mdelay(10);
if (nmi_actions & NMI_DIE)
return NOTIFY_BAD;
return NOTIFY_OK;
}
static struct notifier_block nmi_debug_nb = {
.notifier_call = nmi_debug_notify,
};
static int __init nmi_debug_setup(char *str)
{
char *p, *sep;
register_die_notifier(&nmi_debug_nb);
if (*str != '=')
return 1;
for (p = str + 1; *p; p = sep + 1) {
sep = strchr(p, ',');
if (sep)
*sep = 0;
if (strcmp(p, "state") == 0)
nmi_actions |= NMI_SHOW_STATE;
else if (strcmp(p, "regs") == 0)
nmi_actions |= NMI_SHOW_REGS;
else if (strcmp(p, "debounce") == 0)
nmi_actions |= NMI_DEBOUNCE;
else if (strcmp(p, "die") == 0)
nmi_actions |= NMI_DIE;
else
printk(KERN_WARNING "NMI: Unrecognized action `%s'\n",
p);
if (!sep)
break;
}
return 1;
}
__setup("nmi_debug", nmi_debug_setup);
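/*
 * Example (derived from the parser above): booting with
 *
 *	nmi_debug=state,regs,debounce,die
 *
 * registers the die notifier and enables all four actions; a bare
 * "nmi_debug" registers the notifier with no actions set.
 */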
| linux-master | arch/sh/kernel/nmi_debug.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ptrace.h>
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
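/*
 * Usage sketch (hypothetical, e.g. from a kprobes-style handler): the
 * two lookups are inverses of each other over regoffset_table:
 *
 *	int off = regs_query_register_offset("r15");	   // stack pointer
 *	const char *name = regs_query_register_name(off);  // "r15"
 */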
| linux-master | arch/sh/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/ioport.c
*
* Copyright (C) 2000 Niibe Yutaka
* Copyright (C) 2005 - 2007 Paul Mundt
*/
#include <linux/module.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
unsigned long sh_io_port_base __read_mostly = -1;
EXPORT_SYMBOL(sh_io_port_base);
void __iomem *__ioport_map(unsigned long addr, unsigned int size)
{
if (sh_mv.mv_ioport_map)
return sh_mv.mv_ioport_map(addr, size);
return (void __iomem *)(addr + sh_io_port_base);
}
EXPORT_SYMBOL(__ioport_map);
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
void __iomem *ret;
ret = __ioport_map_trapped(port, nr);
if (ret)
return ret;
return __ioport_map(port, nr);
}
EXPORT_SYMBOL(ioport_map);
void ioport_unmap(void __iomem *addr)
{
if (sh_mv.mv_ioport_unmap)
sh_mv.mv_ioport_unmap(addr);
}
EXPORT_SYMBOL(ioport_unmap);
| linux-master | arch/sh/kernel/ioport.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/kdebug.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/sched/signal.h>
#include <linux/extable.h>
#include <linux/module.h> /* print_modules */
#include <asm/unwinder.h>
#include <asm/traps.h>
static DEFINE_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
static int die_counter;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
print_modules();
show_regs(regs);
printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", KERN_DEFAULT, regs->regs[15],
THREAD_SIZE + (unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
make_task_dead(SIGSEGV);
}
void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
/*
* try to fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
* - kernel/userspace interfaces cause a jump to an appropriate handler
* - other kernel errors are bad
*/
void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
#ifdef CONFIG_GENERIC_BUG
static void handle_BUG(struct pt_regs *regs)
{
const struct bug_entry *bug;
unsigned long bugaddr = regs->pc;
enum bug_trap_type tt;
if (!is_valid_bugaddr(bugaddr))
goto invalid;
bug = find_bug(bugaddr);
/* Switch unwinders when unwind_stack() is called */
if (bug->flags & BUGFLAG_UNWINDER)
unwinder_faulted = 1;
tt = report_bug(bugaddr, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->pc += instruction_size(bugaddr);
return;
}
invalid:
die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
}
int is_valid_bugaddr(unsigned long addr)
{
insn_size_t opcode;
if (addr < PAGE_OFFSET)
return 0;
if (get_kernel_nofault(opcode, (insn_size_t *)addr))
return 0;
if (opcode == TRAPA_BUG_OPCODE)
return 1;
return 0;
}
#endif
/*
* Generic trap handler.
*/
BUILD_TRAP_HANDLER(debug)
{
TRAP_HANDLER_DECL;
/* Rewind */
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff,
SIGTRAP) == NOTIFY_STOP)
return;
force_sig(SIGTRAP);
}
/*
* Special handler for BUG() traps.
*/
BUILD_TRAP_HANDLER(bug)
{
TRAP_HANDLER_DECL;
/* Rewind */
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff,
SIGTRAP) == NOTIFY_STOP)
return;
#ifdef CONFIG_GENERIC_BUG
if (__kernel_text_address(instruction_pointer(regs))) {
insn_size_t insn = *(insn_size_t *)instruction_pointer(regs);
if (insn == TRAPA_BUG_OPCODE)
handle_BUG(regs);
return;
}
#endif
force_sig(SIGTRAP);
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern void arch_ftrace_nmi_enter(void);
extern void arch_ftrace_nmi_exit(void);
#else
static inline void arch_ftrace_nmi_enter(void) { }
static inline void arch_ftrace_nmi_exit(void) { }
#endif
BUILD_TRAP_HANDLER(nmi)
{
TRAP_HANDLER_DECL;
arch_ftrace_nmi_enter();
nmi_enter();
this_cpu_inc(irq_stat.__nmi_count);
switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) {
case NOTIFY_OK:
case NOTIFY_STOP:
break;
case NOTIFY_BAD:
die("Fatal Non-Maskable Interrupt", regs, SIGINT);
default:
printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n");
break;
}
nmi_exit();
arch_ftrace_nmi_exit();
}
| linux-master | arch/sh/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
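/*
 * Concretely, each DEFINE() below emits a marker line of the form
 * "->SYMBOL <value> <expression>" into the generated assembly (see
 * include/linux/kbuild.h); a kbuild postprocessing step greps these
 * out of the .s file and turns them into #defines in asm-offsets.h.
 * This describes the generic kbuild mechanism, nothing SH-specific.
 */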
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/thread_info.h>
#include <asm/suspend.h>
int main(void)
{
/* offsets into the thread_info struct */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SIZE, sizeof(struct thread_info));
#ifdef CONFIG_HIBERNATION
DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
DEFINE(PBE_NEXT, offsetof(struct pbe, next));
DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
#endif
DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
return 0;
}
| linux-master | arch/sh/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/topology.c
*
* Copyright (C) 2007 Paul Mundt
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/export.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
cpumask_t cpu_core_map[NR_CPUS];
EXPORT_SYMBOL(cpu_core_map);
static cpumask_t cpu_coregroup_map(int cpu)
{
/*
* Presently all SH-X3 SMP cores are multi-cores, so just keep it
* simple until we have a method for determining topology.
*/
return *cpu_possible_mask;
}
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_core_map[cpu];
}
int arch_update_cpu_topology(void)
{
unsigned int cpu;
for_each_possible_cpu(cpu)
cpu_core_map[cpu] = cpu_coregroup_map(cpu);
return 0;
}
static int __init topology_init(void)
{
int i, ret;
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
c->hotpluggable = 1;
ret = register_cpu(c, i);
if (unlikely(ret))
printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
__func__, i, ret);
}
#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
/*
* In the UP case, make sure the CPU association is still
* registered under each node. Without this, sysfs fails
* to make the connection between nodes other than node0
* and cpu0.
*/
for_each_online_node(i)
if (i != numa_node_id())
register_cpu_under_node(raw_smp_processor_id(), i);
#endif
return 0;
}
subsys_initcall(topology_init);
| linux-master | arch/sh/kernel/topology.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
/*
* This routine will put a word on the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct thread_struct *thread = &tsk->thread;
struct perf_event *bp;
struct perf_event_attr attr;
bp = thread->ptrace_bps[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
NULL, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->ptrace_bps[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
}
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
set_single_step(child, pc);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
return membuf_write(&to, regs, sizeof(struct pt_regs));
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
static int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
return membuf_write(&to, target->thread.xstate,
sizeof(struct user_fpu_struct));
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
return membuf_write(&to, regs, sizeof(struct pt_dspregs));
}
static int dspregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_active(struct task_struct *target,
const struct user_regset *regset)
{
struct pt_regs *regs = task_pt_regs(target);
return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(pr),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(gbr),
REG_OFFSET_NAME(mach),
REG_OFFSET_NAME(macl),
REG_OFFSET_NAME(tra),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* R0 --> R15
* PC, PR, SR, GBR, MACH, MACL, TRA
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.regset_get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.regset_get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
#ifdef CONFIG_SH_DSP
[REGSET_DSP] = {
.n = sizeof(struct pt_dspregs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.regset_get = dspregs_get,
.set = dspregs_set,
.active = dspregs_active,
},
#endif
};
static const struct user_regset_view user_sh_native_view = {
.name = "sh",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
} else {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
}
} else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
else if (addr == PT_DATA_ADDR)
tmp = child->mm->start_data;
else if (addr == PT_TEXT_END_ADDR)
tmp = child->mm->end_code;
else if (addr == PT_TEXT_LEN)
tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
[index >> 2] = data;
ret = 0;
} else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
ptrace_report_syscall_entry(regs)) {
regs->regs[0] = -ENOSYS;
return -1;
}
if (secure_computing() == -1)
return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return 0;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, step);
}
| linux-master | arch/sh/kernel/ptrace_32.c |
// SPDX-License-Identifier: GPL-2.0+
/* Kernel module help for SH.
SHcompact version by Kaz Kojima and Paul Mundt.
SHmedia bits:
Copyright 2004 SuperH (UK) Ltd
Author: Richard Curnow
Based on the sh version, and on code from the sh64-specific parts of
modutils, originally written by Richard Curnow and Ben Gaster.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <asm/dwarf.h>
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf32_Addr relocation;
uint32_t *location;
uint32_t value;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;
case R_SH_DIR32:
value = get_unaligned(location);
value += relocation;
put_unaligned(value, location);
break;
case R_SH_REL32:
relocation = (relocation - (Elf32_Addr) location);
value = get_unaligned(location);
value += relocation;
put_unaligned(value, location);
break;
case R_SH_IMM_LOW16:
*location = (*location & ~0x3fffc00) |
((relocation & 0xffff) << 10);
break;
case R_SH_IMM_MEDLOW16:
*location = (*location & ~0x3fffc00) |
(((relocation >> 16) & 0xffff) << 10);
break;
case R_SH_IMM_LOW16_PCREL:
relocation -= (Elf32_Addr) location;
*location = (*location & ~0x3fffc00) |
((relocation & 0xffff) << 10);
break;
case R_SH_IMM_MEDLOW16_PCREL:
relocation -= (Elf32_Addr) location;
*location = (*location & ~0x3fffc00) |
(((relocation >> 16) & 0xffff) << 10);
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
int ret = 0;
ret |= module_dwarf_finalize(hdr, sechdrs, me);
return ret;
}
void module_arch_cleanup(struct module *mod)
{
module_dwarf_cleanup(mod);
}
| linux-master | arch/sh/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/kernel/sys_sh.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/SuperH
* platform.
*
* Taken from i386 version.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off)
{
if (off & ~PAGE_MASK)
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* The shift for mmap2 is constant, regardless of PAGE_SIZE
* setting.
*/
if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
return -EINVAL;
pgoff >>= PAGE_SHIFT - 12;
return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
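/*
 * Worked example for the check above: with 16 KiB pages (PAGE_SHIFT =
 * 14), mmap2's offset unit is still 4096 bytes, so pgoff must be a
 * multiple of 1 << (14 - 12) = 4 and is then shifted right by 2 to
 * convert it into 16 KiB page units for ksys_mmap_pgoff().
 */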
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
{
struct vm_area_struct *vma;
if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I)))
return -EINVAL;
/*
* Verify that the specified address region actually belongs
* to this process.
*/
if (addr + len < addr)
return -EFAULT;
mmap_read_lock(current->mm);
vma = find_vma (current->mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
mmap_read_unlock(current->mm);
return -EFAULT;
}
switch (op & CACHEFLUSH_D_PURGE) {
case CACHEFLUSH_D_INVAL:
__flush_invalidate_region((void *)addr, len);
break;
case CACHEFLUSH_D_WB:
__flush_wback_region((void *)addr, len);
break;
case CACHEFLUSH_D_PURGE:
__flush_purge_region((void *)addr, len);
break;
}
if (op & CACHEFLUSH_I)
flush_icache_range(addr, addr+len);
mmap_read_unlock(current->mm);
return 0;
}
| linux-master | arch/sh/kernel/sys_sh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2004 - 2007 Paul Mundt
*/
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
void arch_dma_prep_coherent(struct page *page, size_t size)
{
__flush_purge_region(page_address(page), size);
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
switch (dir) {
case DMA_FROM_DEVICE: /* invalidate only */
__flush_invalidate_region(addr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
__flush_wback_region(addr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
__flush_purge_region(addr, size);
break;
default:
BUG();
}
}
| linux-master | arch/sh/kernel/dma-coherent.c |
// SPDX-License-Identifier: GPL-2.0
/*
* crash_dump.c - Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha ([email protected])
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void __iomem *vaddr;
if (!csize)
return 0;
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
return csize;
}
| linux-master | arch/sh/kernel/crash_dump.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Trapped io support
*
* Copyright (C) 2008 Magnus Damm
*
* Intercept io operations by trapping.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>
#define TRAPPED_PAGES_MAX 16
#ifdef CONFIG_HAS_IOPORT_MAP
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);
static int trapped_io_disable __read_mostly;
static int __init trapped_io_setup(char *__unused)
{
trapped_io_disable = 1;
return 1;
}
__setup("noiotrap", trapped_io_setup);
int register_trapped_io(struct trapped_io *tiop)
{
struct resource *res;
unsigned long len = 0, flags = 0;
struct page *pages[TRAPPED_PAGES_MAX];
int k, n;
if (unlikely(trapped_io_disable))
return 0;
/* structure must be page aligned */
if ((unsigned long)tiop & (PAGE_SIZE - 1))
goto bad;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
len += roundup(resource_size(res), PAGE_SIZE);
flags |= res->flags;
}
/* support IORESOURCE_IO _or_ MEM, not both */
if (hweight_long(flags) != 1)
goto bad;
n = len >> PAGE_SHIFT;
if (n >= TRAPPED_PAGES_MAX)
goto bad;
for (k = 0; k < n; k++)
pages[k] = virt_to_page(tiop);
tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
if (!tiop->virt_base)
goto bad;
len = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
(unsigned long)(tiop->virt_base + len),
res->flags & IORESOURCE_IO ? "io" : "mmio",
(unsigned long)res->start);
len += roundup(resource_size(res), PAGE_SIZE);
}
tiop->magic = IO_TRAPPED_MAGIC;
INIT_LIST_HEAD(&tiop->list);
spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT_MAP
if (flags & IORESOURCE_IO)
list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
if (flags & IORESOURCE_MEM)
list_add(&tiop->list, &trapped_mem);
#endif
spin_unlock_irq(&trapped_lock);
return 0;
bad:
pr_warn("unable to install trapped io filter\n");
return -1;
}
void __iomem *match_trapped_io_handler(struct list_head *list,
unsigned long offset,
unsigned long size)
{
unsigned long voffs;
struct trapped_io *tiop;
struct resource *res;
int k, len;
unsigned long flags;
spin_lock_irqsave(&trapped_lock, flags);
list_for_each_entry(tiop, list, list) {
voffs = 0;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
if (res->start == offset) {
spin_unlock_irqrestore(&trapped_lock, flags);
return tiop->virt_base + voffs;
}
len = resource_size(res);
voffs += roundup(len, PAGE_SIZE);
}
}
spin_unlock_irqrestore(&trapped_lock, flags);
return NULL;
}
static struct trapped_io *lookup_tiop(unsigned long address)
{
pgd_t *pgd_k;
p4d_t *p4d_k;
pud_t *pud_k;
pmd_t *pmd_k;
pte_t *pte_k;
pte_t entry;
pgd_k = swapper_pg_dir + pgd_index(address);
if (!pgd_present(*pgd_k))
return NULL;
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d_k))
return NULL;
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
return NULL;
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
return NULL;
pte_k = pte_offset_kernel(pmd_k, address);
entry = *pte_k;
return pfn_to_kaddr(pte_pfn(entry));
}
static unsigned long lookup_address(struct trapped_io *tiop,
unsigned long address)
{
struct resource *res;
unsigned long vaddr = (unsigned long)tiop->virt_base;
unsigned long len;
int k;
for (k = 0; k < tiop->num_resources; k++) {
res = tiop->resource + k;
len = roundup(resource_size(res), PAGE_SIZE);
if (address < (vaddr + len))
return res->start + (address - vaddr);
vaddr += len;
}
return 0;
}
static unsigned long long copy_word(unsigned long src_addr, int src_len,
unsigned long dst_addr, int dst_len)
{
unsigned long long tmp = 0;
switch (src_len) {
case 1:
tmp = __raw_readb(src_addr);
break;
case 2:
tmp = __raw_readw(src_addr);
break;
case 4:
tmp = __raw_readl(src_addr);
break;
case 8:
tmp = __raw_readq(src_addr);
break;
}
switch (dst_len) {
case 1:
__raw_writeb(tmp, dst_addr);
break;
case 2:
__raw_writew(tmp, dst_addr);
break;
case 4:
__raw_writel(tmp, dst_addr);
break;
case 8:
__raw_writeq(tmp, dst_addr);
break;
}
return tmp;
}
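/*
 * Example (derived from the callers below): a 16-bit load emulated on
 * behalf of the faulting instruction becomes copy_word(src, 2, dst, 2);
 * if the trapped region declares minimum_bus_width = 32, the read side
 * is widened to 4 bytes while the write back to the saved register
 * stays at the instruction's own access size.
 */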
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
struct trapped_io *tiop;
unsigned long src_addr = (unsigned long)src;
unsigned long long tmp;
pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
tiop = lookup_tiop(src_addr);
WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
src_addr = lookup_address(tiop, src_addr);
if (!src_addr)
return cnt;
tmp = copy_word(src_addr,
max_t(unsigned long, cnt,
(tiop->minimum_bus_width / 8)),
(unsigned long)dst, cnt);
pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
return 0;
}
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
struct trapped_io *tiop;
unsigned long dst_addr = (unsigned long)dst;
unsigned long long tmp;
pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
tiop = lookup_tiop(dst_addr);
WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));
dst_addr = lookup_address(tiop, dst_addr);
if (!dst_addr)
return cnt;
tmp = copy_word((unsigned long)src, cnt,
dst_addr, max_t(unsigned long, cnt,
(tiop->minimum_bus_width / 8)));
pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
return 0;
}
static struct mem_access trapped_io_access = {
from_device,
to_device,
};
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
insn_size_t instruction;
int tmp;
if (trapped_io_disable)
return 0;
if (!lookup_tiop(address))
return 0;
WARN_ON(user_mode(regs));
if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc),
sizeof(instruction))) {
return 0;
}
tmp = handle_unaligned_access(instruction, regs,
&trapped_io_access, 1, address);
return tmp == 0;
}
| linux-master | arch/sh/kernel/io_trapped.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/pm.h>
#include <linux/kexec.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <asm/watchdog.h>
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
static void native_machine_restart(char * __unused)
{
local_irq_disable();
/* Destroy all of the TLBs in preparation for reset by MMU */
__flush_tlb_global();
/* Address error with SR.BL=1 first. */
trigger_address_error();
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
/*
* Give up and sleep.
*/
while (1)
cpu_sleep();
}
static void native_machine_shutdown(void)
{
smp_send_stop();
}
static void native_machine_power_off(void)
{
do_kernel_power_off();
}
static void native_machine_halt(void)
{
/* stop other cpus */
machine_shutdown();
/* stop this cpu */
stop_this_cpu(NULL);
}
struct machine_ops machine_ops = {
.power_off = native_machine_power_off,
.shutdown = native_machine_shutdown,
.restart = native_machine_restart,
.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
.crash_shutdown = native_machine_crash_shutdown,
#endif
};
void machine_power_off(void)
{
machine_ops.power_off();
}
void machine_shutdown(void)
{
machine_ops.shutdown();
}
void machine_restart(char *cmd)
{
machine_ops.restart(cmd);
}
void machine_halt(void)
{
machine_ops.halt();
}
#ifdef CONFIG_KEXEC
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);
}
#endif
| linux-master | arch/sh/kernel/reboot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SHcompact irqflags support
*
* Copyright (C) 2006 - 2009 Paul Mundt
*/
#include <linux/irqflags.h>
#include <linux/module.h>
void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long __dummy0, __dummy1;
if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"stc sr, %0\n\t"
"or #0xf0, %0\n\t"
"ldc %0, sr\n\t"
: "=&z" (__dummy0)
: /* no inputs */
: "memory"
);
} else {
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and %1, %0\n\t"
#ifdef CONFIG_CPU_HAS_SR_RB
"stc r6_bank, %1\n\t"
"or %1, %0\n\t"
#endif
"ldc %0, sr\n\t"
: "=&r" (__dummy0), "=r" (__dummy1)
: "1" (~ARCH_IRQ_DISABLED)
: "memory"
);
}
}
EXPORT_SYMBOL(arch_local_irq_restore);
unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"stc sr, %0\n\t"
"and #0xf0, %0\n\t"
: "=&z" (flags)
: /* no inputs */
: "memory"
);
return flags;
}
EXPORT_SYMBOL(arch_local_save_flags);
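/*
 * Typical pairing (sketch): these are normally reached through the
 * generic local_irq_save()/local_irq_restore() helpers, i.e.
 *
 *	unsigned long flags;
 *	local_irq_save(flags);		// save, then mask via SR.IMASK
 *	...critical section...
 *	local_irq_restore(flags);	// lands in arch_local_irq_restore()
 */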
| linux-master | arch/sh/kernel/irq_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/setup.c
*
* This file handles the architecture-dependent parts of initialization
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2010 Paul Mundt
*/
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/uaccess.h>
#include <uapi/linux/mount.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/processor.h>
#include <asm/sparsemem.h>
#include <asm/platform_early.h>
/*
* Initialize loops_per_jiffy as 10000000 (1000 MIPS).
* This value will be used at the very early stage of serial setup.
* A larger value here is harmless.
*/
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
[0] = {
.type = CPU_SH_NONE,
.family = CPU_FAMILY_UNKNOWN,
.loops_per_jiffy = 10000000,
.phys_bits = MAX_PHYSMEM_BITS,
},
};
EXPORT_SYMBOL(cpu_data);
/*
* The machine vector. First entry in .machvec.init, or clobbered by
* sh_mv= on the command line, prior to .machvec.init teardown.
*/
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);
#ifdef CONFIG_VT
struct screen_info screen_info;
#endif
extern int root_mountflags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource bss_resource = {
.name = "Kernel bss",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);
unsigned long memory_limit = 0;
static struct resource mem_resources[MAX_NUMNODES];
int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;
static int __init early_parse_mem(char *p)
{
if (!p)
return 1;
memory_limit = PAGE_ALIGN(memparse(p, &p));
pr_notice("Memory limited to %ldMB\n", memory_limit >> 20);
return 0;
}
early_param("mem", early_parse_mem);
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long start, end;
/*
* Check for the rare cases where boot loaders adhere to the boot
* ABI.
*/
if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE)
goto disable;
start = INITRD_START + __MEMORY_START;
end = start + INITRD_SIZE;
if (unlikely(end <= start))
goto disable;
if (unlikely(start & ~PAGE_MASK)) {
pr_err("initrd must be page aligned\n");
goto disable;
}
if (unlikely(start < __MEMORY_START)) {
pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n",
start, __MEMORY_START);
goto disable;
}
if (unlikely(end > memblock_end_of_DRAM())) {
pr_err("initrd extends beyond end of memory "
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
end, (unsigned long)memblock_end_of_DRAM());
goto disable;
}
/*
* If we got this far in spite of the boot loader's best efforts
* to the contrary, assume we actually have a valid initrd and
* fix up the root dev.
*/
ROOT_DEV = Root_RAM0;
/*
* Address sanitization
*/
initrd_start = (unsigned long)__va(start);
initrd_end = initrd_start + INITRD_SIZE;
memblock_reserve(__pa(initrd_start), INITRD_SIZE);
return;
disable:
pr_info("initrd disabled\n");
initrd_start = initrd_end = 0;
#endif
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void calibrate_delay(void)
{
struct clk *clk = clk_get(NULL, "cpu_clk");
if (IS_ERR(clk))
panic("Need a sane CPU clock definition!");
loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;
printk(KERN_INFO "Calibrating delay loop (skipped)... "
"%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100,
loops_per_jiffy);
}
#endif
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn)
{
struct resource *res = &mem_resources[nid];
unsigned long start, end;
WARN_ON(res->name); /* max one active range per node for now */
start = start_pfn << PAGE_SHIFT;
end = end_pfn << PAGE_SHIFT;
res->name = "System RAM";
res->start = start;
res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res)) {
pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
start_pfn, end_pfn);
return;
}
/*
* We don't know which RAM region contains kernel data or
* the reserved crashkernel region, so try it repeatedly
* and let the resource manager test it.
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
#ifdef CONFIG_KEXEC
request_resource(res, &crashk_res);
#endif
/*
* Also make sure that there is a PMB mapping that covers this
* range before we attempt to activate it, to avoid reset by MMU.
* We can hit this path with NUMA or memory hot-add.
*/
pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
PAGE_KERNEL);
memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
&memblock.memory, nid);
}
void __init __weak plat_early_device_setup(void)
{
}
#ifdef CONFIG_OF_EARLY_FLATTREE
void __ref sh_fdt_init(phys_addr_t dt_phys)
{
static int done = 0;
void *dt_virt;
/* Avoid calling an __init function on secondary cpus. */
if (done)
return;
#ifdef CONFIG_USE_BUILTIN_DTB
dt_virt = __dtb_start;
#else
dt_virt = phys_to_virt(dt_phys);
#endif
if (!dt_virt || !early_init_dt_scan(dt_virt)) {
pr_crit("Error: invalid device tree blob"
" at physical address %p\n", (void *)dt_phys);
while (true)
cpu_relax();
}
done = 1;
}
#endif
void __init setup_arch(char **cmdline_p)
{
enable_mmu();
ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
printk(KERN_NOTICE "Boot params:\n"
"... MOUNT_ROOT_RDONLY - %08lx\n"
"... RAMDISK_FLAGS - %08lx\n"
"... ORIG_ROOT_DEV - %08lx\n"
"... LOADER_TYPE - %08lx\n"
"... INITRD_START - %08lx\n"
"... INITRD_SIZE - %08lx\n",
MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
ORIG_ROOT_DEV, LOADER_TYPE,
INITRD_START, INITRD_SIZE);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
#endif
if (!MOUNT_ROOT_RDONLY)
root_mountflags &= ~MS_RDONLY;
setup_initial_init_mm(_text, _etext, _edata, _end);
code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(_etext)-1;
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(__bss_start);
bss_resource.end = virt_to_phys(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_OVERWRITE
strscpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
strscpy(command_line, COMMAND_LINE, sizeof(command_line));
#ifdef CONFIG_CMDLINE_EXTEND
strlcat(command_line, " ", sizeof(command_line));
strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
#endif
#endif
/* Save unparsed command line copy for /proc/cmdline */
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
plat_early_device_setup();
sh_mv_setup();
/* Let earlyprintk output early console messages */
sh_early_platform_driver_probe("earlyprintk", 1, 1);
#ifdef CONFIG_OF_EARLY_FLATTREE
#ifdef CONFIG_USE_BUILTIN_DTB
unflatten_and_copy_device_tree();
#else
unflatten_device_tree();
#endif
#endif
paging_init();
/* Perform the machine specific initialisation */
if (likely(sh_mv.mv_setup))
sh_mv.mv_setup(cmdline_p);
plat_smp_setup();
}
/* processor boot mode configuration */
int generic_mode_pins(void)
{
pr_warn("generic_mode_pins(): missing mode pin configuration\n");
return 0;
}
int test_mode_pin(int pin)
{
return sh_mv.mv_mode_pins() & pin;
}
void __init arch_cpu_finalize_init(void)
{
char *p = &init_utsname()->machine[2]; /* "sh" */
select_idle_routine();
current_cpu_data.loops_per_jiffy = loops_per_jiffy;
switch (current_cpu_data.family) {
case CPU_FAMILY_SH2:
*p++ = '2';
break;
case CPU_FAMILY_SH2A:
*p++ = '2';
*p++ = 'a';
break;
case CPU_FAMILY_SH3:
*p++ = '3';
break;
case CPU_FAMILY_SH4:
*p++ = '4';
break;
case CPU_FAMILY_SH4A:
*p++ = '4';
*p++ = 'a';
break;
case CPU_FAMILY_SH4AL_DSP:
*p++ = '4';
*p++ = 'a';
*p++ = 'l';
*p++ = '-';
*p++ = 'd';
*p++ = 's';
*p++ = 'p';
break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
* default:, so we're able to have the compiler whine
* about unhandled enumerations.
*/
break;
}
pr_info("CPU: %s\n", get_cpu_subtype(¤t_cpu_data));
#ifndef __LITTLE_ENDIAN__
/* 'eb' means 'Endian Big' */
*p++ = 'e';
*p++ = 'b';
#endif
*p = '\0';
}
| linux-master | arch/sh/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/machvec.c
*
* The SuperH machine vector setup handlers, yanked from setup.c
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2007 Paul Mundt
*/
#include <linux/init.h>
#include <linux/string.h>
#include <asm/machvec.h>
#include <asm/sections.h>
#include <asm/addrspace.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#define MV_NAME_SIZE 32
#define for_each_mv(mv) \
for ((mv) = (struct sh_machine_vector *)__machvec_start; \
(mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \
(mv)++)
static struct sh_machine_vector * __init get_mv_byname(const char *name)
{
struct sh_machine_vector *mv;
for_each_mv(mv)
if (strcasecmp(name, mv->mv_name) == 0)
return mv;
return NULL;
}
static unsigned int __initdata machvec_selected;
static int __init early_parse_mv(char *from)
{
char mv_name[MV_NAME_SIZE] = "";
char *mv_end;
char *mv_comma;
int mv_len;
struct sh_machine_vector *mvp;
mv_end = strchr(from, ' ');
if (mv_end == NULL)
mv_end = from + strlen(from);
mv_comma = strchr(from, ',');
mv_len = mv_end - from;
if (mv_len > (MV_NAME_SIZE-1))
mv_len = MV_NAME_SIZE-1;
memcpy(mv_name, from, mv_len);
mv_name[mv_len] = '\0';
from = mv_end;
machvec_selected = 1;
/* Boot with the generic vector */
if (strcmp(mv_name, "generic") == 0)
return 0;
mvp = get_mv_byname(mv_name);
if (unlikely(!mvp)) {
pr_info("Available vectors:\n\n\t'%s', ", sh_mv.mv_name);
for_each_mv(mvp)
pr_cont("'%s', ", mvp->mv_name);
pr_cont("\n\n");
panic("Failed to select machvec '%s' -- halting.\n",
mv_name);
} else
sh_mv = *mvp;
return 0;
}
early_param("sh_mv", early_parse_mv);
void __init sh_mv_setup(void)
{
/*
* Only overload the machvec if one hasn't been selected on
* the command line with sh_mv=
*/
if (!machvec_selected) {
unsigned long machvec_size;
machvec_size = ((unsigned long)__machvec_end -
(unsigned long)__machvec_start);
/*
* Sanity check for machvec section alignment. Ensure
* __initmv hasn't been misused.
*/
if (machvec_size % sizeof(struct sh_machine_vector))
panic("machvec misaligned, invalid __initmv use?");
/*
* If the machvec hasn't been preselected, use the first
* vector (usually the only one) from .machvec.init.
*/
if (machvec_size >= sizeof(struct sh_machine_vector))
sh_mv = *(struct sh_machine_vector *)__machvec_start;
}
pr_notice("Booting machvec: %s\n", get_system_type());
/*
* Manually walk the vec and fill in anything the board hasn't set
* by hand, falling back to the generic implementation.
*/
#define mv_set(elem) do { \
if (!sh_mv.mv_##elem) \
sh_mv.mv_##elem = generic_##elem; \
} while (0)
mv_set(irq_demux);
mv_set(mode_pins);
mv_set(mem_init);
}
| linux-master | arch/sh/kernel/machvec.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/syscalls.h>
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
asmlinkage int sys_sh_pipe(void)
{
int fd[2];
int error;
error = do_pipe_flags(fd, 0);
if (!error) {
current_pt_regs()->regs[1] = fd[1];
return fd[0];
}
return error;
}
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
size_t count, long dummy, loff_t pos)
{
return ksys_pread64(fd, buf, count, pos);
}
asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
size_t count, long dummy, loff_t pos)
{
return ksys_pwrite64(fd, buf, count, pos);
}
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
return ksys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
(u64)len1 << 32 | len0, advice);
#else
return ksys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
(u64)len0 << 32 | len1, advice);
#endif
}
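/*
 * Illustrative sketch (not part of the original file, guarded out so
 * the code is unchanged): how a little-endian caller splits a 64-bit
 * offset across two 32-bit registers, and how the wrapper above
 * recombines them.
 */
#if 0
static void demo_split_u64(u64 offset)
{
	u32 lo = (u32)offset;		/* low register, offset0 */
	u32 hi = (u32)(offset >> 32);	/* high register, offset1 */

	/* The wrapper's reassembly must round-trip: */
	BUG_ON(((u64)hi << 32 | lo) != offset);
}
#endif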
| linux-master | arch/sh/kernel/sys_sh32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2002 M. R. Brown <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/rtc.h>
#include <asm/clock.h>
#include <asm/rtc.h>
#include <asm/platform_early.h>
static void __init sh_late_time_init(void)
{
/*
* Make sure all compiled-in early timers register themselves.
*
* Run probe() for two "earlytimer" devices, these will be the
* clockevents and clocksource devices respectively. In the event
* that only a clockevents device is available, we -ENODEV on the
* clocksource and the jiffies clocksource is used transparently
* instead. No error handling is necessary here.
*/
sh_early_platform_driver_register_all("earlytimer");
sh_early_platform_driver_probe("earlytimer", 2, 0);
}
void __init time_init(void)
{
timer_probe();
clk_init();
late_time_init = sh_late_time_init;
}
| linux-master | arch/sh/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0
/*
* machine_kexec.c - handle transition of Linux booting another kernel
* Copyright (C) 2002-2003 Eric Biederman <[email protected]>
*
* GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
* LANDISK/sh4 supported by kogiidena
*/
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/sh_bios.h>
#include <asm/reboot.h>
typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
unsigned long reboot_code_buffer,
unsigned long start_address);
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void *vbr_base;
void native_machine_crash_shutdown(struct pt_regs *regs)
{
/* Nothing to do for UP, but definitely broken for SMP.. */
}
/*
* Do whatever setup is needed on the image and the
* reboot code buffer to allow us to avoid allocations
* later.
*/
int machine_kexec_prepare(struct kimage *image)
{
return 0;
}
void machine_kexec_cleanup(struct kimage *image)
{
}
static void kexec_info(struct kimage *image)
{
int i;
printk("kexec information\n");
for (i = 0; i < image->nr_segments; i++) {
printk(" segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
i,
(unsigned int)image->segment[i].mem,
(unsigned int)image->segment[i].mem +
image->segment[i].memsz,
(unsigned int)image->segment[i].memsz);
}
printk(" start : 0x%08x\n\n", (unsigned int)image->start);
}
/*
* Do not allocate memory (or fail in any way) in machine_kexec().
* We are past the point of no return, committed to rebooting now.
*/
void machine_kexec(struct kimage *image)
{
unsigned long page_list;
unsigned long reboot_code_buffer;
relocate_new_kernel_t rnk;
unsigned long entry;
unsigned long *ptr;
int save_ftrace_enabled;
/*
* Nicked from the mips version of machine_kexec():
* The generic kexec code builds a page list with physical
* addresses. Use phys_to_virt() to convert them to virtual.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
save_processor_state();
#endif
save_ftrace_enabled = __ftrace_enabled_save();
/* Interrupts aren't acceptable while we reboot */
local_irq_disable();
page_list = image->head;
/* we need both effective and real address here */
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
/* copy our kernel relocation code to the control code page */
memcpy((void *)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
kexec_info(image);
flush_cache_all();
sh_bios_vbr_reload();
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer,
(unsigned long)phys_to_virt(image->start));
#ifdef CONFIG_KEXEC_JUMP
asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");
if (image->preserve_context)
restore_processor_state();
/* Convert page list back to physical addresses, what a mess. */
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (*ptr & IND_INDIRECTION) ?
phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = virt_to_phys(*ptr);
}
#endif
__ftrace_enabled_restore(save_ftrace_enabled);
}
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X2TLB
VMCOREINFO_CONFIG(X2TLB);
#endif
}
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base;
int ret;
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
}
if (crashk_res.end == crashk_res.start)
goto disable;
crash_size = PAGE_ALIGN(resource_size(&crashk_res));
if (!crashk_res.start) {
unsigned long max = memblock_end_of_DRAM() - memory_limit;
crashk_res.start = memblock_phys_alloc_range(crash_size,
PAGE_SIZE, 0, max);
if (!crashk_res.start) {
pr_err("crashkernel allocation failed\n");
goto disable;
}
} else {
ret = memblock_reserve(crashk_res.start, crash_size);
if (unlikely(ret < 0)) {
pr_err("crashkernel reservation failed - "
"memory is in use\n");
goto disable;
}
}
crashk_res.end = crashk_res.start + crash_size - 1;
/*
* Crash kernel trumps memory limit
*/
if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
memory_limit = 0;
pr_info("Disabled memory limit for crashkernel\n");
}
pr_info("Reserving %ldMB of memory at 0x%08lx "
"for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crashk_res.start),
(unsigned long)(memblock_phys_mem_size() >> 20));
return;
disable:
crashk_res.start = crashk_res.end = 0;
}
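/*
 * Usage sketch (not part of the original file): typical command-line
 * forms accepted by parse_crashkernel() above; sizes and addresses
 * are examples only.
 *
 *	crashkernel=16M			(kernel picks the base address)
 *	crashkernel=16M@0x10000000	(reserve at a fixed base)
 */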
| linux-master | arch/sh/kernel/machine_kexec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support framework for SuperH hardware counters.
*
* Copyright (C) 2009 Paul Mundt
*
* Heavily based on the x86 and PowerPC implementations.
*
* x86:
* Copyright (C) 2008 Thomas Gleixner <[email protected]>
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <[email protected]>
*
* ppc:
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <linux/export.h>
#include <asm/processor.h>
struct cpu_hw_events {
struct perf_event *events[MAX_HWEVENTS];
unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct sh_pmu *sh_pmu __read_mostly;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* Stub these out for now, do something more profound later.
*/
int reserve_pmc_hardware(void)
{
return 0;
}
void release_pmc_hardware(void)
{
}
static inline int sh_pmu_initialized(void)
{
return !!sh_pmu;
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
static int hw_perf_cache_event(int config, int *evp)
{
unsigned long type, op, result;
int ev;
if (!sh_pmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*sh_pmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*evp = ev;
return 0;
}
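/*
 * Illustrative sketch (not part of the original file, guarded out):
 * composing the packed config that hw_perf_cache_event() unpacks
 * above -- type in bits 0-7, op in bits 8-15, result in bits 16-23.
 */
#if 0
static u64 demo_cache_config(void)
{
	/* L1 data cache read misses */
	return PERF_COUNT_HW_CACHE_L1D |
	       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}
#endif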
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
int config = -1;
int err;
if (!sh_pmu_initialized())
return -ENODEV;
/*
* See if we need to reserve the counter.
*
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware())
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
if (err)
return err;
event->destroy = hw_perf_event_destroy;
switch (attr->type) {
case PERF_TYPE_RAW:
config = attr->config & sh_pmu->raw_event_mask;
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(attr->config, &config);
if (err)
return err;
break;
case PERF_TYPE_HARDWARE:
if (attr->config >= sh_pmu->max_events)
return -EINVAL;
config = sh_pmu->event_map(attr->config);
break;
}
if (config == -1)
return -EINVAL;
hwc->config |= config;
return 0;
}
static void sh_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
u64 prev_raw_count, new_raw_count;
s64 delta;
int shift = 0;
/*
* Depending on the counter configuration, they may or may not
* be chained, in which case the previous counter value can be
* updated underneath us if the lower-half overflows.
*
* Our tactic to handle this is to first atomically read and
* exchange a new raw count - then add that new-prev delta
* count to the generic counter atomically.
*
* As there is no interrupt associated with the overflow events,
* this is the simplest approach for maintaining consistency.
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = sh_pmu->read(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
/*
* Now we have the new raw value and have updated the prev
* timestamp already. We can now calculate the elapsed delta
* (counter-)time and add that to the generic counter.
*
* Careful, not all hw sign-extends above the physical width
* of the count.
*/
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
}
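/*
 * Illustrative sketch (not part of the original file, guarded out):
 * the lock-free read-and-exchange pattern used above, reduced to its
 * core. If another updater slips in between the read and the
 * cmpxchg, the compare fails and we retry with the fresh value.
 */
#if 0
static void demo_update(local64_t *prev, u64 (*read_hw)(void))
{
	u64 old, new;

	do {
		old = local64_read(prev);
		new = read_hw();
	} while (local64_cmpxchg(prev, old, new) != old);
}
#endif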
static void sh_pmu_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (!(event->hw.state & PERF_HES_STOPPED)) {
sh_pmu->disable(hwc, idx);
cpuc->events[idx] = NULL;
event->hw.state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
sh_perf_event_update(event, &event->hw, idx);
event->hw.state |= PERF_HES_UPTODATE;
}
}
static void sh_pmu_start(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
if (WARN_ON_ONCE(idx == -1))
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
cpuc->events[idx] = event;
event->hw.state = 0;
sh_pmu->enable(hwc, idx);
}
static void sh_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
sh_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static int sh_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret = -EAGAIN;
perf_pmu_disable(event->pmu);
if (__test_and_set_bit(idx, cpuc->used_mask)) {
idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
if (idx == sh_pmu->num_events)
goto out;
__set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
sh_pmu->disable(hwc, idx);
event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (flags & PERF_EF_START)
sh_pmu_start(event, PERF_EF_RELOAD);
perf_event_update_userpage(event);
ret = 0;
out:
perf_pmu_enable(event->pmu);
return ret;
}
static void sh_pmu_read(struct perf_event *event)
{
sh_perf_event_update(event, &event->hw, event->hw.idx);
}
static int sh_pmu_event_init(struct perf_event *event)
{
int err;
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HW_CACHE:
case PERF_TYPE_HARDWARE:
err = __hw_perf_event_init(event);
break;
default:
return -ENOENT;
}
if (unlikely(err)) {
if (event->destroy)
event->destroy(event);
}
return err;
}
static void sh_pmu_enable(struct pmu *pmu)
{
if (!sh_pmu_initialized())
return;
sh_pmu->enable_all();
}
static void sh_pmu_disable(struct pmu *pmu)
{
if (!sh_pmu_initialized())
return;
sh_pmu->disable_all();
}
static struct pmu pmu = {
.pmu_enable = sh_pmu_enable,
.pmu_disable = sh_pmu_disable,
.event_init = sh_pmu_event_init,
.add = sh_pmu_add,
.del = sh_pmu_del,
.start = sh_pmu_start,
.stop = sh_pmu_stop,
.read = sh_pmu_read,
};
static int sh_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(struct cpu_hw_events));
return 0;
}
int register_sh_pmu(struct sh_pmu *_pmu)
{
if (sh_pmu)
return -EBUSY;
sh_pmu = _pmu;
pr_info("Performance Events: %s support registered\n", _pmu->name);
/*
* All of the on-chip counters are "limited", in that they have
* no interrupts, and are therefore unable to do sampling without
* further work and timer assistance.
*/
pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
WARN_ON(_pmu->num_events > MAX_HWEVENTS);
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
NULL);
return 0;
}
| linux-master | arch/sh/kernel/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Matt Fleming
*
* Based, in part, on kernel/time/clocksource.c.
*
* This file provides arbitration code for stack unwinders.
*
* Multiple stack unwinders can be available on a system, usually with
* the most accurate unwinder being the currently active one.
*/
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <linux/atomic.h>
/*
* This is the most basic stack unwinder an architecture can
* provide. For architectures without reliable frame pointers, e.g.
* RISC CPUs, it can be implemented by looking through the stack for
* addresses that lie within the kernel text section.
*
* Other CPUs, e.g. x86, can use their frame pointer register to
* construct more accurate stack traces.
*/
static struct list_head unwinder_list;
static struct unwinder stack_reader = {
.name = "stack-reader",
.dump = stack_reader_dump,
.rating = 50,
.list = {
.next = &unwinder_list,
.prev = &unwinder_list,
},
};
/*
* "curr_unwinder" points to the stack unwinder currently in use. This
* is the unwinder with the highest rating.
*
* "unwinder_list" is a linked-list of all available unwinders, sorted
* by rating.
*
* All modifications of "curr_unwinder" and "unwinder_list" must be
* performed whilst holding "unwinder_lock".
*/
static struct unwinder *curr_unwinder = &stack_reader;
static struct list_head unwinder_list = {
.next = &stack_reader.list,
.prev = &stack_reader.list,
};
static DEFINE_SPINLOCK(unwinder_lock);
/**
* select_unwinder - Select the best registered stack unwinder.
*
* Private function. Must hold unwinder_lock when called.
*
* Select the stack unwinder with the best rating. This is useful for
* setting up curr_unwinder.
*/
static struct unwinder *select_unwinder(void)
{
struct unwinder *best;
if (list_empty(&unwinder_list))
return NULL;
best = list_entry(unwinder_list.next, struct unwinder, list);
if (best == curr_unwinder)
return NULL;
return best;
}
/*
* Enqueue the stack unwinder sorted by rating.
*/
static int unwinder_enqueue(struct unwinder *ops)
{
struct list_head *tmp, *entry = &unwinder_list;
list_for_each(tmp, &unwinder_list) {
struct unwinder *o;
o = list_entry(tmp, struct unwinder, list);
if (o == ops)
return -EBUSY;
/* Keep track of the place where we need to insert */
if (o->rating >= ops->rating)
entry = tmp;
}
list_add(&ops->list, entry);
return 0;
}
/**
* unwinder_register - Used to install new stack unwinder
* @u: unwinder to be registered
*
* Install the new stack unwinder on the unwinder list, which is sorted
* by rating.
*
* Returns -EBUSY if registration fails, zero otherwise.
*/
int unwinder_register(struct unwinder *u)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&unwinder_lock, flags);
ret = unwinder_enqueue(u);
if (!ret)
curr_unwinder = select_unwinder();
spin_unlock_irqrestore(&unwinder_lock, flags);
return ret;
}
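/*
 * Usage sketch (not part of the original file, guarded out):
 * registering a hypothetical unwinder. "my_dump" is an assumed
 * callback; any rating above the stack reader's 50 makes this the
 * preferred unwinder once registered.
 */
#if 0
static struct unwinder my_unwinder = {
	.name	= "my-unwinder",
	.dump	= my_dump,	/* assumed, not defined here */
	.rating	= 100,
};

static int __init my_unwinder_init(void)
{
	return unwinder_register(&my_unwinder);
}
#endif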
int unwinder_faulted = 0;
/*
* Unwind the call stack and pass information to the stacktrace_ops
* functions. Also handle the case where we need to switch to a new
* stack dumper because the current one faulted unexpectedly.
*/
void unwind_stack(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, const struct stacktrace_ops *ops,
void *data)
{
unsigned long flags;
/*
* The problem with unwinders with high ratings is that they are
* inherently more complicated than the simple ones with lower
* ratings. We are therefore more likely to fault in the
* complicated ones, e.g. hitting BUG()s. If we fault in the
* code for the current stack unwinder we try to downgrade to
* one with a lower rating.
*
* Hopefully this will give us a semi-reliable stacktrace so we
* can diagnose why curr_unwinder->dump() faulted.
*/
if (unwinder_faulted) {
spin_lock_irqsave(&unwinder_lock, flags);
/* Make sure no one beat us to changing the unwinder */
if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
list_del(&curr_unwinder->list);
curr_unwinder = select_unwinder();
unwinder_faulted = 0;
}
spin_unlock_irqrestore(&unwinder_lock, flags);
}
curr_unwinder->dump(task, regs, sp, ops, data);
}
EXPORT_SYMBOL_GPL(unwind_stack);
| linux-master | arch/sh/kernel/unwinder.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance event callchain support - SuperH architecture code
*
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
static void callchain_address(void *data, unsigned long addr, int reliable)
{
struct perf_callchain_entry_ctx *entry = data;
if (reliable)
perf_callchain_store(entry, addr);
}
static const struct stacktrace_ops callchain_ops = {
.address = callchain_address,
};
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
perf_callchain_store(entry, regs->pc);
unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
}
| linux-master | arch/sh/kernel/perf_callchain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2009 Matt Fleming
* Copyright (C) 2002 - 2012 Paul Mundt
*/
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
void dump_mem(const char *str, const char *loglvl, unsigned long bottom,
unsigned long top)
{
unsigned long p;
int i;
printk("%s%s(0x%08lx to 0x%08lx)\n", loglvl, str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%s%04lx: ", loglvl, p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
pr_cont(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
pr_cont("\n");
return;
}
pr_cont("%08x ", val);
}
}
pr_cont("\n");
}
}
void printk_address(unsigned long address, int reliable)
{
pr_cont(" [<%px>] %s%pS\n", (void *) address,
reliable ? "" : "? ", (void *) address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
struct thread_info *tinfo, int *graph)
{
struct task_struct *task = tinfo->task;
struct ftrace_ret_stack *ret_stack;
unsigned long ret_addr;
if (addr != (unsigned long)return_to_handler)
return;
if (!task->ret_stack)
return;
ret_stack = ftrace_graph_get_ret_stack(task, *graph);
if (!ret_stack)
return;
ret_addr = ret_stack->ret;
ops->address(data, ret_addr, 1);
(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
struct thread_info *tinfo, int *graph)
{ }
#endif
void
stack_reader_dump(struct task_struct *task, struct pt_regs *regs,
unsigned long *sp, const struct stacktrace_ops *ops,
void *data)
{
struct thread_info *context;
int graph = 0;
context = (struct thread_info *)
((unsigned long)sp & (~(THREAD_SIZE - 1)));
while (!kstack_end(sp)) {
unsigned long addr = *sp++;
if (__kernel_text_address(addr)) {
ops->address(data, addr, 1);
print_ftrace_graph_addr(addr, data, ops,
context, &graph);
}
}
}
/*
* Print one address/symbol entries per line.
*/
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
printk("%s", (char *)data);
printk_address(addr, reliable);
}
static const struct stacktrace_ops print_trace_ops = {
.address = print_trace_address,
};
void show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs, const char *loglvl)
{
if (regs && user_mode(regs))
return;
printk("%s\nCall trace:\n", loglvl);
unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl);
pr_cont("\n");
if (!tsk)
tsk = current;
debug_show_held_locks(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (tsk == current)
sp = (unsigned long *)current_stack_pointer;
else
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
dump_mem("Stack: ", loglvl, stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
show_trace(tsk, sp, NULL, loglvl);
}
| linux-master | arch/sh/kernel/dumpstack.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/stacktrace.c
*
* Stack trace management functions
*
* Copyright (C) 2006 - 2008 Paul Mundt
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = data;
if (!reliable)
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
static const struct stacktrace_ops save_stack_ops = {
.address = save_stack_address,
};
void save_stack_trace(struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)current_stack_pointer;
unwind_stack(current, NULL, sp, &save_stack_ops, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = (struct stack_trace *)data;
if (!reliable)
return;
if (in_sched_functions(addr))
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
static const struct stacktrace_ops save_stack_ops_nosched = {
.address = save_stack_address_nosched,
};
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = (unsigned long *)tsk->thread.sp;
unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
| linux-master | arch/sh/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Matt Fleming <[email protected]>
*
* This is an implementation of a DWARF unwinder. Its main purpose is
* for generating stacktrace information. Based on the DWARF 3
* specification from http://www.dwarfstd.org.
*
* TODO:
* - DWARF64 doesn't work.
* - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
*/
/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>
/* Reserve enough memory for two stack frames */
#define DWARF_FRAME_MIN_REQ 2
/* ... with 4 registers per frame. */
#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
static struct kmem_cache *dwarf_frame_cachep;
static mempool_t *dwarf_frame_pool;
static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;
static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);
static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);
static struct dwarf_cie *cached_cie;
static unsigned int dwarf_unwinder_ready;
/**
* dwarf_frame_alloc_reg - allocate memory for a DWARF register
* @frame: the DWARF frame whose list of registers we insert on
* @reg_num: the register number
*
* Allocate space for, and initialise, a dwarf reg from
* dwarf_reg_pool and insert it onto the (unsorted) linked-list of
* dwarf registers for @frame.
*
* Return the initialised DWARF reg.
*/
static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
unsigned int reg_num)
{
struct dwarf_reg *reg;
reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
if (!reg) {
printk(KERN_WARNING "Unable to allocate a DWARF register\n");
/*
* Let's just bomb hard here, we have no way to
* gracefully recover.
*/
UNWINDER_BUG();
}
reg->number = reg_num;
reg->addr = 0;
reg->flags = 0;
list_add(&reg->link, &frame->reg_list);
return reg;
}
static void dwarf_frame_free_regs(struct dwarf_frame *frame)
{
struct dwarf_reg *reg, *n;
list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
list_del(&reg->link);
mempool_free(reg, dwarf_reg_pool);
}
}
/**
* dwarf_frame_reg - return a DWARF register
* @frame: the DWARF frame to search in for @reg_num
* @reg_num: the register number to search for
*
* Look up and return the dwarf reg @reg_num for this frame. Return
* NULL if @reg_num is an invalid register number.
*/
static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
unsigned int reg_num)
{
struct dwarf_reg *reg;
list_for_each_entry(reg, &frame->reg_list, link) {
if (reg->number == reg_num)
return reg;
}
return NULL;
}
/**
* dwarf_read_addr - read dwarf data
* @src: source address of data
* @dst: destination address to store the data to
*
* Read 'n' bytes from @src, where 'n' is the size of an address on
* the native machine. We have to be careful when reading
* from @src and writing to @dst, because they can be arbitrarily
* aligned. Return 'n' - the number of bytes read.
*/
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
u32 val = get_unaligned(src);
put_unaligned(val, dst);
return sizeof(unsigned long *);
}
/**
* dwarf_read_uleb128 - read unsigned LEB128 data
* @addr: the address where the ULEB128 data is stored
* @ret: address to store the result
*
* Decode an unsigned LEB128 encoded datum. The algorithm is taken
* from Appendix C of the DWARF 3 spec. For information on the
* encodings refer to section "7.6 - Variable Length Data". Return
* the number of bytes read.
*/
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
unsigned int result;
unsigned char byte;
int shift, count;
result = 0;
shift = 0;
count = 0;
while (1) {
byte = __raw_readb(addr);
addr++;
count++;
result |= (byte & 0x7f) << shift;
shift += 7;
if (!(byte & 0x80))
break;
}
*ret = result;
return count;
}
/**
* dwarf_read_leb128 - read signed LEB128 data
* @addr: the address of the LEB128 encoded data
* @ret: address to store the result
*
* Decode signed LEB128 data. The algorithm is taken from Appendix
* C of the DWARF 3 spec. Return the number of bytes read.
*/
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
unsigned char byte;
int result, shift;
int num_bits;
int count;
result = 0;
shift = 0;
count = 0;
while (1) {
byte = __raw_readb(addr);
addr++;
result |= (byte & 0x7f) << shift;
shift += 7;
count++;
if (!(byte & 0x80))
break;
}
/* The number of bits in a signed integer. */
num_bits = 8 * sizeof(result);
if ((shift < num_bits) && (byte & 0x40))
result |= (-1 << shift);
*ret = result;
return count;
}
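/*
 * Worked example (not part of the original file): LEB128 stores 7
 * payload bits per byte, least significant group first, with the top
 * bit as a continuation flag. So { 0x90, 0x01 } decodes as
 * (0x10 << 0) | (0x01 << 7) = 144, consuming two bytes. For the
 * signed variant, { 0x7f } sign-extends to -1.
 */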
/**
* dwarf_read_encoded_value - return the decoded value at @addr
* @addr: the address of the encoded value
* @val: where to write the decoded value
* @encoding: the encoding with which we can decode @addr
*
* GCC emits encoded address in the .eh_frame FDE entries. Decode
* the value at @addr using @encoding. The decoded value is written
* to @val and the number of bytes read is returned.
*/
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
char encoding)
{
unsigned long decoded_addr = 0;
int count = 0;
switch (encoding & 0x70) {
case DW_EH_PE_absptr:
break;
case DW_EH_PE_pcrel:
decoded_addr = (unsigned long)addr;
break;
default:
pr_debug("encoding=0x%x\n", (encoding & 0x70));
UNWINDER_BUG();
}
if ((encoding & 0x07) == 0x00)
encoding |= DW_EH_PE_udata4;
switch (encoding & 0x0f) {
case DW_EH_PE_sdata4:
case DW_EH_PE_udata4:
count += 4;
decoded_addr += get_unaligned((u32 *)addr);
__raw_writel(decoded_addr, val);
break;
default:
pr_debug("encoding=0x%x\n", encoding);
UNWINDER_BUG();
}
return count;
}
/**
* dwarf_entry_len - return the length of an FDE or CIE
* @addr: the address of the entry
* @len: the length of the entry
*
* Read the initial_length field of the entry and store the size of
* the entry in @len. We return the number of bytes read. Return a
* count of 0 on error.
*/
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
u32 initial_len;
int count;
initial_len = get_unaligned((u32 *)addr);
count = 4;
/*
* An initial length field value in the range DW_EXT_LO -
* DW_EXT_HI indicates an extension, and should not be
* interpreted as a length. The only extension that we currently
* understand is the use of DWARF64 addresses.
*/
if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
/*
* The 64-bit length field immediately follows the
* compulsory 32-bit length field.
*/
if (initial_len == DW_EXT_DWARF64) {
*len = get_unaligned((u64 *)(addr + 4));
count = 12;
} else {
printk(KERN_WARNING "Unknown DWARF extension\n");
count = 0;
}
} else
*len = initial_len;
return count;
}
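/*
 * Worked example (not part of the original file): a 32-bit initial
 * length below DW_EXT_LO is the entry length itself, with 4 bytes
 * consumed. The escape value DW_EXT_DWARF64 means the real length
 * follows in the next 8 bytes, so 12 bytes are consumed in total.
 */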
/**
* dwarf_lookup_cie - locate the cie
* @cie_ptr: pointer to help with lookup
*/
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
struct rb_node **rb_node = &cie_root.rb_node;
struct dwarf_cie *cie = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
/*
* We've cached the last CIE we looked up because chances are
* that the FDE wants this CIE.
*/
if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
cie = cached_cie;
goto out;
}
while (*rb_node) {
struct dwarf_cie *cie_tmp;
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
BUG_ON(!cie_tmp);
if (cie_ptr == cie_tmp->cie_pointer) {
cie = cie_tmp;
cached_cie = cie_tmp;
goto out;
} else {
if (cie_ptr < cie_tmp->cie_pointer)
rb_node = &(*rb_node)->rb_left;
else
rb_node = &(*rb_node)->rb_right;
}
}
out:
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return cie;
}
/**
* dwarf_lookup_fde - locate the FDE that covers pc
* @pc: the program counter
*/
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
struct rb_node **rb_node = &fde_root.rb_node;
struct dwarf_fde *fde = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_fde_lock, flags);
while (*rb_node) {
struct dwarf_fde *fde_tmp;
unsigned long tmp_start, tmp_end;
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
BUG_ON(!fde_tmp);
tmp_start = fde_tmp->initial_location;
tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
if (pc < tmp_start) {
rb_node = &(*rb_node)->rb_left;
} else {
if (pc < tmp_end) {
fde = fde_tmp;
goto out;
} else
rb_node = &(*rb_node)->rb_right;
}
}
out:
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return fde;
}
/**
* dwarf_cfa_execute_insns - execute instructions to calculate a CFA
* @insn_start: address of the first instruction
* @insn_end: address of the last instruction
* @cie: the CIE for this function
* @fde: the FDE for this function
* @frame: the instructions calculate the CFA for this frame
* @pc: the program counter of the address we're interested in
*
* Execute the Call Frame instruction sequence starting at
* @insn_start and ending at @insn_end. The instructions describe
* how to calculate the Canonical Frame Address of a stackframe.
* Store the results in @frame.
*/
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
unsigned char *insn_end,
struct dwarf_cie *cie,
struct dwarf_fde *fde,
struct dwarf_frame *frame,
unsigned long pc)
{
unsigned char insn;
unsigned char *current_insn;
unsigned int count, delta, reg, expr_len, offset;
struct dwarf_reg *regp;
current_insn = insn_start;
while (current_insn < insn_end && frame->pc <= pc) {
insn = __raw_readb(current_insn++);
/*
* Firstly, handle the opcodes that embed their operands
* in the instructions.
*/
switch (DW_CFA_opcode(insn)) {
case DW_CFA_advance_loc:
delta = DW_CFA_operand(insn);
delta *= cie->code_alignment_factor;
frame->pc += delta;
continue;
/* NOTREACHED */
case DW_CFA_offset:
reg = DW_CFA_operand(insn);
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->addr = offset;
regp->flags |= DWARF_REG_OFFSET;
continue;
/* NOTREACHED */
case DW_CFA_restore:
reg = DW_CFA_operand(insn);
continue;
/* NOTREACHED */
}
/*
* Secondly, handle the opcodes that don't embed their
* operands in the instruction.
*/
switch (insn) {
case DW_CFA_nop:
continue;
case DW_CFA_advance_loc1:
delta = *current_insn++;
frame->pc += delta * cie->code_alignment_factor;
break;
case DW_CFA_advance_loc2:
delta = get_unaligned((u16 *)current_insn);
current_insn += 2;
frame->pc += delta * cie->code_alignment_factor;
break;
case DW_CFA_advance_loc4:
delta = get_unaligned((u32 *)current_insn);
current_insn += 4;
frame->pc += delta * cie->code_alignment_factor;
break;
case DW_CFA_offset_extended:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
break;
case DW_CFA_restore_extended:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
break;
case DW_CFA_undefined:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_UNDEFINED;
break;
case DW_CFA_def_cfa:
count = dwarf_read_uleb128(current_insn,
&frame->cfa_register);
current_insn += count;
count = dwarf_read_uleb128(current_insn,
&frame->cfa_offset);
current_insn += count;
frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
break;
case DW_CFA_def_cfa_register:
count = dwarf_read_uleb128(current_insn,
&frame->cfa_register);
current_insn += count;
frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
break;
case DW_CFA_def_cfa_offset:
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
frame->cfa_offset = offset;
break;
case DW_CFA_def_cfa_expression:
count = dwarf_read_uleb128(current_insn, &expr_len);
current_insn += count;
frame->cfa_expr = current_insn;
frame->cfa_expr_len = expr_len;
current_insn += expr_len;
frame->flags |= DWARF_FRAME_CFA_REG_EXP;
break;
case DW_CFA_offset_extended_sf:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
count = dwarf_read_leb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_REG_OFFSET;
regp->addr = offset;
break;
case DW_CFA_val_offset:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
count = dwarf_read_leb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_VAL_OFFSET;
regp->addr = offset;
break;
case DW_CFA_GNU_args_size:
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
break;
case DW_CFA_GNU_negative_offset_extended:
count = dwarf_read_uleb128(current_insn, &reg);
current_insn += count;
count = dwarf_read_uleb128(current_insn, &offset);
current_insn += count;
offset *= cie->data_alignment_factor;
regp = dwarf_frame_alloc_reg(frame, reg);
regp->flags |= DWARF_REG_OFFSET;
regp->addr = -offset;
break;
default:
pr_debug("unhandled DWARF instruction 0x%x\n", insn);
UNWINDER_BUG();
break;
}
}
return 0;
}
/**
* dwarf_free_frame - free the memory allocated for @frame
* @frame: the frame to free
*/
void dwarf_free_frame(struct dwarf_frame *frame)
{
dwarf_frame_free_regs(frame);
mempool_free(frame, dwarf_frame_pool);
}
extern void ret_from_irq(void);
/**
* dwarf_unwind_stack - unwind the stack
*
* @pc: address of the function to unwind
* @prev: struct dwarf_frame of the previous stackframe on the callstack
*
* Return a struct dwarf_frame representing the most recent frame
* on the callstack. Each of the lower (older) stack frames are
* linked via the "prev" member.
*/
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
{
struct dwarf_frame *frame;
struct dwarf_cie *cie;
struct dwarf_fde *fde;
struct dwarf_reg *reg;
unsigned long addr;
/*
* If we've been called in to before initialization has
* completed, bail out immediately.
*/
if (!dwarf_unwinder_ready)
return NULL;
/*
* If we're starting at the top of the stack we need to get the
* contents of a physical register to get the CFA in order to
* begin the virtual unwinding of the stack.
*
* NOTE: the return address is guaranteed to be setup by the
* time this function makes its first function call.
*/
if (!pc || !prev)
pc = _THIS_IP_;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* If our stack has been patched by the function graph tracer
* then we might see the address of return_to_handler() where we
* expected to find the real return address.
*/
if (pc == (unsigned long)&return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(current, 0);
if (ret_stack)
pc = ret_stack->ret;
/*
* We currently have no way of tracking how many
* return_to_handler()'s we've seen. If there is more
* than one patched return address on our stack,
* complain loudly.
*/
WARN_ON(ftrace_graph_get_ret_stack(current, 1));
}
#endif
frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
if (!frame) {
printk(KERN_ERR "Unable to allocate a dwarf frame\n");
UNWINDER_BUG();
}
INIT_LIST_HEAD(&frame->reg_list);
frame->flags = 0;
frame->prev = prev;
frame->return_addr = 0;
fde = dwarf_lookup_fde(pc);
if (!fde) {
/*
* This is our normal exit path. There are two reasons
* why we might exit here,
*
* a) pc has no associated DWARF frame info and so
* we don't know how to unwind this frame. This is
* usually the case when we're trying to unwind a
* frame that was called from some assembly code
* that has no DWARF info, e.g. syscalls.
*
* b) the DEBUG info for pc is bogus. There's
* really no way to distinguish this case from the
* case above, which sucks because we could print a
* warning here.
*/
goto bail;
}
cie = dwarf_lookup_cie(fde->cie_pointer);
frame->pc = fde->initial_location;
/* CIE initial instructions */
dwarf_cfa_execute_insns(cie->initial_instructions,
cie->instructions_end, cie, fde,
frame, pc);
/* FDE instructions */
dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
fde, frame, pc);
/* Calculate the CFA */
switch (frame->flags) {
case DWARF_FRAME_CFA_REG_OFFSET:
if (prev) {
reg = dwarf_frame_reg(prev, frame->cfa_register);
UNWINDER_BUG_ON(!reg);
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
addr = prev->cfa + reg->addr;
frame->cfa = __raw_readl(addr);
} else {
/*
* Again, we're starting from the top of the
* stack. We need to physically read
* the contents of a register in order to get
* the Canonical Frame Address for this
* function.
*/
frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
}
frame->cfa += frame->cfa_offset;
break;
default:
UNWINDER_BUG();
}
reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
/*
* If we haven't seen the return address register or the return
* address column is undefined then we must assume that this is
* the end of the callstack.
*/
if (!reg || reg->flags == DWARF_UNDEFINED)
goto bail;
UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
/*
* Ah, the joys of unwinding through interrupts.
*
* Interrupts are tricky - the DWARF info needs to be _really_
* accurate and unfortunately I'm seeing a lot of bogus DWARF
* info. For example, I've seen interrupts occur in epilogues
* just after the frame pointer (r14) had been restored. The
* problem was that the DWARF info claimed that the CFA could be
* reached by using the value of the frame pointer before it was
* restored.
*
* So until the compiler can be trusted to produce reliable
* DWARF info when it really matters, let's stop unwinding once
* we've calculated the function that was interrupted.
*/
if (prev && prev->pc == (unsigned long)ret_from_irq)
frame->return_addr = 0;
return frame;
bail:
dwarf_free_frame(frame);
return NULL;
}
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
unsigned char *end, struct module *mod)
{
struct rb_node **rb_node = &cie_root.rb_node;
struct rb_node *parent = *rb_node;
struct dwarf_cie *cie;
unsigned long flags;
int count;
cie = kzalloc(sizeof(*cie), GFP_KERNEL);
if (!cie)
return -ENOMEM;
cie->length = len;
/*
* Record the offset into the .eh_frame section
* for this CIE. It allows this CIE to be
* quickly and easily looked up from the
* corresponding FDE.
*/
cie->cie_pointer = (unsigned long)entry;
cie->version = *(char *)p++;
UNWINDER_BUG_ON(cie->version != 1);
cie->augmentation = p;
p += strlen(cie->augmentation) + 1;
count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
p += count;
count = dwarf_read_leb128(p, &cie->data_alignment_factor);
p += count;
/*
* Which column in the rule table contains the
* return address?
*/
if (cie->version == 1) {
cie->return_address_reg = __raw_readb(p);
p++;
} else {
count = dwarf_read_uleb128(p, &cie->return_address_reg);
p += count;
}
if (cie->augmentation[0] == 'z') {
unsigned int length, count;
cie->flags |= DWARF_CIE_Z_AUGMENTATION;
count = dwarf_read_uleb128(p, &length);
p += count;
UNWINDER_BUG_ON((unsigned char *)p > end);
cie->initial_instructions = p + length;
cie->augmentation++;
}
while (*cie->augmentation) {
/*
* "L" indicates a byte showing how the
* LSDA pointer is encoded. Skip it.
*/
if (*cie->augmentation == 'L') {
p++;
cie->augmentation++;
} else if (*cie->augmentation == 'R') {
/*
* "R" indicates a byte showing
* how FDE addresses are
* encoded.
*/
cie->encoding = *(char *)p++;
cie->augmentation++;
} else if (*cie->augmentation == 'P') {
/*
* "R" indicates a personality
* routine in the CIE
* augmentation.
*/
UNWINDER_BUG();
} else if (*cie->augmentation == 'S') {
UNWINDER_BUG();
} else {
/*
* Unknown augmentation. Assume
* 'z' augmentation.
*/
p = cie->initial_instructions;
UNWINDER_BUG_ON(!p);
break;
}
}
cie->initial_instructions = p;
cie->instructions_end = end;
/* Add to list */
spin_lock_irqsave(&dwarf_cie_lock, flags);
while (*rb_node) {
struct dwarf_cie *cie_tmp;
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
parent = *rb_node;
if (cie->cie_pointer < cie_tmp->cie_pointer)
rb_node = &parent->rb_left;
else if (cie->cie_pointer >= cie_tmp->cie_pointer)
rb_node = &parent->rb_right;
else
WARN_ON(1);
}
rb_link_node(&cie->node, parent, rb_node);
rb_insert_color(&cie->node, &cie_root);
#ifdef CONFIG_MODULES
if (mod != NULL)
list_add_tail(&cie->link, &mod->arch.cie_list);
#endif
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return 0;
}
static int dwarf_parse_fde(void *entry, u32 entry_type,
void *start, unsigned long len,
unsigned char *end, struct module *mod)
{
struct rb_node **rb_node = &fde_root.rb_node;
struct rb_node *parent = *rb_node;
struct dwarf_fde *fde;
struct dwarf_cie *cie;
unsigned long flags;
int count;
void *p = start;
fde = kzalloc(sizeof(*fde), GFP_KERNEL);
if (!fde)
return -ENOMEM;
fde->length = len;
/*
* In a .eh_frame section the CIE pointer is the delta between
* the address of the CIE pointer field within the FDE and the CIE.
*/
fde->cie_pointer = (unsigned long)(p - entry_type - 4);
cie = dwarf_lookup_cie(fde->cie_pointer);
fde->cie = cie;
if (cie->encoding)
count = dwarf_read_encoded_value(p, &fde->initial_location,
cie->encoding);
else
count = dwarf_read_addr(p, &fde->initial_location);
p += count;
if (cie->encoding)
count = dwarf_read_encoded_value(p, &fde->address_range,
cie->encoding & 0x0f);
else
count = dwarf_read_addr(p, &fde->address_range);
p += count;
if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
unsigned int length;
count = dwarf_read_uleb128(p, &length);
p += count + length;
}
/* Call frame instructions. */
fde->instructions = p;
fde->end = end;
/* Add to list. */
spin_lock_irqsave(&dwarf_fde_lock, flags);
while (*rb_node) {
struct dwarf_fde *fde_tmp;
unsigned long tmp_start, tmp_end;
unsigned long start, end;
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
start = fde->initial_location;
end = fde->initial_location + fde->address_range;
tmp_start = fde_tmp->initial_location;
tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
parent = *rb_node;
if (start < tmp_start)
rb_node = &parent->rb_left;
else if (start >= tmp_end)
rb_node = &parent->rb_right;
else
WARN_ON(1);
}
rb_link_node(&fde->node, parent, rb_node);
rb_insert_color(&fde->node, &fde_root);
#ifdef CONFIG_MODULES
if (mod != NULL)
list_add_tail(&fde->link, &mod->arch.fde_list);
#endif
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return 0;
}
static void dwarf_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops,
void *data)
{
struct dwarf_frame *frame, *_frame;
unsigned long return_addr;
_frame = NULL;
return_addr = 0;
while (1) {
frame = dwarf_unwind_stack(return_addr, _frame);
if (_frame)
dwarf_free_frame(_frame);
_frame = frame;
if (!frame || !frame->return_addr)
break;
return_addr = frame->return_addr;
ops->address(data, return_addr, 1);
}
if (frame)
dwarf_free_frame(frame);
}
static struct unwinder dwarf_unwinder = {
.name = "dwarf-unwinder",
.dump = dwarf_unwinder_dump,
.rating = 150,
};
static void __init dwarf_unwinder_cleanup(void)
{
struct dwarf_fde *fde, *next_fde;
struct dwarf_cie *cie, *next_cie;
/*
* Deallocate all the memory allocated for the DWARF unwinder.
* Traverse all the FDE/CIE lists and remove and free all the
* memory associated with those data structures.
*/
rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
kfree(fde);
rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
kfree(cie);
mempool_destroy(dwarf_reg_pool);
mempool_destroy(dwarf_frame_pool);
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
}
/**
* dwarf_parse_section - parse DWARF section
* @eh_frame_start: start address of the .eh_frame section
* @eh_frame_end: end address of the .eh_frame section
* @mod: the kernel module containing the .eh_frame section
*
* Parse the information in a .eh_frame section.
*/
static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
struct module *mod)
{
u32 entry_type;
void *p, *entry;
int count, err = 0;
unsigned long len = 0;
unsigned int c_entries, f_entries;
unsigned char *end;
c_entries = 0;
f_entries = 0;
entry = eh_frame_start;
while ((char *)entry < eh_frame_end) {
p = entry;
count = dwarf_entry_len(p, &len);
if (count == 0) {
/*
* We read a bogus length field value. There is
* nothing we can do here apart from disabling
* the DWARF unwinder. We can't even skip this
* entry and move to the next one because 'len'
* tells us where our next entry is.
*/
err = -EINVAL;
goto out;
} else
p += count;
/* initial length does not include itself */
end = p + len;
entry_type = get_unaligned((u32 *)p);
p += 4;
if (entry_type == DW_EH_FRAME_CIE) {
err = dwarf_parse_cie(entry, p, len, end, mod);
if (err < 0)
goto out;
else
c_entries++;
} else {
err = dwarf_parse_fde(entry, entry_type, p, len,
end, mod);
if (err < 0)
goto out;
else
f_entries++;
}
entry = (char *)entry + len + 4;
}
printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
c_entries, f_entries);
return 0;
out:
return err;
}
#ifdef CONFIG_MODULES
int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *me)
{
unsigned int i, err;
unsigned long start, end;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
start = end = 0;
for (i = 1; i < hdr->e_shnum; i++) {
/* Alloc bit cleared means "ignore it." */
if ((sechdrs[i].sh_flags & SHF_ALLOC)
&& !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
start = sechdrs[i].sh_addr;
end = start + sechdrs[i].sh_size;
break;
}
}
/* Did we find the .eh_frame section? */
if (i != hdr->e_shnum) {
INIT_LIST_HEAD(&me->arch.cie_list);
INIT_LIST_HEAD(&me->arch.fde_list);
err = dwarf_parse_section((char *)start, (char *)end, me);
if (err) {
printk(KERN_WARNING "%s: failed to parse DWARF info\n",
me->name);
return err;
}
}
return 0;
}
/**
* module_dwarf_cleanup - remove FDE/CIEs associated with @mod
* @mod: the module that is being unloaded
*
* Remove any FDEs and CIEs from the global lists that came from
* @mod's .eh_frame section because @mod is being unloaded.
*/
void module_dwarf_cleanup(struct module *mod)
{
struct dwarf_fde *fde, *ftmp;
struct dwarf_cie *cie, *ctmp;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
list_del(&cie->link);
rb_erase(&cie->node, &cie_root);
kfree(cie);
}
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
list_del(&fde->link);
rb_erase(&fde->node, &fde_root);
kfree(fde);
}
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
#endif /* CONFIG_MODULES */
/**
* dwarf_unwinder_init - initialise the dwarf unwinder
*
* Build the data structures describing the .eh_frame section to
* make it easier to lookup CIE and FDE entries. Because the
* .eh_frame section is packed as tightly as possible it is not
* easy to lookup the FDE for a given PC, so we build a list of FDE
* and CIE entries that make it easier.
*/
static int __init dwarf_unwinder_init(void)
{
int err = -ENOMEM;
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
sizeof(struct dwarf_frame), 0,
SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
sizeof(struct dwarf_reg), 0,
SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
dwarf_frame_cachep);
if (!dwarf_frame_pool)
goto out;
dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
dwarf_reg_cachep);
if (!dwarf_reg_pool)
goto out;
err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
if (err)
goto out;
err = unwinder_register(&dwarf_unwinder);
if (err)
goto out;
dwarf_unwinder_ready = 1;
return 0;
out:
printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
dwarf_unwinder_cleanup();
return err;
}
early_initcall(dwarf_unwinder_init);
| linux-master | arch/sh/kernel/dwarf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/return_address.c
*
* Copyright (C) 2009 Matt Fleming
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dwarf.h>
#ifdef CONFIG_DWARF_UNWINDER
void *return_address(unsigned int depth)
{
struct dwarf_frame *frame;
unsigned long ra;
int i;
for (i = 0, frame = NULL, ra = 0; i <= depth; i++) {
struct dwarf_frame *tmp;
tmp = dwarf_unwind_stack(ra, frame);
if (!tmp)
return NULL;
if (frame)
dwarf_free_frame(frame);
frame = tmp;
if (!frame || !frame->return_addr)
break;
ra = frame->return_addr;
}
/* Failed to unwind the stack to the specified depth. */
WARN_ON(i != depth + 1);
if (frame)
dwarf_free_frame(frame);
return (void *)ra;
}
#else
void *return_address(unsigned int depth)
{
return NULL;
}
#endif
EXPORT_SYMBOL_GPL(return_address);
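/*
 * Usage sketch (not part of the original file, guarded out): depth 0
 * resolves to the caller of the function invoking return_address();
 * each increment walks one frame further up the call stack.
 */
#if 0
pr_debug("called from %pS\n", return_address(0));
#endif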
| linux-master | arch/sh/kernel/return_address.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/process.c
*
* This file handles the architecture-dependent parts of process handling.
*
* Copyright (C) 1995 Linus Torvalds
*
* SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
* Copyright (C) 2002 - 2008 Paul Mundt
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/elfcore.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/prefetch.h>
#include <linux/stackprotector.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
void show_regs(struct pt_regs * regs)
{
pr_info("\n");
show_regs_print_info(KERN_DEFAULT);
pr_info("PC is at %pS\n", (void *)instruction_pointer(regs));
pr_info("PR is at %pS\n", (void *)regs->pr);
pr_info("PC : %08lx SP : %08lx SR : %08lx ", regs->pc,
regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
pr_cont("TEA : %08x\n", __raw_readl(MMU_TEA));
#else
pr_cont("\n");
#endif
pr_info("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3]);
pr_info("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
pr_info("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
pr_info("R12 : %08lx R13 : %08lx R14 : %08lx\n",
regs->regs[12], regs->regs[13], regs->regs[14]);
pr_info("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
regs->mach, regs->macl, regs->gbr, regs->pr);
show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT);
show_code(regs);
}
void start_thread(struct pt_regs *regs, unsigned long new_pc,
unsigned long new_sp)
{
regs->pr = 0;
regs->sr = SR_FD;
regs->pc = new_pc;
regs->regs[15] = new_sp;
free_thread_xstate(current);
}
EXPORT_SYMBOL(start_thread);
void flush_thread(void)
{
struct task_struct *tsk = current;
flush_ptrace_hw_breakpoint(tsk);
#if defined(CONFIG_SH_FPU)
/* Forget lazy FPU state */
clear_fpu(tsk, task_pt_regs(tsk));
clear_used_math();
#endif
}
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs;
#if defined(CONFIG_SH_DSP)
struct task_struct *tsk = current;
if (is_dsp_enabled(tsk)) {
/* We can either call __save_dsp() or just copy the struct:
* __save_dsp(p);
* p->thread.dsp_status.status |= SR_DSP;
*/
p->thread.dsp_status = tsk->thread.dsp_status;
}
#endif
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
childregs = task_pt_regs(p);
p->thread.sp = (unsigned long) childregs;
if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.pc = (unsigned long) ret_from_kernel_thread;
childregs->regs[4] = (unsigned long) args->fn_arg;
childregs->regs[5] = (unsigned long) args->fn;
childregs->sr = SR_MD;
#if defined(CONFIG_SH_FPU)
childregs->sr |= SR_FD;
#endif
ti->status &= ~TS_USEDFPU;
p->thread.fpu_counter = 0;
return 0;
}
*childregs = *current_pt_regs();
if (usp)
childregs->regs[15] = usp;
if (clone_flags & CLONE_SETTLS)
childregs->gbr = tls;
childregs->regs[0] = 0; /* Set return value for child */
p->thread.pc = (unsigned long) ret_from_fork;
return 0;
}
/*
* switch_to(x, y) should switch tasks from x to y.
*/
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
struct thread_struct *next_t = &next->thread;
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
__stack_chk_guard = next->stack_canary;
#endif
unlazy_fpu(prev, task_pt_regs(prev));
/* we're going to use this soon, after a few expensive things */
if (next->thread.fpu_counter > 5)
prefetch(next_t->xstate);
#ifdef CONFIG_MMU
/*
* Restore the kernel mode register
* k7 (r7_bank1)
*/
asm volatile("ldc %0, r7_bank"
: /* no output */
: "r" (task_thread_info(next)));
#endif
/*
* If the task has used the FPU in the last 5 timeslices, do a full
* restore of the math state immediately to avoid the FPU-disabled
* trap; the chances of needing the FPU soon are high.
*/
if (next->thread.fpu_counter > 5)
__fpu_state_restore();
return prev;
}
unsigned long __get_wchan(struct task_struct *p)
{
unsigned long pc;
/*
* As on Alpha: the saved PC may point into the scheduler; if so,
* dig the caller's return address out of schedule()'s frame below.
*/
pc = thread_saved_pc(p);
#ifdef CONFIG_FRAME_POINTER
if (in_sched_functions(pc)) {
unsigned long schedule_frame = (unsigned long)p->thread.sp;
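/*
* Frame-layout assumption: the return address pushed by schedule()'s
* caller sits 21 words into the saved frame. This offset depends on
* the frame-pointer ABI and schedule()'s prologue, not on any
* architectural rule.
*/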
return ((unsigned long *)schedule_frame)[21];
}
#endif
return pc;
}
| linux-master | arch/sh/kernel/process_32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH KGDB support
*
* Copyright (C) 2008 - 2012 Paul Mundt
*
* Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/* Macros for single step instruction identification */
#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
(((op) & 0x7f ) << 1))
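/*
* Worked example (illustrative): op = 0x8b02 (bf, disp = 2) gives
* OPCODE_BTF_DISP(op) = 4, i.e. a target of pc + 4 + 4, while
* op = 0x8bfe (disp = 0xfe) sign-extends to -2 and doubles to -4.
*/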
#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
(((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
(((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
#define OPCODE_RTS(op) ((op) == 0xb)
#define OPCODE_RTE(op) ((op) == 0x2b)
#define SR_T_BIT_MASK 0x1
#define STEP_OPCODE 0xc33d
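/*
* STEP_OPCODE is "trapa #0x3d" (trapa encodes as 0xc3ii); planting it
* at the next instruction raises the debug trap serviced by
* BUILD_TRAP_HANDLER(singlestep) below.
*/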
/* Calculate the new address for after a step */
static short *get_step_address(struct pt_regs *linux_regs)
{
insn_size_t op = __raw_readw(linux_regs->pc);
long addr;
/* BT */
if (OPCODE_BT(op)) {
if (linux_regs->sr & SR_T_BIT_MASK)
addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
else
addr = linux_regs->pc + 2;
}
/* BTS */
else if (OPCODE_BTS(op)) {
if (linux_regs->sr & SR_T_BIT_MASK)
addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
else
addr = linux_regs->pc + 4; /* Not in delay slot */
}
/* BF */
else if (OPCODE_BF(op)) {
if (!(linux_regs->sr & SR_T_BIT_MASK))
addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
else
addr = linux_regs->pc + 2;
}
/* BFS */
else if (OPCODE_BFS(op)) {
if (!(linux_regs->sr & SR_T_BIT_MASK))
addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
else
addr = linux_regs->pc + 4; /* Not in delay slot */
}
/* BRA */
else if (OPCODE_BRA(op))
addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);
/* BRAF */
else if (OPCODE_BRAF(op))
addr = linux_regs->pc + 4
+ linux_regs->regs[OPCODE_BRAF_REG(op)];
/* BSR */
else if (OPCODE_BSR(op))
addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);
/* BSRF */
else if (OPCODE_BSRF(op))
addr = linux_regs->pc + 4
+ linux_regs->regs[OPCODE_BSRF_REG(op)];
/* JMP */
else if (OPCODE_JMP(op))
addr = linux_regs->regs[OPCODE_JMP_REG(op)];
/* JSR */
else if (OPCODE_JSR(op))
addr = linux_regs->regs[OPCODE_JSR_REG(op)];
/* RTS */
else if (OPCODE_RTS(op))
addr = linux_regs->pr;
/* RTE */
else if (OPCODE_RTE(op))
addr = linux_regs->regs[15];
/* Other */
else
addr = linux_regs->pc + instruction_size(op);
flush_icache_range(addr, addr + instruction_size(op));
return (short *)addr;
}
/*
* Replace the instruction immediately after the current instruction
* (i.e. next in the expected flow of control) with a trap instruction,
* so that returning will cause only a single instruction to be executed.
* Note that this model is slightly broken for instructions with delay
* slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
* instruction in the delay slot will be executed.
*/
static unsigned long stepped_address;
static insn_size_t stepped_opcode;
static void do_single_step(struct pt_regs *linux_regs)
{
/* Determine where the target instruction will send us to */
unsigned short *addr = get_step_address(linux_regs);
stepped_address = (int)addr;
/* Replace it */
stepped_opcode = __raw_readw((long)addr);
*addr = STEP_OPCODE;
/* Flush and return */
flush_icache_range((long)addr, (long)addr +
instruction_size(stepped_opcode));
}
/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
/* If we have stepped, put back the old instruction */
/* Use stepped_address in case we stopped elsewhere */
if (stepped_opcode != 0) {
__raw_writew(stepped_opcode, stepped_address);
flush_icache_range(stepped_address, stepped_address + 2);
}
stepped_opcode = 0;
}
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
{ "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
{ "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
{ "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
{ "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
{ "vbr", GDB_SIZEOF_REG, -1 },
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return -EINVAL;
if (dbg_reg_def[regno].offset != -1)
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1)
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
switch (regno) {
case GDB_VBR: {
/* VBR is not held in pt_regs; read it directly from the CPU
* into the output buffer. */
unsigned long vbr;
__asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
memcpy(mem, &vbr, sizeof(vbr));
break;
}
}
return dbg_reg_def[regno].name;
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct pt_regs *thread_regs = task_pt_regs(p);
int reg;
/* Initialize to zero */
for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
gdb_regs[reg] = 0;
/*
* Copy out GP regs 8 to 14.
*
* switch_to() relies on SR.RB toggling, so regs 0->7 are banked
* and need privileged instructions to get to. The r15 value we
* fetch from p->thread.sp directly.
*/
for (reg = GDB_R8; reg < GDB_R15; reg++)
gdb_regs[reg] = thread_regs->regs[reg];
gdb_regs[GDB_R15] = p->thread.sp;
gdb_regs[GDB_PC] = p->thread.pc;
/*
* Additional registers we have context for
*/
gdb_regs[GDB_PR] = thread_regs->pr;
gdb_regs[GDB_GBR] = thread_regs->gbr;
}
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
/* Undo any stepping we may have done */
undo_single_step(linux_regs);
switch (remcomInBuffer[0]) {
case 'c':
case 's':
/* try to read optional parameter, pc unchanged if no parm */
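/* GDB remote "c"/"s" packets may carry an optional hex resume
* address, e.g. "s8c001000" (address purely illustrative) */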
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
if (remcomInBuffer[0] == 's') {
do_single_step(linux_regs);
kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
return 0;
}
/* Anything else means we do not want to exit from the handler: */
return -1;
}
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
if (exception == 60)
return instruction_pointer(regs) - 2;
return instruction_pointer(regs);
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->pc = ip;
}
/*
* The primary entry points for the kgdb debug trap table entries.
*/
BUILD_TRAP_HANDLER(singlestep)
{
unsigned long flags;
TRAP_HANDLER_DECL;
local_irq_save(flags);
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
kgdb_handle_exception(0, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
int ret;
switch (cmd) {
case DIE_BREAKPOINT:
/*
* This means a user thread is single stepping
* a system call which should be ignored
*/
if (test_thread_flag(TIF_SINGLESTEP))
return NOTIFY_DONE;
ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
args->err, args->regs);
if (ret)
return NOTIFY_DONE;
break;
}
return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = -INT_MAX,
};
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
const struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
.gdb_bpt_instr = { 0x3c, 0xc3 },
#else
.gdb_bpt_instr = { 0xc3, 0x3c },
#endif
};
| linux-master | arch/sh/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The idle loop for all SuperH platforms.
*
* Copyright (C) 2002 - 2009 Paul Mundt
*/
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/bl_bit.h>
static void (*sh_idle)(void);
void default_idle(void)
{
set_bl_bit();
raw_local_irq_enable();
/* Racy: an interrupt may slip in between enabling IRQs and cpu_sleep(). */
cpu_sleep();
raw_local_irq_disable();
clear_bl_bit();
}
void __noreturn arch_cpu_idle_dead(void)
{
play_dead();
}
void arch_cpu_idle(void)
{
sh_idle();
}
void __init select_idle_routine(void)
{
/*
* If a platform has set its own idle routine, leave it alone.
*/
if (!sh_idle)
sh_idle = default_idle;
}
void stop_this_cpu(void *unused)
{
local_irq_disable();
set_cpu_online(smp_processor_id(), false);
for (;;)
cpu_sleep();
}
| linux-master | arch/sh/kernel/idle.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel probes (kprobes) for SuperH
*
* Copyright (C) 2007 Chris Smith <[email protected]>
* Copyright (C) 2006 Lineo Solutions, Inc.
*/
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);
#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003)
#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00)
#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900)
#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr);
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
p->opcode = opcode;
return 0;
}
void __kprobes arch_copy_kprobe(struct kprobe *p)
{
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = *p->addr;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_icache_range((unsigned long)p->addr,
(unsigned long)p->addr + sizeof(kprobe_opcode_t));
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (*p->addr == BREAKPOINT_INSTRUCTION)
return 1;
return 0;
}
/**
* kprobe_handle_illslot - handle a kprobe hit in a branch delay slot
* @pc: address of the branch instruction
*
* If an illegal slot instruction exception occurs for an address
* containing a kprobe, remove the probe.
*
* Returns 0 if the exception was handled successfully, 1 otherwise.
*/
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);
if (p != NULL) {
printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
(unsigned int)pc + 2);
unregister_kprobe(p);
return 0;
}
return 1;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
struct kprobe *saved = this_cpu_ptr(&saved_next_opcode);
if (saved->addr) {
arch_disarm_kprobe(p);
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
saved = this_cpu_ptr(&saved_next_opcode2);
if (saved->addr) {
arch_disarm_kprobe(saved);
saved->addr = NULL;
saved->opcode = 0;
}
}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
}
/*
* Singlestep is implemented by disabling the current kprobe and setting one
* on the next instruction, following branches. Two probes are set if the
* branch is conditional.
*/
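/*
* For example, stepping over "bt label" must cover both outcomes:
* one probe on the following instruction (branch not taken) and one
* on the branch target (taken), hence the saved_next_opcode and
* saved_next_opcode2 slots above.
*/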
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
__this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);
if (p != NULL) {
struct kprobe *op1, *op2;
arch_disarm_kprobe(p);
op1 = this_cpu_ptr(&saved_next_opcode);
op2 = this_cpu_ptr(&saved_next_opcode2);
if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
unsigned long disp = (p->opcode & 0x0FFF);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
op1->addr =
(kprobe_opcode_t *) (regs->pc + 4 +
regs->regs[reg_nr]);
} else if (OPCODE_RTS(p->opcode)) {
op1->addr = (kprobe_opcode_t *) regs->pr;
} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 1;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
unsigned long disp = (p->opcode & 0x00FF);
/* case 1 */
op1->addr = p->addr + 2;
/* case 2 */
op2->addr =
(kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
op2->opcode = *(op2->addr);
arch_arm_kprobe(op2);
} else {
op1->addr = p->addr + 1;
}
op1->opcode = *(op1->addr);
arch_arm_kprobe(op1);
}
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->pr = (unsigned long)__kretprobe_trampoline;
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (regs->pc);
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* We here save the original kprobes variables and
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
/* Not one of ours: let kernel handle it */
if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
}
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
/*
* For function-return probes, arch_init_kprobes() establishes a probepoint
* here. When a retprobed function returns, this probe is hit and
* trampoline_probe_handler() runs, calling the kretprobe's handler.
*/
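/*
* To recap: arch_prepare_kretprobe() rewrote regs->pr, so the probed
* function "returns" into the nop below instead of to its caller; the
* kprobe planted on that nop then recovers the real return address.
*/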
static void __used kretprobe_trampoline_holder(void)
{
asm volatile (".globl __kretprobe_trampoline\n"
"__kretprobe_trampoline:\n\t"
"nop\n");
}
/*
* Called when we hit the probe point at __kretprobe_trampoline
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
regs->pc = __kretprobe_trampoline_handler(regs, NULL);
return 1;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
kprobe_opcode_t *addr = NULL;
struct kprobe *p = NULL;
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
p = this_cpu_ptr(&saved_next_opcode);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
addr = __this_cpu_read(saved_current_opcode.addr);
__this_cpu_write(saved_current_opcode.addr, NULL);
p = get_kprobe(addr);
arch_arm_kprobe(p);
p = this_cpu_ptr(&saved_next_opcode2);
if (p->addr) {
arch_disarm_kprobe(p);
p->addr = NULL;
p->opcode = 0;
}
}
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe, point the pc back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long)cur->addr;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = search_exception_tables(regs->pc);
if (entry != NULL) {
regs->pc = entry->fixup;
return 1;
}
/*
* fixup_exception() could not handle it;
* let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct kprobe *p = NULL;
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
kprobe_opcode_t *addr = NULL;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
addr = (kprobe_opcode_t *) (args->regs->pc);
if (val == DIE_TRAP &&
args->trapnr == (BREAKPOINT_INSTRUCTION & 0xff)) {
if (!kprobe_running()) {
if (kprobe_handler(args->regs)) {
ret = NOTIFY_STOP;
} else {
/* Not a kprobe trap */
ret = NOTIFY_DONE;
}
} else {
p = get_kprobe(addr);
if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
(kcb->kprobe_status == KPROBE_REENTER)) {
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
} else {
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
}
}
}
return ret;
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)&__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
| linux-master | arch/sh/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);
static int __init arch_kdebugfs_init(void)
{
arch_debugfs_dir = debugfs_create_dir("sh", NULL);
return 0;
}
arch_initcall(arch_kdebugfs_init);
| linux-master | arch/sh/kernel/kdebugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/smp.c
*
* SMP support for the SuperH processors.
*
* Copyright (C) 2002 - 2010 Paul Mundt
* Copyright (C) 2006 - 2007 Akio Idehara
*/
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
struct plat_smp_ops *mp_ops = NULL;
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
void register_smp_ops(struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
mp_ops = ops;
}
static inline void smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
c->loops_per_jiffy = loops_per_jiffy;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu = smp_processor_id();
init_new_context(current, &init_mm);
current_thread_info()->cpu = cpu;
mp_ops->prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
}
void __init smp_prepare_boot_cpu(void)
{
unsigned int cpu = smp_processor_id();
__cpu_number_map[0] = cpu;
__cpu_logical_map[0] = cpu;
set_cpu_online(cpu, true);
set_cpu_possible(cpu, true);
per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
unsigned int i;
for (i = 0; i < 10; i++) {
smp_rmb();
if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
if (system_state == SYSTEM_RUNNING)
pr_info("CPU %u is now offline\n", cpu);
return;
}
msleep(100);
}
pr_err("CPU %u didn't die...\n", cpu);
}
int native_cpu_disable(unsigned int cpu)
{
return cpu == 0 ? -EPERM : 0;
}
void play_dead_common(void)
{
idle_task_exit();
irq_ctx_exit(raw_smp_processor_id());
mb();
__this_cpu_write(cpu_state, CPU_DEAD);
local_irq_disable();
}
void native_play_dead(void)
{
play_dead_common();
}
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
int ret;
ret = mp_ops->cpu_disable(cpu);
if (ret)
return ret;
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
/*
* OK - migrate IRQs away from this CPU
*/
migrate_irqs();
/*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*/
flush_cache_all();
#ifdef CONFIG_MMU
local_flush_tlb_all();
#endif
clear_tasks_mm_cpumask(cpu);
return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
return -ENOSYS;
}
void native_cpu_die(unsigned int cpu)
{
/* We said "no" in __cpu_disable */
BUG();
}
void native_play_dead(void)
{
BUG();
}
#endif
asmlinkage void start_secondary(void)
{
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = &init_mm;
enable_mmu();
mmgrab(mm);
mmget(mm);
current->active_mm = mm;
#ifdef CONFIG_MMU
enter_lazy_tlb(mm, current);
local_flush_tlb_all();
#endif
per_cpu_trap_init();
notify_cpu_starting(cpu);
local_irq_enable();
calibrate_delay();
smp_store_cpu_info(cpu);
set_cpu_online(cpu, true);
per_cpu(cpu_state, cpu) = CPU_ONLINE;
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
extern struct {
unsigned long sp;
unsigned long bss_start;
unsigned long bss_end;
void *start_kernel_fn;
void *cpu_init_fn;
void *thread_info;
} stack_start;
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
unsigned long timeout;
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Fill in data in head.S for secondary cpus */
stack_start.sp = tsk->thread.sp;
stack_start.thread_info = tsk->stack;
stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
stack_start.start_kernel_fn = start_secondary;
flush_icache_range((unsigned long)&stack_start,
(unsigned long)&stack_start + sizeof(stack_start));
wmb();
mp_ops->start_cpu(cpu, (unsigned long)_stext);
timeout = jiffies + HZ;
while (time_before(jiffies, timeout)) {
if (cpu_online(cpu))
break;
udelay(10);
barrier();
}
if (cpu_online(cpu))
return 0;
return -ENOENT;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = 0;
int cpu;
for_each_online_cpu(cpu)
bogosum += cpu_data[cpu].loops_per_jiffy;
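/*
* BogoMIPS = loops_per_jiffy / (500000/HZ); the last two printk
* arguments below split the summed value into whole BogoMIPS and
* two decimal places.
*/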
printk(KERN_INFO "SMP: Total of %d processors activated "
"(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
bogosum / (500000/HZ),
(bogosum / (5000/HZ)) % 100);
}
void arch_smp_send_reschedule(int cpu)
{
mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, 0, 0);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
int cpu;
for_each_cpu(cpu, mask)
mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
void arch_send_call_function_single_ipi(int cpu)
{
mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
int cpu;
for_each_cpu(cpu, mask)
mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
static void ipi_timer(void)
{
irq_enter();
tick_receive_broadcast();
irq_exit();
}
#endif
void smp_message_recv(unsigned int msg)
{
switch (msg) {
case SMP_MSG_FUNCTION:
generic_smp_call_function_interrupt();
break;
case SMP_MSG_RESCHEDULE:
scheduler_ipi();
break;
case SMP_MSG_FUNCTION_SINGLE:
generic_smp_call_function_single_interrupt();
break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case SMP_MSG_TIMER:
ipi_timer();
break;
#endif
default:
printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
smp_processor_id(), __func__, msg);
break;
}
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
#ifdef CONFIG_MMU
static void flush_tlb_all_ipi(void *info)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
* The following tlb flush calls are invoked when old translations are
* being torn down, or pte attributes are changing. For single threaded
* address spaces, a new context is obtained on the current cpu, and tlb
* context on other cpus are invalidated to force a new context allocation
* at switch_mm time, should the mm ever be used on other cpus. For
* multithreaded address spaces, intercpu interrupts have to be sent.
* Another case where intercpu interrupts are required is when the target
* mm might be active on another cpu (eg debuggers doing the flushes on
* behalf of debugees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
} else {
int i;
for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, mm) = 0;
}
local_flush_tlb_mm(mm);
preempt_enable();
}
struct flush_tlb_data {
struct vm_area_struct *vma;
unsigned long addr1;
unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
preempt_disable();
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd;
fd.vma = vma;
fd.addr1 = start;
fd.addr2 = end;
smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
} else {
int i;
for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct flush_tlb_data fd;
fd.addr1 = start;
fd.addr2 = end;
on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
preempt_disable();
if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
(current->mm != vma->vm_mm)) {
struct flush_tlb_data fd;
fd.vma = vma;
fd.addr1 = page;
smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
} else {
int i;
for_each_online_cpu(i)
if (smp_processor_id() != i)
cpu_context(i, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_one(fd->addr1, fd->addr2);
}
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
struct flush_tlb_data fd;
fd.addr1 = asid;
fd.addr2 = vaddr;
smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
local_flush_tlb_one(asid, vaddr);
}
#endif
| linux-master | arch/sh/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/iomap.c
*
* Copyright (C) 2000 Niibe Yutaka
* Copyright (C) 2005 - 2007 Paul Mundt
*/
#include <linux/module.h>
#include <linux/io.h>
unsigned int ioread8(const void __iomem *addr)
{
return readb(addr);
}
EXPORT_SYMBOL(ioread8);
unsigned int ioread16(const void __iomem *addr)
{
return readw(addr);
}
EXPORT_SYMBOL(ioread16);
unsigned int ioread16be(const void __iomem *addr)
{
return be16_to_cpu(__raw_readw(addr));
}
EXPORT_SYMBOL(ioread16be);
unsigned int ioread32(const void __iomem *addr)
{
return readl(addr);
}
EXPORT_SYMBOL(ioread32);
unsigned int ioread32be(const void __iomem *addr)
{
return be32_to_cpu(__raw_readl(addr));
}
EXPORT_SYMBOL(ioread32be);
void iowrite8(u8 val, void __iomem *addr)
{
writeb(val, addr);
}
EXPORT_SYMBOL(iowrite8);
void iowrite16(u16 val, void __iomem *addr)
{
writew(val, addr);
}
EXPORT_SYMBOL(iowrite16);
void iowrite16be(u16 val, void __iomem *addr)
{
__raw_writew(cpu_to_be16(val), addr);
}
EXPORT_SYMBOL(iowrite16be);
void iowrite32(u32 val, void __iomem *addr)
{
writel(val, addr);
}
EXPORT_SYMBOL(iowrite32);
void iowrite32be(u32 val, void __iomem *addr)
{
__raw_writel(cpu_to_be32(val), addr);
}
EXPORT_SYMBOL(iowrite32be);
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
* convert to CPU byte order. We write in "IO byte
* order" (we also don't have IO barriers).
*/
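/*
* Illustrative use: ioread16_rep(fifo, buf, 64) issues 64 back-to-back
* __raw_readw() accesses to the same FIFO register, storing the
* results into buf in bus byte order.
*/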
static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
*dst = data;
dst++;
}
}
static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
*dst = data;
dst++;
}
}
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
while (--count >= 0) {
__raw_writeb(*src, addr);
src++;
}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
while (--count >= 0) {
__raw_writew(*src, addr);
src++;
}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
while (--count >= 0) {
__raw_writel(*src, addr);
src++;
}
}
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
mmio_insb(addr, dst, count);
}
EXPORT_SYMBOL(ioread8_rep);
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
mmio_insw(addr, dst, count);
}
EXPORT_SYMBOL(ioread16_rep);
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
mmio_insl(addr, dst, count);
}
EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsb(addr, src, count);
}
EXPORT_SYMBOL(iowrite8_rep);
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsw(addr, src, count);
}
EXPORT_SYMBOL(iowrite16_rep);
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
mmio_outsl(addr, src, count);
}
EXPORT_SYMBOL(iowrite32_rep);
| linux-master | arch/sh/kernel/iomap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/hw_breakpoint.c
*
* Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
*
* Copyright (C) 2009 - 2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for each cpus
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
/*
* A dummy placeholder for early accesses until the CPUs get a chance to
* register their UBCs later in the boot process.
*/
static struct sh_ubc ubc_dummy = { .num_events = 0 };
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
/*
* Install a perf counter breakpoint.
*
* We seek a free UBC channel and use it for this breakpoint.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int i;
for (i = 0; i < sh_ubc->num_events; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
if (!*slot) {
*slot = bp;
break;
}
}
if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
return -EBUSY;
clk_enable(sh_ubc->clk);
sh_ubc->enable(info, i);
return 0;
}
/*
* Uninstall the breakpoint contained in the given counter.
*
* First we search the debug address register it uses and then we disable
* it.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int i;
for (i = 0; i < sh_ubc->num_events; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
if (*slot == bp) {
*slot = NULL;
break;
}
}
if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
return;
sh_ubc->disable(info, i);
clk_disable(sh_ubc->clk);
}
static int get_hbp_len(u16 hbp_len)
{
unsigned int len_in_bytes = 0;
switch (hbp_len) {
case SH_BREAKPOINT_LEN_1:
len_in_bytes = 1;
break;
case SH_BREAKPOINT_LEN_2:
len_in_bytes = 2;
break;
case SH_BREAKPOINT_LEN_4:
len_in_bytes = 4;
break;
case SH_BREAKPOINT_LEN_8:
len_in_bytes = 8;
break;
}
return len_in_bytes;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
va = hw->address;
len = get_hbp_len(hw->len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
int arch_bp_generic_fields(int sh_len, int sh_type,
int *gen_len, int *gen_type)
{
/* Len */
switch (sh_len) {
case SH_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
case SH_BREAKPOINT_LEN_2:
*gen_len = HW_BREAKPOINT_LEN_2;
break;
case SH_BREAKPOINT_LEN_4:
*gen_len = HW_BREAKPOINT_LEN_4;
break;
case SH_BREAKPOINT_LEN_8:
*gen_len = HW_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
case SH_BREAKPOINT_RW:
*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
break;
default:
return -EINVAL;
}
return 0;
}
static int arch_build_bp_info(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
hw->address = attr->bp_addr;
/* Len */
switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
hw->len = SH_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
hw->len = SH_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
hw->len = SH_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
hw->len = SH_BREAKPOINT_LEN_8;
break;
default:
return -EINVAL;
}
/* Type */
switch (attr->bp_type) {
case HW_BREAKPOINT_R:
hw->type = SH_BREAKPOINT_READ;
break;
case HW_BREAKPOINT_W:
hw->type = SH_BREAKPOINT_WRITE;
break;
case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
hw->type = SH_BREAKPOINT_RW;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Validate the arch-specific HW Breakpoint register settings
*/
int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
unsigned int align;
int ret;
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
ret = -EINVAL;
switch (hw->len) {
case SH_BREAKPOINT_LEN_1:
align = 0;
break;
case SH_BREAKPOINT_LEN_2:
align = 1;
break;
case SH_BREAKPOINT_LEN_4:
align = 3;
break;
case SH_BREAKPOINT_LEN_8:
align = 7;
break;
default:
return ret;
}
/*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
*/
if (hw->address & align)
return -EINVAL;
return 0;
}
/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < sh_ubc->num_events; i++) {
unregister_hw_breakpoint(t->ptrace_bps[i]);
t->ptrace_bps[i] = NULL;
}
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
int cpu, i, rc = NOTIFY_STOP;
struct perf_event *bp;
unsigned int cmf, resume_mask;
/*
* Do an early return if none of the channels triggered.
*/
cmf = sh_ubc->triggered_mask();
if (unlikely(!cmf))
return NOTIFY_DONE;
/*
* By default, resume all of the active channels.
*/
resume_mask = sh_ubc->active_mask();
/*
* Disable breakpoints during exception handling.
*/
sh_ubc->disable_all();
cpu = get_cpu();
for (i = 0; i < sh_ubc->num_events; i++) {
unsigned long event_mask = (1 << i);
if (likely(!(cmf & event_mask)))
continue;
/*
* The counter may be released concurrently, but only via a
* call_rcu() path, so it is safe to fetch the breakpoint,
* use its callback and touch its counter as long as we are
* inside an rcu_read_lock() section.
*/
rcu_read_lock();
bp = per_cpu(bp_per_reg[i], cpu);
if (bp)
rc = NOTIFY_DONE;
/*
* Reset the condition match flag to denote completion of
* exception handling.
*/
sh_ubc->clear_triggered_mask(event_mask);
/*
* bp can be NULL due to concurrent perf counter
* removing.
*/
if (!bp) {
rcu_read_unlock();
break;
}
/*
* Don't restore the channel if the breakpoint is from
* ptrace, as it always operates in one-shot mode.
*/
if (bp->overflow_handler == ptrace_triggered)
resume_mask &= ~(1 << i);
perf_bp_event(bp, args->regs);
/* Deliver the signal to userspace */
if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
force_sig_fault(SIGTRAP, TRAP_HWBKPT,
(void __user *)NULL);
}
rcu_read_unlock();
}
if (cmf == 0)
rc = NOTIFY_DONE;
sh_ubc->enable_all(resume_mask);
put_cpu();
return rc;
}
BUILD_TRAP_HANDLER(breakpoint)
{
unsigned long ex = lookup_exception_vector();
TRAP_HANDLER_DECL;
notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
/*
* Handle debug exception notifications.
*/
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
struct die_args *args = data;
if (val != DIE_BREAKPOINT)
return NOTIFY_DONE;
/*
* If the breakpoint hasn't been triggered by the UBC, it's
* probably from a debugger, so don't do anything more here.
*
* This also permits the UBC interface clock to remain off for
* non-UBC breakpoints, as we don't need to check the triggered
* or active channel masks.
*/
if (args->trapnr != sh_ubc->trap_nr)
return NOTIFY_DONE;
return hw_breakpoint_handler(data);
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
int register_sh_ubc(struct sh_ubc *ubc)
{
/* Bail if it's already assigned */
if (sh_ubc != &ubc_dummy)
return -EBUSY;
sh_ubc = ubc;
pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);
WARN_ON(ubc->num_events > HBP_NUM);
return 0;
}
| linux-master | arch/sh/kernel/hw_breakpoint.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
int init_fpu(struct task_struct *tsk)
{
if (tsk_used_math(tsk)) {
if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
unlazy_fpu(tsk, task_pt_regs(tsk));
return 0;
}
/*
* Allocate the xstate area on first use of the FPU (or other extended state).
*/
if (!tsk->thread.xstate) {
tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
GFP_KERNEL);
if (!tsk->thread.xstate)
return -ENOMEM;
}
if (boot_cpu_data.flags & CPU_HAS_FPU) {
struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
memset(fp, 0, xstate_size);
fp->fpscr = FPSCR_INIT;
} else {
struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
memset(fp, 0, xstate_size);
fp->fpscr = FPSCR_INIT;
}
set_stopped_child_used_math(tsk);
return 0;
}
#ifdef CONFIG_SH_FPU
void __fpu_state_restore(void)
{
struct task_struct *tsk = current;
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
tsk->thread.fpu_counter++;
}
void fpu_state_restore(struct pt_regs *regs)
{
struct task_struct *tsk = current;
if (unlikely(!user_mode(regs))) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
BUG();
return;
}
if (!tsk_used_math(tsk)) {
int ret;
/*
* does a slab alloc which can sleep
*/
local_irq_enable();
ret = init_fpu(tsk);
local_irq_disable();
if (ret) {
/*
* ran out of memory!
*/
force_sig(SIGKILL);
return;
}
}
grab_fpu(regs);
__fpu_state_restore();
}
BUILD_TRAP_HANDLER(fpu_state_restore)
{
TRAP_HANDLER_DECL;
fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */
| linux-master | arch/sh/kernel/cpu/fpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/init.c
*
* CPU init code
*
* Copyright (C) 2002 - 2009 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/elf.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/sh_bios.h>
#include <asm/setup.h>
#ifdef CONFIG_SH_FPU
#define cpu_has_fpu 1
#else
#define cpu_has_fpu 0
#endif
#ifdef CONFIG_SH_DSP
#define cpu_has_dsp 1
#else
#define cpu_has_dsp 0
#endif
/*
* Generic wrapper for command line arguments to disable on-chip
* peripherals (nofpu, nodsp, and so forth).
*/
#define onchip_setup(x) \
static int x##_disabled = !cpu_has_##x; \
\
static int x##_setup(char *opts) \
{ \
x##_disabled = 1; \
return 1; \
} \
__setup("no" __stringify(x), x##_setup);
onchip_setup(fpu);
onchip_setup(dsp);
#ifdef CONFIG_SPECULATIVE_EXECUTION
#define CPUOPM 0xff2f0000
#define CPUOPM_RABD (1 << 5)
static void speculative_execution_init(void)
{
/* Clear RABD */
__raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
/* Flush the update */
(void)__raw_readl(CPUOPM);
ctrl_barrier();
}
#else
#define speculative_execution_init() do { } while (0)
#endif
#ifdef CONFIG_CPU_SH4A
#define EXPMASK 0xff2f0004
#define EXPMASK_RTEDS (1 << 0)
#define EXPMASK_BRDSSLP (1 << 1)
#define EXPMASK_MMCAW (1 << 4)
static void expmask_init(void)
{
unsigned long expmask = __raw_readl(EXPMASK);
/*
* Future proofing.
*
* Disable support for slottable sleep instruction, non-nop
* instructions in the rte delay slot, and associative writes to
* the memory-mapped cache array.
*/
expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW);
__raw_writel(expmask, EXPMASK);
ctrl_barrier();
}
#else
#define expmask_init() do { } while (0)
#endif
/* 2nd-level cache init */
void __attribute__ ((weak)) l2_cache_init(void)
{
}
/*
* Generic first-level cache init
*/
#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
jump_to_uncached();
ccr = __raw_readl(SH_CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
* bootloader may have enabled it. There are at least 2 things that
* could be dirty in the cache at this point:
* 1. kernel command line set up by boot loader
* 2. spilled registers from the prolog of this function
* => before re-initialising the cache, we must do a purge of the whole
* cache out to memory for safety. As long as nothing is spilled
* during the loop to lines that have already been done, this is safe.
* - RPC
*/
if (ccr & CCR_CACHE_ENABLE) {
unsigned long ways, waysize, addrstart;
waysize = current_cpu_data.dcache.sets;
#ifdef CCR_CACHE_ORA
/*
* If the OC is already in RAM mode, we only have
* half of the entries to flush.
*/
if (ccr & CCR_CACHE_ORA)
waysize >>= 1;
#endif
waysize <<= current_cpu_data.dcache.entry_shift;
#ifdef CCR_CACHE_EMODE
/* If EMODE is not set, we only have 1 way to flush. */
if (!(ccr & CCR_CACHE_EMODE))
ways = 1;
else
#endif
ways = current_cpu_data.dcache.ways;
addrstart = CACHE_OC_ADDRESS_ARRAY;
do {
unsigned long addr;
for (addr = addrstart;
addr < addrstart + waysize;
addr += current_cpu_data.dcache.linesz)
__raw_writel(0, addr);
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
}
/*
* Default CCR values: enable the caches and invalidate them
* immediately.
*/
flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE;
#ifdef CCR_CACHE_EMODE
/* Force EMODE if possible */
if (current_cpu_data.dcache.ways > 1)
flags |= CCR_CACHE_EMODE;
else
flags &= ~CCR_CACHE_EMODE;
#endif
#if defined(CONFIG_CACHE_WRITETHROUGH)
/* Write-through */
flags |= CCR_CACHE_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
/* Write-back */
flags |= CCR_CACHE_CB;
#else
/* Off */
flags &= ~CCR_CACHE_ENABLE;
#endif
l2_cache_init();
__raw_writel(flags, SH_CCR);
back_to_cached();
}
#else
#define cache_init() do { } while (0)
#endif
#define CSHAPE(totalsize, linesize, assoc) \
((totalsize & ~0xff) | (linesize << 4) | assoc)
#define CACHE_DESC_SHAPE(desc) \
CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways)
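/*
* Worked example (illustrative): a 32KB, 4-way cache with 32-byte
* lines encodes as CSHAPE(32768, ilog2(32), 4)
* = (0x8000 & ~0xff) | (5 << 4) | 4 = 0x8054.
*/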
static void detect_cache_shape(void)
{
l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache);
if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED)
l1i_cache_shape = l1d_cache_shape;
else
l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache);
if (current_cpu_data.flags & CPU_HAS_L2_CACHE)
l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache);
else
l2_cache_shape = -1; /* No S-cache */
}
static void fpu_init(void)
{
/* Disable the FPU */
if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
printk("FPU Disabled\n");
current_cpu_data.flags &= ~CPU_HAS_FPU;
}
disable_fpu();
clear_used_math();
}
#ifdef CONFIG_SH_DSP
static void release_dsp(void)
{
unsigned long sr;
/* Clear SR.DSP bit */
__asm__ __volatile__ (
"stc\tsr, %0\n\t"
"and\t%1, %0\n\t"
"ldc\t%0, sr\n\t"
: "=&r" (sr)
: "r" (~SR_DSP)
);
}
static void dsp_init(void)
{
unsigned long sr;
/*
* Set the SR.DSP bit, wait for one instruction, and then read
* back the SR value.
*/
__asm__ __volatile__ (
"stc\tsr, %0\n\t"
"or\t%1, %0\n\t"
"ldc\t%0, sr\n\t"
"nop\n\t"
"stc\tsr, %0\n\t"
: "=&r" (sr)
: "r" (SR_DSP)
);
/* If the DSP bit is still set, this CPU has a DSP */
if (sr & SR_DSP)
current_cpu_data.flags |= CPU_HAS_DSP;
/* Disable the DSP */
if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) {
printk("DSP Disabled\n");
current_cpu_data.flags &= ~CPU_HAS_DSP;
}
/* Now that we've determined the DSP status, clear the DSP bit. */
release_dsp();
}
#else
static inline void dsp_init(void) { }
#endif /* CONFIG_SH_DSP */
/**
* cpu_init
*
* This is our initial entry point for each CPU, and is invoked on the
* boot CPU prior to calling start_kernel(). For SMP, a combination of
* this and start_secondary() will bring up each processor to a ready
* state prior to handing off to the idle loop.
*
* We do all of the basic processor init here, including setting up
* the caches, FPU, DSP, etc. By the time start_kernel() is hit (and
* subsequently platform_setup()) things like determining the CPU
* subtype and initial configuration will all be done.
*
* Each processor family is still responsible for doing its own probing
* and cache configuration in cpu_probe().
*/
asmlinkage void cpu_init(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
/* First, probe the CPU */
cpu_probe();
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
/* First setup the rest of the I-cache info */
current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
current_cpu_data.icache.linesz;
current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
current_cpu_data.icache.linesz;
/* And the D-cache too */
current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
current_cpu_data.dcache.linesz;
current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
current_cpu_data.dcache.linesz;
/* Init the cache */
cache_init();
if (raw_smp_processor_id() == 0) {
#ifdef CONFIG_MMU
shm_align_mask = max_t(unsigned long,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
#else
shm_align_mask = PAGE_SIZE - 1;
#endif
/* Boot CPU sets the cache shape */
detect_cache_shape();
}
fpu_init();
dsp_init();
/*
* Initialize the per-CPU ASID cache very early, since the
* TLB flushing routines depend on this being setup.
*/
current_cpu_data.asid_cache = NO_CONTEXT;
current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32;
speculative_execution_init();
expmask_init();
/* Do the rest of the boot processor setup */
if (raw_smp_processor_id() == 0) {
/* Save off the BIOS VBR, if there is one */
sh_bios_vbr_init();
/*
* Setup VBR for boot CPU. Secondary CPUs do this through
* start_secondary().
*/
per_cpu_trap_init();
/*
* Boot processor to setup the FP and extended state
* context info.
*/
init_thread_xstate();
}
}
| linux-master | arch/sh/kernel/cpu/init.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
static struct clk master_clk = {
.flags = CLK_ENABLE_ON_INIT,
.rate = CONFIG_SH_PCLK_FREQ,
};
static struct clk peripheral_clk = {
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk bus_clk = {
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk cpu_clk = {
.parent = &master_clk,
.flags = CLK_ENABLE_ON_INIT,
};
/*
* The ordering of these clocks matters: cpg_clk_init() passes the
* array index to arch_init_clk_ops(), so do not change it.
*/
static struct clk *onchip_clocks[] = {
&master_clk,
&peripheral_clk,
&bus_clk,
&cpu_clk,
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("master_clk", &master_clk),
CLKDEV_CON_ID("peripheral_clk", &peripheral_clk),
CLKDEV_CON_ID("bus_clk", &bus_clk),
CLKDEV_CON_ID("cpu_clk", &cpu_clk),
};
int __init __deprecated cpg_clk_init(void)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
struct clk *clk = onchip_clocks[i];
arch_init_clk_ops(&clk->ops, i);
if (clk->ops)
ret |= clk_register(clk);
}
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
clk_add_alias("fck", "sh-tmu-sh3.0", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-tmu.0", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-tmu.1", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-tmu.2", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-mtu2", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-cmt-16.0", "peripheral_clk", NULL);
clk_add_alias("fck", "sh-cmt-32.0", "peripheral_clk", NULL);
return ret;
}
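/*
* Illustrative usage sketch (hypothetical driver code, not part of this
* file): once the lookup table and "fck" aliases above are registered,
* a driver bound to e.g. the "sh-tmu.0" platform device can resolve its
* functional clock through the regular clk API:
*
*	struct clk *fck = clk_get(&pdev->dev, "fck");
*
*	if (!IS_ERR(fck)) {
*		clk_enable(fck);
*		pr_info("fck runs at %lu Hz\n", clk_get_rate(fck));
*	}
*/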
/*
* Placeholder for compatibility, until the lazy CPUs do this
* on their own.
*/
int __init __weak arch_clk_init(void)
{
return cpg_clk_init();
}
| linux-master | arch/sh/kernel/cpu/clock-cpg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH Pin Function Control Initialization
*
* Copyright (C) 2012 Renesas Solutions Corp.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <cpu/pfc.h>
static struct platform_device sh_pfc_device = {
.id = -1,
};
int __init sh_pfc_register(const char *name,
struct resource *resource, u32 num_resources)
{
sh_pfc_device.name = name;
sh_pfc_device.num_resources = num_resources;
sh_pfc_device.resource = resource;
return platform_device_register(&sh_pfc_device);
}
| linux-master | arch/sh/kernel/cpu/pfc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/clock.c - SuperH clock framework
*
* Copyright (C) 2005 - 2009 Paul Mundt
*
* This clock framework is derived from the OMAP version by:
*
* Copyright (C) 2004 - 2008 Nokia Corporation
* Written by Tuukka Tikkanen <[email protected]>
*
* Modified for omap shared clock framework by Tony Lindgren <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <asm/clock.h>
#include <asm/machvec.h>
int __init clk_init(void)
{
int ret;
#ifndef CONFIG_COMMON_CLK
ret = arch_clk_init();
if (unlikely(ret)) {
pr_err("%s: CPU clock registration failed.\n", __func__);
return ret;
}
#endif
if (sh_mv.mv_clk_init) {
ret = sh_mv.mv_clk_init();
if (unlikely(ret)) {
pr_err("%s: machvec clock initialization failed.\n",
__func__);
return ret;
}
}
#ifndef CONFIG_COMMON_CLK
/* Kick the child clocks.. */
recalculate_root_clocks();
/* Enable the necessary init clocks */
clk_enable_init_clocks();
#endif
return ret;
}
| linux-master | arch/sh/kernel/cpu/clock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support
*
* Copyright (C) 2004 Andriy Skulysh <[email protected]>
*/
#include <linux/module.h>
#include <asm/adc.h>
#include <asm/io.h>
int adc_single(unsigned int channel)
{
int off;
unsigned char csr;
if (channel >= 8)
return -1;
off = (channel & 0x03) << 2;
/* select the channel and clock source, then start the conversion */
csr = channel | ADCSR_ADST | ADCSR_CKS;
__raw_writeb(csr, ADCSR);
do {
csr = __raw_readb(ADCSR);
} while ((csr & ADCSR_ADF) == 0);
csr &= ~(ADCSR_ADF | ADCSR_ADST);
__raw_writeb(csr, ADCSR);
return (((__raw_readb(ADDRAH + off) << 8) |
__raw_readb(ADDRAL + off)) >> 6);
}
EXPORT_SYMBOL(adc_single);
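/*
* Illustrative usage sketch (hypothetical scaling code, not part of this
* file): the two data registers hold the conversion left-justified in 16
* bits, so the shift by 6 above yields the raw 10-bit result. Scaling it
* against a 3.3V reference could look like:
*
*	int raw = adc_single(2);
*	int mv = (raw < 0) ? -1 : (raw * 3300) / 1023;
*/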
| linux-master | arch/sh/kernel/cpu/adc.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/seq_file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/machvec.h>
#include <asm/processor.h>
static const char *cpu_name[] = {
[CPU_SH7201] = "SH7201",
[CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263",
[CPU_SH7264] = "SH7264", [CPU_SH7269] = "SH7269",
[CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619",
[CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706",
[CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708",
[CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710",
[CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720",
[CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729",
[CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S",
[CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751",
[CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760",
[CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501",
[CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770",
[CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781",
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
[CPU_J2] = "J2",
[CPU_SH_NONE] = "Unknown"
};
const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
return cpu_name[c->type];
}
EXPORT_SYMBOL(get_cpu_subtype);
#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
"none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
"ptea", "llsc", "l2", "op32", "pteaex", NULL
};
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
unsigned long i;
seq_printf(m, "cpu flags\t:");
if (!c->flags) {
seq_printf(m, " %s\n", cpu_flags[0]);
return;
}
for (i = 0; cpu_flags[i]; i++)
if ((c->flags & (1 << i)))
seq_printf(m, " %s", cpu_flags[i+1]);
seq_printf(m, "\n");
}
static void show_cacheinfo(struct seq_file *m, const char *type,
struct cache_info info)
{
unsigned int cache_size;
cache_size = info.ways * info.sets * info.linesz;
seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
type, cache_size >> 10, info.ways);
}
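/*
* Worked example (illustrative geometry, not from the original source):
* a 4-way cache with 256 sets and 32-byte lines gives
* 4 * 256 * 32 = 32768 bytes, printed as "32KiB (4-way)".
*/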
/*
* Get CPU information for use by the procfs.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct sh_cpuinfo *c = v;
unsigned int cpu = c - cpu_data;
if (!cpu_online(cpu))
return 0;
if (cpu == 0)
seq_printf(m, "machine\t\t: %s\n", get_system_type());
else
seq_printf(m, "\n");
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
if (c->cut_major == -1)
seq_printf(m, "cut\t\t: unknown\n");
else if (c->cut_minor == -1)
seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
else
seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);
show_cpuflags(m, c);
seq_printf(m, "cache type\t: ");
/*
* Check what type of cache we have: we support both the unified
* cache on the SH-2 and SH-3 and the split (Harvard-style)
* cache on the SH-4.
*/
if (c->icache.flags & SH_CACHE_COMBINED) {
seq_printf(m, "unified\n");
show_cacheinfo(m, "cache", c->icache);
} else {
seq_printf(m, "split (harvard)\n");
show_cacheinfo(m, "icache", c->icache);
show_cacheinfo(m, "dcache", c->dcache);
}
/* Optional secondary cache */
if (c->flags & CPU_HAS_L2_CACHE)
show_cacheinfo(m, "scache", c->scache);
seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */
| linux-master | arch/sh/kernel/cpu/proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/shmobile/cpuidle.c
*
* Cpuidle support code for SuperH Mobile
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <asm/suspend.h>
#include <linux/uaccess.h>
static unsigned long cpuidle_mode[] = {
SUSP_SH_SLEEP, /* regular sleep mode */
SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */
SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */
};
static int cpuidle_sleep_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
unsigned long allowed_mode = SUSP_SH_SLEEP;
int requested_state = index;
int allowed_state;
int k;
/* convert allowed mode to allowed state */
for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--)
if (cpuidle_mode[k] == allowed_mode)
break;
allowed_state = k;
/* take the following into account for sleep mode selection:
* - allowed_state: best mode allowed by hardware (clock deps)
* - requested_state: best mode allowed by software (latencies)
*/
k = min_t(int, allowed_state, requested_state);
sh_mobile_call_standby(cpuidle_mode[k]);
return k;
}
static struct cpuidle_driver cpuidle_driver = {
.name = "sh_idle",
.owner = THIS_MODULE,
.states = {
{
.exit_latency = 1,
.target_residency = 1 * 2,
.power_usage = 3,
.enter = cpuidle_sleep_enter,
.name = "C1",
.desc = "SuperH Sleep Mode",
},
{
.exit_latency = 100,
.target_residency = 1 * 2,
.power_usage = 1,
.enter = cpuidle_sleep_enter,
.name = "C2",
.desc = "SuperH Sleep Mode [SF]",
.flags = CPUIDLE_FLAG_UNUSABLE,
},
{
.exit_latency = 2300,
.target_residency = 1 * 2,
.power_usage = 1,
.enter = cpuidle_sleep_enter,
.name = "C3",
.desc = "SuperH Mobile Standby Mode [SF]",
.flags = CPUIDLE_FLAG_UNUSABLE,
},
},
.safe_state_index = 0,
.state_count = 3,
};
int __init sh_mobile_setup_cpuidle(void)
{
if (sh_mobile_sleep_supported & SUSP_SH_SF)
cpuidle_driver.states[1].flags = CPUIDLE_FLAG_NONE;
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
cpuidle_driver.states[2].flags = CPUIDLE_FLAG_NONE;
return cpuidle_register(&cpuidle_driver, NULL);
}
| linux-master | arch/sh/kernel/cpu/shmobile/cpuidle.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/shmobile/pm.c
*
* Power management support code for SuperH Mobile
*
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <asm/suspend.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/bl_bit.h>
/*
* Notifier lists for pre/post sleep notification
*/
ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list);
ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list);
/*
* Sleep modes available on SuperH Mobile:
*
* Sleep mode is just plain "sleep" instruction
* Sleep Self-Refresh mode is above plus RAM put in Self-Refresh
* Standby Self-Refresh mode is above plus stopped clocks
*/
#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP)
#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF)
#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF)
#define SUSP_MODE_RSTANDBY_SF \
(SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF)
/*
* U-standby mode is unsupported since it needs bootloader hacks
*/
#ifdef CONFIG_CPU_SUBTYPE_SH7724
#define RAM_BASE 0xfd800000 /* RSMEM */
#else
#define RAM_BASE 0xe5200000 /* ILRAM */
#endif
void sh_mobile_call_standby(unsigned long mode)
{
void *onchip_mem = (void *)RAM_BASE;
struct sh_sleep_data *sdp = onchip_mem;
void (*standby_onchip_mem)(unsigned long, unsigned long);
/* code located directly after data structure */
standby_onchip_mem = (void *)(sdp + 1);
atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list,
mode, NULL);
/* flush the caches if MMU flag is set */
if (mode & SUSP_SH_MMU)
flush_cache_all();
/* Let assembly snippet in on-chip memory handle the rest */
standby_onchip_mem(mode, RAM_BASE);
atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list,
mode, NULL);
}
extern char sh_mobile_sleep_enter_start;
extern char sh_mobile_sleep_enter_end;
extern char sh_mobile_sleep_resume_start;
extern char sh_mobile_sleep_resume_end;
unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP;
void sh_mobile_register_self_refresh(unsigned long flags,
void *pre_start, void *pre_end,
void *post_start, void *post_end)
{
void *onchip_mem = (void *)RAM_BASE;
void *vp;
struct sh_sleep_data *sdp;
int n;
/* part 0: data area */
sdp = onchip_mem;
sdp->addr.stbcr = 0xa4150020; /* STBCR */
sdp->addr.bar = 0xa4150040; /* BAR */
sdp->addr.pteh = 0xff000000; /* PTEH */
sdp->addr.ptel = 0xff000004; /* PTEL */
sdp->addr.ttb = 0xff000008; /* TTB */
sdp->addr.tea = 0xff00000c; /* TEA */
sdp->addr.mmucr = 0xff000010; /* MMUCR */
sdp->addr.ptea = 0xff000034; /* PTEA */
sdp->addr.pascr = 0xff000070; /* PASCR */
sdp->addr.irmcr = 0xff000078; /* IRMCR */
sdp->addr.ccr = 0xff00001c; /* CCR */
sdp->addr.ramcr = 0xff000074; /* RAMCR */
vp = sdp + 1;
/* part 1: common code to enter sleep mode */
n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start;
memcpy(vp, &sh_mobile_sleep_enter_start, n);
vp += roundup(n, 4);
/* part 2: board specific code to enter self-refresh mode */
n = pre_end - pre_start;
memcpy(vp, pre_start, n);
sdp->sf_pre = (unsigned long)vp;
vp += roundup(n, 4);
/* part 3: board specific code to resume from self-refresh mode */
n = post_end - post_start;
memcpy(vp, post_start, n);
sdp->sf_post = (unsigned long)vp;
vp += roundup(n, 4);
/* part 4: common code to resume from sleep mode */
WARN_ON(vp > (onchip_mem + 0x600));
vp = onchip_mem + 0x600; /* located at interrupt vector */
n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start;
memcpy(vp, &sh_mobile_sleep_resume_start, n);
sdp->resume = (unsigned long)vp;
sh_mobile_sleep_supported |= flags;
}
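/*
* Resulting on-chip memory layout, as assembled by the copies above
* (summary added for clarity, derived from this function):
*
*	RAM_BASE + 0x000: struct sh_sleep_data (register addresses etc.)
*	sdp + 1:          common sleep entry code
*	sdp->sf_pre:      board specific code entering self-refresh
*	sdp->sf_post:     board specific code leaving self-refresh
*	RAM_BASE + 0x600: common resume code, at the interrupt vector
*/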
static int sh_pm_enter(suspend_state_t state)
{
if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF))
return -ENXIO;
local_irq_disable();
set_bl_bit();
sh_mobile_call_standby(SUSP_MODE_STANDBY_SF);
local_irq_disable();
clear_bl_bit();
return 0;
}
static const struct platform_suspend_ops sh_pm_ops = {
.enter = sh_pm_enter,
.valid = suspend_valid_only_mem,
};
static int __init sh_pm_init(void)
{
suspend_set_ops(&sh_pm_ops);
return sh_mobile_setup_cpuidle();
}
late_initcall(sh_pm_init);
| linux-master | arch/sh/kernel/cpu/shmobile/pm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Save/restore floating point context for signal handlers.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2006 ST Microelectronics Ltd. (denorm support)
*
* FIXME! These routines have not been tested for big endian case.
*/
#include <linux/sched/signal.h>
#include <linux/io.h>
#include <cpu/fpu.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
* Executing frchg with PR set causes a trap on some SH4 implementations.
*/
#define FPSCR_RCHG 0x00000000
extern unsigned long long float64_div(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_div(unsigned long int a, unsigned long int b);
extern unsigned long long float64_mul(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_mul(unsigned long int a, unsigned long int b);
extern unsigned long long float64_add(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_add(unsigned long int a, unsigned long int b);
extern unsigned long long float64_sub(unsigned long long a,
unsigned long long b);
extern unsigned long int float32_sub(unsigned long int a, unsigned long int b);
extern unsigned long int float64_to_float32(unsigned long long a);
static unsigned int fpu_exception_flags;
/*
* Save FPU registers onto task structure.
*/
void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile ("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
"lds %2, fpscr\n\t"
"frchg\n\t"
"fmov.s fr15, @-%0\n\t"
"fmov.s fr14, @-%0\n\t"
"fmov.s fr13, @-%0\n\t"
"fmov.s fr12, @-%0\n\t"
"fmov.s fr11, @-%0\n\t"
"fmov.s fr10, @-%0\n\t"
"fmov.s fr9, @-%0\n\t"
"fmov.s fr8, @-%0\n\t"
"fmov.s fr7, @-%0\n\t"
"fmov.s fr6, @-%0\n\t"
"fmov.s fr5, @-%0\n\t"
"fmov.s fr4, @-%0\n\t"
"fmov.s fr3, @-%0\n\t"
"fmov.s fr2, @-%0\n\t"
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"frchg\n\t"
"fmov.s fr15, @-%0\n\t"
"fmov.s fr14, @-%0\n\t"
"fmov.s fr13, @-%0\n\t"
"fmov.s fr12, @-%0\n\t"
"fmov.s fr11, @-%0\n\t"
"fmov.s fr10, @-%0\n\t"
"fmov.s fr9, @-%0\n\t"
"fmov.s fr8, @-%0\n\t"
"fmov.s fr7, @-%0\n\t"
"fmov.s fr6, @-%0\n\t"
"fmov.s fr5, @-%0\n\t"
"fmov.s fr4, @-%0\n\t"
"fmov.s fr3, @-%0\n\t"
"fmov.s fr2, @-%0\n\t"
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t":"=r" (dummy)
:"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
"r"(FPSCR_RCHG), "r"(FPSCR_INIT)
:"memory");
disable_fpu();
}
void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile ("lds %2, fpscr\n\t"
"fmov.s @%0+, fr0\n\t"
"fmov.s @%0+, fr1\n\t"
"fmov.s @%0+, fr2\n\t"
"fmov.s @%0+, fr3\n\t"
"fmov.s @%0+, fr4\n\t"
"fmov.s @%0+, fr5\n\t"
"fmov.s @%0+, fr6\n\t"
"fmov.s @%0+, fr7\n\t"
"fmov.s @%0+, fr8\n\t"
"fmov.s @%0+, fr9\n\t"
"fmov.s @%0+, fr10\n\t"
"fmov.s @%0+, fr11\n\t"
"fmov.s @%0+, fr12\n\t"
"fmov.s @%0+, fr13\n\t"
"fmov.s @%0+, fr14\n\t"
"fmov.s @%0+, fr15\n\t"
"frchg\n\t"
"fmov.s @%0+, fr0\n\t"
"fmov.s @%0+, fr1\n\t"
"fmov.s @%0+, fr2\n\t"
"fmov.s @%0+, fr3\n\t"
"fmov.s @%0+, fr4\n\t"
"fmov.s @%0+, fr5\n\t"
"fmov.s @%0+, fr6\n\t"
"fmov.s @%0+, fr7\n\t"
"fmov.s @%0+, fr8\n\t"
"fmov.s @%0+, fr9\n\t"
"fmov.s @%0+, fr10\n\t"
"fmov.s @%0+, fr11\n\t"
"fmov.s @%0+, fr12\n\t"
"fmov.s @%0+, fr13\n\t"
"fmov.s @%0+, fr14\n\t"
"fmov.s @%0+, fr15\n\t"
"frchg\n\t"
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
:"=r" (dummy)
:"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
:"memory");
disable_fpu();
}
/**
 * denormal_to_double - Expand the denormalized single-precision value
 *                      in FPUL into a double-precision value
 *
 * @fpu: Pointer to sh_fpu_hard structure
 * @n: Index of the destination FP register pair
 */
static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n + 1] = dl;
}
}
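/*
* Worked example (not from the original source): for the smallest single
* denormal, fpul = 0x00000001, the loop shifts 23 times, so
* exp = (1023 - 126) - 23 = 874 and the packed double is 2^(874-1023) =
* 2^-149, exactly the value the denormal encodes.
*/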
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int ieee_fpe_handler(struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *)regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf
};
if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb))
regs->pr = regs->pc + 4; /* bsr & jsr */
if (nib[0] == 0xa || nib[0] == 0xb) {
/* bra & bsr */
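/*
* The 12-bit displacement is sign-extended by shifting left 4 and
* arithmetically right 3, which also doubles it (instructions are
* 16 bits wide).
*/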
nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) {
/* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) {
/* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1);
finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) {
/* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *)(regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) {
/* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *)(regs->pc + 2);
} else if (insn == 0x000b) {
/* rts */
nextpc = regs->pr;
finsn = *(unsigned short *)(regs->pc + 2);
} else {
nextpc = regs->pc + instruction_size(insn);
finsn = insn;
}
if ((finsn & 0xf1ff) == 0xf0ad) {
/* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
/* FPU error */
denormal_to_double(&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00f) == 0xf002) {
/* fmul */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_mul(llx, lly);
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_mul(hx, hy);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00e) == 0xf000) {
/* fadd, fsub */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
if ((finsn & 0xf00f) == 0xf000)
llx = float64_add(llx, lly);
else
llx = float64_sub(llx, lly);
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
if ((finsn & 0xf00f) == 0xf000)
hx = float32_add(hx, hy);
else
hx = float32_sub(hx, hy);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf003) == 0xf003) {
/* fdiv */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & FPSCR_DBL_PRECISION;
if ((fpscr & FPSCR_CAUSE_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal (doubles) */
llx = ((long long)hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n + 1];
lly = ((long long)hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
llx = float64_div(llx, lly);
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_CAUSE_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal (floats) */
hx = float32_div(hx, hy);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf0bd) == 0xf0bd) {
/* fcnvds - double to single precision convert */
struct task_struct *tsk = current;
int m;
unsigned int hx;
m = (finsn >> 8) & 0x7;
hx = tsk->thread.xstate->hardfpu.fp_regs[m];
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
&& ((hx & 0x7fffffff) < 0x00100000)) {
/* subnormal double to float conversion */
long long llx;
llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m + 1];
tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
} else
return 0;
regs->pc = nextpc;
return 1;
}
return 0;
}
void float_raise(unsigned int flags)
{
fpu_exception_flags |= flags;
}
int float_rounding_mode(void)
{
struct task_struct *tsk = current;
int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
return roundingMode;
}
BUILD_TRAP_HANDLER(fpu_error)
{
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
__unlazy_fpu(tsk, regs);
fpu_exception_flags = 0;
if (ieee_fpe_handler(regs)) {
tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
/* Set the FPSCR flag as well as cause bits - simply
* replicate the cause */
tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
grab_fpu(regs);
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
(fpu_exception_flags >> 2)) == 0) {
return;
}
}
force_sig(SIGFPE);
}
| linux-master | arch/sh/kernel/cpu/sh4/fpu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4/clock-sh4.c
*
* Generic SH-4 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
#define bfc_divisors ifc_divisors /* Same */
static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
}
static struct sh_clk_ops sh4_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh4_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
return clk->parent->rate / bfc_divisors[idx];
}
static struct sh_clk_ops sh4_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) >> 6) & 0x0007;
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh4_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh4_clk_ops[] = {
&sh4_master_clk_ops,
&sh4_module_clk_ops,
&sh4_bus_clk_ops,
&sh4_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh4_clk_ops))
*ops = sh4_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh4/clock-sh4.c |
/*
* Floating point emulation support for subnormalised numbers on SH4
* architecture This file is derived from the SoftFloat IEC/IEEE
* Floating-point Arithmetic Package, Release 2 the original license of
* which is reproduced below.
*
* ========================================================================
*
* This C source file is part of the SoftFloat IEC/IEEE Floating-point
* Arithmetic Package, Release 2.
*
* Written by John R. Hauser. This work was made possible in part by the
* International Computer Science Institute, located at Suite 600, 1947 Center
* Street, Berkeley, California 94704. Funding was partially provided by the
* National Science Foundation under grant MIP-9311980. The original version
* of this code was written as part of a project to build a fixed-point vector
* processor in collaboration with the University of California at Berkeley,
* overseen by Profs. Nelson Morgan and John Wawrzynek. More information
* is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
* arithmetic/softfloat.html'.
*
* THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
* has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
* TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
* PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
* AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
*
* Derivative works are acceptable, even for commercial purposes, so long as
* (1) they include prominent notice that the work is derivative, and (2) they
* include prominent notice akin to these three paragraphs for those parts of
* this code that are retained.
*
* ========================================================================
*
* SH4 modifications by Ismail Dhaoui <[email protected]>
* and Kamel Khelifi <[email protected]>
*/
#include <linux/kernel.h>
#include <cpu/fpu.h>
#include <asm/div64.h>
#define LIT64( a ) a##LL
typedef char flag;
typedef unsigned char uint8;
typedef signed char int8;
typedef int uint16;
typedef int int16;
typedef unsigned int uint32;
typedef signed int int32;
typedef unsigned long long int bits64;
typedef signed long long int sbits64;
typedef unsigned char bits8;
typedef signed char sbits8;
typedef unsigned short int bits16;
typedef signed short int sbits16;
typedef unsigned int bits32;
typedef signed int sbits32;
typedef unsigned long long int uint64;
typedef signed long long int int64;
typedef unsigned long int float32;
typedef unsigned long long float64;
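/*
* Note: as in the original SoftFloat, these integer typedefs only promise
* "at least" the named width, so mapping uint16/int16 to plain int is
* intentional rather than a typo.
*/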
extern void float_raise(unsigned int flags); /* in fpu.c */
extern int float_rounding_mode(void); /* in fpu.c */
bits64 extractFloat64Frac(float64 a);
flag extractFloat64Sign(float64 a);
int16 extractFloat64Exp(float64 a);
int16 extractFloat32Exp(float32 a);
flag extractFloat32Sign(float32 a);
bits32 extractFloat32Frac(float32 a);
float64 packFloat64(flag zSign, int16 zExp, bits64 zSig);
void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr);
float32 packFloat32(flag zSign, int16 zExp, bits32 zSig);
void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr);
float64 float64_sub(float64 a, float64 b);
float32 float32_sub(float32 a, float32 b);
float32 float32_add(float32 a, float32 b);
float64 float64_add(float64 a, float64 b);
float64 float64_div(float64 a, float64 b);
float32 float32_div(float32 a, float32 b);
float32 float32_mul(float32 a, float32 b);
float64 float64_mul(float64 a, float64 b);
float32 float64_to_float32(float64 a);
void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr);
void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr);
void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr);
static int8 countLeadingZeros32(bits32 a);
static int8 countLeadingZeros64(bits64 a);
static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp,
bits64 zSig);
static float64 subFloat64Sigs(float64 a, float64 b, flag zSign);
static float64 addFloat64Sigs(float64 a, float64 b, flag zSign);
static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig);
static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp,
bits32 zSig);
static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig);
static float32 subFloat32Sigs(float32 a, float32 b, flag zSign);
static float32 addFloat32Sigs(float32 a, float32 b, flag zSign);
static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr,
bits64 * zSigPtr);
static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b);
static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
bits32 * zSigPtr);
bits64 extractFloat64Frac(float64 a)
{
return a & LIT64(0x000FFFFFFFFFFFFF);
}
flag extractFloat64Sign(float64 a)
{
return a >> 63;
}
int16 extractFloat64Exp(float64 a)
{
return (a >> 52) & 0x7FF;
}
int16 extractFloat32Exp(float32 a)
{
return (a >> 23) & 0xFF;
}
flag extractFloat32Sign(float32 a)
{
return a >> 31;
}
bits32 extractFloat32Frac(float32 a)
{
return a & 0x007FFFFF;
}
float64 packFloat64(flag zSign, int16 zExp, bits64 zSig)
{
return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig;
}
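/*
* Worked example (not from the original source):
* packFloat64(0, 0x3FF, 0) == 0x3FF0000000000000, the encoding of 1.0.
* The significand is added rather than OR-ed, so a rounded-up significand
* that overflows its field correctly carries into the exponent.
*/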
void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr)
{
bits64 z;
if (count == 0) {
z = a;
} else if (count < 64) {
z = (a >> count) | ((a << ((-count) & 63)) != 0);
} else {
z = (a != 0);
}
*zPtr = z;
}
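/*
* Worked example (not from the original source):
* shift64RightJamming(0x101, 4, &z) gives z = 0x11; the bits shifted out
* were not all zero, so the low ("sticky") bit is jammed to 1 to keep
* later rounding decisions exact.
*/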
static int8 countLeadingZeros32(bits32 a)
{
static const int8 countLeadingZerosHigh[] = {
8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
int8 shiftCount;
shiftCount = 0;
if (a < 0x10000) {
shiftCount += 16;
a <<= 16;
}
if (a < 0x1000000) {
shiftCount += 8;
a <<= 8;
}
shiftCount += countLeadingZerosHigh[a >> 24];
return shiftCount;
}
static int8 countLeadingZeros64(bits64 a)
{
int8 shiftCount;
shiftCount = 0;
if (a < ((bits64) 1) << 32) {
shiftCount += 32;
} else {
a >>= 32;
}
shiftCount += countLeadingZeros32(a);
return shiftCount;
}
static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
{
int8 shiftCount;
shiftCount = countLeadingZeros64(zSig) - 1;
return roundAndPackFloat64(zSign, zExp - shiftCount,
zSig << shiftCount);
}
static float64 subFloat64Sigs(float64 a, float64 b, flag zSign)
{
int16 aExp, bExp, zExp;
bits64 aSig, bSig, zSig;
int16 expDiff;
aSig = extractFloat64Frac(a);
aExp = extractFloat64Exp(a);
bSig = extractFloat64Frac(b);
bExp = extractFloat64Exp(b);
expDiff = aExp - bExp;
aSig <<= 10;
bSig <<= 10;
if (0 < expDiff)
goto aExpBigger;
if (expDiff < 0)
goto bExpBigger;
if (aExp == 0) {
aExp = 1;
bExp = 1;
}
if (bSig < aSig)
goto aBigger;
if (aSig < bSig)
goto bBigger;
return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
bExpBigger:
if (bExp == 0x7FF) {
return packFloat64(zSign ^ 1, 0x7FF, 0);
}
if (aExp == 0) {
++expDiff;
} else {
aSig |= LIT64(0x4000000000000000);
}
shift64RightJamming(aSig, -expDiff, &aSig);
bSig |= LIT64(0x4000000000000000);
bBigger:
zSig = bSig - aSig;
zExp = bExp;
zSign ^= 1;
goto normalizeRoundAndPack;
aExpBigger:
if (aExp == 0x7FF) {
return a;
}
if (bExp == 0) {
--expDiff;
} else {
bSig |= LIT64(0x4000000000000000);
}
shift64RightJamming(bSig, expDiff, &bSig);
aSig |= LIT64(0x4000000000000000);
aBigger:
zSig = aSig - bSig;
zExp = aExp;
normalizeRoundAndPack:
--zExp;
return normalizeRoundAndPackFloat64(zSign, zExp, zSig);
}
static float64 addFloat64Sigs(float64 a, float64 b, flag zSign)
{
int16 aExp, bExp, zExp;
bits64 aSig, bSig, zSig;
int16 expDiff;
aSig = extractFloat64Frac(a);
aExp = extractFloat64Exp(a);
bSig = extractFloat64Frac(b);
bExp = extractFloat64Exp(b);
expDiff = aExp - bExp;
aSig <<= 9;
bSig <<= 9;
if (0 < expDiff) {
if (aExp == 0x7FF) {
return a;
}
if (bExp == 0) {
--expDiff;
} else {
bSig |= LIT64(0x2000000000000000);
}
shift64RightJamming(bSig, expDiff, &bSig);
zExp = aExp;
} else if (expDiff < 0) {
if (bExp == 0x7FF) {
return packFloat64(zSign, 0x7FF, 0);
}
if (aExp == 0) {
++expDiff;
} else {
aSig |= LIT64(0x2000000000000000);
}
shift64RightJamming(aSig, -expDiff, &aSig);
zExp = bExp;
} else {
if (aExp == 0x7FF) {
return a;
}
if (aExp == 0)
return packFloat64(zSign, 0, (aSig + bSig) >> 9);
zSig = LIT64(0x4000000000000000) + aSig + bSig;
zExp = aExp;
goto roundAndPack;
}
aSig |= LIT64(0x2000000000000000);
zSig = (aSig + bSig) << 1;
--zExp;
if ((sbits64) zSig < 0) {
zSig = aSig + bSig;
++zExp;
}
roundAndPack:
return roundAndPackFloat64(zSign, zExp, zSig);
}
float32 packFloat32(flag zSign, int16 zExp, bits32 zSig)
{
return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig;
}
void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr)
{
bits32 z;
if (count == 0) {
z = a;
} else if (count < 32) {
z = (a >> count) | ((a << ((-count) & 31)) != 0);
} else {
z = (a != 0);
}
*zPtr = z;
}
static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
{
flag roundNearestEven;
int8 roundIncrement, roundBits;
flag isTiny;
/* SH4 has only 2 rounding modes - round to nearest and round to zero */
roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
roundIncrement = 0x40;
if (!roundNearestEven) {
roundIncrement = 0;
}
roundBits = zSig & 0x7F;
if (0xFD <= (bits16) zExp) {
if ((0xFD < zExp)
|| ((zExp == 0xFD)
&& ((sbits32) (zSig + roundIncrement) < 0))
) {
float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
return packFloat32(zSign, 0xFF,
0) - (roundIncrement == 0);
}
if (zExp < 0) {
isTiny = (zExp < -1)
|| (zSig + roundIncrement < 0x80000000);
shift32RightJamming(zSig, -zExp, &zSig);
zExp = 0;
roundBits = zSig & 0x7F;
if (isTiny && roundBits)
float_raise(FPSCR_CAUSE_UNDERFLOW);
}
}
if (roundBits)
float_raise(FPSCR_CAUSE_INEXACT);
zSig = (zSig + roundIncrement) >> 7;
zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven);
if (zSig == 0)
zExp = 0;
return packFloat32(zSign, zExp, zSig);
}
static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig)
{
int8 shiftCount;
shiftCount = countLeadingZeros32(zSig) - 1;
return roundAndPackFloat32(zSign, zExp - shiftCount,
zSig << shiftCount);
}
static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig)
{
flag roundNearestEven;
int16 roundIncrement, roundBits;
flag isTiny;
/* SH4 has only 2 rounding modes - round to nearest and round to zero */
roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST);
roundIncrement = 0x200;
if (!roundNearestEven) {
roundIncrement = 0;
}
roundBits = zSig & 0x3FF;
if (0x7FD <= (bits16) zExp) {
if ((0x7FD < zExp)
|| ((zExp == 0x7FD)
&& ((sbits64) (zSig + roundIncrement) < 0))
) {
float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT);
return packFloat64(zSign, 0x7FF,
0) - (roundIncrement == 0);
}
if (zExp < 0) {
isTiny = (zExp < -1)
|| (zSig + roundIncrement <
LIT64(0x8000000000000000));
shift64RightJamming(zSig, -zExp, &zSig);
zExp = 0;
roundBits = zSig & 0x3FF;
if (isTiny && roundBits)
float_raise(FPSCR_CAUSE_UNDERFLOW);
}
}
if (roundBits)
float_raise(FPSCR_CAUSE_INEXACT);
zSig = (zSig + roundIncrement) >> 10;
zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven);
if (zSig == 0)
zExp = 0;
return packFloat64(zSign, zExp, zSig);
}
static float32 subFloat32Sigs(float32 a, float32 b, flag zSign)
{
int16 aExp, bExp, zExp;
bits32 aSig, bSig, zSig;
int16 expDiff;
aSig = extractFloat32Frac(a);
aExp = extractFloat32Exp(a);
bSig = extractFloat32Frac(b);
bExp = extractFloat32Exp(b);
expDiff = aExp - bExp;
aSig <<= 7;
bSig <<= 7;
if (0 < expDiff)
goto aExpBigger;
if (expDiff < 0)
goto bExpBigger;
if (aExp == 0) {
aExp = 1;
bExp = 1;
}
if (bSig < aSig)
goto aBigger;
if (aSig < bSig)
goto bBigger;
return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0);
bExpBigger:
if (bExp == 0xFF) {
return packFloat32(zSign ^ 1, 0xFF, 0);
}
if (aExp == 0) {
++expDiff;
} else {
aSig |= 0x40000000;
}
shift32RightJamming(aSig, -expDiff, &aSig);
bSig |= 0x40000000;
bBigger:
zSig = bSig - aSig;
zExp = bExp;
zSign ^= 1;
goto normalizeRoundAndPack;
aExpBigger:
if (aExp == 0xFF) {
return a;
}
if (bExp == 0) {
--expDiff;
} else {
bSig |= 0x40000000;
}
shift32RightJamming(bSig, expDiff, &bSig);
aSig |= 0x40000000;
aBigger:
zSig = aSig - bSig;
zExp = aExp;
normalizeRoundAndPack:
--zExp;
return normalizeRoundAndPackFloat32(zSign, zExp, zSig);
}
static float32 addFloat32Sigs(float32 a, float32 b, flag zSign)
{
int16 aExp, bExp, zExp;
bits32 aSig, bSig, zSig;
int16 expDiff;
aSig = extractFloat32Frac(a);
aExp = extractFloat32Exp(a);
bSig = extractFloat32Frac(b);
bExp = extractFloat32Exp(b);
expDiff = aExp - bExp;
aSig <<= 6;
bSig <<= 6;
if (0 < expDiff) {
if (aExp == 0xFF) {
return a;
}
if (bExp == 0) {
--expDiff;
} else {
bSig |= 0x20000000;
}
shift32RightJamming(bSig, expDiff, &bSig);
zExp = aExp;
} else if (expDiff < 0) {
if (bExp == 0xFF) {
return packFloat32(zSign, 0xFF, 0);
}
if (aExp == 0) {
++expDiff;
} else {
aSig |= 0x20000000;
}
shift32RightJamming(aSig, -expDiff, &aSig);
zExp = bExp;
} else {
if (aExp == 0xFF) {
return a;
}
if (aExp == 0)
return packFloat32(zSign, 0, (aSig + bSig) >> 6);
zSig = 0x40000000 + aSig + bSig;
zExp = aExp;
goto roundAndPack;
}
aSig |= 0x20000000;
zSig = (aSig + bSig) << 1;
--zExp;
if ((sbits32) zSig < 0) {
zSig = aSig + bSig;
++zExp;
}
roundAndPack:
return roundAndPackFloat32(zSign, zExp, zSig);
}
float64 float64_sub(float64 a, float64 b)
{
flag aSign, bSign;
aSign = extractFloat64Sign(a);
bSign = extractFloat64Sign(b);
if (aSign == bSign) {
return subFloat64Sigs(a, b, aSign);
} else {
return addFloat64Sigs(a, b, aSign);
}
}
float32 float32_sub(float32 a, float32 b)
{
flag aSign, bSign;
aSign = extractFloat32Sign(a);
bSign = extractFloat32Sign(b);
if (aSign == bSign) {
return subFloat32Sigs(a, b, aSign);
} else {
return addFloat32Sigs(a, b, aSign);
}
}
float32 float32_add(float32 a, float32 b)
{
flag aSign, bSign;
aSign = extractFloat32Sign(a);
bSign = extractFloat32Sign(b);
if (aSign == bSign) {
return addFloat32Sigs(a, b, aSign);
} else {
return subFloat32Sigs(a, b, aSign);
}
}
float64 float64_add(float64 a, float64 b)
{
flag aSign, bSign;
aSign = extractFloat64Sign(a);
bSign = extractFloat64Sign(b);
if (aSign == bSign) {
return addFloat64Sigs(a, b, aSign);
} else {
return subFloat64Sigs(a, b, aSign);
}
}
static void
normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr)
{
int8 shiftCount;
shiftCount = countLeadingZeros64(aSig) - 11;
*zSigPtr = aSig << shiftCount;
*zExpPtr = 1 - shiftCount;
}
void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr)
{
bits64 z1;
z1 = a1 + b1;
*z1Ptr = z1;
*z0Ptr = a0 + b0 + (z1 < a1);
}
void
sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr,
bits64 * z1Ptr)
{
*z1Ptr = a1 - b1;
*z0Ptr = a0 - b0 - (a1 < b1);
}
static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b)
{
bits64 b0, b1;
bits64 rem0, rem1, term0, term1;
bits64 z, tmp;
if (b <= a0)
return LIT64(0xFFFFFFFFFFFFFFFF);
b0 = b >> 32;
tmp = a0;
do_div(tmp, b0);
z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32;
mul64To128(b, z, &term0, &term1);
sub128(a0, a1, term0, term1, &rem0, &rem1);
while (((sbits64) rem0) < 0) {
z -= LIT64(0x100000000);
b1 = b << 32;
add128(rem0, rem1, b0, b1, &rem0, &rem1);
}
rem0 = (rem0 << 32) | (rem1 >> 32);
tmp = rem0;
do_div(tmp, b0);
z |= (b0 << 32 <= rem0) ? 0xFFFFFFFF : tmp;
return z;
}
void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr)
{
bits32 aHigh, aLow, bHigh, bLow;
bits64 z0, zMiddleA, zMiddleB, z1;
aLow = a;
aHigh = a >> 32;
bLow = b;
bHigh = b >> 32;
z1 = ((bits64) aLow) * bLow;
zMiddleA = ((bits64) aLow) * bHigh;
zMiddleB = ((bits64) aHigh) * bLow;
z0 = ((bits64) aHigh) * bHigh;
zMiddleA += zMiddleB;
z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32);
zMiddleA <<= 32;
z1 += zMiddleA;
z0 += (z1 < zMiddleA);
*z1Ptr = z1;
*z0Ptr = z0;
}
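/*
* Worked example (not from the original source): splitting each operand
* into 32-bit halves computes a*b = (aH*bH << 64) + ((aH*bL + aL*bH) << 32)
* + aL*bL with the middle-term carries folded into z0; e.g.
* mul64To128(1ULL << 32, 1ULL << 32, &z0, &z1) yields z0 = 1, z1 = 0.
*/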
static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr,
bits32 * zSigPtr)
{
int8 shiftCount;
shiftCount = countLeadingZeros32(aSig) - 8;
*zSigPtr = aSig << shiftCount;
*zExpPtr = 1 - shiftCount;
}
float64 float64_div(float64 a, float64 b)
{
flag aSign, bSign, zSign;
int16 aExp, bExp, zExp;
bits64 aSig, bSig, zSig;
bits64 rem0, rem1;
bits64 term0, term1;
aSig = extractFloat64Frac(a);
aExp = extractFloat64Exp(a);
aSign = extractFloat64Sign(a);
bSig = extractFloat64Frac(b);
bExp = extractFloat64Exp(b);
bSign = extractFloat64Sign(b);
zSign = aSign ^ bSign;
if (aExp == 0x7FF) {
if (bExp == 0x7FF) {
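/*
* Presumably the NaN propagation done here by the original
* SoftFloat was stripped from this port; inf/inf falls through
* to the generic infinity result below.
*/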
}
return packFloat64(zSign, 0x7FF, 0);
}
if (bExp == 0x7FF) {
return packFloat64(zSign, 0, 0);
}
if (bExp == 0) {
if (bSig == 0) {
if ((aExp | aSig) == 0) {
float_raise(FPSCR_CAUSE_INVALID);
}
return packFloat64(zSign, 0x7FF, 0);
}
normalizeFloat64Subnormal(bSig, &bExp, &bSig);
}
if (aExp == 0) {
if (aSig == 0)
return packFloat64(zSign, 0, 0);
normalizeFloat64Subnormal(aSig, &aExp, &aSig);
}
zExp = aExp - bExp + 0x3FD;
aSig = (aSig | LIT64(0x0010000000000000)) << 10;
bSig = (bSig | LIT64(0x0010000000000000)) << 11;
if (bSig <= (aSig + aSig)) {
aSig >>= 1;
++zExp;
}
zSig = estimateDiv128To64(aSig, 0, bSig);
if ((zSig & 0x1FF) <= 2) {
mul64To128(bSig, zSig, &term0, &term1);
sub128(aSig, 0, term0, term1, &rem0, &rem1);
while ((sbits64) rem0 < 0) {
--zSig;
add128(rem0, rem1, 0, bSig, &rem0, &rem1);
}
zSig |= (rem1 != 0);
}
return roundAndPackFloat64(zSign, zExp, zSig);
}
float32 float32_div(float32 a, float32 b)
{
flag aSign, bSign, zSign;
int16 aExp, bExp, zExp;
bits32 aSig, bSig;
uint64_t zSig;
aSig = extractFloat32Frac(a);
aExp = extractFloat32Exp(a);
aSign = extractFloat32Sign(a);
bSig = extractFloat32Frac(b);
bExp = extractFloat32Exp(b);
bSign = extractFloat32Sign(b);
zSign = aSign ^ bSign;
if (aExp == 0xFF) {
if (bExp == 0xFF) {
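/* Same stripped NaN handling as in float64_div() above. */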
}
return packFloat32(zSign, 0xFF, 0);
}
if (bExp == 0xFF) {
return packFloat32(zSign, 0, 0);
}
if (bExp == 0) {
if (bSig == 0) {
return packFloat32(zSign, 0xFF, 0);
}
normalizeFloat32Subnormal(bSig, &bExp, &bSig);
}
if (aExp == 0) {
if (aSig == 0)
return packFloat32(zSign, 0, 0);
normalizeFloat32Subnormal(aSig, &aExp, &aSig);
}
zExp = aExp - bExp + 0x7D;
aSig = (aSig | 0x00800000) << 7;
bSig = (bSig | 0x00800000) << 8;
if (bSig <= (aSig + aSig)) {
aSig >>= 1;
++zExp;
}
zSig = (((bits64) aSig) << 32);
do_div(zSig, bSig);
if ((zSig & 0x3F) == 0) {
zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32);
}
return roundAndPackFloat32(zSign, zExp, (bits32)zSig);
}
float32 float32_mul(float32 a, float32 b)
{
char aSign, bSign, zSign;
int aExp, bExp, zExp;
unsigned int aSig, bSig;
unsigned long long zSig64;
unsigned int zSig;
aSig = extractFloat32Frac(a);
aExp = extractFloat32Exp(a);
aSign = extractFloat32Sign(a);
bSig = extractFloat32Frac(b);
bExp = extractFloat32Exp(b);
bSign = extractFloat32Sign(b);
zSign = aSign ^ bSign;
if (aExp == 0) {
if (aSig == 0)
return packFloat32(zSign, 0, 0);
normalizeFloat32Subnormal(aSig, &aExp, &aSig);
}
if (bExp == 0) {
if (bSig == 0)
return packFloat32(zSign, 0, 0);
normalizeFloat32Subnormal(bSig, &bExp, &bSig);
}
if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0))
return roundAndPackFloat32(zSign, 0xff, 0);
zExp = aExp + bExp - 0x7F;
aSig = (aSig | 0x00800000) << 7;
bSig = (bSig | 0x00800000) << 8;
shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64);
zSig = zSig64;
if (0 <= (signed int)(zSig << 1)) {
zSig <<= 1;
--zExp;
}
return roundAndPackFloat32(zSign, zExp, zSig);
}
float64 float64_mul(float64 a, float64 b)
{
char aSign, bSign, zSign;
int aExp, bExp, zExp;
unsigned long long int aSig, bSig, zSig0, zSig1;
aSig = extractFloat64Frac(a);
aExp = extractFloat64Exp(a);
aSign = extractFloat64Sign(a);
bSig = extractFloat64Frac(b);
bExp = extractFloat64Exp(b);
bSign = extractFloat64Sign(b);
zSign = aSign ^ bSign;
if (aExp == 0) {
if (aSig == 0)
return packFloat64(zSign, 0, 0);
normalizeFloat64Subnormal(aSig, &aExp, &aSig);
}
if (bExp == 0) {
if (bSig == 0)
return packFloat64(zSign, 0, 0);
normalizeFloat64Subnormal(bSig, &bExp, &bSig);
}
if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0))
return roundAndPackFloat64(zSign, 0x7ff, 0);
zExp = aExp + bExp - 0x3FF;
aSig = (aSig | 0x0010000000000000LL) << 10;
bSig = (bSig | 0x0010000000000000LL) << 11;
mul64To128(aSig, bSig, &zSig0, &zSig1);
zSig0 |= (zSig1 != 0);
if (0 <= (signed long long int)(zSig0 << 1)) {
zSig0 <<= 1;
--zExp;
}
return roundAndPackFloat64(zSign, zExp, zSig0);
}
/*
* Returns the result of converting the double-precision floating-point
* value `a' to the single-precision floating-point format. The
* conversion is performed according to the IEC/IEEE Standard for Binary
* Floating-Point Arithmetic.
*/
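/*
* Mechanics (summary added for clarity): the 52-bit double fraction is
* shifted down by 22 with the sticky bit preserved, the hidden bit is
* re-inserted at bit 30, and the exponent is re-biased by 0x381 before
* the single-precision round-and-pack step.
*/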
float32 float64_to_float32(float64 a)
{
flag aSign;
int16 aExp;
bits64 aSig;
bits32 zSig;
aSig = extractFloat64Frac( a );
aExp = extractFloat64Exp( a );
aSign = extractFloat64Sign( a );
shift64RightJamming( aSig, 22, &aSig );
zSig = aSig;
if ( aExp || zSig ) {
zSig |= 0x40000000;
aExp -= 0x381;
}
return roundAndPackFloat32(aSign, aExp, zSig);
}
| linux-master | arch/sh/kernel/cpu/sh4/softfloat.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4/clock-sh4-202.c
*
* Additional SH4-202 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
#include <asm/freq.h>
#define CPG2_FRQCR3 0xfe0a0018
static int frqcr3_divisors[] = { 1, 2, 3, 4, 6, 8, 16 };
static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 };
static unsigned long emi_clk_recalc(struct clk *clk)
{
int idx = __raw_readl(CPG2_FRQCR3) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
static inline int frqcr3_lookup(struct clk *clk, unsigned long rate)
{
int divisor = clk->parent->rate / rate;
int i;
for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++)
if (frqcr3_divisors[i] == divisor)
return frqcr3_values[i];
/* Safe fallback */
return 5;
}
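/*
* Worked example (not from the original source): with a 66MHz parent and
* a requested rate of 33MHz the divisor is 2, which maps to register
* value 1; divisors with no match fall back to value 5 (divide-by-8).
*/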
static struct sh_clk_ops sh4202_emi_clk_ops = {
.recalc = emi_clk_recalc,
};
static struct clk sh4202_emi_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_emi_clk_ops,
};
static unsigned long femi_clk_recalc(struct clk *clk)
{
int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
static struct sh_clk_ops sh4202_femi_clk_ops = {
.recalc = femi_clk_recalc,
};
static struct clk sh4202_femi_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_femi_clk_ops,
};
static void shoc_clk_init(struct clk *clk)
{
int i;
/*
* For some reason, the shoc_clk seems to be set to some really
* insane value at boot (values outside of the allowable frequency
* range for instance). We deal with this by scaling it back down
* to something sensible just in case.
*
* Start scaling from the high end down until we find something
* that passes rate verification.
*/
for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
int divisor = frqcr3_divisors[i];
if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
break;
}
WARN_ON(i == ARRAY_SIZE(frqcr3_divisors)); /* Undefined clock */
}
static unsigned long shoc_clk_recalc(struct clk *clk)
{
int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007;
return clk->parent->rate / frqcr3_divisors[idx];
}
static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
{
struct clk *bclk = clk_get(NULL, "bus_clk");
unsigned long bclk_rate = clk_get_rate(bclk);
clk_put(bclk);
if (rate > bclk_rate)
return 1;
if (rate > 66000000)
return 1;
return 0;
}
static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
{
unsigned long frqcr3;
unsigned int tmp;
/* Make sure we have something sensible to switch to */
if (shoc_clk_verify_rate(clk, rate) != 0)
return -EINVAL;
tmp = frqcr3_lookup(clk, rate);
frqcr3 = __raw_readl(CPG2_FRQCR3);
frqcr3 &= ~(0x0007 << 6);
frqcr3 |= tmp << 6;
__raw_writel(frqcr3, CPG2_FRQCR3);
clk->rate = clk->parent->rate / frqcr3_divisors[tmp];
return 0;
}
static struct sh_clk_ops sh4202_shoc_clk_ops = {
.init = shoc_clk_init,
.recalc = shoc_clk_recalc,
.set_rate = shoc_clk_set_rate,
};
static struct clk sh4202_shoc_clk = {
.flags = CLK_ENABLE_ON_INIT,
.ops = &sh4202_shoc_clk_ops,
};
static struct clk *sh4202_onchip_clocks[] = {
&sh4202_emi_clk,
&sh4202_femi_clk,
&sh4202_shoc_clk,
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk),
CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk),
CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk),
};
int __init arch_clk_init(void)
{
struct clk *clk;
int i, ret = 0;
cpg_clk_init();
clk = clk_get(NULL, "master_clk");
for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) {
struct clk *clkp = sh4202_onchip_clocks[i];
clkp->parent = clk;
ret |= clk_register(clkp);
}
clk_put(clk);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh4/clock-sh4-202.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4/sq.c
*
* General management API for SH-4 integrated Store Queues
*
* Copyright (C) 2001 - 2006 Paul Mundt
* Copyright (C) 2001, 2002 M. R. Brown
*/
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>
struct sq_mapping;
struct sq_mapping {
const char *name;
unsigned long sq_addr;
unsigned long addr;
unsigned int size;
struct sq_mapping *next;
};
static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
#define store_queue_barrier() \
do { \
(void)__raw_readl(P4SEG_STORE_QUE); \
__raw_writel(0, P4SEG_STORE_QUE + 0); \
__raw_writel(0, P4SEG_STORE_QUE + 8); \
} while (0)
/**
* sq_flush_range - Flush (prefetch) a specific SQ range
* @start: the store queue address to start flushing from
* @len: the length to flush
*
* Flushes the store queue cache from @start to @start + @len in a
* linear fashion.
*/
void sq_flush_range(unsigned long start, unsigned int len)
{
unsigned long *sq = (unsigned long *)start;
/* Flush the queues */
for (len >>= 5; len--; sq += 8)
prefetchw(sq);
/* Wait for completion */
store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
struct sq_mapping **p, *tmp;
spin_lock_irq(&sq_mapping_lock);
p = &sq_mapping_list;
while ((tmp = *p) != NULL)
p = &tmp->next;
map->next = tmp;
*p = map;
spin_unlock_irq(&sq_mapping_lock);
}
static inline void sq_mapping_list_del(struct sq_mapping *map)
{
struct sq_mapping **p, *tmp;
spin_lock_irq(&sq_mapping_lock);
for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
if (tmp == map) {
*p = tmp->next;
break;
}
spin_unlock_irq(&sq_mapping_lock);
}
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
struct vm_struct *vma;
vma = __get_vm_area_caller(map->size, VM_IOREMAP, map->sq_addr,
SQ_ADDRMAX, __builtin_return_address(0));
if (!vma)
return -ENOMEM;
vma->phys_addr = map->addr;
if (ioremap_page_range((unsigned long)vma->addr,
(unsigned long)vma->addr + map->size,
vma->phys_addr, prot)) {
vunmap(vma->addr);
return -EAGAIN;
}
#else
/*
* Without an MMU (or with it turned off), this is much more
* straightforward, as we can just load up each queue's QACR with
* the physical address appropriately masked.
*/
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif
return 0;
}
/**
* sq_remap - Map a physical address through the Store Queues
* @phys: Physical address of mapping.
* @size: Length of mapping.
* @name: User invoking mapping.
* @prot: Protection bits.
*
* Remaps the physical address @phys through the next available store queue
* address of @size length. @name is logged at boot time as well as through
* the sysfs interface.
*/
unsigned long sq_remap(unsigned long phys, unsigned int size,
const char *name, pgprot_t prot)
{
struct sq_mapping *map;
unsigned long end;
unsigned int psz;
int ret, page;
/* Don't allow wraparound or zero size */
end = phys + size - 1;
if (unlikely(!size || end < phys))
return -EINVAL;
/* Don't allow anyone to remap normal memory.. */
if (unlikely(phys < virt_to_phys(high_memory)))
return -EINVAL;
phys &= PAGE_MASK;
size = PAGE_ALIGN(end + 1) - phys;
map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
if (unlikely(!map))
return -ENOMEM;
map->addr = phys;
map->size = size;
map->name = name;
page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
get_order(map->size));
if (unlikely(page < 0)) {
ret = -ENOSPC;
goto out;
}
map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);
ret = __sq_remap(map, prot);
if (unlikely(ret != 0))
goto out;
psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
likely(map->name) ? map->name : "???",
psz, psz == 1 ? " " : "s",
map->sq_addr, map->addr);
sq_mapping_list_add(map);
return map->sq_addr;
out:
kmem_cache_free(sq_cache, map);
return ret;
}
EXPORT_SYMBOL(sq_remap);
/**
* sq_unmap - Unmap a Store Queue allocation
* @vaddr: Pre-allocated Store Queue mapping.
*
* Unmaps the store queue allocation @map that was previously created by
* sq_remap(). Also frees up the pte that was previously inserted into
* the kernel page table and discards the UTLB translation.
*/
void sq_unmap(unsigned long vaddr)
{
struct sq_mapping **p, *map;
int page;
for (p = &sq_mapping_list; (map = *p); p = &map->next)
if (map->sq_addr == vaddr)
break;
if (unlikely(!map)) {
printk("%s: bad store queue address 0x%08lx\n",
__func__, vaddr);
return;
}
page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
bitmap_release_region(sq_bitmap, page, get_order(map->size));
#ifdef CONFIG_MMU
{
/*
* Tear down the VMA in the MMU case.
*/
struct vm_struct *vma;
vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
if (!vma) {
printk(KERN_ERR "%s: bad address 0x%08lx\n",
__func__, map->sq_addr);
return;
}
}
#endif
sq_mapping_list_del(map);
kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
/*
* Needlessly complex sysfs interface. Unfortunately it doesn't seem like
* there is any other easy way to add things on a per-cpu basis without
* putting the directory entries somewhere stupid and having to create
* links in sysfs by hand back in to the per-cpu directories.
*
* Some day we may want to have an additional abstraction per store
* queue, but considering the kobject hell we already have to deal with,
* it's simply not worth the trouble.
*/
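/*
 * Sketch of the resulting interface, assuming the standard cpu sysfs
 * layout: each CPU gains an "sq/mapping" attribute that lists active
 * mappings on read and accepts "<phys> <len>" in hex on write (a zero
 * length unmaps the given store queue address), e.g.:
 *
 *	echo "10000000 1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 */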
static struct kobject *sq_kobject[NR_CPUS];
struct sq_sysfs_attr {
struct attribute attr;
ssize_t (*show)(char *buf);
ssize_t (*store)(const char *buf, size_t count);
};
#define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr)
static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
if (likely(sattr->show))
return sattr->show(buf);
return -EIO;
}
static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);
if (likely(sattr->store))
return sattr->store(buf, count);
return -EIO;
}
static ssize_t mapping_show(char *buf)
{
struct sq_mapping **list, *entry;
char *p = buf;
for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
entry->sq_addr, entry->sq_addr + entry->size,
entry->addr, entry->name);
return p - buf;
}
static ssize_t mapping_store(const char *buf, size_t count)
{
unsigned long base = 0, len = 0;
sscanf(buf, "%lx %lx", &base, &len);
if (!base)
return -EIO;
if (likely(len)) {
/*
 * sq_remap() hands back a P4-segment address on success or a
 * -errno cast to unsigned long; a plain "int ret < 0" test
 * would misread any successful mapping as an error.
 */
unsigned long ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
if (IS_ERR_VALUE(ret))
return ret;
} else
sq_unmap(base);
return count;
}
static struct sq_sysfs_attr mapping_attr =
__ATTR(mapping, 0644, mapping_show, mapping_store);
static struct attribute *sq_sysfs_attrs[] = {
&mapping_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(sq_sysfs);
static const struct sysfs_ops sq_sysfs_ops = {
.show = sq_sysfs_show,
.store = sq_sysfs_store,
};
static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sq_sysfs_ops,
.default_groups = sq_sysfs_groups,
};
static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
struct kobject *kobj;
int error;
sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (unlikely(!sq_kobject[cpu]))
return -ENOMEM;
kobj = sq_kobject[cpu];
error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
"%s", "sq");
if (!error)
kobject_uevent(kobj, KOBJ_ADD);
return error;
}
static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
struct kobject *kobj = sq_kobject[cpu];
kobject_put(kobj);
}
static struct subsys_interface sq_interface = {
.name = "sq",
.subsys = &cpu_subsys,
.add_dev = sq_dev_add,
.remove_dev = sq_dev_remove,
};
static int __init sq_api_init(void)
{
unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
int ret = -ENOMEM;
printk(KERN_NOTICE "sq: Registering store queue API.\n");
sq_cache = kmem_cache_create("store_queue_cache",
sizeof(struct sq_mapping), 0, 0, NULL);
if (unlikely(!sq_cache))
return ret;
sq_bitmap = bitmap_zalloc(nr_pages, GFP_KERNEL);
if (unlikely(!sq_bitmap))
goto out;
ret = subsys_interface_register(&sq_interface);
if (unlikely(ret != 0))
goto out;
return 0;
out:
bitmap_free(sq_bitmap);
kmem_cache_destroy(sq_cache);
return ret;
}
static void __exit sq_api_exit(void)
{
subsys_interface_unregister(&sq_interface);
bitmap_free(sq_bitmap);
kmem_cache_destroy(sq_cache);
}
module_init(sq_api_init);
module_exit(sq_api_exit);
MODULE_AUTHOR("Paul Mundt <[email protected]>, M. R. Brown <[email protected]>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");
| linux-master | arch/sh/kernel/cpu/sh4/sq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7760 Setup
*
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRL0, IRL1, IRL2, IRL3,
HUDI, GPIOI, DMAC,
IRQ4, IRQ5, IRQ6, IRQ7,
HCAN20, HCAN21,
SSI0, SSI1,
HAC0, HAC1,
I2C0, I2C1,
USB, LCDC,
DMABRG0, DMABRG1, DMABRG2,
SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI,
SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
HSPI,
MMCIF0, MMCIF1, MMCIF2, MMCIF3,
MFI, ADC, CMT,
TMU0, TMU1, TMU2,
WDT, REF,
/* interrupt groups */
DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
INTC_VECT(DMAC, 0x6c0),
INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820),
INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ7, 0x860),
INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920),
INTC_VECT(SSI0, 0x940), INTC_VECT(SSI1, 0x960),
INTC_VECT(HAC0, 0x980), INTC_VECT(HAC1, 0x9a0),
INTC_VECT(I2C0, 0x9c0), INTC_VECT(I2C1, 0x9e0),
INTC_VECT(USB, 0xa00), INTC_VECT(LCDC, 0xa20),
INTC_VECT(DMABRG0, 0xa80), INTC_VECT(DMABRG1, 0xaa0),
INTC_VECT(DMABRG2, 0xac0),
INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0),
INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0),
INTC_VECT(SCIF1_ERI, 0xb00), INTC_VECT(SCIF1_RXI, 0xb20),
INTC_VECT(SCIF1_BRI, 0xb40), INTC_VECT(SCIF1_TXI, 0xb60),
INTC_VECT(SCIF2_ERI, 0xb80), INTC_VECT(SCIF2_RXI, 0xba0),
INTC_VECT(SCIF2_BRI, 0xbc0), INTC_VECT(SCIF2_TXI, 0xbe0),
INTC_VECT(SIM_ERI, 0xc00), INTC_VECT(SIM_RXI, 0xc20),
INTC_VECT(SIM_TXI, 0xc40), INTC_VECT(SIM_TEI, 0xc60),
INTC_VECT(HSPI, 0xc80),
INTC_VECT(MMCIF0, 0xd00), INTC_VECT(MMCIF1, 0xd20),
INTC_VECT(MMCIF2, 0xd40), INTC_VECT(MMCIF3, 0xd60),
INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */
INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2),
INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI),
INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI),
INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
{ IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21,
SSI0, SSI1, HAC0, HAC1, I2C0, I2C1, USB, LCDC,
0, DMABRG0, DMABRG1, DMABRG2,
SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI,
SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI,
SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, } },
{ 0xfe080044, 0xfe080064, 32, /* INTMSK04 / INTMSKCLR04 */
{ 0, 0, 0, 0, 0, 0, 0, 0,
SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI,
HSPI, MMCIF0, MMCIF1, MMCIF2,
MMCIF3, 0, 0, 0, 0, 0, 0, 0,
0, MFI, 0, 0, 0, 0, ADC, CMT, } },
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } },
{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
{ 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, 0, HUDI } },
{ 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
{ 0xfe080000, 0, 32, 4, /* INTPRI00 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfe080004, 0, 32, 4, /* INTPRI04 */ { HCAN20, HCAN21, SSI0, SSI1,
HAC0, HAC1, I2C0, I2C1 } },
{ 0xfe080008, 0, 32, 4, /* INTPRI08 */ { USB, LCDC, DMABRG, SCIF0,
SCIF1, SCIF2, SIM, HSPI } },
{ 0xfe08000c, 0, 32, 4, /* INTPRI0C */ { 0, 0, MMCIF, 0,
MFI, 0, ADC, CMT } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups,
mask_registers, prio_registers, NULL);
static struct intc_vect vectors_irq[] __initdata = {
INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
};
static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
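/*
 * The sh-sci driver expects its IRQ resources in ERI, RXI, TXI, BRI
 * order, which is why the TXI vector (0x8e0) precedes the BRI vector
 * (0x8c0) here and in the scif1/scif2 blocks below.
 */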
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfe600000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x880)),
DEFINE_RES_IRQ(evt2irq(0x8a0)),
DEFINE_RES_IRQ(evt2irq(0x8e0)),
DEFINE_RES_IRQ(evt2irq(0x8c0)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.type = PORT_SCIF,
.scscr = SCSCR_REIE,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfe610000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xb00)),
DEFINE_RES_IRQ(evt2irq(0xb20)),
DEFINE_RES_IRQ(evt2irq(0xb60)),
DEFINE_RES_IRQ(evt2irq(0xb40)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfe620000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xb80)),
DEFINE_RES_IRQ(evt2irq(0xba0)),
DEFINE_RES_IRQ(evt2irq(0xbe0)),
DEFINE_RES_IRQ(evt2irq(0xbc0)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
/*
* This is actually a SIM card module serial port, based on an SCI with
* additional registers. The sh-sci driver doesn't support the SIM port
* type, so declare it as a SCI. Don't declare the additional registers in
* the memory resource or the driver will compute an incorrect regshift
* value.
*/
.type = PORT_SCI,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfe480000, 0x10),
DEFINE_RES_IRQ(evt2irq(0xc00)),
DEFINE_RES_IRQ(evt2irq(0xc20)),
DEFINE_RES_IRQ(evt2irq(0xc40)),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7760_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&tmu0_device,
};
static int __init sh7760_devices_setup(void)
{
return platform_add_devices(sh7760_devices,
ARRAY_SIZE(sh7760_devices));
}
arch_initcall(sh7760_devices_setup);
static struct platform_device *sh7760_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7760_early_devices,
ARRAY_SIZE(sh7760_early_devices));
}
#define INTC_ICR 0xffd00000UL
#define INTC_ICR_IRLM (1 << 7)
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ:
__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irq);
break;
default:
BUG();
}
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
| linux-master | arch/sh/kernel/cpu/sh4/setup-sh7760.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH4-202 Setup
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2009 Magnus Damm
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/io.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xffe80000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
DEFINE_RES_IRQ(evt2irq(0x720)),
DEFINE_RES_IRQ(evt2irq(0x760)),
DEFINE_RES_IRQ(evt2irq(0x740)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh4202_devices[] __initdata = {
&scif0_device,
&tmu0_device,
};
static int __init sh4202_devices_setup(void)
{
return platform_add_devices(sh4202_devices,
ARRAY_SIZE(sh4202_devices));
}
arch_initcall(sh4202_devices_setup);
static struct platform_device *sh4202_early_devices[] __initdata = {
&scif0_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh4202_early_devices,
ARRAY_SIZE(sh4202_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
HUDI, TMU0, TMU1, TMU2, RTC, SCIF, WDT,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDI, 0x600),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
INTC_VECT(WDT, 0x560),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, 0, 0, 0 } },
{ 0xffd0000c, 0, 16, 4, /* IPRC */ { 0, 0, SCIF, HUDI } },
{ 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh4-202", vectors, NULL,
NULL, prio_registers, NULL);
static struct intc_vect vectors_irlm[] __initdata = {
INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
};
static DECLARE_INTC_DESC(intc_desc_irlm, "sh4-202_irlm", vectors_irlm, NULL,
NULL, prio_registers, NULL);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
#define INTC_ICR 0xffd00000UL
#define INTC_ICR_IRLM (1<<7)
void __init plat_irq_setup_pins(int mode)
{
switch (mode) {
case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irlm);
break;
default:
BUG();
}
}
| linux-master | arch/sh/kernel/cpu/sh4/setup-sh4-202.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/serial_sci.h>
#include <generated/machtypes.h>
#include <asm/platform_early.h>
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffc80000,
.end = 0xffc80000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct plat_sci_port sci_platform_data = {
.type = PORT_SCI,
};
static struct resource sci_resources[] = {
DEFINE_RES_MEM(0xffe00000, 0x20),
DEFINE_RES_IRQ(evt2irq(0x4e0)),
};
static struct platform_device sci_device = {
.name = "sh-sci",
.id = 0,
.resource = sci_resources,
.num_resources = ARRAY_SIZE(sci_resources),
.dev = {
.platform_data = &sci_platform_data,
},
};
static struct plat_sci_port scif_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif_resources[] = {
DEFINE_RES_MEM(0xffe80000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x700)),
};
static struct platform_device scif_device = {
.name = "sh-sci",
.id = 1,
.resource = scif_resources,
.num_resources = ARRAY_SIZE(scif_resources),
.dev = {
.platform_data = &scif_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xffd80000, 0x30),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct sh_timer_config tmu1_platform_data = {
.channels_mask = 3,
};
static struct resource tmu1_resources[] = {
DEFINE_RES_MEM(0xfe100000, 0x20),
DEFINE_RES_IRQ(evt2irq(0xb00)),
DEFINE_RES_IRQ(evt2irq(0xb80)),
};
static struct platform_device tmu1_device = {
.name = "sh-tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
#endif
static struct platform_device *sh7750_devices[] __initdata = {
&rtc_device,
&tmu0_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7751R)
&tmu1_device,
#endif
};
static int __init sh7750_devices_setup(void)
{
if (mach_is_rts7751r2d()) {
platform_device_register(&scif_device);
} else {
platform_device_register(&sci_device);
platform_device_register(&scif_device);
}
return platform_add_devices(sh7750_devices,
ARRAY_SIZE(sh7750_devices));
}
arch_initcall(sh7750_devices_setup);
static struct platform_device *sh7750_early_devices[] __initdata = {
&tmu0_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7751R)
&tmu1_device,
#endif
};
void __init plat_early_device_setup(void)
{
struct platform_device *dev[1];
if (mach_is_rts7751r2d()) {
scif_platform_data.scscr |= SCSCR_CKE1;
dev[0] = &scif_device;
sh_early_platform_add_devices(dev, 1);
} else {
dev[0] = &sci_device;
sh_early_platform_add_devices(dev, 1);
dev[0] = &scif_device;
sh_early_platform_add_devices(dev, 1);
}
sh_early_platform_add_devices(sh7750_early_devices,
ARRAY_SIZE(sh7750_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */
HUDI, GPIOI, DMAC,
PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3,
TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF,
/* interrupt groups */
PCIC1,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500),
INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540),
INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720),
INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } },
{ 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } },
{ 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } },
{ 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0,
TMU4, TMU3,
PCIC1, PCIC0_PCISERR } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL,
NULL, prio_registers, NULL);
/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7091)
static struct intc_vect vectors_dma4[] __initdata = {
INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
INTC_VECT(DMAC, 0x6c0),
};
static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4",
vectors_dma4, NULL,
NULL, prio_registers, NULL);
#endif
/* SH7750R and SH7751R both have 8-channel DMA controllers */
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct intc_vect vectors_dma8[] __initdata = {
INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660),
INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0),
INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0),
INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0),
INTC_VECT(DMAC, 0x6c0),
};
static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8",
vectors_dma8, NULL,
NULL, prio_registers, NULL);
#endif
/* SH7750R, SH7751 and SH7751R all have two extra timer channels */
#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \
defined(CONFIG_CPU_SUBTYPE_SH7751) || \
defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct intc_vect vectors_tmu34[] __initdata = {
INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80),
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, TMU4, TMU3,
PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2,
PCIC1_PCIDMA3, PCIC0_PCISERR } },
};
static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34",
vectors_tmu34, NULL,
mask_registers, prio_registers, NULL);
#endif
/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */
static struct intc_vect vectors_irlm[] __initdata = {
INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0),
INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360),
};
static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL,
NULL, prio_registers, NULL);
/* SH7751 and SH7751R both have PCI */
#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R)
static struct intc_vect vectors_pci[] __initdata = {
INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0),
INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0),
INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60),
INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20),
};
static struct intc_group groups_pci[] __initdata = {
INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON,
PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3),
};
static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci,
mask_registers, prio_registers, NULL);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \
defined(CONFIG_CPU_SUBTYPE_SH7750S) || \
defined(CONFIG_CPU_SUBTYPE_SH7091)
void __init plat_irq_setup(void)
{
/*
* same vectors for SH7750, SH7750S and SH7091 except for IRLM,
* see below..
*/
register_intc_controller(&intc_desc);
register_intc_controller(&intc_desc_dma4);
}
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7750R)
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
register_intc_controller(&intc_desc_dma8);
register_intc_controller(&intc_desc_tmu34);
}
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7751)
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
register_intc_controller(&intc_desc_dma4);
register_intc_controller(&intc_desc_tmu34);
register_intc_controller(&intc_desc_pci);
}
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7751R)
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
register_intc_controller(&intc_desc_dma8);
register_intc_controller(&intc_desc_tmu34);
register_intc_controller(&intc_desc_pci);
}
#endif
#define INTC_ICR 0xffd00000UL
#define INTC_ICR_IRLM (1<<7)
void __init plat_irq_setup_pins(int mode)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7091)
BUG(); /* impossible to mask interrupts on SH7750 and SH7091 */
return;
#endif
switch (mode) {
case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */
__raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR);
register_intc_controller(&intc_desc_irlm);
break;
default:
BUG();
}
}
| linux-master | arch/sh/kernel/cpu/sh4/setup-sh7750.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Performance events support for SH7750-style performance counters
*
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>
#define PM_CR_BASE 0xff000084 /* 16-bit */
#define PM_CTR_BASE 0xff100004 /* 32-bit */
#define PMCR(n) (PM_CR_BASE + ((n) * 0x04))
#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08))
#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08))
#define PMCR_PMM_MASK 0x0000003f
#define PMCR_CLKF 0x00000100
#define PMCR_PMCLR 0x00002000
#define PMCR_PMST 0x00004000
#define PMCR_PMEN 0x00008000
static struct sh_pmu sh7750_pmu;
/*
* There are a number of events supported by each counter (33 in total).
* Since we have 2 counters, each counter will take the event code as it
* corresponds to the PMCR PMM setting. Each counter can be configured
* independently.
*
* Event Code Description
* ---------- -----------
*
* 0x01 Operand read access
* 0x02 Operand write access
* 0x03 UTLB miss
* 0x04 Operand cache read miss
* 0x05 Operand cache write miss
* 0x06 Instruction fetch (w/ cache)
* 0x07 Instruction TLB miss
* 0x08 Instruction cache miss
* 0x09 All operand accesses
* 0x0a All instruction accesses
* 0x0b OC RAM operand access
* 0x0d On-chip I/O space access
* 0x0e Operand access (r/w)
* 0x0f Operand cache miss (r/w)
* 0x10 Branch instruction
* 0x11 Branch taken
* 0x12 BSR/BSRF/JSR
* 0x13 Instruction execution
* 0x14 Instruction execution in parallel
* 0x15 FPU Instruction execution
* 0x16 Interrupt
* 0x17 NMI
* 0x18 trapa instruction execution
* 0x19 UBCA match
* 0x1a UBCB match
* 0x21 Instruction cache fill
* 0x22 Operand cache fill
* 0x23 Elapsed time
* 0x24 Pipeline freeze by I-cache miss
* 0x25 Pipeline freeze by D-cache miss
* 0x27 Pipeline freeze by branch instruction
* 0x28 Pipeline freeze by CPU register
* 0x29 Pipeline freeze by FPU
*/
static const int sh7750_general_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 0x0023,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x000a,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */
[PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010,
[PERF_COUNT_HW_BRANCH_MISSES] = -1,
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
static const int sh7750_cache_events
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0001,
[ C(RESULT_MISS) ] = 0x0004,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x0002,
[ C(RESULT_MISS) ] = 0x0005,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x0006,
[ C(RESULT_MISS) ] = 0x0008,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0x0003,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0,
},
},
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
[ C(RESULT_MISS) ] = 0x0007,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = -1,
[ C(RESULT_MISS) ] = -1,
},
},
};
static int sh7750_event_map(int event)
{
return sh7750_general_events[event];
}
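/*
 * Each hardware counter is 48 bits wide, split across two registers:
 * PMCTRH holds the upper 16 bits and PMCTRL the lower 32, so a full
 * read splices the halves back together.
 */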
static u64 sh7750_pmu_read(int idx)
{
return ((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) |
__raw_readl(PMCTRL(idx));
}
static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx)
{
unsigned int tmp;
tmp = __raw_readw(PMCR(idx));
tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN);
__raw_writew(tmp, PMCR(idx));
}
static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx)
{
__raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx));
__raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx));
}
static void sh7750_pmu_disable_all(void)
{
int i;
for (i = 0; i < sh7750_pmu.num_events; i++)
__raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i));
}
static void sh7750_pmu_enable_all(void)
{
int i;
for (i = 0; i < sh7750_pmu.num_events; i++)
__raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i));
}
static struct sh_pmu sh7750_pmu = {
.name = "sh7750",
.num_events = 2,
.event_map = sh7750_event_map,
.max_events = ARRAY_SIZE(sh7750_general_events),
.raw_event_mask = PMCR_PMM_MASK,
.cache_events = &sh7750_cache_events,
.read = sh7750_pmu_read,
.disable = sh7750_pmu_disable,
.enable = sh7750_pmu_enable,
.disable_all = sh7750_pmu_disable_all,
.enable_all = sh7750_pmu_enable_all,
};
static int __init sh7750_pmu_init(void)
{
/*
* Make sure this CPU actually has perf counters.
*/
if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
pr_notice("HW perf events unsupported, software events only.\n");
return -ENODEV;
}
return register_sh_pmu(&sh7750_pmu);
}
early_initcall(sh7750_pmu_init);
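/*
 * With the PMU registered, the event codes listed above map straight on
 * to perf's raw event interface (illustrative invocation; the code is
 * masked by PMCR_PMM_MASK), e.g. counting elapsed time with event 0x23:
 *
 *	perf stat -e r23 -- ./workload
 */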
| linux-master | arch/sh/kernel/cpu/sh4/perf_event.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh4/probe.c
*
* CPU Subtype Probing for SH-4.
*
* Copyright (C) 2001 - 2007 Paul Mundt
* Copyright (C) 2003 Richard Curnow
*/
#include <linux/init.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/cache.h>
void cpu_probe(void)
{
unsigned long pvr, prr, cvr;
unsigned long size;
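/*
 * CVR cache-size encodings index this table; only the encodings used
 * by known parts are populated (4KB, 8KB, 16KB, 32KB and 64KB).
 */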
static unsigned long sizes[16] = {
[1] = (1 << 12),
[2] = (1 << 13),
[4] = (1 << 14),
[8] = (1 << 15),
[9] = (1 << 16)
};
pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff;
prr = (__raw_readl(CCN_PRR) >> 4) & 0xff;
cvr = (__raw_readl(CCN_CVR));
/*
* Setup some sane SH-4 defaults for the icache
*/
boot_cpu_data.icache.way_incr = (1 << 13);
boot_cpu_data.icache.entry_shift = 5;
boot_cpu_data.icache.sets = 256;
boot_cpu_data.icache.ways = 1;
boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
/*
* And again for the dcache ..
*/
boot_cpu_data.dcache.way_incr = (1 << 14);
boot_cpu_data.dcache.entry_shift = 5;
boot_cpu_data.dcache.sets = 512;
boot_cpu_data.dcache.ways = 1;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
/* We don't know the chip cut */
boot_cpu_data.cut_major = boot_cpu_data.cut_minor = -1;
/*
* Setup some generic flags we can probe on SH-4A parts
*/
if (((pvr >> 16) & 0xff) == 0x10) {
boot_cpu_data.family = CPU_FAMILY_SH4A;
if ((cvr & 0x10000000) == 0) {
boot_cpu_data.flags |= CPU_HAS_DSP;
boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP;
}
boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER;
boot_cpu_data.cut_major = pvr & 0x7f;
boot_cpu_data.icache.ways = 4;
boot_cpu_data.dcache.ways = 4;
} else {
/* And some SH-4 defaults.. */
boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU;
boot_cpu_data.family = CPU_FAMILY_SH4;
}
/* FPU detection works for almost everyone */
if ((cvr & 0x20000000))
boot_cpu_data.flags |= CPU_HAS_FPU;
/* Mask off the upper chip ID */
pvr &= 0xffff;
/*
* Probe the underlying processor version/revision and
* adjust cpu_data setup accordingly.
*/
switch (pvr) {
case 0x205:
boot_cpu_data.type = CPU_SH7750;
boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
CPU_HAS_PERF_COUNTER;
break;
case 0x206:
boot_cpu_data.type = CPU_SH7750S;
boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG |
CPU_HAS_PERF_COUNTER;
break;
case 0x1100:
boot_cpu_data.type = CPU_SH7751;
break;
case 0x2001:
case 0x2004:
boot_cpu_data.type = CPU_SH7770;
break;
case 0x2006:
case 0x200A:
if (prr == 0x61)
boot_cpu_data.type = CPU_SH7781;
else if (prr == 0xa1)
boot_cpu_data.type = CPU_SH7763;
else
boot_cpu_data.type = CPU_SH7780;
break;
case 0x3000:
case 0x3003:
case 0x3009:
boot_cpu_data.type = CPU_SH7343;
break;
case 0x3004:
case 0x3007:
boot_cpu_data.type = CPU_SH7785;
break;
case 0x4004:
case 0x4005:
boot_cpu_data.type = CPU_SH7786;
boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE;
break;
case 0x3008:
switch (prr) {
case 0x50:
case 0x51:
boot_cpu_data.type = CPU_SH7723;
boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
break;
case 0x70:
boot_cpu_data.type = CPU_SH7366;
break;
case 0xa0:
case 0xa1:
boot_cpu_data.type = CPU_SH7722;
break;
}
break;
case 0x300b:
switch (prr) {
case 0x20:
boot_cpu_data.type = CPU_SH7724;
boot_cpu_data.flags |= CPU_HAS_L2_CACHE;
break;
case 0x10:
case 0x11:
boot_cpu_data.type = CPU_SH7757;
break;
case 0xd0:
case 0x40: /* yon-ten-go */
boot_cpu_data.type = CPU_SH7372;
break;
case 0xE0: /* 0x4E0 */
boot_cpu_data.type = CPU_SH7734; /* SH7733/SH7734 */
break;
}
break;
case 0x4000: /* 1st cut */
case 0x4001: /* 2nd cut */
boot_cpu_data.type = CPU_SHX3;
break;
case 0x700:
boot_cpu_data.type = CPU_SH4_501;
boot_cpu_data.flags &= ~CPU_HAS_FPU;
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
break;
case 0x600:
boot_cpu_data.type = CPU_SH4_202;
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
break;
case 0x500 ... 0x501:
switch (prr) {
case 0x10:
boot_cpu_data.type = CPU_SH7750R;
break;
case 0x11:
boot_cpu_data.type = CPU_SH7751R;
break;
case 0x50 ... 0x5f:
boot_cpu_data.type = CPU_SH7760;
break;
}
boot_cpu_data.icache.ways = 2;
boot_cpu_data.dcache.ways = 2;
break;
}
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
*/
if (boot_cpu_data.icache.ways > 1) {
size = sizes[(cvr >> 20) & 0xf];
boot_cpu_data.icache.way_incr = (size >> 1);
boot_cpu_data.icache.sets = (size >> 6);
}
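/*
 * Worked example: a 16KB two-way I-cache gives way_incr = 8KB and
 * sets = 16384 / 64 = 256, i.e. 2 ways x 256 sets x 32-byte lines.
 */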
/* And the rest of the D-cache */
if (boot_cpu_data.dcache.ways > 1) {
size = sizes[(cvr >> 16) & 0xf];
boot_cpu_data.dcache.way_incr = (size >> 1);
boot_cpu_data.dcache.sets = (size >> 6);
}
/*
* SH-4A's have an optional PIPT L2.
*/
if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
/*
* Verify that it really has something hooked up, this
* is the safety net for CPUs that have optional L2
* support yet do not implement it.
*/
if ((cvr & 0xf) == 0)
boot_cpu_data.flags &= ~CPU_HAS_L2_CACHE;
else {
/*
* Silicon and specifications have clearly never
* met..
*/
cvr ^= 0xf;
/*
* Size calculation is much more sensible
* than it is for the L1.
*
* Sizes are 128KB, 256KB, 512KB, and 1MB.
*/
size = (cvr & 0xf) << 17;
boot_cpu_data.scache.way_incr = (1 << 16);
boot_cpu_data.scache.entry_shift = 5;
boot_cpu_data.scache.ways = 4;
boot_cpu_data.scache.linesz = L1_CACHE_BYTES;
boot_cpu_data.scache.entry_mask =
(boot_cpu_data.scache.way_incr -
boot_cpu_data.scache.linesz);
boot_cpu_data.scache.sets = size /
(boot_cpu_data.scache.linesz *
boot_cpu_data.scache.ways);
boot_cpu_data.scache.way_size =
(boot_cpu_data.scache.sets *
boot_cpu_data.scache.linesz);
}
}
}
| linux-master | arch/sh/kernel/cpu/sh4/probe.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#include <cpu/gpio.h>
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
unsigned short data;
if (cflag & CRTSCTS) {
/* enable RTS/CTS */
if (port->mapbase == 0xa4430000) { /* SCIF0 */
/* Clear PTCR bit 9-2; enable all scif pins but sck */
data = __raw_readw(PORT_PTCR);
__raw_writew((data & 0xfc03), PORT_PTCR);
} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
/* Clear PVCR bit 9-2 */
data = __raw_readw(PORT_PVCR);
__raw_writew((data & 0xfc03), PORT_PVCR);
}
} else {
if (port->mapbase == 0xa4430000) { /* SCIF0 */
/* Clear PTCR bit 5-2; enable only tx and rx */
data = __raw_readw(PORT_PTCR);
__raw_writew((data & 0xffc3), PORT_PTCR);
} else if (port->mapbase == 0xa4438000) { /* SCIF1 */
/* Clear PVCR bit 5-2 */
data = __raw_readw(PORT_PVCR);
__raw_writew((data & 0xffc3), PORT_PVCR);
}
}
}
struct plat_sci_port_ops sh7720_sci_port_ops = {
.init_pins = sh7720_sci_init_pins,
};
| linux-master | arch/sh/kernel/cpu/sh3/serial-sh7720.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh7710.c
*
* SH7710 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 };
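/*
 * FRQCR carries one 3-bit md_table index per clock: bits [2:0] for the
 * module clock, bits [6:4] for the CPU clock and bits [10:8] for the
 * bus clock, each a divisor from the master clock. The rate handed to
 * master_clk_init() is the module clock (platform PCLK), so the master
 * rate is recovered by multiplying it back up.
 */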
static void master_clk_init(struct clk *clk)
{
clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007];
}
static struct sh_clk_ops sh7710_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0007);
return clk->parent->rate / md_table[idx];
}
static struct sh_clk_ops sh7710_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0700) >> 8;
return clk->parent->rate / md_table[idx];
}
static struct sh_clk_ops sh7710_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0070) >> 4;
return clk->parent->rate / md_table[idx];
}
static struct sh_clk_ops sh7710_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7710_clk_ops[] = {
&sh7710_master_clk_ops,
&sh7710_module_clk_ops,
&sh7710_bus_clk_ops,
&sh7710_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7710_clk_ops))
*ops = sh7710_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh7710.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh3.c
*
* Generic SH-3 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
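/*
 * On plain SH-3 each rate-select field is split across FRQCR: the low
 * two bits sit in bit pairs [1:0] (module), [3:2] (CPU) and [5:4] (bus)
 * while the third, high bit sits at bit 13, 14 and 15 respectively, so
 * each recalc below splices the two pieces into a 3-bit table index.
 */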
static void master_clk_init(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
static struct sh_clk_ops sh3_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh3_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
return clk->parent->rate / stc_multipliers[idx];
}
static struct sh_clk_ops sh3_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh3_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh3_clk_ops[] = {
&sh3_master_clk_ops,
&sh3_module_clk_ops,
&sh3_bus_clk_ops,
&sh3_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh3_clk_ops))
*ops = sh3_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh7709.c
*
* SH7709 support for the clock framework
*
* Copyright (C) 2005 Andriy Skulysh
*
* Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 };
static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void master_clk_init(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
static struct sh_clk_ops sh7709_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7709_module_clk_ops = {
.recalc = module_clk_recalc,
};
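/*
 * Unusually, the bus clock here is derived by *multiplying* the parent
 * rate by an STC entry rather than dividing; when FRQCR bit 7 is clear
 * the fixed x2 entry (idx 1) is used instead of the encoded field.
 */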
static unsigned long bus_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0080) ?
((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1;
return clk->parent->rate * stc_multipliers[idx];
}
static struct sh_clk_ops sh7709_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7709_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7709_clk_ops[] = {
&sh7709_master_clk_ops,
&sh7709_module_clk_ops,
&sh7709_bus_clk_ops,
&sh7709_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7709_clk_ops))
*ops = sh7709_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh7709.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Setup code for SH7720, SH7721.
*
* Copyright (C) 2007 Markus Brunner, Mark Jonas
* Copyright (C) 2009 Paul Mundt
*
* Based on arch/sh/kernel/cpu/sh4/setup-sh7750.c:
*
* Copyright (C) 2006 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/rtc.h>
#include <asm/platform_early.h>
#include <cpu/serial.h>
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa413fec0,
.end = 0xa413fec0 + 0x28 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_rtc_platform_info rtc_info = {
.capabilities = RTC_CAP_4_DIGIT_YEAR,
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
.dev = {
.platform_data = &rtc_info,
},
};
static struct plat_sci_port scif0_platform_data = {
.type = PORT_SCIF,
.ops = &sh7720_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xa4430000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc00)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.type = PORT_SCIF,
.ops = &sh7720_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xa4438000, 0x100),
DEFINE_RES_IRQ(evt2irq(0xc20)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct resource usb_ohci_resources[] = {
[0] = {
.start = 0xA4428000,
.end = 0xA44280FF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0xa60),
.end = evt2irq(0xa60),
.flags = IORESOURCE_IRQ,
},
};
static u64 usb_ohci_dma_mask = 0xffffffffUL;
static struct usb_ohci_pdata usb_ohci_pdata;
static struct platform_device usb_ohci_device = {
.name = "ohci-platform",
.id = -1,
.dev = {
.dma_mask = &usb_ohci_dma_mask,
.coherent_dma_mask = 0xffffffff,
.platform_data = &usb_ohci_pdata,
},
.num_resources = ARRAY_SIZE(usb_ohci_resources),
.resource = usb_ohci_resources,
};
static struct resource usbf_resources[] = {
[0] = {
.name = "sh_udc",
.start = 0xA4420000,
.end = 0xA44200FF,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "sh_udc",
.start = evt2irq(0xa20),
.end = evt2irq(0xa20),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usbf_device = {
.name = "sh_udc",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
},
.num_resources = ARRAY_SIZE(usbf_resources),
.resource = usbf_resources,
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 0x1f,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0x044a0000, 0x60),
DEFINE_RES_IRQ(evt2irq(0xf00)),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-32",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xa412fe90, 0x28),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu-sh3",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7720_devices[] __initdata = {
&scif0_device,
&scif1_device,
&cmt_device,
&tmu0_device,
&rtc_device,
&usb_ohci_device,
&usbf_device,
};
static int __init sh7720_devices_setup(void)
{
return platform_add_devices(sh7720_devices,
ARRAY_SIZE(sh7720_devices));
}
arch_initcall(sh7720_devices_setup);
static struct platform_device *sh7720_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&cmt_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7720_early_devices,
ARRAY_SIZE(sh7720_early_devices));
}
enum {
UNUSED = 0,
/* interrupt sources */
TMU0, TMU1, TMU2, RTC,
WDT, REF_RCMI, SIM,
IRQ0, IRQ1, IRQ2, IRQ3,
USBF_SPD, TMU_SUNI, IRQ5, IRQ4,
DMAC1, LCDC, SSL,
ADC, DMAC2, USBFI, CMT,
SCIF0, SCIF1,
PINT07, PINT815, TPU, IIC,
SIOF0, SIOF1, MMC, PCC,
USBHI, AFEIF,
H_UDI,
};
static struct intc_vect vectors[] __initdata = {
/* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(RTC, 0x480),
INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0),
INTC_VECT(SIM, 0x4e0), INTC_VECT(SIM, 0x500),
INTC_VECT(SIM, 0x520), INTC_VECT(SIM, 0x540),
INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580),
/* H_UDI cannot be masked */ INTC_VECT(TMU_SUNI, 0x6c0),
INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1, 0x800),
INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC1, 0x840),
INTC_VECT(DMAC1, 0x860), INTC_VECT(LCDC, 0x900),
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
INTC_VECT(SSL, 0x980),
#endif
INTC_VECT(USBFI, 0xa20), INTC_VECT(USBFI, 0xa40),
INTC_VECT(USBHI, 0xa60),
INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00),
INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80),
INTC_VECT(PINT815, 0xca0), INTC_VECT(SIOF0, 0xd00),
INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU, 0xd80),
INTC_VECT(TPU, 0xda0), INTC_VECT(TPU, 0xdc0),
INTC_VECT(TPU, 0xde0), INTC_VECT(IIC, 0xe00),
INTC_VECT(MMC, 0xe80), INTC_VECT(MMC, 0xea0),
INTC_VECT(MMC, 0xec0), INTC_VECT(MMC, 0xee0),
INTC_VECT(CMT, 0xf00), INTC_VECT(PCC, 0xf60),
INTC_VECT(AFEIF, 0xfe0),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xA414FEE2UL, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } },
{ 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } },
{ 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } },
{ 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } },
{ 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } },
{ 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } },
{ 0xA4080006UL, 0, 16, 4, /* IPRI */ { SIOF0, SIOF1, MMC, PCC } },
{ 0xA4080008UL, 0, 16, 4, /* IPRJ */ { 0, USBHI, 0, AFEIF } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, NULL,
NULL, prio_registers, NULL);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
plat_irq_setup_sh3();
}
| linux-master | arch/sh/kernel/cpu/sh3/setup-sh7720.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7705 Setup
*
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/rtc.h>
#include <cpu/serial.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
PINT07, PINT815,
DMAC, SCIF0, SCIF2, ADC_ADI, USB,
TPU0, TPU1, TPU2, TPU3,
TMU0, TMU1, TMU2,
RTC, WDT, REF_RCMI,
};
static struct intc_vect vectors[] __initdata = {
/* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
INTC_VECT(SCIF0, 0x8e0),
INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
INTC_VECT(SCIF2, 0x960),
INTC_VECT(ADC_ADI, 0x980),
INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40),
INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20),
INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF_RCMI, 0x580),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, IRQ5, IRQ4 } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, SCIF0, SCIF2, ADC_ADI } },
{ 0xa4080000, 0, 16, 4, /* IPRF */ { 0, 0, USB } },
{ 0xa4080002, 0, 16, 4, /* IPRG */ { TPU0, TPU1 } },
{ 0xa4080004, 0, 16, 4, /* IPRH */ { TPU2, TPU3 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
NULL, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_CKE1,
.type = PORT_SCIF,
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xa4410000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.type = PORT_SCIF,
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xa4400000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x880)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffffec0,
.end = 0xfffffec0 + 0x1e,
.flags = IORESOURCE_IO,
},
[1] = {
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_rtc_platform_info rtc_info = {
.capabilities = RTC_CAP_4_DIGIT_YEAR,
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
.dev = {
.platform_data = &rtc_info,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xfffffe90, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu-sh3",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7705_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&rtc_device,
};
static int __init sh7705_devices_setup(void)
{
return platform_add_devices(sh7705_devices,
ARRAY_SIZE(sh7705_devices));
}
arch_initcall(sh7705_devices_setup);
static struct platform_device *sh7705_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7705_early_devices,
ARRAY_SIZE(sh7705_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
plat_irq_setup_sh3();
}
| linux-master | arch/sh/kernel/cpu/sh3/setup-sh7705.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh7712.c
*
* SH7712 support for the clock framework
*
* Copyright (C) 2007 Andrew Murray <[email protected]>
*
* Based on arch/sh/kernel/cpu/sh3/clock-sh3.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int multipliers[] = { 1, 2, 3 };
static int divisors[] = { 1, 2, 3, 4, 6 };
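/*
 * FRQCR layout as used below: bits [9:8] select the PLL multiplier for
 * the master clock, bits [2:0] the module clock divider and bits [5:4]
 * the CPU clock divider, indexing the two tables above.
 */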
static void master_clk_init(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0300) >> 8;
clk->rate *= multipliers[idx];
}
static struct sh_clk_ops sh7712_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = frqcr & 0x0007;
return clk->parent->rate / divisors[idx];
}
static struct sh_clk_ops sh7712_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = (frqcr & 0x0030) >> 4;
return clk->parent->rate / divisors[idx];
}
static struct sh_clk_ops sh7712_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7712_clk_ops[] = {
&sh7712_master_clk_ops,
&sh7712_module_clk_ops,
&sh7712_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7712_clk_ops))
*ops = sh7712_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh7712.c |
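/*
 * A standalone sketch (not kernel code) of the FRQCR parsing performed by
 * the SH7712 clock ops above: fixed multiplier/divisor tables indexed by
 * small bitfields of the frequency control register.  The FRQCR image and
 * the 33 MHz input rate below are illustrative values only; real hardware
 * only produces indices that fall inside the tables.
 */
#include <stdio.h>

static const int multipliers[] = { 1, 2, 3 };
static const int divisors[]    = { 1, 2, 3, 4, 6 };

int main(void)
{
	unsigned short frqcr = 0x0112;		/* hypothetical register image */
	unsigned long xtal = 33000000UL;	/* hypothetical 33 MHz input */
	unsigned long master, module, cpu;

	master = xtal * multipliers[(frqcr & 0x0300) >> 8];	/* x2 */
	module = master / divisors[frqcr & 0x0007];		/* /3 */
	cpu    = master / divisors[(frqcr & 0x0030) >> 4];	/* /2 */

	printf("master %lu, module %lu, cpu %lu\n", master, module, cpu);
	return 0;
}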
// SPDX-License-Identifier: GPL-2.0
/*
* SH3 Setup code for SH7710, SH7712
*
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <asm/rtc.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
DMAC1, SCIF0, SCIF1, DMAC2, IPSEC,
EDMAC0, EDMAC1, EDMAC2,
SIOF0, SIOF1,
TMU0, TMU1, TMU2,
RTC, WDT, REF,
};
static struct intc_vect vectors[] __initdata = {
/* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(DMAC1, 0x800), INTC_VECT(DMAC1, 0x820),
INTC_VECT(DMAC1, 0x840), INTC_VECT(DMAC1, 0x860),
INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
INTC_VECT(SCIF1, 0x900), INTC_VECT(SCIF1, 0x920),
INTC_VECT(SCIF1, 0x940), INTC_VECT(SCIF1, 0x960),
INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0),
#ifdef CONFIG_CPU_SUBTYPE_SH7710
INTC_VECT(IPSEC, 0xbe0),
#endif
INTC_VECT(EDMAC0, 0xc00), INTC_VECT(EDMAC1, 0xc20),
INTC_VECT(EDMAC2, 0xc40),
INTC_VECT(SIOF0, 0xe00), INTC_VECT(SIOF0, 0xe20),
INTC_VECT(SIOF0, 0xe40), INTC_VECT(SIOF0, 0xe60),
INTC_VECT(SIOF1, 0xe80), INTC_VECT(SIOF1, 0xea0),
INTC_VECT(SIOF1, 0xec0), INTC_VECT(SIOF1, 0xee0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF, 0x580),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } },
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC1, SCIF0, SCIF1 } },
{ 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC, DMAC2 } },
{ 0xa4080002, 0, 16, 4, /* IPRG */ { EDMAC0, EDMAC1, EDMAC2 } },
{ 0xa4080004, 0, 16, 4, /* IPRH */ { 0, 0, 0, SIOF0 } },
{ 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, NULL,
NULL, prio_registers, NULL);
static struct resource rtc_resources[] = {
[0] = {
.start = 0xa413fec0,
.end = 0xa413fec0 + 0x1e,
.flags = IORESOURCE_IO,
},
[1] = {
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_rtc_platform_info rtc_info = {
.capabilities = RTC_CAP_4_DIGIT_YEAR,
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
.dev = {
.platform_data = &rtc_info,
},
};
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xa4400000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x880)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE | SCSCR_CKE1,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xa4410000, 0x100),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xa412fe90, 0x28),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu-sh3",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh7710_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&rtc_device,
};
static int __init sh7710_devices_setup(void)
{
return platform_add_devices(sh7710_devices,
ARRAY_SIZE(sh7710_devices));
}
arch_initcall(sh7710_devices_setup);
static struct platform_device *sh7710_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7710_early_devices,
ARRAY_SIZE(sh7710_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
plat_irq_setup_sh3();
}
| linux-master | arch/sh/kernel/cpu/sh3/setup-sh7710.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Shared SH3 Setup code
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/platform_early.h>
/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
};
static struct intc_vect vectors_irq0123[] __initdata = {
INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660),
};
static struct intc_vect vectors_irq45[] __initdata = {
INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
};
static struct intc_mask_reg ack_registers[] __initdata = {
{ 0xa4000004, 0, 8, /* IRR0 */
{ 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
};
static struct intc_sense_reg sense_registers[] __initdata = {
{ 0xa4000010, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } },
};
static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh3-irq0123",
vectors_irq0123, NULL, NULL,
prio_registers, sense_registers, ack_registers);
static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45",
vectors_irq45, NULL, NULL,
prio_registers, sense_registers, ack_registers);
#define INTC_ICR1 0xa4000010UL
#define INTC_ICR1_IRQLVL (1<<14)
void __init plat_irq_setup_pins(int mode)
{
if (mode == IRQ_MODE_IRQ) {
__raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1);
register_intc_controller(&intc_desc_irq0123);
return;
}
BUG();
}
void __init plat_irq_setup_sh3(void)
{
register_intc_controller(&intc_desc_irq45);
}
| linux-master | arch/sh/kernel/cpu/sh3/setup-sh3.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH3 Setup code for SH7706, SH7707, SH7708, SH7709
*
* Copyright (C) 2007 Magnus Damm
* Copyright (C) 2009 Paul Mundt
*
* Based on setup-sh7709.c
*
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <cpu/serial.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
PINT07, PINT815,
DMAC, SCIF0, SCIF2, SCI, ADC_ADI,
LCDC, PCC0, PCC1,
TMU0, TMU1, TMU2,
RTC, WDT, REF,
};
static struct intc_vect vectors[] __initdata = {
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(SCI, 0x4e0), INTC_VECT(SCI, 0x500),
INTC_VECT(SCI, 0x520), INTC_VECT(SCI, 0x540),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF, 0x580),
INTC_VECT(REF, 0x5a0),
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
/* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
INTC_VECT(ADC_ADI, 0x980),
INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
INTC_VECT(SCIF2, 0x940), INTC_VECT(SCIF2, 0x960),
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0),
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707)
INTC_VECT(LCDC, 0x9a0),
INTC_VECT(PCC0, 0x9c0), INTC_VECT(PCC1, 0x9e0),
#endif
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } },
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, 0, SCIF2, ADC_ADI } },
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
{ 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { 0, SCIF0 } },
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707)
{ 0xa400001c, 0, 16, 4, /* IPRF */ { 0, LCDC, PCC0, PCC1, } },
#endif
};
static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, NULL,
NULL, prio_registers, NULL);
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffffec0,
.end = 0xfffffec0 + 0x1e,
.flags = IORESOURCE_IO,
},
[1] = {
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct plat_sci_port scif0_platform_data = {
.type = PORT_SCI,
.ops = &sh770x_sci_port_ops,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffffe80, 0x10),
DEFINE_RES_IRQ(evt2irq(0x4e0)),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
static struct plat_sci_port scif1_platform_data = {
.type = PORT_SCIF,
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH3_SCIF_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xa4000150, 0x10),
DEFINE_RES_IRQ(evt2irq(0x900)),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
static struct plat_sci_port scif2_platform_data = {
.type = PORT_IRDA,
.ops = &sh770x_sci_port_ops,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xa4000140, 0x10),
DEFINE_RES_IRQ(evt2irq(0x880)),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
#endif
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(0xfffffe90, 0x2c),
DEFINE_RES_IRQ(evt2irq(0x400)),
DEFINE_RES_IRQ(evt2irq(0x420)),
DEFINE_RES_IRQ(evt2irq(0x440)),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu-sh3",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh770x_devices[] __initdata = {
&scif0_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
&scif1_device,
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
&scif2_device,
#endif
&tmu0_device,
&rtc_device,
};
static int __init sh770x_devices_setup(void)
{
return platform_add_devices(sh770x_devices,
ARRAY_SIZE(sh770x_devices));
}
arch_initcall(sh770x_devices_setup);
static struct platform_device *sh770x_early_devices[] __initdata = {
&scif0_device,
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
&scif1_device,
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
&scif2_device,
#endif
&tmu0_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh770x_early_devices,
ARRAY_SIZE(sh770x_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \
defined(CONFIG_CPU_SUBTYPE_SH7707) || \
defined(CONFIG_CPU_SUBTYPE_SH7709)
plat_irq_setup_sh3();
#endif
}
| linux-master | arch/sh/kernel/cpu/sh3/setup-sh770x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh7706.c
*
* SH7706 support for the clock framework
*
* Copyright (C) 2006 Takashi YOSHII
*
* Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c
* Copyright (C) 2005 Andriy Skulysh
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static int stc_multipliers[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 };
static void master_clk_init(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
clk->rate *= pfc_divisors[idx];
}
static struct sh_clk_ops sh7706_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7706_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4);
return clk->parent->rate / stc_multipliers[idx];
}
static struct sh_clk_ops sh7706_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int frqcr = __raw_readw(FRQCR);
int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7706_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7706_clk_ops[] = {
&sh7706_master_clk_ops,
&sh7706_module_clk_ops,
&sh7706_bus_clk_ops,
&sh7706_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7706_clk_ops))
*ops = sh7706_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh7706.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#define SCPCR 0xA4000116
#define SCPDR 0xA4000136
static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
unsigned short data;
/* We need to set SCPCR to enable RTS/CTS */
data = __raw_readw(SCPCR);
/* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0 */
__raw_writew(data & 0x0fcf, SCPCR);
if (!(cflag & CRTSCTS)) {
/* RTS/CTS flow control is off: drive /RTS2 manually instead */
data = __raw_readw(SCPCR);
/* Clear out SCP7MD1,0, SCP4MD1,0,
Set SCP6MD1,0 = {01} (output) */
__raw_writew((data & 0x0fcf) | 0x1000, SCPCR);
data = __raw_readb(SCPDR);
/* Set /RTS2 (bit6) = 0 */
__raw_writeb(data & 0xbf, SCPDR);
}
}
struct plat_sci_port_ops sh770x_sci_port_ops = {
.init_pins = sh770x_sci_init_pins,
};
| linux-master | arch/sh/kernel/cpu/sh3/serial-sh770x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/probe.c
*
* CPU Subtype Probing for SH-3.
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2002 Paul Mundt
*/
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
void cpu_probe(void)
{
unsigned long addr0, addr1, data0, data1, data2, data3;
jump_to_uncached();
/*
* Check whether the entry shadows or not.
* When it shadows, this is a 128-entry system;
* otherwise it is a 256-entry system.
*/
addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);
/* First, write back & invalidate */
data0 = __raw_readl(addr0);
__raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
data1 = __raw_readl(addr1);
__raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);
/* Next, check if there's shadow or not */
data0 = __raw_readl(addr0);
data0 ^= SH_CACHE_VALID;
__raw_writel(data0, addr0);
data1 = __raw_readl(addr1);
data2 = data1 ^ SH_CACHE_VALID;
__raw_writel(data2, addr1);
data3 = __raw_readl(addr0);
/* Lastly, invalidate them. */
__raw_writel(data0&~SH_CACHE_VALID, addr0);
__raw_writel(data2&~SH_CACHE_VALID, addr1);
back_to_cached();
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.dcache.entry_shift = 4;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
boot_cpu_data.dcache.flags = 0;
/*
* The 7709A/7729 have a 16K cache (256 entries), while the 7702
* has only a 2K direct-mapped cache. The 7702 is not supported (yet).
*/
if (data0 == data1 && data2 == data3) { /* Shadow */
boot_cpu_data.dcache.way_incr = (1 << 11);
boot_cpu_data.dcache.entry_mask = 0x7f0;
boot_cpu_data.dcache.sets = 128;
boot_cpu_data.type = CPU_SH7708;
boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
} else { /* 7709A or 7729 */
boot_cpu_data.dcache.way_incr = (1 << 12);
boot_cpu_data.dcache.entry_mask = 0xff0;
boot_cpu_data.dcache.sets = 256;
boot_cpu_data.type = CPU_SH7729;
#if defined(CONFIG_CPU_SUBTYPE_SH7706)
boot_cpu_data.type = CPU_SH7706;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
boot_cpu_data.type = CPU_SH7710;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7712)
boot_cpu_data.type = CPU_SH7712;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
boot_cpu_data.type = CPU_SH7720;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7721)
boot_cpu_data.type = CPU_SH7721;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
boot_cpu_data.type = CPU_SH7705;
#if defined(CONFIG_SH7705_CACHE_32KB)
boot_cpu_data.dcache.way_incr = (1 << 13);
boot_cpu_data.dcache.entry_mask = 0x1ff0;
boot_cpu_data.dcache.sets = 512;
__raw_writel(CCR_CACHE_32KB, CCR3_REG);
#else
__raw_writel(CCR_CACHE_16KB, CCR3_REG);
#endif
#endif
}
/*
* SH-3 doesn't have separate caches
*/
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH3;
}
| linux-master | arch/sh/kernel/cpu/sh3/probe.c |
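/*
 * A standalone sketch (not kernel code) of the cache geometry fields that
 * cpu_probe() above fills in by hand: way_incr and entry_mask are fully
 * determined by the set count and the entry shift, which is what the
 * asserts below check against the probe code's literal constants.
 */
#include <assert.h>
#include <stdio.h>

struct cache_geom {
	unsigned int sets, entry_shift, ways, linesz;
};

static unsigned long way_incr(const struct cache_geom *c)
{
	return (unsigned long)c->sets << c->entry_shift;
}

static unsigned long entry_mask(const struct cache_geom *c)
{
	return (unsigned long)(c->sets - 1) << c->entry_shift;
}

int main(void)
{
	struct cache_geom sh7708 = { 128, 4, 4, 16 };
	struct cache_geom sh7729 = { 256, 4, 4, 16 };

	assert(way_incr(&sh7708) == (1 << 11));
	assert(entry_mask(&sh7708) == 0x7f0);
	assert(way_incr(&sh7729) == (1 << 12));
	assert(entry_mask(&sh7729) == 0xff0);

	printf("sh7729: %u sets x %u ways x %u bytes = %u bytes total\n",
	       sh7729.sets, sh7729.ways, sh7729.linesz,
	       sh7729.sets * sh7729.ways * sh7729.linesz);
	return 0;
}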
// SPDX-License-Identifier: GPL-2.0
#include <linux/serial_sci.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
#define PACR 0xa4050100
#define PBCR 0xa4050102
static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
if (port->mapbase == 0xA4400000) {
__raw_writew(__raw_readw(PACR) & 0xffc0, PACR);
__raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR);
} else if (port->mapbase == 0xA4410000)
__raw_writew(__raw_readw(PBCR) & 0xf003, PBCR);
}
struct plat_sci_port_ops sh7710_sci_port_ops = {
.init_pins = sh7710_sci_init_pins,
};
| linux-master | arch/sh/kernel/cpu/sh3/serial-sh7710.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7720 Pinmux
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7720_pfc_resources[] = {
[0] = {
.start = 0xa4050100,
.end = 0xa405016f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7720", sh7720_pfc_resources,
ARRAY_SIZE(sh7720_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh3/pinmux-sh7720.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh3/clock-sh7705.c
*
* SH7705 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/*
* SH7705 uses the same divisors as the generic SH-3 case, it's just the
* FRQCR layout that is a bit different.
*/
static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
}
static struct sh_clk_ops sh7705_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = __raw_readw(FRQCR) & 0x0003;
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7705_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
return clk->parent->rate / stc_multipliers[idx];
}
static struct sh_clk_ops sh7705_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7705_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7705_clk_ops[] = {
&sh7705_master_clk_ops,
&sh7705_module_clk_ops,
&sh7705_bus_clk_ops,
&sh7705_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7705_clk_ops))
*ops = sh7705_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh3/clock-sh7705.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2/clock-sh7619.c
*
* SH7619 support for the clock framework
*
* Copyright (C) 2006 Yoshinori Sato
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/processor.h>
static const int pll1rate[] = {1,2};
static const int pfc_divisors[] = {1,2,0,4};
static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
static struct sh_clk_ops sh7619_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7619_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7];
}
static struct sh_clk_ops sh7619_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static struct sh_clk_ops sh7619_cpu_clk_ops = {
.recalc = followparent_recalc,
};
static struct sh_clk_ops *sh7619_clk_ops[] = {
&sh7619_master_clk_ops,
&sh7619_module_clk_ops,
&sh7619_bus_clk_ops,
&sh7619_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (test_mode_pin(MODE_PIN2 | MODE_PIN0) ||
test_mode_pin(MODE_PIN2 | MODE_PIN1))
pll2_mult = 2;
else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1))
pll2_mult = 4;
BUG_ON(!pll2_mult);
if (idx < ARRAY_SIZE(sh7619_clk_ops))
*ops = sh7619_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh2/clock-sh7619.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SMP support for J2 processor
*
* Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
*/
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/cmpxchg.h>
DEFINE_PER_CPU(unsigned, j2_ipi_messages);
extern u32 *sh2_cpuid_addr;
static u32 *j2_ipi_trigger;
static int j2_ipi_irq;
static irqreturn_t j2_ipi_interrupt_handler(int irq, void *arg)
{
unsigned cpu = hard_smp_processor_id();
volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
unsigned messages, i;
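/* Atomically claim every pending message: retry the swap-to-zero
 * until no new bits were set in between. */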
do messages = *pmsg;
while (cmpxchg(pmsg, messages, 0) != messages);
if (!messages) return IRQ_NONE;
for (i=0; i<SMP_MSG_NR; i++)
if (messages & (1U<<i))
smp_message_recv(i);
return IRQ_HANDLED;
}
static void j2_smp_setup(void)
{
}
static void j2_prepare_cpus(unsigned int max_cpus)
{
struct device_node *np;
unsigned i, max = 1;
np = of_find_compatible_node(NULL, NULL, "jcore,ipi-controller");
if (!np)
goto out;
j2_ipi_irq = irq_of_parse_and_map(np, 0);
j2_ipi_trigger = of_iomap(np, 0);
if (!j2_ipi_irq || !j2_ipi_trigger)
goto out;
np = of_find_compatible_node(NULL, NULL, "jcore,cpuid-mmio");
if (!np)
goto out;
sh2_cpuid_addr = of_iomap(np, 0);
if (!sh2_cpuid_addr)
goto out;
if (request_irq(j2_ipi_irq, j2_ipi_interrupt_handler, IRQF_PERCPU,
"ipi", (void *)j2_ipi_interrupt_handler) != 0)
goto out;
max = max_cpus;
out:
/* Disable any cpus past max_cpus, or all secondaries if we didn't
* get the necessary resources to support SMP. */
for (i=max; i<NR_CPUS; i++) {
set_cpu_possible(i, false);
set_cpu_present(i, false);
}
}
static void j2_start_cpu(unsigned int cpu, unsigned long entry_point)
{
struct device_node *np;
u32 regs[2];
void __iomem *release, *initpc;
if (!cpu) return;
np = of_get_cpu_node(cpu, NULL);
if (!np) return;
if (of_property_read_u32_array(np, "cpu-release-addr", regs, 2)) return;
release = ioremap(regs[0], sizeof(u32));
initpc = ioremap(regs[1], sizeof(u32));
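/* Spin-table protocol: publish the entry point first, then set the
 * release flag the secondary CPU is polling on. */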
__raw_writel(entry_point, initpc);
__raw_writel(1, release);
iounmap(initpc);
iounmap(release);
pr_info("J2 SMP: requested start of cpu %u\n", cpu);
}
static unsigned int j2_smp_processor_id(void)
{
return __raw_readl(sh2_cpuid_addr);
}
static void j2_send_ipi(unsigned int cpu, unsigned int message)
{
volatile unsigned *pmsg;
unsigned old;
unsigned long val;
/* There is only one IPI interrupt shared by all messages, so
* we keep a separate interrupt flag per message type in software. */
pmsg = &per_cpu(j2_ipi_messages, cpu);
do old = *pmsg;
while (cmpxchg(pmsg, old, old|(1U<<message)) != old);
/* Generate the actual interrupt by writing to CCRn bit 28. */
val = __raw_readl(j2_ipi_trigger + cpu);
__raw_writel(val | (1U<<28), j2_ipi_trigger + cpu);
}
static struct plat_smp_ops j2_smp_ops = {
.smp_setup = j2_smp_setup,
.prepare_cpus = j2_prepare_cpus,
.start_cpu = j2_start_cpu,
.smp_processor_id = j2_smp_processor_id,
.send_ipi = j2_send_ipi,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
};
CPU_METHOD_OF_DECLARE(j2_cpu_method, "jcore,spin-table", &j2_smp_ops);
| linux-master | arch/sh/kernel/cpu/sh2/smp-j2.c |
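/*
 * A minimal user-space sketch (not kernel code) of the lock-free message
 * accumulation used by j2_send_ipi()/j2_ipi_interrupt_handler() above:
 * senders OR a bit into a shared word with compare-exchange, and the
 * handler atomically swaps the word to zero and services each set bit.
 * SMP_MSG_NR and the printed "servicing" line are placeholders.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SMP_MSG_NR 4

static _Atomic unsigned ipi_messages;

static void send_ipi(unsigned message)
{
	unsigned old = atomic_load(&ipi_messages);

	/* Retry until our bit is published without losing anyone else's. */
	while (!atomic_compare_exchange_weak(&ipi_messages, &old,
					     old | (1U << message)))
		;
}

static void handle_ipi(void)
{
	/* Claim all pending messages in one shot. */
	unsigned messages = atomic_exchange(&ipi_messages, 0);
	unsigned i;

	for (i = 0; i < SMP_MSG_NR; i++)
		if (messages & (1U << i))
			printf("servicing message %u\n", i);
}

int main(void)
{
	send_ipi(0);
	send_ipi(2);
	handle_ipi();
	return 0;
}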
// SPDX-License-Identifier: GPL-2.0
/*
* SH7619 Setup
*
* Copyright (C) 2006 Yoshinori Sato
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_eth.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
WDT, EDMAC, CMT0, CMT1,
SCIF0, SCIF1, SCIF2,
HIF_HIFI, HIF_HIFBI,
DMAC0, DMAC1, DMAC2, DMAC3,
SIOF,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 80), INTC_IRQ(IRQ5, 81),
INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83),
INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85),
INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87),
INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89),
INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91),
INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93),
INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95),
INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97),
INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99),
INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101),
INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105),
INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107),
INTC_IRQ(SIOF, 108),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xf8080000, 0, 16, 4, /* IPRC */ { WDT, EDMAC, CMT0, CMT1 } },
{ 0xf8080002, 0, 16, 4, /* IPRD */ { SCIF0, SCIF1, SCIF2 } },
{ 0xf8080004, 0, 16, 4, /* IPRE */ { HIF_HIFI, HIF_HIFBI } },
{ 0xf8080006, 0, 16, 4, /* IPRF */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL,
NULL, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xf8400000, 0x100),
DEFINE_RES_IRQ(88),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xf8410000, 0x100),
DEFINE_RES_IRQ(92),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xf8420000, 0x100),
DEFINE_RES_IRQ(96),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct sh_eth_plat_data eth_platform_data = {
.phy = 1,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct resource eth_resources[] = {
[0] = {
.start = 0xfb000000,
.end = 0xfb0001c7,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 85,
.end = 85,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device eth_device = {
.name = "sh7619-ether",
.id = -1,
.dev = {
.platform_data = ð_platform_data,
},
.num_resources = ARRAY_SIZE(eth_resources),
.resource = eth_resources,
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xf84a0070, 0x10),
DEFINE_RES_IRQ(86),
DEFINE_RES_IRQ(87),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct platform_device *sh7619_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
ð_device,
&cmt_device,
};
static int __init sh7619_devices_setup(void)
{
return platform_add_devices(sh7619_devices,
ARRAY_SIZE(sh7619_devices));
}
arch_initcall(sh7619_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7619_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&cmt_device,
};
#define STBCR3 0xf80a0000
void __init plat_early_device_setup(void)
{
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3);
sh_early_platform_add_devices(sh7619_early_devices,
ARRAY_SIZE(sh7619_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2/setup-sh7619.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2/probe.c
*
* CPU Subtype Probing for SH-2.
*
* Copyright (C) 2002 Paul Mundt
*/
#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/cache.h>
#if defined(CONFIG_CPU_J2)
extern u32 __iomem *j2_ccr_base;
static int __init scan_cache(unsigned long node, const char *uname,
int depth, void *data)
{
if (!of_flat_dt_is_compatible(node, "jcore,cache"))
return 0;
j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4);
return 1;
}
#endif
void __ref cpu_probe(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
boot_cpu_data.type = CPU_SH7619;
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.dcache.way_incr = (1<<12);
boot_cpu_data.dcache.sets = 256;
boot_cpu_data.dcache.entry_shift = 4;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
boot_cpu_data.dcache.flags = 0;
#endif
#if defined(CONFIG_CPU_J2)
#if defined(CONFIG_SMP)
unsigned cpu = hard_smp_processor_id();
#else
unsigned cpu = 0;
#endif
if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
if (cpu != 0) return;
boot_cpu_data.type = CPU_J2;
/* These defaults are appropriate for the original/current
* J2 cache. Once there is a proper framework for getting cache
* info from the device tree, we should switch to that. */
boot_cpu_data.dcache.ways = 1;
boot_cpu_data.dcache.sets = 256;
boot_cpu_data.dcache.entry_shift = 5;
boot_cpu_data.dcache.linesz = 32;
boot_cpu_data.dcache.flags = 0;
boot_cpu_data.flags |= CPU_HAS_CAS_L;
#else
/*
* SH-2 doesn't have separate caches
*/
boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
#endif
boot_cpu_data.icache = boot_cpu_data.dcache;
boot_cpu_data.family = CPU_FAMILY_SH2;
}
| linux-master | arch/sh/kernel/cpu/sh2/probe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interrupt handling for IPR-based IRQ.
*
* Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
* Copyright (C) 2000 Kazumoto Kojima
* Copyright (C) 2003 Takashi Kusuda <[email protected]>
* Copyright (C) 2006 Paul Mundt
*
* Supported system:
* On-chip supporting modules (TMU, RTC, etc.).
* On-chip supporting modules for SH7709/SH7709A/SH7729.
* Hitachi SolutionEngine external I/O:
* MS7709SE01, MS7709ASE01, and MS7750SE01
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/topology.h>
static inline struct ipr_desc *get_ipr_desc(struct irq_data *data)
{
struct irq_chip *chip = irq_data_get_irq_chip(data);
return container_of(chip, struct ipr_desc, chip);
}
static void disable_ipr_irq(struct irq_data *data)
{
struct ipr_data *p = irq_data_get_irq_chip_data(data);
unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set the priority in IPR to 0 */
__raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr);
(void)__raw_readw(addr); /* Read back to flush write posting */
}
static void enable_ipr_irq(struct irq_data *data)
{
struct ipr_data *p = irq_data_get_irq_chip_data(data);
unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx];
/* Set priority in IPR back to original value */
__raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr);
}
/*
* The shift value is now the number of bits to shift, not the number of
* bits/4. This is to make it easier to read the value directly from the
* datasheets. The IPR address is calculated using the ipr_offset table.
*/
void register_ipr_controller(struct ipr_desc *desc)
{
int i;
desc->chip.irq_mask = disable_ipr_irq;
desc->chip.irq_unmask = enable_ipr_irq;
for (i = 0; i < desc->nr_irqs; i++) {
struct ipr_data *p = desc->ipr_data + i;
int res;
BUG_ON(p->ipr_idx >= desc->nr_offsets);
BUG_ON(!desc->ipr_offsets[p->ipr_idx]);
res = irq_alloc_desc_at(p->irq, numa_node_id());
if (unlikely(res != p->irq && res != -EEXIST)) {
printk(KERN_INFO "can not get irq_desc for %d\n",
p->irq);
continue;
}
disable_irq_nosync(p->irq);
irq_set_chip_and_handler_name(p->irq, &desc->chip,
handle_level_irq, "level");
irq_set_chip_data(p->irq, p);
disable_ipr_irq(irq_get_irq_data(p->irq));
}
}
EXPORT_SYMBOL(register_ipr_controller);
| linux-master | arch/sh/kernel/cpu/irq/ipr.c |
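/*
 * A standalone sketch (not kernel code) of the read-modify-write that
 * enable_ipr_irq()/disable_ipr_irq() above perform: each IRQ owns a 4-bit
 * priority field within a 16-bit IPR register, selected by a shift, and
 * masking is done by forcing that field to zero.  The register image and
 * field values here are illustrative.
 */
#include <assert.h>
#include <stdio.h>

static unsigned short ipr;	/* stands in for the memory-mapped IPR */

static void ipr_set_priority(unsigned shift, unsigned prio)
{
	/* Clear only this IRQ's nibble, then write the new priority. */
	ipr = (ipr & (0xffff ^ (0xf << shift))) | ((prio & 0xf) << shift);
}

int main(void)
{
	ipr_set_priority(12, 0xf);	/* another source at top priority */
	ipr_set_priority(4, 0xa);	/* enable: priority 10 */
	assert(ipr == 0xf0a0);
	ipr_set_priority(4, 0);		/* mask: only this field is cleared */
	assert(ipr == 0xf000);
	printf("IPR image: 0x%04x\n", ipr);
	return 0;
}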
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/irq/imask.c
*
* Copyright (C) 1999, 2000 Niibe Yutaka
*
* Simple interrupt handling using IMASK of SR register.
*
*/
/* NOTE: Will not work on level 15 */
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/irq.h>
#include <linux/bitmap.h>
#include <asm/irq.h>
/* Bitmap of IRQ masked */
#define IMASK_PRIORITY 15
static DECLARE_BITMAP(imask_mask, IMASK_PRIORITY);
static int interrupt_priority;
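/*
 * Write the 4-bit IMASK field of SR.  If IMASK is already 0xf (all
 * interrupts blocked), leave SR untouched so we do not re-enable
 * interrupts behind the caller's back.
 */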
static inline void set_interrupt_registers(int ip)
{
unsigned long __dummy;
asm volatile(
#ifdef CONFIG_CPU_HAS_SR_RB
"ldc %2, r6_bank\n\t"
#endif
"stc sr, %0\n\t"
"and #0xf0, %0\n\t"
"shlr2 %0\n\t"
"cmp/eq #0x3c, %0\n\t"
"bt/s 1f ! CLI-ed\n\t"
" stc sr, %0\n\t"
"and %1, %0\n\t"
"or %2, %0\n\t"
"ldc %0, sr\n"
"1:"
: "=&z" (__dummy)
: "r" (~0xf0), "r" (ip << 4)
: "t");
}
static void mask_imask_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
clear_bit(irq, imask_mask);
if (interrupt_priority < IMASK_PRIORITY - irq)
interrupt_priority = IMASK_PRIORITY - irq;
set_interrupt_registers(interrupt_priority);
}
static void unmask_imask_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
set_bit(irq, imask_mask);
interrupt_priority = IMASK_PRIORITY -
find_first_zero_bit(imask_mask, IMASK_PRIORITY);
set_interrupt_registers(interrupt_priority);
}
static struct irq_chip imask_irq_chip = {
.name = "SR.IMASK",
.irq_mask = mask_imask_irq,
.irq_unmask = unmask_imask_irq,
.irq_mask_ack = mask_imask_irq,
};
void make_imask_irq(unsigned int irq)
{
irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq,
"level");
}
| linux-master | arch/sh/kernel/cpu/irq/imask.c |
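/*
 * A standalone sketch (not kernel code) of the priority bookkeeping in
 * imask.c above: each masked IRQ clears its bit in a bitmap, and the
 * effective SR.IMASK level is derived from the lowest clear bit, exactly
 * as unmask_imask_irq() recomputes it.
 */
#include <stdio.h>

#define IMASK_PRIORITY 15

static unsigned short imask_mask = (1 << IMASK_PRIORITY) - 1; /* all unmasked */

static int current_priority(void)
{
	int bit;

	/* find_first_zero_bit(): the lowest masked IRQ wins. */
	for (bit = 0; bit < IMASK_PRIORITY; bit++)
		if (!(imask_mask & (1 << bit)))
			return IMASK_PRIORITY - bit;
	return 0;	/* nothing masked: interrupts fully open */
}

int main(void)
{
	imask_mask &= ~(1 << 3);	/* mask IRQ 3 */
	printf("IMASK level: %d\n", current_priority());	/* 12 */
	imask_mask |= 1 << 3;		/* unmask it again */
	printf("IMASK level: %d\n", current_priority());	/* 0 */
	return 0;
}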
// SPDX-License-Identifier: GPL-2.0
/*
* Save/restore floating point context for signal handlers.
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* FIXME! These routines can be optimized in the big-endian case.
*/
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/fpu.h>
#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined.
* Executing frchg with PR set causes a trap on some SH4 implementations.
*/
#define FPSCR_RCHG 0x00000000
/*
* Save FPU registers onto task structure.
*/
void save_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile("sts.l fpul, @-%0\n\t"
"sts.l fpscr, @-%0\n\t"
"fmov.s fr15, @-%0\n\t"
"fmov.s fr14, @-%0\n\t"
"fmov.s fr13, @-%0\n\t"
"fmov.s fr12, @-%0\n\t"
"fmov.s fr11, @-%0\n\t"
"fmov.s fr10, @-%0\n\t"
"fmov.s fr9, @-%0\n\t"
"fmov.s fr8, @-%0\n\t"
"fmov.s fr7, @-%0\n\t"
"fmov.s fr6, @-%0\n\t"
"fmov.s fr5, @-%0\n\t"
"fmov.s fr4, @-%0\n\t"
"fmov.s fr3, @-%0\n\t"
"fmov.s fr2, @-%0\n\t"
"fmov.s fr1, @-%0\n\t"
"fmov.s fr0, @-%0\n\t"
"lds %3, fpscr\n\t"
: "=r" (dummy)
: "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
"r" (FPSCR_RCHG),
"r" (FPSCR_INIT)
: "memory");
disable_fpu();
}
void restore_fpu(struct task_struct *tsk)
{
unsigned long dummy;
enable_fpu();
asm volatile("fmov.s @%0+, fr0\n\t"
"fmov.s @%0+, fr1\n\t"
"fmov.s @%0+, fr2\n\t"
"fmov.s @%0+, fr3\n\t"
"fmov.s @%0+, fr4\n\t"
"fmov.s @%0+, fr5\n\t"
"fmov.s @%0+, fr6\n\t"
"fmov.s @%0+, fr7\n\t"
"fmov.s @%0+, fr8\n\t"
"fmov.s @%0+, fr9\n\t"
"fmov.s @%0+, fr10\n\t"
"fmov.s @%0+, fr11\n\t"
"fmov.s @%0+, fr12\n\t"
"fmov.s @%0+, fr13\n\t"
"fmov.s @%0+, fr14\n\t"
"fmov.s @%0+, fr15\n\t"
"lds.l @%0+, fpscr\n\t"
"lds.l @%0+, fpul\n\t"
: "=r" (dummy)
: "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
: "memory");
disable_fpu();
}
/*
* Emulate arithmetic ops on denormalized number for some FPU insns.
*/
/* denormalized float * float */
static int denormal_mulf(int hx, int hy)
{
unsigned int ix, iy;
unsigned long long m, n;
int exp, w;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000 || ix == 0)
return ((hx ^ hy) & 0x80000000);
exp = (iy & 0x7f800000) >> 23;
ix &= 0x007fffff;
iy = (iy & 0x007fffff) | 0x00800000;
m = (unsigned long long)ix * iy;
n = m;
w = -1;
while (n) { n >>= 1; w++; }
/* FIXME: use guard bits */
exp += w - 126 - 46;
if (exp > 0)
ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23);
else if (exp + 22 >= 0)
ix = (int) (m >> (w - 22 - exp)) & 0x007fffff;
else
ix = 0;
ix |= (hx ^ hy) & 0x80000000;
return ix;
}
/* denormalized double * double */
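/* 64x64 -> 128 bit multiply, built from four 32x32 partial products
 * with manual carry propagation. */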
static void mult64(unsigned long long x, unsigned long long y,
unsigned long long *highp, unsigned long long *lowp)
{
unsigned long long sub0, sub1, sub2, sub3;
unsigned long long high, low;
sub0 = (x >> 32) * (unsigned long) (y >> 32);
sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32);
sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL);
sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL);
low = sub3;
high = 0LL;
sub3 += (sub1 << 32);
if (low > sub3)
high++;
low = sub3;
sub3 += (sub2 << 32);
if (low > sub3)
high++;
low = sub3;
high += (sub1 >> 32) + (sub2 >> 32);
high += sub0;
*lowp = low;
*highp = high;
}
static inline long long rshift64(unsigned long long mh,
unsigned long long ml, int n)
{
if (n >= 64)
return mh >> (n - 64);
return (mh << (64 - n)) | (ml >> n);
}
static long long denormal_muld(long long hx, long long hy)
{
unsigned long long ix, iy;
unsigned long long mh, ml, nh, nl;
int exp, w;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL || ix == 0)
return ((hx ^ hy) & 0x8000000000000000LL);
exp = (iy & 0x7ff0000000000000LL) >> 52;
ix &= 0x000fffffffffffffLL;
iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL;
mult64(ix, iy, &mh, &ml);
nh = mh;
nl = ml;
w = -1;
if (nh) {
while (nh) { nh >>= 1; w++; }
w += 64;
} else
while (nl) { nl >>= 1; w++; }
/* FIXME: use guard bits */
exp += w - 1022 - 52 * 2;
if (exp > 0)
ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL)
| ((long long)exp << 52);
else if (exp + 51 >= 0)
ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL;
else
ix = 0;
ix |= (hx ^ hy) & 0x8000000000000000LL;
return ix;
}
/* ix - iy where iy: denormal and ix, iy >= 0 */
static int denormal_subf1(unsigned int ix, unsigned int iy)
{
int frac;
int exp;
if (ix < 0x00800000)
return ix - iy;
exp = (ix & 0x7f800000) >> 23;
if (exp - 1 > 31)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x007fffff) | 0x00800000;
frac -= iy;
while (frac < 0x00800000) {
if (--exp == 0)
return frac;
frac <<= 1;
}
return (exp << 23) | (frac & 0x007fffff);
}
/* ix + iy where iy: denormal and ix, iy >= 0 */
static int denormal_addf1(unsigned int ix, unsigned int iy)
{
int frac;
int exp;
if (ix < 0x00800000)
return ix + iy;
exp = (ix & 0x7f800000) >> 23;
if (exp - 1 > 31)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x007fffff) | 0x00800000;
frac += iy;
if (frac >= 0x01000000) {
frac >>= 1;
++exp;
}
return (exp << 23) | (frac & 0x007fffff);
}
static int denormal_addf(int hx, int hy)
{
unsigned int ix, iy;
int sign;
if ((hx ^ hy) & 0x80000000) {
sign = hx & 0x80000000;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000) {
ix = denormal_subf1(ix, iy);
if ((int) ix < 0) {
ix = -ix;
sign ^= 0x80000000;
}
} else {
ix = denormal_subf1(iy, ix);
sign ^= 0x80000000;
}
} else {
sign = hx & 0x80000000;
ix = hx & 0x7fffffff;
iy = hy & 0x7fffffff;
if (iy < 0x00800000)
ix = denormal_addf1(ix, iy);
else
ix = denormal_addf1(iy, ix);
}
return sign | ix;
}
/* ix - iy where iy: denormal and ix, iy >= 0 */
static long long denormal_subd1(unsigned long long ix, unsigned long long iy)
{
long long frac;
int exp;
if (ix < 0x0010000000000000LL)
return ix - iy;
exp = (ix & 0x7ff0000000000000LL) >> 52;
if (exp - 1 > 63)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
frac -= iy;
while (frac < 0x0010000000000000LL) {
if (--exp == 0)
return frac;
frac <<= 1;
}
return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL);
}
/* ix + iy where iy: denormal and ix, iy >= 0 */
static long long denormal_addd1(unsigned long long ix, unsigned long long iy)
{
long long frac;
long long exp;
if (ix < 0x0010000000000000LL)
return ix + iy;
exp = (ix & 0x7ff0000000000000LL) >> 52;
if (exp - 1 > 63)
return ix;
iy >>= exp - 1;
if (iy == 0)
return ix;
frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL;
frac += iy;
if (frac >= 0x0020000000000000LL) {
frac >>= 1;
++exp;
}
return (exp << 52) | (frac & 0x000fffffffffffffLL);
}
static long long denormal_addd(long long hx, long long hy)
{
unsigned long long ix, iy;
long long sign;
if ((hx ^ hy) & 0x8000000000000000LL) {
sign = hx & 0x8000000000000000LL;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL) {
ix = denormal_subd1(ix, iy);
if ((long long) ix < 0) {	/* the sign lives in bit 63 for doubles */
ix = -ix;
sign ^= 0x8000000000000000LL;
}
} else {
ix = denormal_subd1(iy, ix);
sign ^= 0x8000000000000000LL;
}
} else {
sign = hx & 0x8000000000000000LL;
ix = hx & 0x7fffffffffffffffLL;
iy = hy & 0x7fffffffffffffffLL;
if (iy < 0x0010000000000000LL)
ix = denormal_addd1(ix, iy);
else
ix = denormal_addd1(iy, ix);
}
return sign | ix;
}
/**
* denormal_to_double - Given a denormalized float number in FPUL,
*                      store it as a double float
*
* @fpu: Pointer to sh_fpu_hard structure
* @n: Index to FP register
*/
static void
denormal_to_double (struct sh_fpu_hard_struct *fpu, int n)
{
unsigned long du, dl;
unsigned long x = fpu->fpul;
int exp = 1023 - 126;
if (x != 0 && (x & 0x7f800000) == 0) {
du = (x & 0x80000000);
while ((x & 0x00800000) == 0) {
x <<= 1;
exp--;
}
x &= 0x007fffff;
du |= (exp << 20) | (x >> 3);
dl = x << 29;
fpu->fp_regs[n] = du;
fpu->fp_regs[n+1] = dl;
}
}
/**
* ieee_fpe_handler - Handle denormalized number exception
*
* @regs: Pointer to register structure
*
* Returns 1 when it's handled (should not cause exception).
*/
static int
ieee_fpe_handler (struct pt_regs *regs)
{
unsigned short insn = *(unsigned short *) regs->pc;
unsigned short finsn;
unsigned long nextpc;
int nib[4] = {
(insn >> 12) & 0xf,
(insn >> 8) & 0xf,
(insn >> 4) & 0xf,
insn & 0xf};
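/*
 * The failing FPU instruction may live in the delay slot of a
 * branch.  Decode any branch at PC so we can pick up the real
 * FPU opcode (at PC + 2) and compute where execution resumes.
 */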
if (nib[0] == 0xb ||
(nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */
regs->pr = regs->pc + 4;
if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */
nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */
if (regs->sr & 1)
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
else
nextpc = regs->pc + 4;
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */
if (regs->sr & 1)
nextpc = regs->pc + 4;
else
nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1);
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x4 && nib[3] == 0xb &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */
nextpc = regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (nib[0] == 0x0 && nib[3] == 0x3 &&
(nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */
nextpc = regs->pc + 4 + regs->regs[nib[1]];
finsn = *(unsigned short *) (regs->pc + 2);
} else if (insn == 0x000b) { /* rts */
nextpc = regs->pr;
finsn = *(unsigned short *) (regs->pc + 2);
} else {
nextpc = regs->pc + 2;
finsn = insn;
}
#define FPSCR_FPU_ERROR (1 << 17)
if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */
struct task_struct *tsk = current;
if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
/* FPU error */
denormal_to_double (&tsk->thread.xstate->hardfpu,
(finsn >> 8) & 0xf);
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00f) == 0xf002) { /* fmul */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal */
llx = ((long long) hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((hx & 0x7fffffff) >= 0x00100000)
llx = denormal_muld(lly, llx);
else
llx = denormal_muld(llx, lly);
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal */
if ((hx & 0x7fffffff) >= 0x00800000)
hx = denormal_mulf(hy, hx);
else
hx = denormal_mulf(hx, hy);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
} else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */
struct task_struct *tsk = current;
int fpscr;
int n, m, prec;
unsigned int hx, hy;
n = (finsn >> 8) & 0xf;
m = (finsn >> 4) & 0xf;
hx = tsk->thread.xstate->hardfpu.fp_regs[n];
hy = tsk->thread.xstate->hardfpu.fp_regs[m];
fpscr = tsk->thread.xstate->hardfpu.fpscr;
prec = fpscr & (1 << 19);
if ((fpscr & FPSCR_FPU_ERROR)
&& (prec && ((hx & 0x7fffffff) < 0x00100000
|| (hy & 0x7fffffff) < 0x00100000))) {
long long llx, lly;
/* FPU error because of denormal */
llx = ((long long) hx << 32)
| tsk->thread.xstate->hardfpu.fp_regs[n+1];
lly = ((long long) hy << 32)
| tsk->thread.xstate->hardfpu.fp_regs[m+1];
if ((finsn & 0xf00f) == 0xf000)
llx = denormal_addd(llx, lly);
else
llx = denormal_addd(llx, lly ^ (1LL << 63));
tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
} else if ((fpscr & FPSCR_FPU_ERROR)
&& (!prec && ((hx & 0x7fffffff) < 0x00800000
|| (hy & 0x7fffffff) < 0x00800000))) {
/* FPU error because of denormal */
if ((finsn & 0xf00f) == 0xf000)
hx = denormal_addf(hx, hy);
else
hx = denormal_addf(hx, hy ^ 0x80000000);
tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
} else
return 0;
regs->pc = nextpc;
return 1;
}
return 0;
}
BUILD_TRAP_HANDLER(fpu_error)
{
struct task_struct *tsk = current;
TRAP_HANDLER_DECL;
__unlazy_fpu(tsk, regs);
if (ieee_fpe_handler(regs)) {
tsk->thread.xstate->hardfpu.fpscr &=
~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK);
grab_fpu(regs);
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
return;
}
force_sig(SIGFPE);
}
| linux-master | arch/sh/kernel/cpu/sh2a/fpu.c |
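/*
 * A standalone sketch (not kernel code) of the IEEE-754 single-precision
 * bit fields that the denormal_* helpers above operate on: a denormal has
 * a zero exponent field and a nonzero fraction.  The sample values below
 * are illustrative.
 */
#include <stdio.h>
#include <string.h>

static void decompose(float f)
{
	unsigned int bits;

	memcpy(&bits, &f, sizeof(bits));	/* type-pun safely */
	printf("%g: sign=%u exp=%3u frac=0x%06x%s\n", (double)f,
	       bits >> 31, (bits >> 23) & 0xff, bits & 0x7fffff,
	       ((bits & 0x7f800000) == 0 && (bits & 0x7fffff)) ?
	       "  (denormal)" : "");
}

int main(void)
{
	decompose(1.0f);
	decompose(1.17549435e-38f);	/* smallest normal */
	decompose(1e-40f);		/* denormal */
	return 0;
}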
// SPDX-License-Identifier: GPL-2.0
/*
* SH7206 Setup
*
* Copyright (C) 2006 Yoshinori Sato
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
ADC_ADI0, ADC_ADI1,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S,
IIC3,
CMT0, CMT1, BSC, WDT,
MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V,
POE2_OEI3,
SCIF0, SCIF1, SCIF2, SCIF3,
/* interrupt groups */
PINT,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144),
INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152),
INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157),
INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159),
INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161),
INTC_IRQ(MTU0_VEF, 162),
INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165),
INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169),
INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173),
INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177),
INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181),
INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183),
INTC_IRQ(MTU2_TCI3V, 184),
INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189),
INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191),
INTC_IRQ(MTU2_TCI4V, 192),
INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197),
INTC_IRQ(MTU5, 198),
INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201),
INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205),
INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207),
INTC_IRQ(MTU2S_TCI3V, 208),
INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213),
INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215),
INTC_IRQ(MTU2S_TCI4V, 216),
INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221),
INTC_IRQ(MTU5S, 222),
INTC_IRQ(POE2_OEI3, 224),
INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229),
INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231),
INTC_IRQ(IIC3, 232),
INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241),
INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 243),
INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245),
INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247),
INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249),
INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251),
INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253),
INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI0, ADC_ADI1 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { CMT0, CMT1, BSC, WDT } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { MTU0_ABCD, MTU0_VEF,
MTU1_AB, MTU1_VU } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU2_TCI3V } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU4_ABCD, MTU2_TCI4V,
MTU5, POE2_12 } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU3S_ABCD, MTU2S_TCI3V,
MTU4S_ABCD, MTU2S_TCI4V } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU5S, POE2_OEI3, IIC3, 0 } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffe8000, 0x100),
DEFINE_RES_IRQ(240),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfffe8800, 0x100),
DEFINE_RES_IRQ(244),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfffe9000, 0x100),
DEFINE_RES_IRQ(248),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfffe9800, 0x100),
DEFINE_RES_IRQ(252),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xfffec000, 0x10),
DEFINE_RES_IRQ(140),
DEFINE_RES_IRQ(144),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(156, "tgi0a"),
DEFINE_RES_IRQ_NAMED(164, "tgi1a"),
DEFINE_RES_IRQ_NAMED(180, "tgi2a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2s",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct platform_device *sh7206_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
};
static int __init sh7206_devices_setup(void)
{
return platform_add_devices(sh7206_devices,
ARRAY_SIZE(sh7206_devices));
}
arch_initcall(sh7206_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7206_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
};
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
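/*
 * The STBCRn module-stop bits gate peripheral clocks: a set bit holds
 * the module in standby. Clear the CMT and MTU2 bits so the early
 * timer devices registered below have running clocks.
 */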
void __init plat_early_device_setup(void)
{
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
sh_early_platform_add_devices(sh7206_early_devices,
ARRAY_SIZE(sh7206_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-sh7206.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7269 Setup
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
* Copyright (C) 2012 Phil Edworthy
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/usb/r8a66597.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15,
USB, VDC4, CMT0, CMT1, BSC, WDT,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V,
PWMT1, PWMT2, ADC_ADI,
SSIF0, SSII1, SSII2, SSII3, SSII4, SSII5,
RSPDIF,
IIC30, IIC31, IIC32, IIC33,
SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI,
SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI,
SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI,
SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI,
RCAN0, RCAN1, RCAN2,
RSPIC0, RSPIC1,
IEBC, CD_ROMD,
NFMC,
SDHI0, SDHI1,
RTC,
SRCC0, SRCC1, SRCC2,
/* interrupt groups */
PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141),
INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145),
INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149),
INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153),
INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157),
INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161),
INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165),
INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169),
INTC_IRQ(USB, 170),
INTC_IRQ(VDC4, 171), INTC_IRQ(VDC4, 172),
INTC_IRQ(VDC4, 173), INTC_IRQ(VDC4, 174),
INTC_IRQ(VDC4, 175), INTC_IRQ(VDC4, 176),
	INTC_IRQ(VDC4, 177), INTC_IRQ(VDC4, 178),
INTC_IRQ(CMT0, 188), INTC_IRQ(CMT1, 189),
INTC_IRQ(BSC, 190), INTC_IRQ(WDT, 191),
INTC_IRQ(MTU0_ABCD, 192), INTC_IRQ(MTU0_ABCD, 193),
INTC_IRQ(MTU0_ABCD, 194), INTC_IRQ(MTU0_ABCD, 195),
INTC_IRQ(MTU0_VEF, 196), INTC_IRQ(MTU0_VEF, 197),
INTC_IRQ(MTU0_VEF, 198),
INTC_IRQ(MTU1_AB, 199), INTC_IRQ(MTU1_AB, 200),
INTC_IRQ(MTU1_VU, 201), INTC_IRQ(MTU1_VU, 202),
INTC_IRQ(MTU2_AB, 203), INTC_IRQ(MTU2_AB, 204),
INTC_IRQ(MTU2_VU, 205), INTC_IRQ(MTU2_VU, 206),
INTC_IRQ(MTU3_ABCD, 207), INTC_IRQ(MTU3_ABCD, 208),
INTC_IRQ(MTU3_ABCD, 209), INTC_IRQ(MTU3_ABCD, 210),
INTC_IRQ(MTU3_TCI3V, 211),
INTC_IRQ(MTU4_ABCD, 212), INTC_IRQ(MTU4_ABCD, 213),
INTC_IRQ(MTU4_ABCD, 214), INTC_IRQ(MTU4_ABCD, 215),
INTC_IRQ(MTU4_TCI4V, 216),
INTC_IRQ(PWMT1, 217), INTC_IRQ(PWMT2, 218),
INTC_IRQ(ADC_ADI, 223),
INTC_IRQ(SSIF0, 224), INTC_IRQ(SSIF0, 225),
INTC_IRQ(SSIF0, 226),
INTC_IRQ(SSII1, 227), INTC_IRQ(SSII1, 228),
INTC_IRQ(SSII2, 229), INTC_IRQ(SSII2, 230),
INTC_IRQ(SSII3, 231), INTC_IRQ(SSII3, 232),
INTC_IRQ(SSII4, 233), INTC_IRQ(SSII4, 234),
INTC_IRQ(SSII5, 235), INTC_IRQ(SSII5, 236),
INTC_IRQ(RSPDIF, 237),
INTC_IRQ(IIC30, 238), INTC_IRQ(IIC30, 239),
INTC_IRQ(IIC30, 240), INTC_IRQ(IIC30, 241),
INTC_IRQ(IIC30, 242),
INTC_IRQ(IIC31, 243), INTC_IRQ(IIC31, 244),
INTC_IRQ(IIC31, 245), INTC_IRQ(IIC31, 246),
INTC_IRQ(IIC31, 247),
INTC_IRQ(IIC32, 248), INTC_IRQ(IIC32, 249),
INTC_IRQ(IIC32, 250), INTC_IRQ(IIC32, 251),
INTC_IRQ(IIC32, 252),
INTC_IRQ(IIC33, 253), INTC_IRQ(IIC33, 254),
INTC_IRQ(IIC33, 255), INTC_IRQ(IIC33, 256),
INTC_IRQ(IIC33, 257),
INTC_IRQ(SCIF0_BRI, 258), INTC_IRQ(SCIF0_ERI, 259),
INTC_IRQ(SCIF0_RXI, 260), INTC_IRQ(SCIF0_TXI, 261),
INTC_IRQ(SCIF1_BRI, 262), INTC_IRQ(SCIF1_ERI, 263),
INTC_IRQ(SCIF1_RXI, 264), INTC_IRQ(SCIF1_TXI, 265),
INTC_IRQ(SCIF2_BRI, 266), INTC_IRQ(SCIF2_ERI, 267),
INTC_IRQ(SCIF2_RXI, 268), INTC_IRQ(SCIF2_TXI, 269),
INTC_IRQ(SCIF3_BRI, 270), INTC_IRQ(SCIF3_ERI, 271),
INTC_IRQ(SCIF3_RXI, 272), INTC_IRQ(SCIF3_TXI, 273),
INTC_IRQ(SCIF4_BRI, 274), INTC_IRQ(SCIF4_ERI, 275),
INTC_IRQ(SCIF4_RXI, 276), INTC_IRQ(SCIF4_TXI, 277),
INTC_IRQ(SCIF5_BRI, 278), INTC_IRQ(SCIF5_ERI, 279),
INTC_IRQ(SCIF5_RXI, 280), INTC_IRQ(SCIF5_TXI, 281),
INTC_IRQ(SCIF6_BRI, 282), INTC_IRQ(SCIF6_ERI, 283),
INTC_IRQ(SCIF6_RXI, 284), INTC_IRQ(SCIF6_TXI, 285),
INTC_IRQ(SCIF7_BRI, 286), INTC_IRQ(SCIF7_ERI, 287),
INTC_IRQ(SCIF7_RXI, 288), INTC_IRQ(SCIF7_TXI, 289),
INTC_IRQ(RCAN0, 291), INTC_IRQ(RCAN0, 292),
INTC_IRQ(RCAN0, 293), INTC_IRQ(RCAN0, 294),
INTC_IRQ(RCAN0, 295),
INTC_IRQ(RCAN1, 296), INTC_IRQ(RCAN1, 297),
INTC_IRQ(RCAN1, 298), INTC_IRQ(RCAN1, 299),
INTC_IRQ(RCAN1, 300),
INTC_IRQ(RCAN2, 301), INTC_IRQ(RCAN2, 302),
INTC_IRQ(RCAN2, 303), INTC_IRQ(RCAN2, 304),
INTC_IRQ(RCAN2, 305),
INTC_IRQ(RSPIC0, 306), INTC_IRQ(RSPIC0, 307),
INTC_IRQ(RSPIC0, 308),
INTC_IRQ(RSPIC1, 309), INTC_IRQ(RSPIC1, 310),
INTC_IRQ(RSPIC1, 311),
INTC_IRQ(IEBC, 318),
INTC_IRQ(CD_ROMD, 319), INTC_IRQ(CD_ROMD, 320),
INTC_IRQ(CD_ROMD, 321), INTC_IRQ(CD_ROMD, 322),
INTC_IRQ(CD_ROMD, 323), INTC_IRQ(CD_ROMD, 324),
INTC_IRQ(NFMC, 325), INTC_IRQ(NFMC, 326),
INTC_IRQ(NFMC, 327), INTC_IRQ(NFMC, 328),
INTC_IRQ(SDHI0, 332), INTC_IRQ(SDHI0, 333),
INTC_IRQ(SDHI0, 334),
INTC_IRQ(SDHI1, 335), INTC_IRQ(SDHI1, 336),
INTC_IRQ(SDHI1, 337),
INTC_IRQ(RTC, 338), INTC_IRQ(RTC, 339),
INTC_IRQ(RTC, 340),
INTC_IRQ(SRCC0, 341), INTC_IRQ(SRCC0, 342),
INTC_IRQ(SRCC0, 343), INTC_IRQ(SRCC0, 344),
INTC_IRQ(SRCC0, 345),
INTC_IRQ(SRCC1, 346), INTC_IRQ(SRCC1, 347),
INTC_IRQ(SRCC1, 348), INTC_IRQ(SRCC1, 349),
INTC_IRQ(SRCC1, 350),
INTC_IRQ(SRCC2, 351), INTC_IRQ(SRCC2, 352),
INTC_IRQ(SRCC2, 353), INTC_IRQ(SRCC2, 354),
INTC_IRQ(SRCC2, 355),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI),
INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI),
INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI),
INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9,
DMAC10, DMAC11 } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13,
DMAC14, DMAC15 } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC4, VDC4, VDC4 } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { 0, 0, 0, 0 } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { CMT0, CMT1, BSC, WDT } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU0_ABCD, MTU0_VEF,
MTU1_AB, MTU1_VU } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU3_TCI3V } },
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { MTU4_ABCD, MTU4_TCI4V,
PWMT1, PWMT2 } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { 0, 0, 0, 0 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { ADC_ADI, SSIF0, SSII1, SSII2 } },
{ 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SSII3, SSII4, SSII5, RSPDIF} },
{ 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { IIC30, IIC31, IIC32, IIC33 } },
{ 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
{ 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { SCIF4, SCIF5, SCIF6, SCIF7 } },
{ 0xfffe0c20, 0, 16, 4, /* IPR22 */ { 0, RCAN0, RCAN1, RCAN2 } },
{ 0xfffe0c22, 0, 16, 4, /* IPR23 */ { RSPIC0, RSPIC1, 0, 0 } },
{ 0xfffe0c24, 0, 16, 4, /* IPR24 */ { IEBC, CD_ROMD, NFMC, 0 } },
{ 0xfffe0c26, 0, 16, 4, /* IPR25 */ { SDHI0, SDHI1, RTC, 0 } },
{ 0xfffe0c28, 0, 16, 4, /* IPR26 */ { SRCC0, SRCC1, SRCC2, 0 } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7269", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
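/*
 * The four IRQ resources per SCIF are listed in the order the sh-sci
 * driver indexes them: ERI, RXI, TXI, BRI (hence 259, 260, 261, 258).
 */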
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xe8007000, 0x100),
DEFINE_RES_IRQ(259),
DEFINE_RES_IRQ(260),
DEFINE_RES_IRQ(261),
DEFINE_RES_IRQ(258),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xe8007800, 0x100),
DEFINE_RES_IRQ(263),
DEFINE_RES_IRQ(264),
DEFINE_RES_IRQ(265),
DEFINE_RES_IRQ(262),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xe8008000, 0x100),
DEFINE_RES_IRQ(267),
DEFINE_RES_IRQ(268),
DEFINE_RES_IRQ(269),
DEFINE_RES_IRQ(266),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xe8008800, 0x100),
DEFINE_RES_IRQ(271),
DEFINE_RES_IRQ(272),
DEFINE_RES_IRQ(273),
DEFINE_RES_IRQ(270),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xe8009000, 0x100),
DEFINE_RES_IRQ(275),
DEFINE_RES_IRQ(276),
DEFINE_RES_IRQ(277),
DEFINE_RES_IRQ(274),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xe8009800, 0x100),
DEFINE_RES_IRQ(279),
DEFINE_RES_IRQ(280),
DEFINE_RES_IRQ(281),
DEFINE_RES_IRQ(278),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct plat_sci_port scif6_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif6_resources[] = {
DEFINE_RES_MEM(0xe800a000, 0x100),
DEFINE_RES_IRQ(283),
DEFINE_RES_IRQ(284),
DEFINE_RES_IRQ(285),
DEFINE_RES_IRQ(282),
};
static struct platform_device scif6_device = {
.name = "sh-sci",
.id = 6,
.resource = scif6_resources,
.num_resources = ARRAY_SIZE(scif6_resources),
.dev = {
.platform_data = &scif6_platform_data,
},
};
static struct plat_sci_port scif7_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif7_resources[] = {
DEFINE_RES_MEM(0xe800a800, 0x100),
DEFINE_RES_IRQ(287),
DEFINE_RES_IRQ(288),
DEFINE_RES_IRQ(289),
DEFINE_RES_IRQ(286),
};
static struct platform_device scif7_device = {
.name = "sh-sci",
.id = 7,
.resource = scif7_resources,
.num_resources = ARRAY_SIZE(scif7_resources),
.dev = {
.platform_data = &scif7_platform_data,
},
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xfffec000, 0x10),
DEFINE_RES_IRQ(188),
DEFINE_RES_IRQ(189),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(192, "tgi0a"),
DEFINE_RES_IRQ_NAMED(203, "tgi1a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffe6000,
.end = 0xfffe6000 + 0x30 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 338,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* USB Host */
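/* on_chip selects the controller integrated in the SoC; endian = 1 requests big-endian register access */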
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
.endian = 1,
};
static struct resource r8a66597_usb_host_resources[] = {
[0] = {
.start = 0xe8010000,
.end = 0xe80100e4,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 170,
.end = 170,
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device r8a66597_usb_host_device = {
.name = "r8a66597_hcd",
.id = 0,
.dev = {
		.dma_mask = NULL, /* DMA not used */
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
.resource = r8a66597_usb_host_resources,
};
static struct platform_device *sh7269_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&cmt_device,
&mtu2_device,
&rtc_device,
&r8a66597_usb_host_device,
};
static int __init sh7269_devices_setup(void)
{
return platform_add_devices(sh7269_devices,
ARRAY_SIZE(sh7269_devices));
}
arch_initcall(sh7269_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7269_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&cmt_device,
&mtu2_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7269_early_devices,
ARRAY_SIZE(sh7269_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-sh7269.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7264 Setup
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/usb/r8a66597.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15,
USB, VDC3, CMT0, CMT1, BSC, WDT,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V,
PWMT1, PWMT2, ADC_ADI,
SSIF0, SSII1, SSII2, SSII3,
RSPDIF,
IIC30, IIC31, IIC32, IIC33,
SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI,
SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI,
SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI,
SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI,
SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI,
SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI,
SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI,
SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI,
SIO_FIFO, RSPIC0, RSPIC1,
RCAN0, RCAN1, IEBC, CD_ROMD,
NFMC, SDHI, RTC,
SRCC0, SRCC1, DCOMU, OFFI, IFEI,
/* interrupt groups */
PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141),
INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145),
INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149),
INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153),
INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157),
INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161),
INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165),
INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169),
INTC_IRQ(USB, 170),
INTC_IRQ(VDC3, 171), INTC_IRQ(VDC3, 172),
INTC_IRQ(VDC3, 173), INTC_IRQ(VDC3, 174),
INTC_IRQ(CMT0, 175), INTC_IRQ(CMT1, 176),
INTC_IRQ(BSC, 177), INTC_IRQ(WDT, 178),
INTC_IRQ(MTU0_ABCD, 179), INTC_IRQ(MTU0_ABCD, 180),
INTC_IRQ(MTU0_ABCD, 181), INTC_IRQ(MTU0_ABCD, 182),
INTC_IRQ(MTU0_VEF, 183),
INTC_IRQ(MTU0_VEF, 184), INTC_IRQ(MTU0_VEF, 185),
INTC_IRQ(MTU1_AB, 186), INTC_IRQ(MTU1_AB, 187),
INTC_IRQ(MTU1_VU, 188), INTC_IRQ(MTU1_VU, 189),
INTC_IRQ(MTU2_AB, 190), INTC_IRQ(MTU2_AB, 191),
INTC_IRQ(MTU2_VU, 192), INTC_IRQ(MTU2_VU, 193),
INTC_IRQ(MTU3_ABCD, 194), INTC_IRQ(MTU3_ABCD, 195),
INTC_IRQ(MTU3_ABCD, 196), INTC_IRQ(MTU3_ABCD, 197),
INTC_IRQ(MTU3_TCI3V, 198),
INTC_IRQ(MTU4_ABCD, 199), INTC_IRQ(MTU4_ABCD, 200),
INTC_IRQ(MTU4_ABCD, 201), INTC_IRQ(MTU4_ABCD, 202),
INTC_IRQ(MTU4_TCI4V, 203),
INTC_IRQ(PWMT1, 204), INTC_IRQ(PWMT2, 205),
INTC_IRQ(ADC_ADI, 206),
INTC_IRQ(SSIF0, 207), INTC_IRQ(SSIF0, 208),
INTC_IRQ(SSIF0, 209),
INTC_IRQ(SSII1, 210), INTC_IRQ(SSII1, 211),
INTC_IRQ(SSII2, 212), INTC_IRQ(SSII2, 213),
INTC_IRQ(SSII3, 214), INTC_IRQ(SSII3, 215),
INTC_IRQ(RSPDIF, 216),
INTC_IRQ(IIC30, 217), INTC_IRQ(IIC30, 218),
INTC_IRQ(IIC30, 219), INTC_IRQ(IIC30, 220),
INTC_IRQ(IIC30, 221),
INTC_IRQ(IIC31, 222), INTC_IRQ(IIC31, 223),
INTC_IRQ(IIC31, 224), INTC_IRQ(IIC31, 225),
INTC_IRQ(IIC31, 226),
INTC_IRQ(IIC32, 227), INTC_IRQ(IIC32, 228),
INTC_IRQ(IIC32, 229), INTC_IRQ(IIC32, 230),
INTC_IRQ(IIC32, 231),
INTC_IRQ(SCIF0_BRI, 232), INTC_IRQ(SCIF0_ERI, 233),
INTC_IRQ(SCIF0_RXI, 234), INTC_IRQ(SCIF0_TXI, 235),
INTC_IRQ(SCIF1_BRI, 236), INTC_IRQ(SCIF1_ERI, 237),
INTC_IRQ(SCIF1_RXI, 238), INTC_IRQ(SCIF1_TXI, 239),
INTC_IRQ(SCIF2_BRI, 240), INTC_IRQ(SCIF2_ERI, 241),
INTC_IRQ(SCIF2_RXI, 242), INTC_IRQ(SCIF2_TXI, 243),
INTC_IRQ(SCIF3_BRI, 244), INTC_IRQ(SCIF3_ERI, 245),
INTC_IRQ(SCIF3_RXI, 246), INTC_IRQ(SCIF3_TXI, 247),
INTC_IRQ(SCIF4_BRI, 248), INTC_IRQ(SCIF4_ERI, 249),
INTC_IRQ(SCIF4_RXI, 250), INTC_IRQ(SCIF4_TXI, 251),
INTC_IRQ(SCIF5_BRI, 252), INTC_IRQ(SCIF5_ERI, 253),
INTC_IRQ(SCIF5_RXI, 254), INTC_IRQ(SCIF5_TXI, 255),
INTC_IRQ(SCIF6_BRI, 256), INTC_IRQ(SCIF6_ERI, 257),
INTC_IRQ(SCIF6_RXI, 258), INTC_IRQ(SCIF6_TXI, 259),
INTC_IRQ(SCIF7_BRI, 260), INTC_IRQ(SCIF7_ERI, 261),
INTC_IRQ(SCIF7_RXI, 262), INTC_IRQ(SCIF7_TXI, 263),
INTC_IRQ(SIO_FIFO, 264),
INTC_IRQ(RSPIC0, 265), INTC_IRQ(RSPIC0, 266),
INTC_IRQ(RSPIC0, 267),
INTC_IRQ(RSPIC1, 268), INTC_IRQ(RSPIC1, 269),
INTC_IRQ(RSPIC1, 270),
INTC_IRQ(RCAN0, 271), INTC_IRQ(RCAN0, 272),
INTC_IRQ(RCAN0, 273), INTC_IRQ(RCAN0, 274),
INTC_IRQ(RCAN0, 275),
INTC_IRQ(RCAN1, 276), INTC_IRQ(RCAN1, 277),
INTC_IRQ(RCAN1, 278), INTC_IRQ(RCAN1, 279),
INTC_IRQ(RCAN1, 280),
INTC_IRQ(IEBC, 281),
INTC_IRQ(CD_ROMD, 282), INTC_IRQ(CD_ROMD, 283),
INTC_IRQ(CD_ROMD, 284), INTC_IRQ(CD_ROMD, 285),
INTC_IRQ(CD_ROMD, 286), INTC_IRQ(CD_ROMD, 287),
INTC_IRQ(NFMC, 288), INTC_IRQ(NFMC, 289),
INTC_IRQ(NFMC, 290), INTC_IRQ(NFMC, 291),
INTC_IRQ(SDHI, 292), INTC_IRQ(SDHI, 293),
INTC_IRQ(SDHI, 294),
INTC_IRQ(RTC, 296), INTC_IRQ(RTC, 297),
INTC_IRQ(RTC, 298),
INTC_IRQ(SRCC0, 299), INTC_IRQ(SRCC0, 300),
INTC_IRQ(SRCC0, 301), INTC_IRQ(SRCC0, 302),
INTC_IRQ(SRCC0, 303),
INTC_IRQ(SRCC1, 304), INTC_IRQ(SRCC1, 305),
INTC_IRQ(SRCC1, 306), INTC_IRQ(SRCC1, 307),
INTC_IRQ(SRCC1, 308),
INTC_IRQ(DCOMU, 310), INTC_IRQ(DCOMU, 311),
INTC_IRQ(DCOMU, 312),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI),
INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI),
INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI),
INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI),
INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI),
INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI),
INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI),
INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9,
DMAC10, DMAC11 } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13,
DMAC14, DMAC15 } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC3, CMT0, CMT1 } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU1_AB, MTU1_VU,
MTU2_AB, MTU2_VU } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU3_ABCD, MTU3_TCI3V,
MTU4_ABCD, MTU4_TCI4V } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { PWMT1, PWMT2, ADC_ADI, 0 } },
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSIF0, SSII1, SSII2, SSII3 } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { RSPDIF, IIC30, IIC31, IIC32 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { SCIF0, SCIF1, SCIF2, SCIF3 } },
{ 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SCIF4, SCIF5, SCIF6, SCIF7 } },
{ 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { SIO_FIFO, 0, RSPIC0, RSPIC1, } },
{ 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { RCAN0, RCAN1, IEBC, CD_ROMD } },
{ 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { NFMC, SDHI, RTC, 0 } },
{ 0xfffe0c20, 0, 16, 4, /* IPR22 */ { SRCC0, SRCC1, 0, DCOMU } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7264", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffe8000, 0x100),
DEFINE_RES_IRQ(233),
DEFINE_RES_IRQ(234),
DEFINE_RES_IRQ(235),
DEFINE_RES_IRQ(232),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfffe8800, 0x100),
DEFINE_RES_IRQ(237),
DEFINE_RES_IRQ(238),
DEFINE_RES_IRQ(239),
DEFINE_RES_IRQ(236),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfffe9000, 0x100),
DEFINE_RES_IRQ(241),
DEFINE_RES_IRQ(242),
DEFINE_RES_IRQ(243),
DEFINE_RES_IRQ(240),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfffe9800, 0x100),
DEFINE_RES_IRQ(245),
DEFINE_RES_IRQ(246),
DEFINE_RES_IRQ(247),
DEFINE_RES_IRQ(244),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xfffea000, 0x100),
DEFINE_RES_IRQ(249),
DEFINE_RES_IRQ(250),
DEFINE_RES_IRQ(251),
DEFINE_RES_IRQ(248),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xfffea800, 0x100),
DEFINE_RES_IRQ(253),
DEFINE_RES_IRQ(254),
DEFINE_RES_IRQ(255),
DEFINE_RES_IRQ(252),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct plat_sci_port scif6_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif6_resources[] = {
DEFINE_RES_MEM(0xfffeb000, 0x100),
DEFINE_RES_IRQ(257),
DEFINE_RES_IRQ(258),
DEFINE_RES_IRQ(259),
DEFINE_RES_IRQ(256),
};
static struct platform_device scif6_device = {
.name = "sh-sci",
.id = 6,
.resource = scif6_resources,
.num_resources = ARRAY_SIZE(scif6_resources),
.dev = {
.platform_data = &scif6_platform_data,
},
};
static struct plat_sci_port scif7_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif7_resources[] = {
DEFINE_RES_MEM(0xfffeb800, 0x100),
DEFINE_RES_IRQ(261),
DEFINE_RES_IRQ(262),
DEFINE_RES_IRQ(263),
DEFINE_RES_IRQ(260),
};
static struct platform_device scif7_device = {
.name = "sh-sci",
.id = 7,
.resource = scif7_resources,
.num_resources = ARRAY_SIZE(scif7_resources),
.dev = {
.platform_data = &scif7_platform_data,
},
};
static struct sh_timer_config cmt_platform_data = {
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xfffec000, 0x10),
DEFINE_RES_IRQ(175),
DEFINE_RES_IRQ(176),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(179, "tgi0a"),
DEFINE_RES_IRQ_NAMED(186, "tgi1a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffe6000,
.end = 0xfffe6000 + 0x30 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 296,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
/* USB Host */
static void usb_port_power(int port, int power)
{
	__raw_writew(0x200, 0xffffc0c2); /* Initialise UACS25 */
}
static struct r8a66597_platdata r8a66597_data = {
.on_chip = 1,
.endian = 1,
.port_power = usb_port_power,
};
static struct resource r8a66597_usb_host_resources[] = {
[0] = {
.start = 0xffffc000,
.end = 0xffffc0e4,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 170,
.end = 170,
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device r8a66597_usb_host_device = {
.name = "r8a66597_hcd",
.id = 0,
.dev = {
		.dma_mask = NULL, /* DMA not used */
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
.resource = r8a66597_usb_host_resources,
};
static struct platform_device *sh7264_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&cmt_device,
&mtu2_device,
&rtc_device,
&r8a66597_usb_host_device,
};
static int __init sh7264_devices_setup(void)
{
return platform_add_devices(sh7264_devices,
ARRAY_SIZE(sh7264_devices));
}
arch_initcall(sh7264_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7264_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&cmt_device,
&mtu2_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh7264_early_devices,
ARRAY_SIZE(sh7264_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-sh7264.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7201 setup
*
* Copyright (C) 2008 Peter Griffin [email protected]
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
ADC_ADI,
MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU,
MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V,
RTC, WDT,
IIC30, IIC31, IIC32,
DMAC0_DMINT0, DMAC1_DMINT1,
DMAC2_DMINT2, DMAC3_DMINT3,
SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7,
DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6,
DMAC7_DMINT7,
RCAN0, RCAN1,
SSI0_SSII, SSI1_SSII,
TMR0, TMR1,
/* interrupt groups */
PINT,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(ADC_ADI, 92),
INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109),
INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111),
INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113),
INTC_IRQ(MTU20_VEF, 114),
INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117),
INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121),
INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125),
INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129),
INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133),
INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135),
INTC_IRQ(MTU2_TCI3V, 136),
INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141),
INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143),
INTC_IRQ(MTU2_TCI4V, 144),
INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149),
INTC_IRQ(MTU25_UVW, 150),
INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153),
INTC_IRQ(RTC, 154),
INTC_IRQ(WDT, 156),
INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158),
INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160),
INTC_IRQ(IIC30, 161),
INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165),
INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167),
INTC_IRQ(IIC31, 168),
INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171),
INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173),
INTC_IRQ(IIC32, 174),
INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177),
INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179),
INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181),
INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183),
INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185),
INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187),
INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189),
INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191),
INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193),
INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195),
INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197),
INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199),
INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201),
INTC_IRQ(SCIF5, 202), INTC_IRQ(SCIF5, 203),
INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205),
INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207),
INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209),
INTC_IRQ(SCIF7, 210), INTC_IRQ(SCIF7, 211),
INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216),
INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218),
INTC_IRQ(DMAC7_DMINT7, 219),
INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229),
INTC_IRQ(RCAN0, 230),
INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232),
INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235),
INTC_IRQ(RCAN1, 236),
INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238),
INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245),
INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247),
INTC_IRQ(TMR0, 248),
INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253),
INTC_IRQ(TMR1, 254),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } },
{ 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } },
{ 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } },
{ 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } },
{ 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } },
{ 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } },
{ 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } },
{ 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } },
{ 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } },
{ 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } },
{ 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } },
{ 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe9408, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffe8000, 0x100),
DEFINE_RES_IRQ(180),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfffe8800, 0x100),
DEFINE_RES_IRQ(184),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfffe9000, 0x100),
DEFINE_RES_IRQ(188),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfffe9800, 0x100),
DEFINE_RES_IRQ(192),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct plat_sci_port scif4_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif4_resources[] = {
DEFINE_RES_MEM(0xfffea000, 0x100),
DEFINE_RES_IRQ(196),
};
static struct platform_device scif4_device = {
.name = "sh-sci",
.id = 4,
.resource = scif4_resources,
.num_resources = ARRAY_SIZE(scif4_resources),
.dev = {
.platform_data = &scif4_platform_data,
},
};
static struct plat_sci_port scif5_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif5_resources[] = {
DEFINE_RES_MEM(0xfffea800, 0x100),
DEFINE_RES_IRQ(200),
};
static struct platform_device scif5_device = {
.name = "sh-sci",
.id = 5,
.resource = scif5_resources,
.num_resources = ARRAY_SIZE(scif5_resources),
.dev = {
.platform_data = &scif5_platform_data,
},
};
static struct plat_sci_port scif6_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif6_resources[] = {
DEFINE_RES_MEM(0xfffeb000, 0x100),
DEFINE_RES_IRQ(204),
};
static struct platform_device scif6_device = {
.name = "sh-sci",
.id = 6,
.resource = scif6_resources,
.num_resources = ARRAY_SIZE(scif6_resources),
.dev = {
.platform_data = &scif6_platform_data,
},
};
static struct plat_sci_port scif7_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif7_resources[] = {
DEFINE_RES_MEM(0xfffeb800, 0x100),
DEFINE_RES_IRQ(208),
};
static struct platform_device scif7_device = {
.name = "sh-sci",
.id = 7,
.resource = scif7_resources,
.num_resources = ARRAY_SIZE(scif7_resources),
.dev = {
.platform_data = &scif7_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffff0800,
.end = 0xffff2000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 152,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(108, "tgi0a"),
DEFINE_RES_IRQ_NAMED(116, "tgi1a"),
DEFINE_RES_IRQ_NAMED(124, "tgi1b"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct platform_device *sh7201_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&rtc_device,
&mtu2_device,
};
static int __init sh7201_devices_setup(void)
{
return platform_add_devices(sh7201_devices,
ARRAY_SIZE(sh7201_devices));
}
arch_initcall(sh7201_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7201_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&scif4_device,
&scif5_device,
&scif6_device,
&scif7_device,
&mtu2_device,
};
#define STBCR3 0xfffe0408
void __init plat_early_device_setup(void)
{
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
sh_early_platform_add_devices(sh7201_early_devices,
ARRAY_SIZE(sh7201_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-sh7201.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/clock-sh7203.c
*
* SH7203 support for the clock framework
*
* Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd)
*
* Based on clock-sh7263.c
* Copyright (C) 2006 Yoshinori Sato
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static const int pll1rate[] = { 8, 12, 16, 0 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
#define ifc_divisors pfc_divisors
static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult;
}
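/*
 * Worked example (hypothetical input): a 10 MHz EXTAL with the x12
 * PLL1 setting and the x4 PLL2 mode gives 10 * 12 * 4 = 480 MHz.
 */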
static struct sh_clk_ops sh7203_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7203_module_clk_ops = {
.recalc = module_clk_recalc,
};
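/*
 * The bus clock shares the peripheral divisor table, but its FREQCR
 * encoding starts two entries later, hence the idx-2 lookup below.
 */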
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx-2];
}
static struct sh_clk_ops sh7203_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static struct sh_clk_ops sh7203_cpu_clk_ops = {
.recalc = followparent_recalc,
};
static struct sh_clk_ops *sh7203_clk_ops[] = {
&sh7203_master_clk_ops,
&sh7203_module_clk_ops,
&sh7203_bus_clk_ops,
&sh7203_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (test_mode_pin(MODE_PIN1))
pll2_mult = 4;
else if (test_mode_pin(MODE_PIN0))
pll2_mult = 2;
else
pll2_mult = 1;
if (idx < ARRAY_SIZE(sh7203_clk_ops))
*ops = sh7203_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh2a/clock-sh7203.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7203 Pinmux
*
* Copyright (C) 2008 Magnus Damm
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7203_pfc_resources[] = {
[0] = {
.start = 0xfffe3800,
.end = 0xfffe3a9f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7203", sh7203_pfc_resources,
ARRAY_SIZE(sh7203_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/clock-sh7206.c
*
* SH7206 support for the clock framework
*
* Copyright (C) 2006 Yoshinori Sato
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static const int pll1rate[] = { 1, 2, 3, 4, 6, 8 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
#define ifc_divisors pfc_divisors
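/* PLL2 input multiplier, set from the clock-mode pins in arch_init_clk_ops() */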
static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct sh_clk_ops sh7206_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7206_module_clk_ops = {
.recalc = module_clk_recalc,
};
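/* Dividing the master clock by the PLL1 factor leaves input * pll2_mult as the bus rate */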
static unsigned long bus_clk_recalc(struct clk *clk)
{
return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct sh_clk_ops sh7206_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7206_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7206_clk_ops[] = {
&sh7206_master_clk_ops,
&sh7206_module_clk_ops,
&sh7206_bus_clk_ops,
&sh7206_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0))
pll2_mult = 1;
else if (test_mode_pin(MODE_PIN2 | MODE_PIN1))
pll2_mult = 2;
else if (test_mode_pin(MODE_PIN1))
pll2_mult = 4;
if (idx < ARRAY_SIZE(sh7206_clk_ops))
*ops = sh7206_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh2a/clock-sh7206.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/clock-sh7269.c
*
* SH7269 clock framework support
*
* Copyright (C) 2012 Phil Edworthy
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7269 registers */
#define FRQCR 0xfffe0010
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
#define STBCR5 0xfffe0410
#define STBCR6 0xfffe0414
#define STBCR7 0xfffe0418
#define PLL_RATE 20
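/* Fixed x20 PLL; with the default 13.34 MHz EXTAL below this yields a 266.8 MHz PLL clock */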
/* Fixed 32 KHz root clock for RTC */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 13340000,
};
static unsigned long pll_recalc(struct clk *clk)
{
return clk->parent->rate * PLL_RATE;
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long peripheral0_recalc(struct clk *clk)
{
return clk->parent->rate / 8;
}
static struct sh_clk_ops peripheral0_clk_ops = {
.recalc = peripheral0_recalc,
};
static struct clk peripheral0_clk = {
.ops = &peripheral0_clk_ops,
.parent = &pll_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static unsigned long peripheral1_recalc(struct clk *clk)
{
return clk->parent->rate / 4;
}
static struct sh_clk_ops peripheral1_clk_ops = {
.recalc = peripheral1_recalc,
};
static struct clk peripheral1_clk = {
.ops = &peripheral1_clk_ops,
.parent = &pll_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&pll_clk,
&peripheral0_clk,
&peripheral1_clk,
};
static int div2[] = { 1, 2, 0, 4 };
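/* Index 2 is a placeholder for a reserved encoding; the DIV4 masks below (0xB, 0xA) exclude it */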
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_B,
DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
/* The mask field specifies the div2 entries that are valid */
static struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 8, 0xB, CLK_ENABLE_REG_16BIT
| CLK_ENABLE_ON_INIT),
[DIV4_B] = DIV4(FRQCR, 4, 0xA, CLK_ENABLE_REG_16BIT
| CLK_ENABLE_ON_INIT),
};
enum { MSTP72,
MSTP60,
MSTP47, MSTP46, MSTP45, MSTP44, MSTP43, MSTP42, MSTP41, MSTP40,
MSTP35, MSTP32, MSTP30,
MSTP_NR };
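/*
 * Each MSTP clock controls one module-stop bit in an 8-bit standby
 * control register; the MSTPnm name encodes register n and bit m
 * (e.g. MSTP72 is STBCR7, bit 2).
 */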
static struct clk mstp_clks[MSTP_NR] = {
[MSTP72] = SH_CLK_MSTP8(&peripheral0_clk, STBCR7, 2, 0), /* CMT */
[MSTP60] = SH_CLK_MSTP8(&peripheral1_clk, STBCR6, 0, 0), /* USB */
[MSTP47] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 7, 0), /* SCIF0 */
[MSTP46] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 6, 0), /* SCIF1 */
[MSTP45] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 5, 0), /* SCIF2 */
[MSTP44] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 4, 0), /* SCIF3 */
[MSTP43] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 3, 0), /* SCIF4 */
[MSTP42] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 2, 0), /* SCIF5 */
[MSTP41] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 1, 0), /* SCIF6 */
[MSTP40] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 0, 0), /* SCIF7 */
[MSTP35] = SH_CLK_MSTP8(&peripheral0_clk, STBCR3, 5, 0), /* MTU2 */
[MSTP32] = SH_CLK_MSTP8(&peripheral1_clk, STBCR3, 2, 0), /* ADC */
[MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
CLKDEV_CON_ID("peripheral_clk", &peripheral1_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
/* MSTP clocks */
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP47]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP46]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP45]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP44]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP43]),
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP42]),
CLKDEV_ICK_ID("fck", "sh-sci.6", &mstp_clks[MSTP41]),
CLKDEV_ICK_ID("fck", "sh-sci.7", &mstp_clks[MSTP40]),
CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]),
CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]),
CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh2a/clock-sh7269.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7269 Pinmux
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
* Copyright (C) 2012 Phil Edworthy
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <cpu/pfc.h>
static struct resource sh7269_pfc_resources[] = {
[0] = {
.start = 0xfffe3800,
.end = 0xfffe391f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7269", sh7269_pfc_resources,
ARRAY_SIZE(sh7269_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/clock-sh7264.c
*
* SH7264 clock framework support
*
* Copyright (C) 2012 Phil Edworthy
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7264 registers */
#define FRQCR 0xfffe0010
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
#define STBCR5 0xfffe0410
#define STBCR6 0xfffe0414
#define STBCR7 0xfffe0418
#define STBCR8 0xfffe041c
static const unsigned int pll1rate[] = {8, 12};
static unsigned int pll1_div;
/* Fixed 32 KHz root clock for RTC */
static struct clk r_clk = {
.rate = 32768,
};
/*
* Default rate for the root input clock, reset this with clk_set_rate()
* from the platform code.
*/
static struct clk extal_clk = {
.rate = 18000000,
};
static unsigned long pll_recalc(struct clk *clk)
{
unsigned long rate = clk->parent->rate / pll1_div;
return rate * pll1rate[(__raw_readw(FRQCR) >> 8) & 1];
}
static struct sh_clk_ops pll_clk_ops = {
.recalc = pll_recalc,
};
static struct clk pll_clk = {
.ops = &pll_clk_ops,
.parent = &extal_clk,
.flags = CLK_ENABLE_ON_INIT,
};
static struct clk *main_clks[] = {
&r_clk,
&extal_clk,
&pll_clk,
};
static int div2[] = { 1, 2, 3, 4, 6, 8, 12 };
static struct clk_div_mult_table div4_div_mult_table = {
.divisors = div2,
.nr_divisors = ARRAY_SIZE(div2),
};
static struct clk_div4_table div4_table = {
.div_mult_table = &div4_div_mult_table,
};
enum { DIV4_I, DIV4_P,
DIV4_NR };
#define DIV4(_reg, _bit, _mask, _flags) \
SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)
/* The mask field specifies the div2 entries that are valid */
static struct clk div4_clks[DIV4_NR] = {
[DIV4_I] = DIV4(FRQCR, 4, 0x7, CLK_ENABLE_REG_16BIT
| CLK_ENABLE_ON_INIT),
[DIV4_P] = DIV4(FRQCR, 0, 0x78, CLK_ENABLE_REG_16BIT),
};
enum { MSTP77, MSTP74, MSTP72,
MSTP60,
MSTP35, MSTP34, MSTP33, MSTP32, MSTP30,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
[MSTP77] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 7, 0), /* SCIF */
[MSTP74] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 4, 0), /* VDC */
[MSTP72] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 2, 0), /* CMT */
[MSTP60] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR6, 0, 0), /* USB */
[MSTP35] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 6, 0), /* MTU2 */
[MSTP34] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 4, 0), /* SDHI0 */
[MSTP33] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 3, 0), /* SDHI1 */
[MSTP32] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 2, 0), /* ADC */
[MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */
};
static struct clk_lookup lookups[] = {
/* main clocks */
CLKDEV_CON_ID("rclk", &r_clk),
CLKDEV_CON_ID("extal", &extal_clk),
CLKDEV_CON_ID("pll_clk", &pll_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
/* MSTP clocks */
CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.6", &mstp_clks[MSTP77]),
CLKDEV_ICK_ID("fck", "sh-sci.7", &mstp_clks[MSTP77]),
CLKDEV_CON_ID("vdc3", &mstp_clks[MSTP74]),
CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]),
CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]),
CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]),
CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP34]),
CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP33]),
CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]),
CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]),
};
int __init arch_clk_init(void)
{
int k, ret = 0;
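	/*
	 * The clock-mode pins select the EXTAL pre-divider feeding PLL1:
	 * MD0 and MD1 set -> /3, only MD0 set -> /4, otherwise /1.
	 */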
if (test_mode_pin(MODE_PIN0)) {
if (test_mode_pin(MODE_PIN1))
pll1_div = 3;
else
pll1_div = 4;
} else
pll1_div = 1;
for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
ret = clk_register(main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
if (!ret)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
return ret;
}
| linux-master | arch/sh/kernel/cpu/sh2a/clock-sh7264.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7264 Pinmux
*
* Copyright (C) 2012 Renesas Electronics Europe Ltd
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <cpu/pfc.h>
static struct resource sh7264_pfc_resources[] = {
[0] = {
.start = 0xfffe3800,
.end = 0xfffe393f,
.flags = IORESOURCE_MEM,
},
};
static int __init plat_pinmux_setup(void)
{
return sh_pfc_register("pfc-sh7264", sh7264_pfc_resources,
ARRAY_SIZE(sh7264_pfc_resources));
}
arch_initcall(plat_pinmux_setup);
| linux-master | arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas MX-G (R8A03022BG) Setup
*
* Copyright (C) 2008, 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1,
SCIF0, SCIF1,
MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5,
MTU2_TGI3B, MTU2_TGI3C,
/* interrupt groups */
PINT,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73),
INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75),
INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77),
INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95),
INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97),
INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99),
INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101),
INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221),
INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223),
INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225),
INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227),
INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229),
INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231),
INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233),
INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235),
INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237),
INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239),
INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241),
INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243),
INTC_IRQ(MTU2_TGI3B, 244),
INTC_IRQ(MTU2_TGI3C, 245),
INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247),
INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249),
INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251),
INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253),
INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
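	/* { set register, clear register, register width, field width, { sources } } */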
{ 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } },
{ 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } },
{ 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffd9800, 0, 16, 4, /* IPR06 */ { } },
{ 0xfffd9802, 0, 16, 4, /* IPR07 */ { } },
{ 0xfffd9804, 0, 16, 4, /* IPR08 */ { } },
{ 0xfffd9806, 0, 16, 4, /* IPR09 */ { } },
{ 0xfffd9808, 0, 16, 4, /* IPR10 */ { } },
{ 0xfffd980a, 0, 16, 4, /* IPR11 */ { } },
{ 0xfffd980c, 0, 16, 4, /* IPR12 */ { } },
{ 0xfffd980e, 0, 16, 4, /* IPR13 */ { } },
{ 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } },
{ 0xfffd9812, 0, 16, 4, /* IPR15 */
{ SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } },
{ 0xfffd9814, 0, 16, 4, /* IPR16 */
{ MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
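	/* { set register, clear register, register width, { one source per bit, MSB first } } */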
{ 0xfffd9408, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
mask_registers, prio_registers, NULL);
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xff801000, 0x400),
DEFINE_RES_IRQ_NAMED(228, "tgi0a"),
DEFINE_RES_IRQ_NAMED(234, "tgi1a"),
DEFINE_RES_IRQ_NAMED(240, "tgi2a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct plat_sci_port scif0_platform_data = {
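	/* SCSCR_REIE enables the receive-error interrupt by default. */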
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xff804000, 0x100),
DEFINE_RES_IRQ(220),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct platform_device *mxg_devices[] __initdata = {
&scif0_device,
&mtu2_device,
};
static int __init mxg_devices_setup(void)
{
return platform_add_devices(mxg_devices,
ARRAY_SIZE(mxg_devices));
}
arch_initcall(mxg_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *mxg_early_devices[] __initdata = {
&scif0_device,
&mtu2_device,
};
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(mxg_early_devices,
ARRAY_SIZE(mxg_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-mxg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7203 and SH7263 Setup
*
* Copyright (C) 2007 - 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
#include <asm/platform_early.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7,
USB, LCDC, CMT0, CMT1, BSC, WDT,
MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU,
MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V,
ADC_ADI,
IIC30, IIC31, IIC32, IIC33,
SCIF0, SCIF1, SCIF2, SCIF3,
SSU0, SSU1,
SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII,
/* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */
ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1,
SRC, IEBI,
/* interrupt groups */
PINT,
};
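/*
 * Sources with multiple hardware vectors (DMAC, MTU, IIC, SCIF, ...) fan
 * all of their vectors into a single logical IRQ below.
 */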
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109),
INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113),
INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117),
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121),
INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125),
INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129),
INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133),
INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137),
INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141),
INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143),
INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145),
INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147),
INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149),
INTC_IRQ(MTU0_VEF, 150),
INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152),
INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154),
INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156),
INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158),
INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160),
INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162),
INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164),
INTC_IRQ(MTU2_TCI3V, 165),
INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167),
INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169),
INTC_IRQ(MTU2_TCI4V, 170),
INTC_IRQ(ADC_ADI, 171),
INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173),
INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175),
INTC_IRQ(IIC30, 176),
INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178),
INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180),
INTC_IRQ(IIC31, 181),
INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183),
INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185),
INTC_IRQ(IIC32, 186),
INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188),
INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190),
INTC_IRQ(IIC33, 191),
INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193),
INTC_IRQ(SCIF0, 194), INTC_IRQ(SCIF0, 195),
INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197),
INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199),
INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201),
INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203),
INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205),
INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207),
INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209),
INTC_IRQ(SSU0, 210),
INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212),
INTC_IRQ(SSU1, 213),
INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215),
INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217),
INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225),
INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227),
INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232),
INTC_IRQ(RTC, 233),
INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235),
INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237),
INTC_IRQ(RCAN0, 238),
INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240),
INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242),
INTC_IRQ(RCAN1, 243),
/* SH7263-specific sources */
#ifdef CONFIG_CPU_SUBTYPE_SH7263
INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219),
INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221),
INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223),
INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229),
INTC_IRQ(SDHI, 230),
INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245),
INTC_IRQ(SRC, 246),
INTC_IRQ(IEBI, 247),
#endif
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } },
{ 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } },
{ 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } },
{ 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } },
{ 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB,
MTU2_VU } },
{ 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD,
MTU2_TCI4V } },
{ 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } },
{ 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } },
{ 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } },
#ifdef CONFIG_CPU_SUBTYPE_SH7203
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
SSI3_SSII, 0 } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } },
#else
{ 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII,
SSI3_SSII, ROMDEC } },
{ 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } },
{ 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } },
#endif
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffe0808, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups,
mask_registers, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(0xfffe8000, 0x100),
DEFINE_RES_IRQ(192),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif1_resources[] = {
DEFINE_RES_MEM(0xfffe8800, 0x100),
DEFINE_RES_IRQ(196),
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.resource = scif1_resources,
.num_resources = ARRAY_SIZE(scif1_resources),
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct plat_sci_port scif2_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif2_resources[] = {
DEFINE_RES_MEM(0xfffe9000, 0x100),
DEFINE_RES_IRQ(200),
};
static struct platform_device scif2_device = {
.name = "sh-sci",
.id = 2,
.resource = scif2_resources,
.num_resources = ARRAY_SIZE(scif2_resources),
.dev = {
.platform_data = &scif2_platform_data,
},
};
static struct plat_sci_port scif3_platform_data = {
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
.regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};
static struct resource scif3_resources[] = {
DEFINE_RES_MEM(0xfffe9800, 0x100),
DEFINE_RES_IRQ(204),
};
static struct platform_device scif3_device = {
.name = "sh-sci",
.id = 3,
.resource = scif3_resources,
.num_resources = ARRAY_SIZE(scif3_resources),
.dev = {
.platform_data = &scif3_platform_data,
},
};
static struct sh_timer_config cmt_platform_data = {
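	/* Bits 0 and 1 set: register CMT channels 0 and 1 (IRQs 142 and 143 below). */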
.channels_mask = 3,
};
static struct resource cmt_resources[] = {
DEFINE_RES_MEM(0xfffec000, 0x10),
DEFINE_RES_IRQ(142),
DEFINE_RES_IRQ(143),
};
static struct platform_device cmt_device = {
.name = "sh-cmt-16",
.id = 0,
.dev = {
.platform_data = &cmt_platform_data,
},
.resource = cmt_resources,
.num_resources = ARRAY_SIZE(cmt_resources),
};
static struct resource mtu2_resources[] = {
DEFINE_RES_MEM(0xfffe4000, 0x400),
DEFINE_RES_IRQ_NAMED(146, "tgi0a"),
DEFINE_RES_IRQ_NAMED(153, "tgi1a"),
};
static struct platform_device mtu2_device = {
.name = "sh-mtu2",
.id = -1,
.resource = mtu2_resources,
.num_resources = ARRAY_SIZE(mtu2_resources),
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xffff2000,
.end = 0xffff2000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Shared Period/Carry/Alarm IRQ */
.start = 231,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
static struct platform_device *sh7203_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
&rtc_device,
};
static int __init sh7203_devices_setup(void)
{
return platform_add_devices(sh7203_devices,
ARRAY_SIZE(sh7203_devices));
}
arch_initcall(sh7203_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *sh7203_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&scif2_device,
&scif3_device,
&cmt_device,
&mtu2_device,
};
#define STBCR3 0xfffe0408
#define STBCR4 0xfffe040c
void __init plat_early_device_setup(void)
{
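	/* Clearing a module-stop bit in STBCRn ungates the clock for that block. */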
/* enable CMT clock */
__raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4);
/* enable MTU2 clock */
__raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3);
sh_early_platform_add_devices(sh7203_early_devices,
ARRAY_SIZE(sh7203_early_devices));
}
| linux-master | arch/sh/kernel/cpu/sh2a/setup-sh7203.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/clock-sh7201.c
*
* SH7201 support for the clock framework
*
* Copyright (C) 2008 Peter Griffin <[email protected]>
*
* Based on clock-sh4.c
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
static const int pll1rate[] = { 1, 2, 3, 4, 6, 8 };
static const int pfc_divisors[] = { 1, 2, 3, 4, 6, 8, 12 };
#define ifc_divisors pfc_divisors
static unsigned int pll2_mult;
static void master_clk_init(struct clk *clk)
{
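	/* Master clock = 10 MHz input * PLL2 multiplier * PLL1 rate (FREQCR bits 10:8). */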
clk->rate = 10000000 * pll2_mult *
pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct sh_clk_ops sh7201_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7201_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FREQCR) & 0x0007);
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7201_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007);
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7201_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7201_clk_ops[] = {
&sh7201_master_clk_ops,
&sh7201_module_clk_ops,
&sh7201_bus_clk_ops,
&sh7201_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
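	/* Derive the PLL2 multiplier from the external clock mode pins. */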
if (test_mode_pin(MODE_PIN1 | MODE_PIN0))
pll2_mult = 1;
else if (test_mode_pin(MODE_PIN1))
pll2_mult = 2;
else
pll2_mult = 4;
if (idx < ARRAY_SIZE(sh7201_clk_ops))
*ops = sh7201_clk_ops[idx];
}
| linux-master | arch/sh/kernel/cpu/sh2a/clock-sh7201.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh2a/probe.c
*
* CPU Subtype Probing for SH-2A.
*
* Copyright (C) 2004 - 2007 Paul Mundt
*/
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/cache.h>
void cpu_probe(void)
{
boot_cpu_data.family = CPU_FAMILY_SH2A;
	/* All SH-2A CPUs support both 16 and 32-bit opcodes. */
boot_cpu_data.flags |= CPU_HAS_OP32;
#if defined(CONFIG_CPU_SUBTYPE_SH7201)
boot_cpu_data.type = CPU_SH7201;
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7203)
boot_cpu_data.type = CPU_SH7203;
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7263)
boot_cpu_data.type = CPU_SH7263;
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7264)
boot_cpu_data.type = CPU_SH7264;
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7269)
boot_cpu_data.type = CPU_SH7269;
boot_cpu_data.flags |= CPU_HAS_FPU;
#elif defined(CONFIG_CPU_SUBTYPE_SH7206)
boot_cpu_data.type = CPU_SH7206;
boot_cpu_data.flags |= CPU_HAS_DSP;
#elif defined(CONFIG_CPU_SUBTYPE_MXG)
boot_cpu_data.type = CPU_MXG;
boot_cpu_data.flags |= CPU_HAS_DSP;
#endif
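	/* 4-way, 128-set d-cache; with 16-byte lines this is 8 KiB total, 2 KiB (1 << 11) per way. */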
boot_cpu_data.dcache.ways = 4;
boot_cpu_data.dcache.way_incr = (1 << 11);
boot_cpu_data.dcache.sets = 128;
boot_cpu_data.dcache.entry_shift = 4;
boot_cpu_data.dcache.linesz = L1_CACHE_BYTES;
boot_cpu_data.dcache.flags = 0;
/*
* The icache is the same as the dcache as far as this setup is
* concerned. The only real difference in hardware is that the icache
	 * lacks the U bit that the dcache has; none of this has any bearing
* on the cache info.
*/
boot_cpu_data.icache = boot_cpu_data.dcache;
}
| linux-master | arch/sh/kernel/cpu/sh2a/probe.c |