/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <[email protected]>
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/cpufreq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned int cnt;
int res;
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
return res;
}
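/*
 * Illustrative standalone sketch (not part of this file): the idiom above
 * programs a one-shot deadline on a free-running counter, then re-reads the
 * counter to detect whether the deadline already passed while it was being
 * set up. The signed subtraction keeps the check correct across 32-bit
 * wrap-around. fake_count/fake_compare are hypothetical stand-ins for
 * CP0_Count/CP0_Compare.
 */
#include <stdint.h>

static volatile uint32_t fake_count;	/* stands in for CP0_Count   */
static uint32_t fake_compare;		/* stands in for CP0_Compare */

static int arm_oneshot(uint32_t delta)
{
	uint32_t cnt = fake_count;		/* read_c0_count()    */

	fake_compare = cnt + delta;		/* write_c0_compare() */
	/* Deadline already passed? Fail (-ETIME-style) so callers retry. */
	return ((int32_t)(fake_count - (cnt + delta)) >= 0) ? -1 : 0;
}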
/**
* calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
*
* Running under virtualisation can introduce overhead into mips_next_event() in
* the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
* potentially with an unnatural frequency, which makes a fixed min_delta_ns
* value inappropriate as it may be too small.
*
* It can also introduce occasional latency from the guest being descheduled.
*
* This function calculates a good minimum delta based roughly on the 75th
* percentile of the time taken to do the mips_next_event() sequence, in order
* to handle potentially higher overhead while also eliminating outliers due to
* unpredictable hypervisor latency (which can be handled by retries).
*
* Return: An appropriate minimum delta for the clock event device.
*/
static unsigned int calculate_min_delta(void)
{
unsigned int cnt, i, j, k, l;
unsigned int buf1[4], buf2[3];
unsigned int min_delta;
/*
* Calculate the median of 5 75th percentiles of 5 samples of how long
* it takes to set CP0_Compare = CP0_Count + delta.
*/
for (i = 0; i < 5; ++i) {
for (j = 0; j < 5; ++j) {
/*
* This is like the code in mips_next_event(), and
* directly measures the borderline "safe" delta.
*/
cnt = read_c0_count();
write_c0_compare(cnt);
cnt = read_c0_count() - cnt;
/* Sorted insert into buf1 */
for (k = 0; k < j; ++k) {
if (cnt < buf1[k]) {
l = min_t(unsigned int,
j, ARRAY_SIZE(buf1) - 1);
for (; l > k; --l)
buf1[l] = buf1[l - 1];
break;
}
}
if (k < ARRAY_SIZE(buf1))
buf1[k] = cnt;
}
/* Sorted insert of 75th percentile into buf2 */
for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
l = min_t(unsigned int,
i, ARRAY_SIZE(buf2) - 1);
for (; l > k; --l)
buf2[l] = buf2[l - 1];
break;
}
}
if (k < ARRAY_SIZE(buf2))
buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
}
/* Use 2 * median of 75th percentiles */
min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;
/* Don't go too low */
if (min_delta < 0x300)
min_delta = 0x300;
pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
__func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
return min_delta;
}
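/*
 * Illustrative standalone sketch (not part of this file): the same
 * "2 x median of 75th percentiles" estimate as above, written with an
 * explicit sort for clarity instead of in-place sorted insertion.
 * sample_overhead() is a hypothetical stand-in for timing one
 * write_c0_compare()/read_c0_count() sequence.
 */
#include <stdlib.h>

static int cmp_uint(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return (x > y) - (x < y);
}

static unsigned int sample_overhead(void)	/* hypothetical measurement */
{
	return 0x40 + (unsigned int)(rand() % 64);
}

static unsigned int estimate_min_delta(void)
{
	unsigned int samples[5], p75[5];
	int i, j;

	for (i = 0; i < 5; i++) {
		for (j = 0; j < 5; j++)
			samples[j] = sample_overhead();
		qsort(samples, 5, sizeof(samples[0]), cmp_uint);
		p75[i] = samples[3];	/* 4th smallest of 5 = 75th percentile */
	}
	qsort(p75, 5, sizeof(p75[0]), cmp_uint);
	return p75[2] * 2;		/* 2 * median of the percentiles */
}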
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
/*
* Possibly handle a performance counter interrupt.
* Return true if the timer interrupt should not be checked.
*/
static inline int handle_perf_irq(int r2)
{
/*
* The performance counter overflow interrupt may be shared with the
* timer interrupt (cp0_perfcount_irq < 0). If it is and a
* performance counter has overflowed (perf_irq() == IRQ_HANDLED)
* and we can't reliably determine if a counter interrupt has also
* happened (!r2) then don't check for a timer interrupt.
*/
return (cp0_perfcount_irq < 0) &&
perf_irq() == IRQ_HANDLED &&
!r2;
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2_r6;
struct clock_event_device *cd;
int cpu = smp_processor_id();
/*
* Suckage alert:
* Before R2 of the architecture there was no way to see if a
* performance counter interrupt was pending, so we have to run
* the performance counter interrupt handler anyway.
*/
if (handle_perf_irq(r2))
return IRQ_HANDLED;
/*
* The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
* interrupt. Being the paranoiacs we are, we check anyway.
*/
if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
/*
* IRQF_SHARED: The timer interrupt may be shared with other interrupts
* such as perf counter and FDC interrupts.
*/
.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
.name = "timer",
};
void mips_event_handler(struct clock_event_device *dev)
{
}
/*
* FIXME: This doesn't hold for the relocated E9000 compare interrupt.
*/
static int c0_compare_int_pending(void)
{
/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}
/*
* Compare interrupt can be routed and latched outside the core,
* so wait up to worst case number of cycle counter ticks for timer interrupt
* changes to propagate to the cause register.
*/
#define COMPARE_INT_SEEN_TICKS 50
int c0_compare_int_usable(void)
{
unsigned int delta;
unsigned int cnt;
/*
* IP7 already pending? Try to clear it by acking the timer.
*/
if (c0_compare_int_pending()) {
cnt = read_c0_count();
write_c0_compare(cnt - 1);
back_to_back_c0_hazard();
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (!c0_compare_int_pending())
break;
if (c0_compare_int_pending())
return 0;
}
for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
cnt = read_c0_count();
cnt += delta;
write_c0_compare(cnt);
back_to_back_c0_hazard();
if ((int)(read_c0_count() - cnt) < 0)
break;
/* increase delta if the timer was already expired */
}
while ((int)(read_c0_count() - cnt) <= 0)
; /* Wait for expiry */
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (c0_compare_int_pending())
break;
if (!c0_compare_int_pending())
return 0;
cnt = read_c0_count();
write_c0_compare(cnt - 1);
back_to_back_c0_hazard();
while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
if (!c0_compare_int_pending())
break;
if (c0_compare_int_pending())
return 0;
/*
* Feels like a real count / compare timer.
*/
return 1;
}
unsigned int __weak get_c0_compare_int(void)
{
return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}
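/*
 * Illustrative standalone sketch (not part of this file): because the
 * function above is declared __weak, a platform overrides it simply by
 * defining a strong symbol of the same name in its own board code. The
 * MY_PLATFORM_TIMER_IRQ constant below is hypothetical.
 */
#if 0	/* example platform override */
unsigned int get_c0_compare_int(void)
{
	return MY_PLATFORM_TIMER_IRQ;
}
#endif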
#ifdef CONFIG_CPU_FREQ
static unsigned long mips_ref_freq;
static int r4k_cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
struct clock_event_device *cd;
unsigned long rate;
int cpu;
if (!mips_ref_freq)
mips_ref_freq = freq->old;
if (val == CPUFREQ_POSTCHANGE) {
rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq,
freq->new);
for_each_cpu(cpu, freq->policy->cpus) {
cd = &per_cpu(mips_clockevent_device, cpu);
clockevents_update_freq(cd, rate);
}
}
return 0;
}
static struct notifier_block r4k_cpufreq_notifier = {
.notifier_call = r4k_cpufreq_callback,
};
static int __init r4k_register_cpufreq_notifier(void)
{
return cpufreq_register_notifier(&r4k_cpufreq_notifier,
CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(r4k_register_cpufreq_notifier);
#endif /* !CONFIG_CPU_FREQ */
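/*
 * Illustrative standalone sketch (not part of this file): the notifier
 * above rescales the clockevent frequency in proportion to the CPU clock
 * change; conceptually cpufreq_scale(rate, ref, new) computes
 * rate * new / ref (the kernel helper avoids a 64-bit division using
 * shifts). scale_rate() is a hypothetical plain-C equivalent.
 */
static unsigned long scale_rate(unsigned long rate,
				unsigned long ref_freq, unsigned long new_freq)
{
	return (unsigned long)(((unsigned long long)rate * new_freq) / ref_freq);
}
/* e.g. scale_rate(100000000, 800000, 400000) == 50000000 */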
int r4k_clockevent_init(void)
{
unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq, min_delta;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (!c0_compare_int_usable())
return -ENXIO;
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
* interrupt number of its liking.
*/
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
min_delta = calculate_min_delta();
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
if (request_irq(irq, c0_compare_interrupt, flags, "timer",
c0_compare_interrupt))
pr_err("Failed to request irq %d (timer)\n", irq);
return 0;
}
/* end of linux-master: arch/mips/kernel/cevt-r4k.c */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2014, Imagination Technologies Ltd.
*/
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/uprobes.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/resume_user_mode.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/inst.h>
#include <asm/msa.h>
#include "signal-common.h"
static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
/* Matches struct ucontext from its uc_mcontext field onwards */
struct sigcontext sf_sc;
sigset_t sf_mask;
unsigned long long sf_extcontext[];
};
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
u32 rs_pad[2]; /* Was: signal trampoline */
struct siginfo rs_info;
struct ucontext rs_uc;
};
#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
*/
static int copy_fp_to_sigcontext(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |=
__put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
&fpregs[i]);
}
err |= __put_user(current->thread.fpu.fcr31, csr);
return err;
}
static int copy_fp_from_sigcontext(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
int i;
int err = 0;
int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
u64 fpr_val;
for (i = 0; i < NUM_FPU_REGS; i += inc) {
err |= __get_user(fpr_val, &fpregs[i]);
set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
}
err |= __get_user(current->thread.fpu.fcr31, csr);
return err;
}
#else /* !CONFIG_MIPS_FP_SUPPORT */
static int copy_fp_to_sigcontext(void __user *sc)
{
return 0;
}
static int copy_fp_from_sigcontext(void __user *sc)
{
return 0;
}
#endif /* !CONFIG_MIPS_FP_SUPPORT */
/*
* Wrappers for the assembly _{save,restore}_fp_context functions.
*/
static int save_hw_fp_context(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
return _save_fp_context(fpregs, csr);
}
static int restore_hw_fp_context(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
return _restore_fp_context(fpregs, csr);
}
/*
* Extended context handling.
*/
static inline void __user *sc_to_extcontext(void __user *sc)
{
struct ucontext __user *uc;
/*
* We can just pretend the sigcontext is always embedded in a struct
* ucontext here, because the offset from sigcontext to extended
* context is the same in the struct sigframe case.
*/
uc = container_of(sc, struct ucontext, uc_mcontext);
return &uc->uc_extcontext;
}
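/*
 * Illustrative standalone sketch (not part of this file): container_of(),
 * used above, recovers a pointer to an enclosing structure from a pointer
 * to one of its members by subtracting the member's offset.
 */
#include <stddef.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct outer { int a; int b; };

/* given int *pb pointing at some outer's b member:
 *   struct outer *o = example_container_of(pb, struct outer, b);
 */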
#ifdef CONFIG_CPU_HAS_MSA
static int save_msa_extcontext(void __user *buf)
{
struct msa_extcontext __user *msa = buf;
uint64_t val;
int i, err;
if (!thread_msa_context_live())
return 0;
/*
* Ensure that we can't lose the live MSA context between checking
* for it & writing it to memory.
*/
preempt_disable();
if (is_msa_enabled()) {
/*
* There are no EVA versions of the vector register load/store
* instructions, so MSA context has to be saved to kernel memory
* and then copied to user memory. The save to kernel memory
* should already have been done when handling scalar FP
* context.
*/
BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
preempt_enable();
} else {
preempt_enable();
err = __put_user(current->thread.fpu.msacsr, &msa->csr);
for (i = 0; i < NUM_FPU_REGS; i++) {
val = get_fpr64(&current->thread.fpu.fpr[i], 1);
err |= __put_user(val, &msa->wr[i]);
}
}
err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
err |= __put_user(sizeof(*msa), &msa->ext.size);
return err ? -EFAULT : sizeof(*msa);
}
static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
struct msa_extcontext __user *msa = buf;
unsigned long long val;
unsigned int csr;
int i, err;
if (size != sizeof(*msa))
return -EINVAL;
err = get_user(csr, &msa->csr);
if (err)
return err;
preempt_disable();
if (is_msa_enabled()) {
/*
* There are no EVA versions of the vector register load/store
* instructions, so MSA context has to be copied to kernel
* memory and later loaded to registers. The same is true of
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
preempt_enable();
} else {
preempt_enable();
current->thread.fpu.msacsr = csr;
for (i = 0; i < NUM_FPU_REGS; i++) {
err |= __get_user(val, &msa->wr[i]);
set_fpr64(&current->thread.fpu.fpr[i], 1, val);
}
}
return err;
}
#else /* !CONFIG_CPU_HAS_MSA */
static int save_msa_extcontext(void __user *buf)
{
return 0;
}
static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
return SIGSYS;
}
#endif /* !CONFIG_CPU_HAS_MSA */
static int save_extcontext(void __user *buf)
{
int sz;
sz = save_msa_extcontext(buf);
if (sz < 0)
return sz;
buf += sz;
/* If no context was saved then trivially return */
if (!sz)
return 0;
/* Write the end marker */
if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
return -EFAULT;
sz += sizeof(((struct extcontext *)NULL)->magic);
return sz;
}
static int restore_extcontext(void __user *buf)
{
struct extcontext ext;
int err;
while (1) {
err = __get_user(ext.magic, (unsigned int *)buf);
if (err)
return err;
if (ext.magic == END_EXTCONTEXT_MAGIC)
return 0;
err = __get_user(ext.size, (unsigned int *)(buf
+ offsetof(struct extcontext, size)));
if (err)
return err;
switch (ext.magic) {
case MSA_EXTCONTEXT_MAGIC:
err = restore_msa_extcontext(buf, ext.size);
break;
default:
err = -EINVAL;
break;
}
if (err)
return err;
buf += ext.size;
}
}
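/*
 * Illustrative standalone sketch (not part of this file): the extended
 * context area walked above is a sequence of { magic, size } records
 * terminated by an end marker, i.e. an ordinary type-length-value buffer.
 * The struct and constant below are hypothetical stand-ins.
 */
#include <stdint.h>

#define EXAMPLE_END_MAGIC	0x78454344u

struct example_ext {
	uint32_t magic;
	uint32_t size;	/* total record size, including this header */
};

static int walk_records(const char *buf)
{
	const struct example_ext *ext;

	for (;;) {
		ext = (const struct example_ext *)buf;
		if (ext->magic == EXAMPLE_END_MAGIC)
			return 0;
		if (!ext->size)
			return -1;	/* malformed record, avoid looping */
		/* dispatch on ext->magic here */
		buf += ext->size;
	}
}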
/*
* Helper routines
*/
int protected_save_fp_context(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
uint32_t __user *used_math = sc + abi->off_sc_used_math;
unsigned int used, ext_sz;
int err;
used = used_math() ? USED_FP : 0;
if (!used)
goto fp_done;
if (!test_thread_flag(TIF_32BIT_FPREGS))
used |= USED_FR1;
if (test_thread_flag(TIF_HYBRID_FPREGS))
used |= USED_HYBRID_FPRS;
/*
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = save_fp_context(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_to_sigcontext(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &fpregs[0]) |
__put_user(0, &fpregs[31]) |
__put_user(0, csr);
if (err)
return err; /* really bad sigcontext */
}
fp_done:
ext_sz = err = save_extcontext(sc_to_extcontext(sc));
if (err < 0)
return err;
used |= ext_sz ? USED_EXTCONTEXT : 0;
return __put_user(used, used_math);
}
int protected_restore_fp_context(void __user *sc)
{
struct mips_abi *abi = current->thread.abi;
uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
uint32_t __user *used_math = sc + abi->off_sc_used_math;
unsigned int used;
int err, sig = 0, tmp __maybe_unused;
err = __get_user(used, used_math);
conditional_used_math(used & USED_FP);
/*
* The signal handler may have used FPU; give it up if the program
* doesn't want it following sigreturn.
*/
if (err || !(used & USED_FP))
lose_fpu(0);
if (err)
return err;
if (!(used & USED_FP))
goto fp_done;
err = sig = fpcsr_pending(csr);
if (err < 0)
return err;
/*
* EVA does not have userland equivalents of ldc1 or sdc1, so we
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
lock_fpu_owner();
if (is_fpu_owner()) {
err = restore_fp_context(sc);
unlock_fpu_owner();
} else {
unlock_fpu_owner();
err = copy_fp_from_sigcontext(sc);
}
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &fpregs[0]) |
__get_user(tmp, &fpregs[31]) |
__get_user(tmp, csr);
if (err)
break; /* really bad sigcontext */
}
fp_done:
if (!err && (used & USED_EXTCONTEXT))
err = restore_extcontext(sc_to_extcontext(sc));
return err ?: sig;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int i;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
err |= __put_user(0, &sc->sc_regs[0]);
for (i = 1; i < 32; i++)
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __put_user(regs->acx, &sc->sc_acx);
#endif
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __put_user(mfhi1(), &sc->sc_hi1);
err |= __put_user(mflo1(), &sc->sc_lo1);
err |= __put_user(mfhi2(), &sc->sc_hi2);
err |= __put_user(mflo2(), &sc->sc_lo2);
err |= __put_user(mfhi3(), &sc->sc_hi3);
err |= __put_user(mflo3(), &sc->sc_lo3);
err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
}
/*
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
err |= protected_save_fp_context(sc);
return err;
}
static size_t extcontext_max_size(void)
{
size_t sz = 0;
/*
* The assumption here is that between this point & the point at which
* the extended context is saved the size of the context should only
* ever be able to shrink (if the task is preempted), but never grow.
* That is, what this function returns is an upper bound on the size of
* the extended context for the current task at the current time.
*/
if (thread_msa_context_live())
sz += sizeof(struct msa_extcontext);
/* If any context is saved then we'll append the end marker */
if (sz)
sz += sizeof(((struct extcontext *)NULL)->magic);
return sz;
}
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
unsigned int csr, enabled;
err = __get_user(csr, fpcsr);
enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
/*
* If the signal handler set some FPU exceptions, clear it and
* send SIGFPE.
*/
if (csr & enabled) {
csr &= ~enabled;
err |= __put_user(csr, fpcsr);
sig = SIGFPE;
}
return err ?: sig;
}
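/*
 * Illustrative standalone sketch (not part of this file): in the MIPS FCSR
 * the Enable bits sit 5 bits below the corresponding Cause bits, so shifting
 * the enables left by 5 lines them up with the causes; Unimplemented
 * Operation has no enable bit and always traps. The mask values below
 * follow the usual MIPS FCSR layout (enables in bits 11:7, the
 * unimplemented-operation cause in bit 17).
 */
#include <stdint.h>

#define EX_FCSR_ALL_E	0x00000f80u	/* enable bits, FCSR[11:7]       */
#define EX_FCSR_UNI_X	0x00020000u	/* unimplemented-op cause bit    */

static int pending_fp_exceptions(uint32_t fcsr)
{
	uint32_t armed = EX_FCSR_UNI_X | ((fcsr & EX_FCSR_ALL_E) << 5);

	return (fcsr & armed) != 0;	/* cause bit set while enabled */
}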
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned long treg;
int err = 0;
int i;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
#ifdef CONFIG_CPU_HAS_SMARTMIPS
err |= __get_user(regs->acx, &sc->sc_acx);
#endif
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
if (cpu_has_dsp) {
err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
}
for (i = 1; i < 32; i++)
err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
return err ?: protected_restore_fp_context(sc);
}
#ifdef CONFIG_WAR_ICACHE_REFILLS
#define SIGMASK ~(cpu_icache_line_size()-1)
#else
#define SIGMASK ALMASK
#endif
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
/* Leave space for potential extended context */
frame_size += extcontext_max_size();
/* Default to using normal stack */
sp = regs->regs[29];
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
return (void __user __force *)(-1UL);
/*
* FPU emulator may have its own trampoline active just
* above the user stack, 16-bytes before the next lowest
* 16 byte boundary. Try to avoid trashing it.
*/
sp -= 32;
sp = sigsp(sp, ksig);
return (void __user *)((sp - frame_size) & SIGMASK);
}
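/*
 * Illustrative standalone sketch (not part of this file): rounding a stack
 * pointer down to a power-of-two boundary with a mask, which is what the
 * "& SIGMASK" above does (ALMASK aligns to the ABI stack alignment).
 */
#include <stdint.h>

static uintptr_t align_down(uintptr_t sp, uintptr_t align)	/* align: power of two */
{
	return sp & ~(align - 1);
}
/* e.g. align_down(0x7fff1234, 16) == 0x7fff1230 */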
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif
#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
struct sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
if (!access_ok(act, sizeof(*act)))
return -EFAULT;
err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
#endif
#ifdef CONFIG_TRAD_SIGNALS
asmlinkage void sys_sigreturn(void)
{
struct sigframe __user *frame;
struct pt_regs *regs;
sigset_t blocked;
int sig;
regs = current_pt_regs();
frame = (struct sigframe __user *)regs->regs[29];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
goto badframe;
set_current_blocked(&blocked);
sig = restore_sigcontext(regs, &frame->sf_sc);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
: /* no outputs */
: "r" (regs));
/* Unreached */
badframe:
force_sig(SIGSEGV);
}
#endif /* CONFIG_TRAD_SIGNALS */
asmlinkage void sys_rt_sigreturn(void)
{
struct rt_sigframe __user *frame;
struct pt_regs *regs;
sigset_t set;
int sig;
regs = current_pt_regs();
frame = (struct rt_sigframe __user *)regs->regs[29];
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
if (sig < 0)
goto badframe;
else if (sig)
force_sig(sig);
if (restore_altstack(&frame->rs_uc.uc_stack))
goto badframe;
/*
* Don't let your children do this ...
*/
__asm__ __volatile__(
"move\t$29, %0\n\t"
"j\tsyscall_exit"
: /* no outputs */
: "r" (regs));
/* Unreached */
badframe:
force_sig(SIGSEGV);
}
#ifdef CONFIG_TRAD_SIGNALS
static int setup_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct sigframe __user *frame;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
if (err)
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to struct sigcontext
*
* $25 and c0_epc point to the signal handler, $29 points to the
* struct sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = 0;
regs->regs[ 6] = (unsigned long) &frame->sf_sc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
#endif
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe __user *frame;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
/* Create siginfo. */
if (copy_siginfo_to_user(&frame->rs_info, &ksig->info))
return -EFAULT;
/* Create the ucontext. */
if (__put_user(0, &frame->rs_uc.uc_flags))
return -EFAULT;
if (__put_user(NULL, &frame->rs_uc.uc_link))
return -EFAULT;
if (__save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]))
return -EFAULT;
if (setup_sigcontext(regs, &frame->rs_uc.uc_mcontext))
return -EFAULT;
if (__copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)))
return -EFAULT;
/*
* Arguments to signal handler:
*
* a0 = signal number
* a1 = 0 (should be cause)
* a2 = pointer to ucontext
*
* $25 and c0_epc point to the signal handler, $29 points to
* the struct rt_sigframe.
*/
regs->regs[ 4] = ksig->sig;
regs->regs[ 5] = (unsigned long) &frame->rs_info;
regs->regs[ 6] = (unsigned long) &frame->rs_uc;
regs->regs[29] = (unsigned long) frame;
regs->regs[31] = (unsigned long) sig_return;
regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
return 0;
}
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
.setup_frame = setup_frame,
#endif
.setup_rt_frame = setup_rt_frame,
.restart = __NR_restart_syscall,
.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
.vdso = &vdso_image,
};
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
struct mips_abi *abi = current->thread.abi;
void *vdso = current->mm->context.vdso;
/*
* If we were emulating a delay slot instruction, exit that frame such
* that addresses in the sigframe are as expected for userland and we
* don't have a problem if we reuse the thread's frame for an
* instruction within the signal handler.
*/
dsemul_thread_rollback(regs);
if (regs->regs[0]) {
switch(regs->regs[2]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
regs->regs[2] = EINTR;
break;
case ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
}
fallthrough;
case ERESTARTNOINTR:
regs->regs[7] = regs->regs[26];
regs->regs[2] = regs->regs[0];
regs->cp0_epc -= 4;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
rseq_signal_deliver(ksig, regs);
if (sig_uses_siginfo(&ksig->ka, abi))
ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
ksig, regs, oldset);
else
ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
ksig, regs, oldset);
signal_setup_done(ret, ksig, 0);
}
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
if (regs->regs[0]) {
switch (regs->regs[2]) {
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
regs->regs[2] = regs->regs[0];
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
break;
case ERESTART_RESTARTBLOCK:
regs->regs[2] = current->thread.abi->restart;
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
break;
}
regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
restore_saved_sigmask();
}
/*
* notification of userspace execution resumption
* - triggered by the TIF_WORK_MASK flags
*/
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
local_irq_enable();
user_exit();
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
user_enter();
}
#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
static int smp_save_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
? save_hw_fp_context(sc)
: copy_fp_to_sigcontext(sc);
}
static int smp_restore_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
? restore_hw_fp_context(sc)
: copy_fp_from_sigcontext(sc);
}
#endif
static int signal_setup(void)
{
/*
* The offset from sigcontext to extended context should be the same
* regardless of the type of signal, such that userland can always know
* where to look if it wishes to find the extended context structures.
*/
BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
offsetof(struct sigframe, sf_sc)) !=
(offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
#else
if (cpu_has_fpu) {
save_fp_context = save_hw_fp_context;
restore_fp_context = restore_hw_fp_context;
} else {
save_fp_context = copy_fp_to_sigcontext;
restore_fp_context = copy_fp_from_sigcontext;
}
#endif /* CONFIG_SMP */
return 0;
}
arch_initcall(signal_setup);
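/*
 * Illustrative standalone sketch (not part of this file): BUILD_BUG_ON()
 * in signal_setup() turns the sigframe layout assumption into a
 * compile-time error; a portable C11 equivalent is static_assert over a
 * constant expression.
 */
#include <assert.h>	/* static_assert */

struct two_words { unsigned int a, b; };
static_assert(sizeof(struct two_words) == 2 * sizeof(unsigned int),
	      "example layout assumption");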
/* end of linux-master: arch/mips/kernel/signal.c */
/*
* Originally written by Glenn Engel, Lake Stevens Instrument Division
*
* Contributed by HP Systems
*
* Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
* Send complaints, suggestions etc. to <[email protected]>
*
* Copyright (C) 1995 Andreas Busse
*
* Copyright (C) 2003 MontaVista Software Inc.
* Author: Jun Sun, [email protected] or [email protected]
*
* Copyright (C) 2004-2005 MontaVista Software Inc.
* Author: Manish Lachwani, [email protected] or [email protected]
*
* Copyright (C) 2007-2008 Wind River Systems, Inc.
* Author/Maintainer: Jason Wessel, [email protected]
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/ptrace.h> /* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/irq_regs.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
unsigned char signo; /* Signal that we map this trap into */
} hard_trap_info[] = {
{ 6, SIGBUS }, /* instruction bus error */
{ 7, SIGBUS }, /* data bus error */
{ 9, SIGTRAP }, /* break */
/* { 11, SIGILL }, */ /* CPU unusable */
{ 12, SIGFPE }, /* overflow */
{ 13, SIGTRAP }, /* trap */
{ 14, SIGSEGV }, /* virtual instruction cache coherency */
{ 15, SIGFPE }, /* floating point exception */
{ 23, SIGSEGV }, /* watch */
{ 31, SIGSEGV }, /* virtual data cache coherency */
{ 0, 0} /* Must be last */
};
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
{ "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
{ "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
{ "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
{ "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
{ "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
{ "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
{ "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
{ "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
{ "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
{ "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
{ "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
{ "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
{ "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
{ "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
{ "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
{ "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
{ "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
{ "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
{ "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
{ "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
{ "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
{ "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
{ "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
{ "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
{ "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
{ "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
{ "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
{ "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
{ "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
{ "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
{ "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
{ "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
{ "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
{ "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
{ "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
{ "f0", GDB_SIZEOF_REG, 0 },
{ "f1", GDB_SIZEOF_REG, 1 },
{ "f2", GDB_SIZEOF_REG, 2 },
{ "f3", GDB_SIZEOF_REG, 3 },
{ "f4", GDB_SIZEOF_REG, 4 },
{ "f5", GDB_SIZEOF_REG, 5 },
{ "f6", GDB_SIZEOF_REG, 6 },
{ "f7", GDB_SIZEOF_REG, 7 },
{ "f8", GDB_SIZEOF_REG, 8 },
{ "f9", GDB_SIZEOF_REG, 9 },
{ "f10", GDB_SIZEOF_REG, 10 },
{ "f11", GDB_SIZEOF_REG, 11 },
{ "f12", GDB_SIZEOF_REG, 12 },
{ "f13", GDB_SIZEOF_REG, 13 },
{ "f14", GDB_SIZEOF_REG, 14 },
{ "f15", GDB_SIZEOF_REG, 15 },
{ "f16", GDB_SIZEOF_REG, 16 },
{ "f17", GDB_SIZEOF_REG, 17 },
{ "f18", GDB_SIZEOF_REG, 18 },
{ "f19", GDB_SIZEOF_REG, 19 },
{ "f20", GDB_SIZEOF_REG, 20 },
{ "f21", GDB_SIZEOF_REG, 21 },
{ "f22", GDB_SIZEOF_REG, 22 },
{ "f23", GDB_SIZEOF_REG, 23 },
{ "f24", GDB_SIZEOF_REG, 24 },
{ "f25", GDB_SIZEOF_REG, 25 },
{ "f26", GDB_SIZEOF_REG, 26 },
{ "f27", GDB_SIZEOF_REG, 27 },
{ "f28", GDB_SIZEOF_REG, 28 },
{ "f29", GDB_SIZEOF_REG, 29 },
{ "f30", GDB_SIZEOF_REG, 30 },
{ "f31", GDB_SIZEOF_REG, 31 },
{ "fsr", GDB_SIZEOF_REG, 0 },
{ "fir", GDB_SIZEOF_REG, 0 },
};
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno < 0 || regno >= DBG_MAX_REG_NUM)
return -EINVAL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
return 0;
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy((void *)&current->thread.fpu.fcr31, mem,
dbg_reg_def[regno].size);
goto out_save;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
goto out_save;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
dbg_reg_def[regno].size);
out_save:
restore_fp(current);
}
return 0;
}
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
int fp_reg;
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (dbg_reg_def[regno].offset != -1 && regno < 38) {
/* First 38 registers */
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
} else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
/* FP registers 38 -> 69 */
if (!(regs->cp0_status & ST0_CU1))
goto out;
save_fp(current);
if (regno == 70) {
/* Process the fcr31/fsr (register 70) */
memcpy(mem, (void *)&current->thread.fpu.fcr31,
dbg_reg_def[regno].size);
goto out;
} else if (regno == 71) {
/* Ignore the fir (register 71) */
memset(mem, 0, dbg_reg_def[regno].size);
goto out;
}
fp_reg = dbg_reg_def[regno].offset;
memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
dbg_reg_def[regno].size);
}
out:
return dbg_reg_def[regno].name;
}
void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__(
".globl breakinst\n\t"
".set\tnoreorder\n\t"
"nop\n"
"breakinst:\tbreak\n\t"
"nop\n\t"
".set\treorder");
}
static int compute_signal(int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
/*
* Similar to regs_to_gdb_regs() except that process is sleeping and so
* we may not be able to get all the info.
*/
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
u64 *ptr = (u64 *)gdb_regs;
#endif
for (reg = 0; reg < 16; reg++)
*(ptr++) = 0;
/* S0 - S7 */
*(ptr++) = p->thread.reg16;
*(ptr++) = p->thread.reg17;
*(ptr++) = p->thread.reg18;
*(ptr++) = p->thread.reg19;
*(ptr++) = p->thread.reg20;
*(ptr++) = p->thread.reg21;
*(ptr++) = p->thread.reg22;
*(ptr++) = p->thread.reg23;
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
*(ptr++) = (long)p;
*(ptr++) = p->thread.reg29;
*(ptr++) = p->thread.reg30;
*(ptr++) = p->thread.reg31;
*(ptr++) = p->thread.cp0_status;
/* lo, hi */
*(ptr++) = 0;
*(ptr++) = 0;
/*
* BadVAddr, Cause
* Ideally these would come from the last exception frame up the stack
* but that requires unwinding, otherwise we can't know much for sure.
*/
*(ptr++) = 0;
*(ptr++) = 0;
/*
* PC
* use return address (RA), i.e. the moment after return from resume()
*/
*(ptr++) = p->thread.reg31;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs->cp0_epc = pc;
}
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
*/
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
void *ptr)
{
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
#ifdef CONFIG_KPROBES
/*
* Return immediately if the kprobes fault notifier has set
* DIE_PAGE_FAULT.
*/
if (cmd == DIE_PAGE_FAULT)
return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */
/* Userspace events, ignore. */
if (user_mode(regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
regs->cp0_epc += 4;
/* In SMP mode, __flush_cache_all does IPI */
local_irq_enable();
__flush_cache_all();
return NOTIFY_STOP;
}
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
struct die_args args = {
.regs = regs,
.str = str,
.err = err,
.trapnr = trap,
.signr = sig,
};
if (!kgdb_io_module_registered)
return NOTIFY_DONE;
return kgdb_mips_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
/*
* Handle the 'c' command
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *regs)
{
char *ptr;
unsigned long address;
switch (remcom_in_buffer[0]) {
case 'c':
/* handle the optional parameter */
ptr = &remcom_in_buffer[1];
if (kgdb_hex2long(&ptr, &address))
regs->cp0_epc = address;
return 0;
}
return -1;
}
const struct kgdb_arch arch_kgdb_ops = {
#ifdef CONFIG_CPU_BIG_ENDIAN
.gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op },
#else
.gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 },
#endif
};
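/*
 * Illustrative standalone sketch (not part of this file): gdb_bpt_instr
 * above spells out the same 32-bit BREAK instruction byte-by-byte for each
 * endianness; a generic encoder makes the relationship explicit.
 */
#include <stdint.h>

static void encode_insn(uint32_t insn, unsigned char out[4], int big_endian)
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = big_endian ? (insn >> (24 - 8 * i)) & 0xff	/* MSB first */
				    : (insn >> (8 * i)) & 0xff;		/* LSB first */
}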
int kgdb_arch_init(void)
{
register_die_notifier(&kgdb_notifier);
return 0;
}
/*
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
/* end of linux-master: arch/mips/kernel/kgdb.c */
// SPDX-License-Identifier: GPL-2.0
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Some parts derived from x86 version of this file.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_32BIT
u64 perf_reg_abi(struct task_struct *tsk)
{
return PERF_SAMPLE_REGS_ABI_32;
}
#else /* Must be CONFIG_64BIT */
u64 perf_reg_abi(struct task_struct *tsk)
{
if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
#endif /* CONFIG_32BIT */
int perf_reg_validate(u64 mask)
{
if (!mask)
return -EINVAL;
if (mask & ~((1ull << PERF_REG_MIPS_MAX) - 1))
return -EINVAL;
return 0;
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
long v;
switch (idx) {
case PERF_REG_MIPS_PC:
v = regs->cp0_epc;
break;
case PERF_REG_MIPS_R1 ... PERF_REG_MIPS_R25:
v = regs->regs[idx - PERF_REG_MIPS_R1 + 1];
break;
case PERF_REG_MIPS_R28 ... PERF_REG_MIPS_R31:
v = regs->regs[idx - PERF_REG_MIPS_R28 + 28];
break;
default:
WARN_ON_ONCE(1);
return 0;
}
return (s64)v; /* Sign extend if 32-bit. */
}
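/*
 * Illustrative standalone sketch (not part of this file): the (s64) cast
 * above sign-extends when `long` is 32 bits wide, so kernel addresses stay
 * canonical when a 32-bit kernel fills 64-bit sample fields.
 */
#include <stdint.h>

static int64_t widen(int32_t v)
{
	return (int64_t)v;	/* widen(-1) == -1; 0x80000000 -> 0xffffffff80000000 */
}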
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
/* end of linux-master: arch/mips/kernel/perf_regs.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GT641xx IRQ routines.
*
* Copyright (C) 2007 Yoichi Yuasa <[email protected]>
*/
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/gt64120.h>
#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
static void ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
cause = GT_READ(GT_INTRCAUSE_OFS);
cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void mask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void mask_ack_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 cause, mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
cause = GT_READ(GT_INTRCAUSE_OFS);
cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRCAUSE_OFS, cause);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static void unmask_gt641xx_irq(struct irq_data *d)
{
unsigned long flags;
u32 mask;
raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
mask = GT_READ(GT_INTRMASK_OFS);
mask |= GT641XX_IRQ_TO_BIT(d->irq);
GT_WRITE(GT_INTRMASK_OFS, mask);
raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
}
static struct irq_chip gt641xx_irq_chip = {
.name = "GT641xx",
.irq_ack = ack_gt641xx_irq,
.irq_mask = mask_gt641xx_irq,
.irq_mask_ack = mask_ack_gt641xx_irq,
.irq_unmask = unmask_gt641xx_irq,
};
void gt641xx_irq_dispatch(void)
{
u32 cause, mask;
int i;
cause = GT_READ(GT_INTRCAUSE_OFS);
mask = GT_READ(GT_INTRMASK_OFS);
cause &= mask;
/*
* bit0 : logical or of all the interrupt bits.
* bit30: logical or of bits[29:26,20:1].
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++) {
if (cause & (1U << i)) {
do_IRQ(GT641XX_IRQ_BASE + i);
return;
}
}
atomic_inc(&irq_err_count);
}
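/*
 * Illustrative standalone sketch (not part of this file): the dispatch loop
 * above services the lowest pending-and-enabled source; ffs()-style bit
 * scanning is the generic form. Bits 0, 30 and 31 are summary bits, so the
 * scan masks them out just as the loop bounds do above.
 */
#include <strings.h>	/* ffs() */

static int lowest_pending(unsigned int cause, unsigned int mask)
{
	unsigned int pending = cause & mask & 0x3ffffffeu; /* drop bits 0,30,31 */

	return pending ? ffs((int)pending) - 1 : -1;	/* bit index, or -1 */
}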
void __init gt641xx_irq_init(void)
{
int i;
GT_WRITE(GT_INTRMASK_OFS, 0);
GT_WRITE(GT_INTRCAUSE_OFS, 0);
/*
* bit0 : logical or of all the interrupt bits.
* bit30: logical or of bits[29:26,20:1].
* bit31: logical or of bits[25:1].
*/
for (i = 1; i < 30; i++)
irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
&gt641xx_irq_chip, handle_level_irq);
}
/* end of linux-master: arch/mips/kernel/irq-gt641xx.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Conversion between 32-bit and 64-bit native system calls.
*
* Copyright (C) 2000 Silicon Graphics, Inc.
* Written by Ulf Carlsson ([email protected])
*/
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/times.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/shm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/utime.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/dnotify.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ipc.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/scm.h>
#include <asm/compat-signal.h>
#include <asm/sim.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/mman.h>
#ifdef __MIPSEB__
#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
#endif
#ifdef __MIPSEL__
#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
#endif
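/*
 * Illustrative standalone sketch (not part of this file): a 64-bit syscall
 * argument arrives from 32-bit userland in two 32-bit registers, and which
 * register carries the high half depends on endianness; merge_le() below
 * is the little-endian (__MIPSEL__) case of the macro above.
 */
#include <stdint.h>

static uint64_t merge_le(uint32_t lo_reg, uint32_t hi_reg)
{
	return ((uint64_t)hi_reg << 32) | lo_reg;
}
/* e.g. merge_le(0x89abcdef, 0x01234567) == 0x0123456789abcdefULL */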
SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
{
return ksys_truncate(path, merge_64(a2, a3));
}
SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
unsigned long, a2, unsigned long, a3)
{
return ksys_ftruncate(fd, merge_64(a2, a3));
}
SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
unsigned int, offset_low, loff_t __user *, result,
unsigned int, origin)
{
return sys_llseek(fd, offset_high, offset_low, result, origin);
}
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
non-seekable files. */
SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
unsigned long, unused, unsigned long, a4, unsigned long, a5)
{
return ksys_pread64(fd, buf, count, merge_64(a4, a5));
}
SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
size_t, count, u32, unused, u64, a4, u64, a5)
{
return ksys_pwrite64(fd, buf, count, merge_64(a4, a5));
}
SYSCALL_DEFINE1(32_personality, unsigned long, personality)
{
unsigned int p = personality & 0xffffffff;
int ret;
if (personality(current->personality) == PER_LINUX32 &&
personality(p) == PER_LINUX)
p = (p & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(p);
if (ret != -1 && personality(ret) == PER_LINUX32)
ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
size_t count)
{
return ksys_readahead(fd, merge_64(a2, a3), count);
}
asmlinkage long sys32_sync_file_range(int fd, int __pad,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
int flags)
{
return ksys_sync_file_range(fd,
merge_64(a2, a3), merge_64(a4, a5),
flags);
}
asmlinkage long sys32_fadvise64_64(int fd, int __pad,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5,
int flags)
{
return ksys_fadvise64_64(fd,
merge_64(a2, a3), merge_64(a4, a5),
flags);
}
asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return ksys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
merge_64(len_a4, len_a5));
}
/* end of linux-master: arch/mips/kernel/linux32.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2013 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
void __iomem *mips_gcr_base;
void __iomem *mips_cm_l2sync_base;
int mips_cm_is64;
static char *cm2_tr[8] = {
"mem", "gcr", "gic", "mmio",
"0x04", "cpc", "0x06", "0x07"
};
/* CM3 Tag ECC transaction type */
static char *cm3_tr[16] = {
[0x0] = "ReqNoData",
[0x1] = "0x1",
[0x2] = "ReqWData",
[0x3] = "0x3",
[0x4] = "IReqNoResp",
[0x5] = "IReqWResp",
[0x6] = "IReqNoRespDat",
[0x7] = "IReqWRespDat",
[0x8] = "RespNoData",
[0x9] = "RespDataFol",
[0xa] = "RespWData",
[0xb] = "RespDataOnly",
[0xc] = "IRespNoData",
[0xd] = "IRespDataFol",
[0xe] = "IRespWData",
[0xf] = "IRespDataOnly"
};
static char *cm2_cmd[32] = {
[0x00] = "0x00",
[0x01] = "Legacy Write",
[0x02] = "Legacy Read",
[0x03] = "0x03",
[0x04] = "0x04",
[0x05] = "0x05",
[0x06] = "0x06",
[0x07] = "0x07",
[0x08] = "Coherent Read Own",
[0x09] = "Coherent Read Share",
[0x0a] = "Coherent Read Discard",
[0x0b] = "Coherent Ready Share Always",
[0x0c] = "Coherent Upgrade",
[0x0d] = "Coherent Writeback",
[0x0e] = "0x0e",
[0x0f] = "0x0f",
[0x10] = "Coherent Copyback",
[0x11] = "Coherent Copyback Invalidate",
[0x12] = "Coherent Invalidate",
[0x13] = "Coherent Write Invalidate",
[0x14] = "Coherent Completion Sync",
[0x15] = "0x15",
[0x16] = "0x16",
[0x17] = "0x17",
[0x18] = "0x18",
[0x19] = "0x19",
[0x1a] = "0x1a",
[0x1b] = "0x1b",
[0x1c] = "0x1c",
[0x1d] = "0x1d",
[0x1e] = "0x1e",
[0x1f] = "0x1f"
};
/* CM3 Tag ECC command type */
static char *cm3_cmd[16] = {
[0x0] = "Legacy Read",
[0x1] = "Legacy Write",
[0x2] = "Coherent Read Own",
[0x3] = "Coherent Read Share",
[0x4] = "Coherent Read Discard",
[0x5] = "Coherent Evicted",
[0x6] = "Coherent Upgrade",
[0x7] = "Coherent Upgrade for Store Conditional",
[0x8] = "Coherent Writeback",
[0x9] = "Coherent Write Invalidate",
[0xa] = "0xa",
[0xb] = "0xb",
[0xc] = "0xc",
[0xd] = "0xd",
[0xe] = "0xe",
[0xf] = "0xf"
};
/* CM3 Tag ECC command group */
static char *cm3_cmd_group[8] = {
[0x0] = "Normal",
[0x1] = "Registers",
[0x2] = "TLB",
[0x3] = "0x3",
[0x4] = "L1I",
[0x5] = "L1D",
[0x6] = "L3",
[0x7] = "L2"
};
static char *cm2_core[8] = {
"Invalid/OK", "Invalid/Data",
"Shared/OK", "Shared/Data",
"Modified/OK", "Modified/Data",
"Exclusive/OK", "Exclusive/Data"
};
static char *cm2_l2_type[4] = {
[0x0] = "None",
[0x1] = "Tag RAM single/double ECC error",
[0x2] = "Data RAM single/double ECC error",
[0x3] = "WS RAM uncorrectable dirty parity"
};
static char *cm2_l2_instr[32] = {
[0x00] = "L2_NOP",
[0x01] = "L2_ERR_CORR",
[0x02] = "L2_TAG_INV",
[0x03] = "L2_WS_CLEAN",
[0x04] = "L2_RD_MDYFY_WR",
[0x05] = "L2_WS_MRU",
[0x06] = "L2_EVICT_LN2",
[0x07] = "0x07",
[0x08] = "L2_EVICT",
[0x09] = "L2_REFL",
[0x0a] = "L2_RD",
[0x0b] = "L2_WR",
[0x0c] = "L2_EVICT_MRU",
[0x0d] = "L2_SYNC",
[0x0e] = "L2_REFL_ERR",
[0x0f] = "0x0f",
[0x10] = "L2_INDX_WB_INV",
[0x11] = "L2_INDX_LD_TAG",
[0x12] = "L2_INDX_ST_TAG",
[0x13] = "L2_INDX_ST_DATA",
[0x14] = "L2_INDX_ST_ECC",
[0x15] = "0x15",
[0x16] = "0x16",
[0x17] = "0x17",
[0x18] = "L2_FTCH_AND_LCK",
[0x19] = "L2_HIT_INV",
[0x1a] = "L2_HIT_WB_INV",
[0x1b] = "L2_HIT_WB",
[0x1c] = "0x1c",
[0x1d] = "0x1d",
[0x1e] = "0x1e",
[0x1f] = "0x1f"
};
static char *cm2_causes[32] = {
"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
"0x08", "0x09", "0x0a", "0x0b",
"0x0c", "0x0d", "0x0e", "0x0f",
"0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
"0x14", "0x15", "0x16", "0x17",
"L2_RD_UNCORR", "L2_WR_UNCORR", "L2_CORR", "0x1b",
"0x1c", "0x1d", "0x1e", "0x1f"
};
static char *cm3_causes[32] = {
"0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
"MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
"CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
"0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
"0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
};
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
phys_addr_t __mips_cm_phys_base(void)
{
unsigned long cmgcr;
/* Check the CMGCRBase register is implemented */
if (!(read_c0_config() & MIPS_CONF_M))
return 0;
if (!(read_c0_config2() & MIPS_CONF_M))
return 0;
if (!(read_c0_config3() & MIPS_CONF3_CMGCR))
return 0;
/* Read the address from CMGCRBase */
cmgcr = read_c0_cmgcrbase();
return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
}
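/*
 * Illustrative standalone sketch (not part of this file): CMGCRBase holds
 * physical address bits [35:4] of the GCR block, so the masked register
 * value is shifted left by 4 (the "36 - 32" above) to recover a byte
 * address. base_mask stands in for MIPS_CMGCRF_BASE.
 */
#include <stdint.h>

static uint64_t gcr_phys_base(uint64_t cmgcr, uint64_t base_mask)
{
	return (cmgcr & base_mask) << (36 - 32);
}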
phys_addr_t mips_cm_phys_base(void)
__attribute__((weak, alias("__mips_cm_phys_base")));
phys_addr_t __mips_cm_l2sync_phys_base(void)
{
u32 base_reg;
/*
* If the L2-only sync region is already enabled then leave it at its
* current location.
*/
base_reg = read_gcr_l2_only_sync_base();
if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN)
return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE;
/* Default to following the CM */
return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
}
phys_addr_t mips_cm_l2sync_phys_base(void)
__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
static void mips_cm_probe_l2sync(void)
{
unsigned major_rev;
phys_addr_t addr;
/* L2-only sync was introduced with CM major revision 6 */
major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
if (major_rev < 6)
return;
/* Find a location for the L2 sync region */
addr = mips_cm_l2sync_phys_base();
BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr);
if (!addr)
return;
/* Set the region base address & enable it */
write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
/* Map the region */
mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
}
int mips_cm_probe(void)
{
phys_addr_t addr;
u32 base_reg;
unsigned cpu;
/*
* No need to probe again if we have already been
* here before.
*/
if (mips_gcr_base)
return 0;
addr = mips_cm_phys_base();
BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr);
if (!addr)
return -ENODEV;
mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE);
if (!mips_gcr_base)
return -ENXIO;
/* sanity check that we're looking at a CM */
base_reg = read_gcr_base();
if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) {
pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
(unsigned long)addr);
iounmap(mips_gcr_base);
mips_gcr_base = NULL;
return -ENODEV;
}
/* set default target to memory */
change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM);
/* disable CM regions */
write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR);
write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK);
write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR);
write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK);
write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR);
write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK);
write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR);
write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK);
/* probe for an L2-only sync region */
mips_cm_probe_l2sync();
/* determine register width for this CM */
mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
return 0;
}
void mips_cm_lock_other(unsigned int cluster, unsigned int core,
unsigned int vp, unsigned int block)
{
unsigned int curr_core, cm_rev;
u32 val;
cm_rev = mips_cm_revision();
preempt_disable();
if (cm_rev >= CM_REV_CM3) {
val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
if (cm_rev >= CM_REV_CM3_5) {
val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
} else {
WARN_ON(cluster != 0);
WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
}
/*
* We need to disable interrupts in SMP systems in order to
* ensure that the caller isn't interrupted by code which
* may modify the redirect register. We do so here in a
* slightly obscure way by using a spin lock, since this has
* the neat property of also catching any nested uses of
* mips_cm_lock_other() leading to a deadlock or a nice warning
* with lockdep enabled.
*/
spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
*this_cpu_ptr(&cm_core_lock_flags));
} else {
WARN_ON(cluster != 0);
WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/*
* We only have a GCR_CL_OTHER per core in systems with
* CM 2.5 & older, so have to ensure other VP(E)s don't
* race with us.
*/
curr_core = cpu_core(&current_cpu_data);
spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
per_cpu(cm_core_lock_flags, curr_core));
val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
}
write_gcr_cl_other(val);
/*
* Ensure the core-other region reflects the appropriate core &
* VP before any accesses to it occur.
*/
mb();
}
void mips_cm_unlock_other(void)
{
unsigned int curr_core;
if (mips_cm_revision() < CM_REV_CM3) {
curr_core = cpu_core(&current_cpu_data);
spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
per_cpu(cm_core_lock_flags, curr_core));
} else {
spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
*this_cpu_ptr(&cm_core_lock_flags));
}
preempt_enable();
}
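/*
 * Illustrative sketch, not part of the original file: a typical caller
 * brackets its redirected GCR accesses with the lock/unlock pair so the
 * core-other region cannot be retargeted underneath it. The redirected
 * config read below is an assumed accessor, shown for illustration only.
 */
static void __maybe_unused cm_other_read_sketch(unsigned int core)
{
	u32 cfg;

	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	cfg = read_gcr_co_config();	/* assumed core-other accessor */
	mips_cm_unlock_other();
	pr_debug("core %u GCR config: %08x\n", core, cfg);
}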
void mips_cm_error_report(void)
{
u64 cm_error, cm_addr, cm_other;
unsigned long revision;
int ocause, cause;
char buf[256];
if (!mips_cm_present())
return;
revision = mips_cm_revision();
cm_error = read_gcr_error_cause();
cm_addr = read_gcr_error_addr();
cm_other = read_gcr_error_mult();
if (revision < CM_REV_CM3) { /* CM2 */
cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
if (!cause)
return;
if (cause < 16) {
unsigned long cca_bits = (cm_error >> 15) & 7;
unsigned long tr_bits = (cm_error >> 12) & 7;
unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
unsigned long stag_bits = (cm_error >> 3) & 15;
unsigned long sport_bits = (cm_error >> 0) & 7;
snprintf(buf, sizeof(buf),
"CCA=%lu TR=%s MCmd=%s STag=%lu "
"SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
cm2_cmd[cmd_bits], stag_bits, sport_bits);
} else if (cause < 24) {
/* glob state & sresp together */
unsigned long c3_bits = (cm_error >> 18) & 7;
unsigned long c2_bits = (cm_error >> 15) & 7;
unsigned long c1_bits = (cm_error >> 12) & 7;
unsigned long c0_bits = (cm_error >> 9) & 7;
unsigned long sc_bit = (cm_error >> 8) & 1;
unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
unsigned long sport_bits = (cm_error >> 0) & 7;
snprintf(buf, sizeof(buf),
"C3=%s C2=%s C1=%s C0=%s SC=%s "
"MCmd=%s SPort=%lu\n",
cm2_core[c3_bits], cm2_core[c2_bits],
cm2_core[c1_bits], cm2_core[c0_bits],
sc_bit ? "True" : "False",
cm2_cmd[cmd_bits], sport_bits);
} else {
unsigned long muc_bit = (cm_error >> 23) & 1;
unsigned long ins_bits = (cm_error >> 18) & 0x1f;
unsigned long arr_bits = (cm_error >> 16) & 3;
unsigned long dw_bits = (cm_error >> 12) & 15;
unsigned long way_bits = (cm_error >> 9) & 7;
unsigned long mway_bit = (cm_error >> 8) & 1;
unsigned long syn_bits = (cm_error >> 0) & 0xFF;
snprintf(buf, sizeof(buf),
"Type=%s%s Instr=%s DW=%lu Way=%lu "
"MWay=%s Syndrome=0x%02lx",
muc_bit ? "Multi-UC " : "",
cm2_l2_type[arr_bits],
cm2_l2_instr[ins_bits], dw_bits, way_bits,
mway_bit ? "True" : "False", syn_bits);
}
pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
cm2_causes[cause], buf);
pr_err("CM_ADDR =%08llx\n", cm_addr);
pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
} else { /* CM3 */
ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
if (!cause)
return;
/* Used by cause == {1,2,3} */
core_id_bits = (cm_error >> 22) & 0xf;
vp_id_bits = (cm_error >> 18) & 0xf;
cmd_bits = (cm_error >> 14) & 0xf;
cmd_group_bits = (cm_error >> 11) & 0xf;
cm3_cca_bits = (cm_error >> 8) & 7;
mcp_bits = (cm_error >> 5) & 0xf;
cm3_tr_bits = (cm_error >> 1) & 0xf;
sched_bit = cm_error & 0x1;
if (cause == 1 || cause == 3) { /* Tag ECC */
unsigned long tag_ecc = (cm_error >> 57) & 0x1;
unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
unsigned long dword_bits = (cm_error >> 49) & 0xff;
unsigned long data_way_bits = (cm_error >> 45) & 0xf;
unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
unsigned long bank_bit = (cm_error >> 28) & 0x1;
snprintf(buf, sizeof(buf),
"%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
"Bank=%lu CoreID=%lu VPID=%lu Command=%s "
"Command Group=%s CCA=%lu MCP=%d "
"Transaction type=%s Scheduler=%lu\n",
tag_ecc ? "TAG" : "DATA",
tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
data_way_bits, dword_bits, data_sets_bits,
bank_bit,
core_id_bits, vp_id_bits,
cm3_cmd[cmd_bits],
cm3_cmd_group[cmd_group_bits],
cm3_cca_bits, 1 << mcp_bits,
cm3_tr[cm3_tr_bits], sched_bit);
} else if (cause == 2) {
unsigned long data_error_type = (cm_error >> 41) & 0xfff;
unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
unsigned long data_decode_group = (cm_error >> 34) & 0x7;
unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
snprintf(buf, sizeof(buf),
"Decode Request Error: Type=%lu, Command=%lu"
"Command Group=%lu Destination ID=%lu"
"CoreID=%lu VPID=%lu Command=%s"
"Command Group=%s CCA=%lu MCP=%d"
"Transaction type=%s Scheduler=%lu\n",
data_error_type, data_decode_cmd,
data_decode_group, data_decode_destination_id,
core_id_bits, vp_id_bits,
cm3_cmd[cmd_bits],
cm3_cmd_group[cmd_group_bits],
cm3_cca_bits, 1 << mcp_bits,
cm3_tr[cm3_tr_bits], sched_bit);
} else {
buf[0] = 0;
}
pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
cm3_causes[cause], buf);
pr_err("CM_ADDR =%llx\n", cm_addr);
pr_err("CM_OTHER=%llx %s\n", cm_other, cm3_causes[ocause]);
}
/* reprime cause register */
write_gcr_error_cause(cm_error);
}
| linux-master | arch/mips/kernel/mips-cm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MIPS SPRAM support
*
* Copyright (C) 2007, 2008 MIPS Technologies, Inc.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/r4kcache.h>
#include <asm/hazards.h>
/*
* These definitions are correct for the 24K/34K/74K SPRAM sample
* implementation. The 4KS interpreted the tags differently...
*/
#define SPRAM_TAG0_ENABLE 0x00000080
#define SPRAM_TAG0_PA_MASK 0xfffff000
#define SPRAM_TAG1_SIZE_MASK 0xfffff000
#define SPRAM_TAG_STRIDE 8
#define ERRCTL_SPRAM (1 << 28)
/* errctl access */
#define read_c0_errctl(x) read_c0_ecc(x)
#define write_c0_errctl(x) write_c0_ecc(x)
/*
* Different semantics to the set_c0_* function built by __BUILD_SET_C0
*/
static unsigned int bis_c0_errctl(unsigned int set)
{
unsigned int res;
res = read_c0_errctl();
write_c0_errctl(res | set);
return res;
}
static void ispram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
write_c0_taglo(data);
ehb();
cache_op(Index_Store_Tag_I, CKSEG0|offset);
ehb();
write_c0_errctl(errctl);
ehb();
}
static unsigned int ispram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
cache_op(Index_Load_Tag_I, CKSEG0 | offset);
ehb();
data = read_c0_taglo();
ehb();
write_c0_errctl(errctl);
ehb();
return data;
}
static void dspram_store_tag(unsigned int offset, unsigned int data)
{
unsigned int errctl;
/* enable SPRAM tag access */
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
write_c0_dtaglo(data);
ehb();
cache_op(Index_Store_Tag_D, CKSEG0 | offset);
ehb();
write_c0_errctl(errctl);
ehb();
}
static unsigned int dspram_load_tag(unsigned int offset)
{
unsigned int data;
unsigned int errctl;
errctl = bis_c0_errctl(ERRCTL_SPRAM);
ehb();
cache_op(Index_Load_Tag_D, CKSEG0 | offset);
ehb();
data = read_c0_dtaglo();
ehb();
write_c0_errctl(errctl);
ehb();
return data;
}
static void probe_spram(char *type,
unsigned int base,
unsigned int (*read)(unsigned int),
void (*write)(unsigned int, unsigned int))
{
unsigned int firstsize = 0, lastsize = 0;
unsigned int firstpa = 0, lastpa = 0, pa = 0;
unsigned int offset = 0;
unsigned int size, tag0, tag1;
unsigned int enabled;
int i;
/*
* The limit is arbitrary but avoids the loop running away if
* the SPRAM tags are implemented differently
*/
for (i = 0; i < 8; i++) {
tag0 = read(offset);
tag1 = read(offset+SPRAM_TAG_STRIDE);
pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
type, i, tag0, tag1);
size = tag1 & SPRAM_TAG1_SIZE_MASK;
if (size == 0)
break;
if (i != 0) {
/* tags may repeat... */
if ((pa == firstpa && size == firstsize) ||
(pa == lastpa && size == lastsize))
break;
}
/* Align base with size */
base = (base + size - 1) & ~(size-1);
/* reprogram the base address and enable */
tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
write(offset, tag0);
base += size;
/* reread the tag */
tag0 = read(offset);
pa = tag0 & SPRAM_TAG0_PA_MASK;
enabled = tag0 & SPRAM_TAG0_ENABLE;
if (i == 0) {
firstpa = pa;
firstsize = size;
}
lastpa = pa;
lastsize = size;
if (strcmp(type, "DSPRAM") == 0) {
unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
unsigned int v;
#define TDAT 0x5a5aa5a5
vp[0] = TDAT;
vp[1] = ~TDAT;
mb();
v = vp[0];
if (v != TDAT)
printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
vp, TDAT, v);
v = vp[1];
if (v != ~TDAT)
printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
vp+1, ~TDAT, v);
}
pr_info("%s%d: PA=%08x,Size=%08x%s\n",
type, i, pa, size, enabled ? ",enabled" : "");
offset += 2 * SPRAM_TAG_STRIDE;
}
}
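/*
 * Illustrative sketch, not part of the original file: the base alignment
 * in probe_spram() relies on SPRAM sizes being powers of two, so rounding
 * up to the next multiple of size is the usual mask trick.
 */
static inline unsigned int __maybe_unused spram_align_up(unsigned int base,
							 unsigned int size)
{
	/* e.g. base = 0x1c000800, size = 0x1000 yields 0x1c001000 */
	return (base + size - 1) & ~(size - 1);
}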
void spram_config(void)
{
unsigned int config0;
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_PROAPTIV:
case CPU_P5600:
case CPU_QEMU_GENERIC:
case CPU_I6400:
case CPU_P6600:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & MIPS_CONF_ISP) {
probe_spram("ISPRAM", 0x1c000000,
&ispram_load_tag, &ispram_store_tag);
}
if (config0 & MIPS_CONF_DSP)
probe_spram("DSPRAM", 0x1c100000,
&dspram_load_tag, &dspram_store_tag);
}
}
| linux-master | arch/mips/kernel/spram.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MIPS idle loop and WAIT instruction support.
*
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*/
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
/*
* Not all MIPS CPUs have the "wait" instruction available. Moreover,
* the implementation of the "wait" feature differs between CPU families.
* This points to the function that implements the CPU-specific wait.
* The wait instruction stalls the pipeline and significantly reduces
* the power consumption of the CPU.
*/
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
}
void __cpuidle r4k_wait(void)
{
raw_local_irq_enable();
__r4k_wait();
raw_local_irq_disable();
}
/*
* This variant is preferable as it allows testing need_resched and going to
* sleep depending on the outcome atomically. Unfortunately the "It is
* implementation-dependent whether the pipeline restarts when a non-enabled
* interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
* using this version a gamble.
*/
void __cpuidle r4k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
" .set push \n"
" .set arch=r4000 \n"
" wait \n"
" .set pop \n");
}
/*
* The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
static void __cpuidle rm7k_wait_irqoff(void)
{
if (!need_resched())
__asm__(
" .set push \n"
" .set arch=r4000 \n"
" .set noat \n"
" mfc0 $1, $12 \n"
" sync \n"
" mtc0 $1, $12 # stalls until W stage \n"
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
}
/*
* Au1 'wait' is only useful when the 32kHz counter is used as timer,
* since coreclock (and the cp0 counter) stops upon executing it. Only an
* interrupt can wake it, so they must be enabled before entering idle modes.
*/
static void __cpuidle au1k_wait(void)
{
unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__(
" .set push \n"
" .set arch=r4000 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
" mtc0 %1, $12 \n" /* wr c0status */
" wait \n"
" nop \n"
" nop \n"
" nop \n"
" nop \n"
" .set pop \n"
: : "r" (au1k_wait), "r" (c0status));
raw_local_irq_disable();
}
static int __initdata nowait;
static int __init wait_disable(char *s)
{
nowait = 1;
return 1;
}
__setup("nowait", wait_disable);
void __init check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
if (nowait) {
printk("Wait instruction disabled.\n");
return;
}
/*
* MIPSr6 specifies that masked interrupts should unblock an executing
* wait instruction, and thus that it is safe for us to use
* r4k_wait_irqoff. Yippee!
*/
if (cpu_has_mips_r6) {
cpu_wait = r4k_wait_irqoff;
return;
}
switch (current_cpu_type()) {
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
break;
case CPU_R4200:
/* case CPU_R4300: */
case CPU_R4600:
case CPU_R4640:
case CPU_R4650:
case CPU_R4700:
case CPU_R5000:
case CPU_R5500:
case CPU_NEVADA:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
case CPU_5KC:
case CPU_5KE:
case CPU_25KF:
case CPU_PR4450:
case CPU_BMIPS3300:
case CPU_BMIPS4350:
case CPU_BMIPS4380:
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
case CPU_XBURST:
case CPU_LOONGSON32:
cpu_wait = r4k_wait;
break;
case CPU_LOONGSON64:
if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
cpu_wait = r4k_wait;
break;
case CPU_BMIPS5000:
cpu_wait = r4k_wait_irqoff;
break;
case CPU_RM7000:
cpu_wait = rm7k_wait_irqoff;
break;
case CPU_PROAPTIV:
case CPU_P5600:
/*
* Incoming Fast Debug Channel (FDC) data during a wait
* instruction causes the wait never to resume, even if an
* interrupt is received. Avoid using wait at all if FDC data is
* likely to be received.
*/
if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
break;
fallthrough;
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_M5150:
case CPU_QEMU_GENERIC:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
break;
case CPU_74K:
cpu_wait = r4k_wait;
if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
cpu_wait = r4k_wait_irqoff;
break;
case CPU_TX49XX:
cpu_wait = r4k_wait_irqoff;
break;
case CPU_ALCHEMY:
cpu_wait = au1k_wait;
break;
case CPU_20KC:
/*
* WAIT on Rev1.0 has E1, E2, E3 and E16.
* WAIT on Rev2.0 and Rev3.0 has E16.
* Rev3.1 WAIT is nop, why bother
*/
if ((c->processor_id & 0xff) <= 0x64)
break;
/*
* Another rev is incrementing c0_count at a reduced clock
* rate while in WAIT mode. So we basically have the choice
* between using the cp0 timer as clocksource or avoiding
* the WAIT instruction. Until more details are known,
* disable the use of WAIT for 20Kc entirely.
cpu_wait = r4k_wait;
*/
break;
default:
break;
}
}
__cpuidle void arch_cpu_idle(void)
{
if (cpu_wait)
cpu_wait();
}
#ifdef CONFIG_CPU_IDLE
__cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
arch_cpu_idle();
return index;
}
#endif
| linux-master | arch/mips/kernel/idle.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel Probes (KProbes)
* arch/mips/kernel/kprobes.c
*
* Copyright 2006 Sony Corp.
* Copyright 2010 Cavium Networks
*
* Some portions copied from the powerpc version.
*
* Copyright (C) IBM Corporation, 2002, 2004
*/
#define pr_fmt(fmt) "kprobes: " fmt
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
#include "probes-common.h"
static const union mips_instruction breakpoint_insn = {
.b_format = {
.opcode = spec_op,
.code = BRK_KPROBE_BP,
.func = break_op
}
};
static const union mips_instruction breakpoint2_insn = {
.b_format = {
.opcode = spec_op,
.code = BRK_KPROBE_SSTEPBP,
.func = break_op
}
};
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static int insn_has_delayslot(union mips_instruction insn)
{
return __insn_has_delay_slot(insn);
}
NOKPROBE_SYMBOL(insn_has_delayslot);
/*
* insn_has_ll_or_sc() checks whether the instruction is an ll or sc
* one; putting a breakpoint on top of an atomic ll/sc pair is a bad
* idea, so we refuse kprobe insertion for such instructions. We cannot
* do much about a breakpoint in the middle of an ll/sc pair; it is up
* to the user to avoid those places.
*/
static int insn_has_ll_or_sc(union mips_instruction insn)
{
int ret = 0;
switch (insn.i_format.opcode) {
case ll_op:
case lld_op:
case sc_op:
case scd_op:
ret = 1;
break;
default:
break;
}
return ret;
}
NOKPROBE_SYMBOL(insn_has_ll_or_sc);
int arch_prepare_kprobe(struct kprobe *p)
{
union mips_instruction insn;
union mips_instruction prev_insn;
int ret = 0;
insn = p->addr[0];
if (insn_has_ll_or_sc(insn)) {
pr_notice("Kprobes for ll and sc instructions are not supported\n");
ret = -EINVAL;
goto out;
}
if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
sizeof(mips_instruction)) == 0 &&
insn_has_delayslot(prev_insn)) {
pr_notice("Kprobes for branch delayslot are not supported\n");
ret = -EINVAL;
goto out;
}
if (__insn_is_compact_branch(insn)) {
pr_notice("Kprobes for compact branches are not supported\n");
ret = -EINVAL;
goto out;
}
/* insn: must be on special executable page on mips. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn) {
ret = -ENOMEM;
goto out;
}
/*
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break trap instruction at
* index one.
*
* On MIPS arch if the instruction at probed address is a
* branch instruction, we need to execute the instruction at
* Branch Delayslot (BD) at the time of probe hit. As MIPS also
* doesn't have single stepping support, the BD instruction can
* not be executed in-line and it would be executed on SSOL slot
* using a normal breakpoint instruction in the next slot.
* So, read the instruction and save it for later execution.
*/
if (insn_has_delayslot(insn))
memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
else
memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
p->ainsn.insn[1] = breakpoint2_insn;
p->opcode = *p->addr;
out:
return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
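/*
 * Illustrative sketch, not part of the original file: after a successful
 * arch_prepare_kprobe() the out-of-line slot always holds two words -
 * insn[0] is the instruction to single step (the probed instruction, or
 * its delay-slot instruction for branches) and insn[1] is the
 * BRK_KPROBE_SSTEPBP breakpoint that traps back into the kernel.
 */
static void __maybe_unused dump_ssol_slot_sketch(struct kprobe *p)
{
	pr_debug("slot: step=%08x trap=%08x\n",
		 p->ainsn.insn[0].word, p->ainsn.insn[1].word);
}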
void arch_arm_kprobe(struct kprobe *p)
{
*p->addr = breakpoint_insn;
flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
kcb->kprobe_saved_epc = regs->cp0_epc;
}
/**
* evaluate_branch_instruction -
*
* Evaluate the branch instruction at the probed address during probe hit.
* The result of the evaluation is the updated epc. The instruction in the
* delay slot is actually single stepped (using a normal breakpoint) on the
* SSOL slot.
*
* The result is also saved in the kprobe control block for later use, in
* case we need to execute the delay-slot instruction. That execution is
* not needed for a NOP instruction in the delay slot or for branch-likely
* instructions when the branch is taken, and for those cases we set the
* SKIP_DELAYSLOT flag in the kprobe control block.
*/
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
union mips_instruction insn = p->opcode;
long epc;
int ret = 0;
epc = regs->cp0_epc;
if (epc & 3)
goto unaligned;
if (p->ainsn.insn->word == 0)
kcb->flags |= SKIP_DELAYSLOT;
else
kcb->flags &= ~SKIP_DELAYSLOT;
ret = __compute_return_epc_for_insn(regs, insn);
if (ret < 0)
return ret;
if (ret == BRANCH_LIKELY_TAKEN)
kcb->flags |= SKIP_DELAYSLOT;
kcb->target_epc = regs->cp0_epc;
return 0;
unaligned:
pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm);
force_sig(SIGBUS);
return -EFAULT;
}
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
int ret = 0;
regs->cp0_status &= ~ST0_IE;
/* single step inline if the instruction is a break */
if (p->opcode.word == breakpoint_insn.word ||
p->opcode.word == breakpoint2_insn.word)
regs->cp0_epc = (unsigned long)p->addr;
else if (insn_has_delayslot(p->opcode)) {
ret = evaluate_branch_instruction(p, regs, kcb);
if (ret < 0)
return;
}
regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
/*
* Called after single-stepping. p->addr is the address of the
* instruction that has been replaced by the breakpoint
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*
* This function prepares to return from the post-single-step
* breakpoint trap. In the case of branch instructions, the target
* epc is restored.
*/
static void resume_execution(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
if (insn_has_delayslot(p->opcode))
regs->cp0_epc = kcb->target_epc;
else {
unsigned long orig_epc = kcb->kprobe_saved_epc;
regs->cp0_epc = orig_epc + 4;
}
}
NOKPROBE_SYMBOL(resume_execution);
static int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr;
struct kprobe_ctlblk *kcb;
addr = (kprobe_opcode_t *) regs->cp0_epc;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
/* Check we're not actually recursing */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS &&
p->ainsn.insn->word == breakpoint_insn.word) {
regs->cp0_status &= ~ST0_IE;
regs->cp0_status |= kcb->kprobe_saved_SR;
goto no_kprobe;
}
/*
* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* Here we save the original kprobes variables and
* just single step the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_REENTER;
if (kcb->flags & SKIP_DELAYSLOT) {
resume_execution(p, regs, kcb);
restore_previous_kprobe(kcb);
preempt_enable_no_resched();
}
return 1;
} else if (addr->word != breakpoint_insn.word) {
/*
* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
ret = 1;
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
if (addr->word != breakpoint_insn.word) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
reset_current_kprobe();
preempt_enable_no_resched();
return 1;
}
prepare_singlestep(p, regs, kcb);
if (kcb->flags & SKIP_DELAYSLOT) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (p->post_handler)
p->post_handler(p, regs, 0);
resume_execution(p, regs, kcb);
preempt_enable_no_resched();
} else
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
static inline int post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_saved_SR;
/* Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
switch (val) {
case DIE_BREAK:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_SSTEPBP:
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
preempt_disable();
if (kprobe_running()
&& kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
preempt_enable();
break;
default:
break;
}
return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
* - When the probed function returns, this probe causes the
* handlers to fire
*/
static void __used kretprobe_trampoline_holder(void)
{
asm volatile(
".set push\n\t"
/* Keep the assembler from reordering and placing JR here. */
".set noreorder\n\t"
"nop\n\t"
".global __kretprobe_trampoline\n"
"__kretprobe_trampoline:\n\t"
"nop\n\t"
".set pop"
: : : "memory");
}
void __kretprobe_trampoline(void);
void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->regs[31] = (unsigned long)__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
/*
* Called when the probe at kretprobe trampoline is hit
*/
static int trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
int arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
return 1;
return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *)__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
| linux-master | arch/mips/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2017 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/bitops.h>
#include <asm/cmpxchg.h>
unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
u32 old32, new32, load32, mask;
volatile u32 *ptr32;
unsigned int shift;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
/* Mask value to the correct size. */
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
val &= mask;
/*
* Calculate a shift & mask that correspond to the value we wish to
* exchange within the naturally aligned 4 byte integer that includes
* it.
*/
shift = (unsigned long)ptr & 0x3;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
shift ^= sizeof(u32) - size;
shift *= BITS_PER_BYTE;
mask <<= shift;
/*
* Calculate a pointer to the naturally aligned 4 byte integer that
* includes our byte of interest, and load its value.
*/
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
load32 = *ptr32;
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
}
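/*
 * Illustrative worked example, not part of the original file: exchanging
 * one byte at an address ending in ...2 gives shift = 2 within the
 * aligned word. On little-endian that byte occupies lane 2 (bits
 * [23:16]); on big-endian the "shift ^= sizeof(u32) - size" line flips
 * the lane to 1 (bits [15:8]), matching where the byte lives in memory.
 */
static inline u32 __maybe_unused subword_mask_sketch(unsigned long addr,
						     unsigned int size)
{
	unsigned int shift = addr & 0x3;
	u32 mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	/* e.g. 0x00ff0000 for size = 1 at offset 2 on little-endian */
	return mask << (shift * BITS_PER_BYTE);
}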
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
/* Mask inputs to the correct size. */
mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
old &= mask;
new &= mask;
/*
* Calculate a shift & mask that correspond to the value we wish to
* compare & exchange within the naturally aligned 4 byte integer
* that includes it.
*/
shift = (unsigned long)ptr & 0x3;
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
shift ^= sizeof(u32) - size;
shift *= BITS_PER_BYTE;
mask <<= shift;
/*
* Calculate a pointer to the naturally aligned 4 byte integer that
* includes our byte of interest, and load its value.
*/
ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
load32 = *ptr32;
while (true) {
/*
* Ensure the byte we want to exchange matches the expected
* old value, and if not then bail.
*/
load = (load32 & mask) >> shift;
if (load != old)
return load;
/*
* Calculate the old & new values of the naturally aligned
* 4 byte integer that include the byte we want to exchange.
* Attempt to exchange the old value for the new value, and
* return if we succeed.
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
}
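/*
 * Illustrative sketch, not part of the original file: callers do not use
 * __cmpxchg_small() directly - cmpxchg() on a u8 or u16 pointer expands
 * to it, performing the compare & exchange via a 32-bit ll/sc sequence
 * on the containing word. The state-machine transition below is a
 * hypothetical usage example.
 */
static bool __maybe_unused claim_transition_sketch(u8 *state, u8 old, u8 new)
{
	/* true if we owned the old -> new transition */
	return cmpxchg(state, old, new) == old;
}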
| linux-master | arch/mips/kernel/cmpxchg.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2005, 06 Ralf Baechle ([email protected])
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/atomic.h>
#include <linux/sched/signal.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/processor.h>
#include <asm/rtlx.h>
#include <asm/setup.h>
#include <asm/vpe.h>
static int sp_stopping;
struct rtlx_info *rtlx;
struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
struct vpe_notifications rtlx_notify;
void (*aprp_hook)(void) = NULL;
EXPORT_SYMBOL(aprp_hook);
static void __used dump_rtlx(void)
{
int i;
pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
for (i = 0; i < RTLX_CHANNELS; i++) {
struct rtlx_channel *chan = &rtlx->channel[i];
pr_info(" rt_state %d lx_state %d buffer_size %d\n",
chan->rt_state, chan->lx_state, chan->buffer_size);
pr_info(" rt_read %d rt_write %d\n",
chan->rt_read, chan->rt_write);
pr_info(" lx_read %d lx_write %d\n",
chan->lx_read, chan->lx_write);
pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
}
}
/* call when we have the address of the shared structure from the SP side. */
static int rtlx_init(struct rtlx_info *rtlxi)
{
if (rtlxi->id != RTLX_ID) {
pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
return -ENOEXEC;
}
rtlx = rtlxi;
return 0;
}
/* notifications */
void rtlx_starting(int vpe)
{
int i;
sp_stopping = 0;
/* force a reload of rtlx */
rtlx = NULL;
/* wake up any sleeping rtlx_open's */
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
void rtlx_stopping(int vpe)
{
int i;
sp_stopping = 1;
for (i = 0; i < RTLX_CHANNELS; i++)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
int rtlx_open(int index, int can_sleep)
{
struct rtlx_info **p;
struct rtlx_channel *chan;
enum rtlx_state state;
int ret = 0;
if (index >= RTLX_CHANNELS) {
pr_debug("rtlx_open index out of range\n");
return -ENOSYS;
}
if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
pr_debug("rtlx_open channel %d already opened\n", index);
ret = -EBUSY;
goto out_fail;
}
if (rtlx == NULL) {
p = vpe_get_shared(aprp_cpu_index());
if (p == NULL) {
if (can_sleep) {
ret = __wait_event_interruptible(
channel_wqs[index].lx_queue,
(p = vpe_get_shared(aprp_cpu_index())));
if (ret)
goto out_fail;
} else {
pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
ret = -ENOSYS;
goto out_fail;
}
}
smp_rmb();
if (*p == NULL) {
if (can_sleep) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(
&channel_wqs[index].lx_queue,
&wait, TASK_INTERRUPTIBLE);
smp_rmb();
if (*p != NULL)
break;
if (!signal_pending(current)) {
schedule();
continue;
}
ret = -ERESTARTSYS;
goto out_fail;
}
finish_wait(&channel_wqs[index].lx_queue,
&wait);
} else {
pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
ret = -ENOSYS;
goto out_fail;
}
}
if ((unsigned int)*p < KSEG0) {
pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
(int)*p);
ret = -ENOSYS;
goto out_fail;
}
ret = rtlx_init(*p);
if (ret < 0)
goto out_ret;
}
chan = &rtlx->channel[index];
state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
if (state == RTLX_STATE_OPENED) {
ret = -EBUSY;
goto out_fail;
}
out_fail:
smp_mb();
atomic_dec(&channel_wqs[index].in_open);
smp_mb();
out_ret:
return ret;
}
int rtlx_release(int index)
{
if (rtlx == NULL) {
pr_err("rtlx_release() with null rtlx\n");
return 0;
}
rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
return 0;
}
unsigned int rtlx_read_poll(int index, int can_sleep)
{
struct rtlx_channel *chan;
if (rtlx == NULL)
return 0;
chan = &rtlx->channel[index];
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
if (can_sleep) {
int ret = __wait_event_interruptible(
channel_wqs[index].lx_queue,
(chan->lx_read != chan->lx_write) ||
sp_stopping);
if (ret)
return ret;
if (sp_stopping)
return 0;
} else
return 0;
}
return (chan->lx_write + chan->buffer_size - chan->lx_read)
% chan->buffer_size;
}
static inline int write_spacefree(int read, int write, int size)
{
if (read == write) {
/*
* Never fill the buffer completely; that way the indexes are
* equal if and only if the buffer is empty, and unequal
* whenever data is available.
*/
return size - 1;
}
return ((read + size - write) % size) - 1;
}
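/*
 * Illustrative worked example, not part of the original file: with
 * size = 8, read = 2 and write = 6 the expression above yields
 * ((2 + 8 - 6) % 8) - 1 = 3 free slots. One slot ahead of the read
 * index is deliberately sacrificed so that read == write unambiguously
 * means "empty" rather than "full".
 */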
unsigned int rtlx_write_poll(int index)
{
struct rtlx_channel *chan = &rtlx->channel[index];
return write_spacefree(chan->rt_read, chan->rt_write,
chan->buffer_size);
}
ssize_t rtlx_read(int index, void __user *buff, size_t count)
{
size_t lx_write, fl = 0L;
struct rtlx_channel *lx;
unsigned long failed;
if (rtlx == NULL)
return -ENOSYS;
lx = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
lx_write = lx->lx_write;
/* find out how much in total */
count = min(count,
(size_t)(lx_write + lx->buffer_size - lx->lx_read)
% lx->buffer_size);
/* then how much from the read pointer onwards */
fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
if (failed)
goto out;
/* and if there is anything left at the beginning of the buffer */
if (count - fl)
failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
out:
count -= failed;
smp_wmb();
lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
return count;
}
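/*
 * Illustrative worked example, not part of the original file: reading
 * count = 5 bytes from a 16-byte ring with lx_read = 13 copies
 * fl = min(5, 16 - 13) = 3 bytes (indices 13..15) first, then the
 * remaining count - fl = 2 bytes from index 0 - the classic two-segment
 * copy for a wrapped circular buffer.
 */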
ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
{
struct rtlx_channel *rt;
unsigned long failed;
size_t rt_read;
size_t fl;
if (rtlx == NULL)
return -ENOSYS;
rt = &rtlx->channel[index];
mutex_lock(&channel_wqs[index].mutex);
smp_rmb();
rt_read = rt->rt_read;
/* total number of bytes to copy */
count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
rt->buffer_size));
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
if (failed)
goto out;
/* if there's any left copy to the beginning of the buffer */
if (count - fl)
failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
out:
count -= failed;
smp_wmb();
rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
smp_wmb();
mutex_unlock(&channel_wqs[index].mutex);
_interrupt_sp();
return count;
}
static int file_open(struct inode *inode, struct file *filp)
{
return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
return rtlx_release(iminor(inode));
}
static __poll_t file_poll(struct file *file, poll_table *wait)
{
int minor = iminor(file_inode(file));
__poll_t mask = 0;
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
if (rtlx == NULL)
return 0;
/* data available to read? */
if (rtlx_read_poll(minor, 0))
mask |= EPOLLIN | EPOLLRDNORM;
/* space to write */
if (rtlx_write_poll(minor))
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
int minor = iminor(file_inode(file));
/* data available? */
if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
return 0; /* -EAGAIN makes 'cat' whine */
return rtlx_read(minor, buffer, count);
}
static ssize_t file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
int minor = iminor(file_inode(file));
/* any space left... */
if (!rtlx_write_poll(minor)) {
int ret;
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
rtlx_write_poll(minor));
if (ret)
return ret;
}
return rtlx_write(minor, buffer, count);
}
const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
.open = file_open,
.release = file_release,
.write = file_write,
.read = file_read,
.poll = file_poll,
.llseek = noop_llseek,
};
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
MODULE_DESCRIPTION("MIPS RTLX");
MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
| linux-master | arch/mips/kernel/rtlx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Count register synchronisation.
*
* All CPUs will have their count registers synchronised to the CPU0 next time
* value. This can cause a small timewarp for CPU0. All other CPU's should
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
*/
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>
static unsigned int initcount = 0;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 3
void synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
pr_info("Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
/*
* We loop a few times to get a primed instruction cache,
* then the last pass is more or less synchronised and
* the master and slaves each set their cycle counters to a known
* value all at once. This reduces the chance of having random offsets
* between the processors, and guarantees that the maximum
* delay between the cycle counters is never bigger than
* the latency of information-passing (cachelines) between
* two CPUs.
*/
for (i = 0; i < NR_LOOPS; i++) {
/* slaves loop on '!= 2' */
while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
/* Let the slave write its count register */
atomic_inc(&count_count_start);
/* Count will be initialised to current timer */
if (i == 1)
initcount = read_c0_count();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
/*
* Wait for slave to leave the synchronization point:
*/
while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
atomic_inc(&count_count_stop);
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
local_irq_restore(flags);
/*
* i386 code reported the skew here, but the
* count registers were almost certainly out of sync
* so no point in alarming people
*/
pr_cont("done.\n");
}
void synchronise_count_slave(int cpu)
{
int i;
unsigned long flags;
local_irq_save(flags);
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
*/
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
while (atomic_read(&count_count_start) != 2)
mb();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
local_irq_restore(flags);
}
#undef NR_LOOPS
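/*
 * Illustrative sketch, not part of the original file: each loop
 * iteration above is a two-phase rendezvous on the shared counters,
 * roughly
 *
 *	slave:	inc(start); spin until start == 2;	[write count]
 *		inc(stop);  spin until stop  == 2;
 *	master:	spin until start == 1; reset stop; inc(start);
 *		[write count] spin until stop == 1; reset start; inc(stop);
 *
 * so both CPUs leave the "start" barrier back to back and write
 * CP0_Count in the same (final) iteration, bounding the skew by the
 * cache-to-cache latency of the counter variables.
 */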
| linux-master | arch/mips/kernel/sync-r4k.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/mips_mt.h>
#include <asm/vpe.h>
#include <asm/rtlx.h>
static int major;
static void rtlx_dispatch(void)
{
if (read_c0_cause() & read_c0_status() & C_SW0)
do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
}
/*
* Interrupt handler may be called before rtlx_init has otherwise had
* a chance to run.
*/
static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
{
unsigned int vpeflags;
unsigned long flags;
int i;
local_irq_save(flags);
vpeflags = dvpe();
set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
irq_enable_hazard();
evpe(vpeflags);
local_irq_restore(flags);
for (i = 0; i < RTLX_CHANNELS; i++) {
wake_up(&channel_wqs[i].lx_queue);
wake_up(&channel_wqs[i].rt_queue);
}
return IRQ_HANDLED;
}
static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
void _interrupt_sp(void)
{
unsigned long flags;
local_irq_save(flags);
dvpe();
settc(1);
write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
evpe(EVPE_ENABLE);
local_irq_restore(flags);
}
int __init rtlx_module_init(void)
{
struct device *dev;
int i, err;
if (!cpu_has_mipsmt) {
pr_warn("VPE loader: not a MIPS MT capable processor\n");
return -ENODEV;
}
if (aprp_cpu_index() == 0) {
pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
"Pass maxtcs=<n> argument as kernel argument\n");
return -ENODEV;
}
major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
if (major < 0) {
pr_err("rtlx_module_init: unable to register device\n");
return major;
}
/* initialise the wait queues */
for (i = 0; i < RTLX_CHANNELS; i++) {
init_waitqueue_head(&channel_wqs[i].rt_queue);
init_waitqueue_head(&channel_wqs[i].lx_queue);
atomic_set(&channel_wqs[i].in_open, 0);
mutex_init(&channel_wqs[i].mutex);
dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
"%s%d", RTLX_MODULE_NAME, i);
if (IS_ERR(dev)) {
while (i--)
device_destroy(mt_class, MKDEV(major, i));
err = PTR_ERR(dev);
goto out_chrdev;
}
}
/* set up notifiers */
rtlx_notify.start = rtlx_starting;
rtlx_notify.stop = rtlx_stopping;
vpe_notify(aprp_cpu_index(), &rtlx_notify);
if (cpu_has_vint) {
aprp_hook = rtlx_dispatch;
} else {
pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
err = -ENODEV;
goto out_class;
}
err = request_irq(rtlx_irq_num, rtlx_interrupt, 0, "RTLX", rtlx);
if (err)
goto out_class;
return 0;
out_class:
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
out_chrdev:
unregister_chrdev(major, RTLX_MODULE_NAME);
return err;
}
void __exit rtlx_module_exit(void)
{
int i;
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);
aprp_hook = NULL;
}
| linux-master | arch/mips/kernel/rtlx-mt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
#ifdef CONFIG_MIPS_FP_SUPPORT
/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
bool mips_use_nan_legacy;
bool mips_use_nan_2008;
/* FPU modes */
enum {
FP_FRE,
FP_FR0,
FP_FR1,
};
/**
* struct mode_req - ABI FPU mode requirements
* @single: The program being loaded needs an FPU but it will only issue
* single precision instructions meaning that it can execute in
* either FR0 or FR1.
* @soft: The soft(-float) requirement means that the program being
* loaded has no FPU dependency at all (i.e. it has no
* FPU instructions).
* @fr1: The program being loaded depends on FPU being in FR=1 mode.
* @frdefault: The program being loaded depends on the default FPU mode.
* That is FR0 for O32 and FR1 for N32/N64.
* @fre: The program being loaded depends on FPU with FRE=1. This mode is
* a bridge which uses FR=1 whilst still being able to maintain
* full compatibility with pre-existing code using the O32 FP32
* ABI.
*
* More information about the FP ABIs can be found here:
*
* https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
*
*/
struct mode_req {
bool single;
bool soft;
bool fr1;
bool frdefault;
bool fre;
};
static const struct mode_req fpu_reqs[] = {
[MIPS_ABI_FP_ANY] = { true, true, true, true, true },
[MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true },
[MIPS_ABI_FP_SINGLE] = { true, false, false, false, false },
[MIPS_ABI_FP_SOFT] = { false, true, false, false, false },
[MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
[MIPS_ABI_FP_XX] = { false, false, true, true, true },
[MIPS_ABI_FP_64] = { false, false, true, false, false },
[MIPS_ABI_FP_64A] = { false, false, true, false, true }
};
/*
* Mode requirements when .MIPS.abiflags is not present in the ELF.
* Not present means that everything is acceptable except FR1.
*/
static struct mode_req none_req = { true, true, false, true, true };
int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
bool is_interp, struct arch_elf_state *state)
{
union {
struct elf32_hdr e32;
struct elf64_hdr e64;
} *ehdr = _ehdr;
struct elf32_phdr *phdr32 = _phdr;
struct elf64_phdr *phdr64 = _phdr;
struct mips_elf_abiflags_v0 abiflags;
bool elf32;
u32 flags;
int ret;
loff_t pos;
elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
/* Let's see if this is an O32 ELF */
if (elf32) {
if (flags & EF_MIPS_FP64) {
/*
* Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
* later if needed
*/
if (is_interp)
state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
else
state->fp_abi = MIPS_ABI_FP_OLD_64;
}
if (phdr32->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr32->p_filesz < sizeof(abiflags))
return -EINVAL;
pos = phdr32->p_offset;
} else {
if (phdr64->p_type != PT_MIPS_ABIFLAGS)
return 0;
if (phdr64->p_filesz < sizeof(abiflags))
return -EINVAL;
pos = phdr64->p_offset;
}
ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos);
if (ret < 0)
return ret;
if (ret != sizeof(abiflags))
return -EIO;
/* Record the required FP ABIs for use by mips_check_elf */
if (is_interp)
state->interp_fp_abi = abiflags.fp_abi;
else
state->fp_abi = abiflags.fp_abi;
return 0;
}
int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
struct arch_elf_state *state)
{
union {
struct elf32_hdr e32;
struct elf64_hdr e64;
} *ehdr = _ehdr;
union {
struct elf32_hdr e32;
struct elf64_hdr e64;
} *iehdr = _interp_ehdr;
struct mode_req prog_req, interp_req;
int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
bool elf32;
u32 flags;
elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
/*
* Determine the NaN personality, reject the binary if not allowed.
* Also ensure that any interpreter matches the executable.
*/
if (flags & EF_MIPS_NAN2008) {
if (mips_use_nan_2008)
state->nan_2008 = 1;
else
return -ENOEXEC;
} else {
if (mips_use_nan_legacy)
state->nan_2008 = 0;
else
return -ENOEXEC;
}
if (has_interpreter) {
bool ielf32;
u32 iflags;
ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags;
if ((flags ^ iflags) & EF_MIPS_NAN2008)
return -ELIBBAD;
}
if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
if (has_interpreter) {
interp_fp_abi = state->interp_fp_abi;
abi0 = min(fp_abi, interp_fp_abi);
abi1 = max(fp_abi, interp_fp_abi);
} else {
abi0 = abi1 = fp_abi;
}
if (elf32 && !(flags & EF_MIPS_ABI2)) {
/* Default to a mode capable of running code expecting FR=0 */
state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
/* Allow all ABIs we know about */
max_abi = MIPS_ABI_FP_64A;
} else {
/* MIPS64 code always uses FR=1, thus the default is easy */
state->overall_fp_mode = FP_FR1;
/* Disallow access to the various FPXX & FP64 ABIs */
max_abi = MIPS_ABI_FP_SOFT;
}
if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
(abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
return -ELIBBAD;
/* It's time to determine the FPU mode requirements */
prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
/*
* Check whether the program's and interp's ABIs have a matching FPU
* mode requirement.
*/
prog_req.single = interp_req.single && prog_req.single;
prog_req.soft = interp_req.soft && prog_req.soft;
prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
prog_req.fre = interp_req.fre && prog_req.fre;
/*
* Determine the desired FPU mode
*
* Decision making:
*
* - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This
* means that we have a combination of program and interpreter
* that inherently requires the hybrid FP mode.
* - If FR1 and FRDEFAULT are both true, we hit the any-ABI or FPXX
* case. In any-ABI (or no-ABI) there are no FPU instructions, so we
* don't care about the mode and simply use the one preferred by the
* hardware. In the FPXX case the code can handle both FR=1 and FR=0,
* so, again, we simply choose the one preferred by the hardware.
* - If we know that the code will only use single-precision
* instructions, shown by single being true but frdefault being false,
* we also set the FPU mode to the one preferred by the hardware.
* - We want FP_FR1 if that's the only matching mode and the default one
* is not good.
* - Return -ELIBBAD if we can't find a matching FPU mode.
*/
if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
state->overall_fp_mode = FP_FRE;
else if ((prog_req.fr1 && prog_req.frdefault) ||
(prog_req.single && !prog_req.frdefault))
/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
cpu_has_mips_r2_r6) ?
FP_FR1 : FP_FR0;
else if (prog_req.fr1)
state->overall_fp_mode = FP_FR1;
else if (!prog_req.fre && !prog_req.frdefault &&
!prog_req.fr1 && !prog_req.single && !prog_req.soft)
return -ELIBBAD;
return 0;
}
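/*
 * Illustrative worked example, not part of the original file: an FPXX
 * program linked with an FPXX interpreter intersects to
 * { fr1, frdefault, fre }, so the "fr1 && frdefault" arm above lets the
 * hardware preference pick FR0 or FR1. Pair an FP_64 object (fr1 only)
 * with an FP_DOUBLE one (frdefault + fre) instead and the intersection
 * is empty, which is exactly the -ELIBBAD case.
 */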
static inline void set_thread_fp_mode(int hybrid, int regs32)
{
if (hybrid)
set_thread_flag(TIF_HYBRID_FPREGS);
else
clear_thread_flag(TIF_HYBRID_FPREGS);
if (regs32)
set_thread_flag(TIF_32BIT_FPREGS);
else
clear_thread_flag(TIF_32BIT_FPREGS);
}
void mips_set_personality_fp(struct arch_elf_state *state)
{
/*
* This function is only ever called for O32 ELFs so we should
* not be worried about N32/N64 binaries.
*/
if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
case FP_FRE:
set_thread_fp_mode(1, 0);
break;
case FP_FR0:
set_thread_fp_mode(0, 1);
break;
case FP_FR1:
set_thread_fp_mode(0, 0);
break;
default:
BUG();
}
}
/*
* Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
* in FCSR according to the ELF NaN personality.
*/
void mips_set_personality_nan(struct arch_elf_state *state)
{
struct cpuinfo_mips *c = &boot_cpu_data;
struct task_struct *t = current;
t->thread.fpu.fcr31 = c->fpu_csr31;
switch (state->nan_2008) {
case 0:
break;
case 1:
if (!(c->fpu_msk31 & FPU_CSR_NAN2008))
t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
if (!(c->fpu_msk31 & FPU_CSR_ABS2008))
t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
break;
default:
BUG();
}
}
#endif /* CONFIG_MIPS_FP_SUPPORT */
int mips_elf_read_implies_exec(void *elf_ex, int exstack)
{
/*
* Set READ_IMPLIES_EXEC only on non-NX systems that
* do not request a specific state via PT_GNU_STACK.
*/
return (!cpu_has_rixi && exstack == EXSTACK_DEFAULT);
}
EXPORT_SYMBOL(mips_elf_read_implies_exec);
| linux-master | arch/mips/kernel/elf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*
* Copyright (C) 2000, 2001 Kanoj Sarcar
* Copyright (C) 2000, 2001 Ralf Baechle
* Copyright (C) 2000, 2001 Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2003 Broadcom Corporation
*/
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>
int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
* A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;
cpumask_t cpu_coherent_mask;
unsigned int smp_max_threads __initdata = UINT_MAX;
static int __init early_nosmt(char *s)
{
smp_max_threads = 1;
return 0;
}
early_param("nosmt", early_nosmt);
static int __init early_smt(char *s)
{
get_option(&s, &smp_max_threads);
/* Ensure at least one thread is available */
smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
return 0;
}
early_param("smt", early_smt);
#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif
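/*
* Mark cpu as a sibling of every already set-up CPU in the same core,
* or only of itself if the CPU has no hardware threads.
*/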
static inline void set_cpu_sibling_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for_each_cpu(i, &cpu_sibling_setup_map) {
if (cpus_are_siblings(cpu, i)) {
cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
}
}
} else
cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}
static inline void set_cpu_core_map(int cpu)
{
int i;
cpumask_set_cpu(cpu, &cpu_core_setup_map);
for_each_cpu(i, &cpu_core_setup_map) {
if (cpu_data[cpu].package == cpu_data[i].package) {
cpumask_set_cpu(i, &cpu_core_map[cpu]);
cpumask_set_cpu(cpu, &cpu_core_map[i]);
}
}
}
/*
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
/* Re-calculate the mask */
cpumask_clear(&temp_foreign_map);
for_each_online_cpu(i) {
core_present = 0;
for_each_cpu(k, &temp_foreign_map)
if (cpus_are_siblings(i, k))
core_present = 1;
if (!core_present)
cpumask_set_cpu(i, &temp_foreign_map);
}
for_each_online_cpu(i)
cpumask_andnot(&cpu_foreign_map[i],
&temp_foreign_map, &cpu_sibling_map[i]);
}
const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
void register_smp_ops(const struct plat_smp_ops *ops)
{
if (mp_ops)
printk(KERN_WARNING "Overriding previously set SMP ops\n");
mp_ops = ops;
}
#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned long flags;
unsigned int core;
int cpu;
local_irq_save(flags);
switch (action) {
case SMP_CALL_FUNCTION:
__ipi_send_mask(call_desc, mask);
break;
case SMP_RESCHEDULE_YOURSELF:
__ipi_send_mask(sched_desc, mask);
break;
default:
BUG();
}
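/*
* Make sure the target cores are powered up before they are expected
* to handle the IPI: keep issuing CPC power-up commands until each
* target CPU has marked itself coherent.
*/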
if (mips_cpc_present()) {
for_each_cpu(cpu, mask) {
if (cpus_are_siblings(cpu, smp_processor_id()))
continue;
core = cpu_core(&cpu_data[cpu]);
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
mips_cm_unlock_other();
}
}
}
local_irq_restore(flags);
}
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void smp_ipi_init_one(unsigned int virq, const char *name,
irq_handler_t handler)
{
int ret;
irq_set_handler(virq, handle_percpu_irq);
ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
BUG_ON(ret);
}
static unsigned int call_virq, sched_virq;
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
int virq;
struct irq_domain *ipidomain;
struct device_node *node;
node = of_irq_find_parent(of_root);
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
/*
* Some platforms have only a partial DT setup, so if we found an irq
* node but no ipidomain, fall back to searching for an IPI domain
* that was registered without a DT node.
*/
if (node && !ipidomain)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
/*
* There are systems which use IPI IRQ domains, but only have one
* registered when some runtime condition is met. For example a Malta
* kernel may include support for GIC & CPU interrupt controller IPI
* IRQ domains, but if run on a system with no GIC & no MT ASE then
* neither will be supported or registered.
*
* We only have a problem if we're actually using multiple CPUs so fail
* loudly if that is the case. Otherwise simply return, skipping IPI
* setup, if we're running with only a single CPU.
*/
if (!ipidomain) {
BUG_ON(num_present_cpus() > 1);
return 0;
}
virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!virq);
if (!call_virq)
call_virq = virq;
virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!virq);
if (!sched_virq)
sched_virq = virq;
if (irq_domain_is_ipi_per_cpu(ipidomain)) {
int cpu;
for_each_cpu(cpu, mask) {
smp_ipi_init_one(call_virq + cpu, "IPI call",
ipi_call_interrupt);
smp_ipi_init_one(sched_virq + cpu, "IPI resched",
ipi_resched_interrupt);
}
} else {
smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
smp_ipi_init_one(sched_virq, "IPI resched",
ipi_resched_interrupt);
}
return 0;
}
int mips_smp_ipi_free(const struct cpumask *mask)
{
struct irq_domain *ipidomain;
struct device_node *node;
node = of_irq_find_parent(of_root);
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
/*
* Some platforms have only a partial DT setup, so if we found an irq
* node but no ipidomain, fall back to searching for an IPI domain
* that was registered without a DT node.
*/
if (node && !ipidomain)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
BUG_ON(!ipidomain);
if (irq_domain_is_ipi_per_cpu(ipidomain)) {
int cpu;
for_each_cpu(cpu, mask) {
free_irq(call_virq + cpu, NULL);
free_irq(sched_virq + cpu, NULL);
}
}
irq_destroy_ipi(call_virq, mask);
irq_destroy_ipi(sched_virq, mask);
return 0;
}
static int __init mips_smp_ipi_init(void)
{
if (num_possible_cpus() == 1)
return 0;
mips_smp_ipi_allocate(cpu_possible_mask);
call_desc = irq_to_desc(call_virq);
sched_desc = irq_to_desc(sched_virq);
return 0;
}
early_initcall(mips_smp_ipi_init);
#endif
/*
* First C code run on the secondary CPUs after being started up by
* the master.
*/
asmlinkage void start_secondary(void)
{
unsigned int cpu;
cpu_probe();
per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
cpu_report();
maar_init();
/*
* XXX parity protection should be folded in here when it's converted
* to an option instead of something based on .cputype
*/
calibrate_delay();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
/* Notify boot CPU that we're starting & ready to sync counters */
complete(&cpu_starting);
synchronise_count_slave(cpu);
/* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
calculate_cpu_foreign_map();
/*
* Notify boot CPU that we're up & online and it can safely return
* from __cpu_up
*/
complete(&cpu_running);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.
*/
WARN_ON_ONCE(!irqs_disabled());
mp_ops->smp_finish();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
static void stop_this_cpu(void *dummy)
{
/*
* Remove this CPU:
*/
set_cpu_online(smp_processor_id(), false);
calculate_cpu_foreign_map();
local_irq_disable();
while (1);
}
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
if (mp_ops->prepare_boot_cpu)
mp_ops->prepare_boot_cpu();
set_cpu_possible(0, true);
set_cpu_online(0, true);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int err;
err = mp_ops->boot_secondary(cpu, tidle);
if (err)
return err;
/* Wait for CPU to start and be ready to sync counters */
if (!wait_for_completion_timeout(&cpu_starting,
msecs_to_jiffies(1000))) {
pr_crit("CPU%u: failed to start\n", cpu);
return -EIO;
}
synchronise_count_master(cpu);
/* Wait for CPU to finish startup & mark itself online before return */
wait_for_completion(&cpu_running);
return 0;
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
static void flush_tlb_all_ipi(void *info)
{
local_flush_tlb_all();
}
void flush_tlb_all(void)
{
if (cpu_has_mmid) {
htw_stop();
ginvt_full();
sync_ginv();
instruction_hazard();
htw_start();
return;
}
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
drop_mmu_context((struct mm_struct *)mm);
}
/*
* Special Variant of smp_call_function for use by TLB functions:
*
* o No return value
* o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared
* primary cache.
*/
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
smp_call_function(func, info, 1);
}
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
preempt_disable();
smp_on_other_tlbs(func, info);
func(info);
preempt_enable();
}
/*
* The following tlb flush calls are invoked when old translations are
* being torn down, or pte attributes are changing. For single threaded
* address spaces, a new context is obtained on the current cpu, and tlb
* context on other cpus are invalidated to force a new context allocation
* at switch_mm time, should the mm ever be used on other cpus. For
* multithreaded address spaces, inter-CPU interrupts have to be sent.
* Another case where inter-CPU interrupts are required is when the target
* mm might be active on another cpu (e.g. debuggers doing the flushes on
* behalf of debuggees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
if (!mm)
return;
if (atomic_read(&mm->mm_users) == 0)
return; /* happens as a result of exit_mmap() */
preempt_disable();
if (cpu_has_mmid) {
/*
* No need to worry about other CPUs - the ginvt in
* drop_mmu_context() will be globalized.
*/
} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
set_cpu_context(cpu, mm, 0);
}
}
drop_mmu_context(mm);
preempt_enable();
}
struct flush_tlb_data {
struct vm_area_struct *vma;
unsigned long addr1;
unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long addr;
u32 old_mmid;
preempt_disable();
if (cpu_has_mmid) {
htw_stop();
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(cpu_asid(0, mm));
mtc0_tlbw_hazard();
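/*
* A MIPS TLB entry maps an even/odd pair of pages, so align the
* range to double-page boundaries and invalidate one pair per
* ginvt operation.
*/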
addr = round_down(start, PAGE_SIZE * 2);
end = round_up(end, PAGE_SIZE * 2);
do {
ginvt_va_mmid(addr);
sync_ginv();
addr += PAGE_SIZE * 2;
} while (addr < end);
write_c0_memorymapid(old_mmid);
instruction_hazard();
htw_start();
} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
.addr2 = end,
};
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
local_flush_tlb_range(vma, start, end);
} else {
unsigned int cpu;
int exec = vma->vm_flags & VM_EXEC;
for_each_online_cpu(cpu) {
/*
* flush_cache_range() will only fully flush the icache if
* the VMA is executable; otherwise we must invalidate the
* ASID without making it appear to has_valid_asid() as if
* the mm has been completely unused by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
set_cpu_context(cpu, mm, !exec);
}
local_flush_tlb_range(vma, start, end);
}
preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
struct flush_tlb_data fd = {
.addr1 = start,
.addr2 = end,
};
on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}
static void flush_tlb_page_ipi(void *info)
{
struct flush_tlb_data *fd = info;
local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
u32 old_mmid;
preempt_disable();
if (cpu_has_mmid) {
htw_stop();
old_mmid = read_c0_memorymapid();
write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
mtc0_tlbw_hazard();
ginvt_va_mmid(page);
sync_ginv();
write_c0_memorymapid(old_mmid);
instruction_hazard();
htw_start();
} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
(current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
local_flush_tlb_page(vma, page);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
/*
* flush_cache_page() only does partial flushes, so
* invalidate the ASID without making it appear to
* has_valid_asid() as if the mm has been completely
* unused by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
set_cpu_context(cpu, vma->vm_mm, 1);
}
local_flush_tlb_page(vma, page);
}
preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
unsigned long vaddr = (unsigned long) info;
local_flush_tlb_one(vaddr);
}
void flush_tlb_one(unsigned long vaddr)
{
smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
if (mp_ops->cleanup_dead_cpu)
mp_ops->cleanup_dead_cpu(cpu);
}
#endif
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void tick_broadcast_callee(void *info)
{
tick_receive_broadcast();
}
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
CSD_INIT(tick_broadcast_callee, NULL);
void tick_broadcast(const struct cpumask *mask)
{
call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
csd = &per_cpu(tick_broadcast_csd, cpu);
smp_call_function_single_async(cpu, csd);
}
}
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
| linux-master | arch/mips/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2014 Imagination Technologies
* Author: Paul Burton <[email protected]>
*/
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
#include <asm/uasm.h>
/*
* cps_nc_entry_fn - type of a generated non-coherent state entry function
* @online: the count of online coupled VPEs
* @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
*
* The code entering & exiting non-coherent states is generated at runtime
* using uasm, in order to ensure that the compiler cannot insert a stray
* memory access at an unfortunate time and to allow the generation of optimal
* core-specific code particularly for cache routines. If coupled_coherence
* is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
* returns the number of VPEs that were in the wait state at the point this
* VPE left it. Returns garbage if coupled_coherence is zero or this is not
* the entry function for CPS_PM_NC_WAIT.
*/
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
/*
* The entry point of the generated non-coherent idle state entry/exit
* functions. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
nc_asm_enter);
/* Bitmap indicating which states are supported by the system */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
/*
* Indicates the number of coupled VPEs ready to operate in a non-coherent
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/*
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
/* A somewhat arbitrary number of labels & relocs for uasm */
static struct uasm_label labels[32];
static struct uasm_reloc relocs[32];
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
t0, t1, t2, t3, t4, t5, t6, t7,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9, k0, k1, gp, sp, fp, ra,
};
bool cps_pm_support_state(enum cps_pm_state state)
{
return test_bit(state, state_support);
}
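/*
* Two-phase barrier built from a single counter: every VPE increments
* the counter once and spins until all 'online' VPEs have arrived,
* then increments it again. The last VPE to arrive in the second
* phase (count == online * 2) resets the counter to zero, releasing
* the others, which spin until the count drops back below 'online'.
*/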
static void coupled_barrier(atomic_t *a, unsigned online)
{
/*
* This function is effectively the same as
* cpuidle_coupled_parallel_barrier, which can't be used here since
* there's no cpuidle device.
*/
if (!coupled_coherence)
return;
smp_mb__before_atomic();
atomic_inc(a);
while (atomic_read(a) < online)
cpu_relax();
if (atomic_inc_return(a) == online * 2) {
atomic_set(a, 0);
return;
}
while (atomic_read(a) > online)
cpu_relax();
}
int cps_pm_enter_state(enum cps_pm_state state)
{
unsigned cpu = smp_processor_id();
unsigned core = cpu_core(&current_cpu_data);
unsigned online, left;
cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
u32 *core_ready_count, *nc_core_ready_count;
void *nc_addr;
cps_nc_entry_fn entry;
struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg;
/* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state];
if (!entry)
return -EINVAL;
/* Calculate which coupled CPUs (VPEs) are online */
#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
online = cpumask_weight(coupled_mask);
cpumask_clear_cpu(cpu, coupled_mask);
} else
#endif
{
cpumask_clear(coupled_mask);
online = 1;
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
core_cfg = &mips_cps_core_bootcfg[core];
vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
vpe_cfg->gp = (unsigned long)current_thread_info();
vpe_cfg->sp = 0;
}
/* Indicate that this CPU might not be coherent */
cpumask_clear_cpu(cpu, &cpu_coherent_mask);
smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online);
/* Run the generated entry code */
left = entry(online, nc_core_ready_count);
/* Remove the non-coherent mapping of ready_count */
kunmap_noncoherent();
/* Indicate that this CPU is definitely coherent */
cpumask_set_cpu(cpu, &cpu_coherent_mask);
/*
* If this VPE is the first to leave the non-coherent wait state then
* it needs to wake up any coupled VPEs still running their wait
* instruction so that they return to cpuidle, which can then complete
* coordination between the coupled VPEs & provide the governor with
* a chance to reflect on the length of time the VPEs were in the
* idle state.
*/
if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
arch_send_call_function_ipi_mask(coupled_mask);
return 0;
}
static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cache_desc *cache,
unsigned op, int lbl)
{
unsigned cache_size = cache->ways << cache->waybit;
unsigned i;
const unsigned unroll_lines = 32;
/* If the cache isn't present this function has it easy */
if (cache->flags & MIPS_CACHE_NOT_PRESENT)
return;
/* Load base address */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Calculate end address */
if (cache_size < 0x8000)
uasm_i_addiu(pp, t1, t0, cache_size);
else
UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
/* Start of cache op loop */
uasm_build_label(pl, *pp, lbl);
/* Generate the cache ops */
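/*
* MIPS r6 shrank the immediate offset field of the cache instruction,
* so the r6 variant advances the base register after every cache op
* instead of using increasing immediate offsets.
*/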
for (i = 0; i < unroll_lines; i++) {
if (cpu_has_mips_r6) {
uasm_i_cache(pp, op, 0, t0);
uasm_i_addiu(pp, t0, t0, cache->linesz);
} else {
uasm_i_cache(pp, op, i * cache->linesz, t0);
}
}
if (!cpu_has_mips_r6)
/* Update the base address */
uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
/* Loop if we haven't reached the end address yet */
uasm_il_bne(pp, pr, t0, t1, lbl);
uasm_i_nop(pp);
}
static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
const struct cpuinfo_mips *cpu_info,
int lbl)
{
unsigned i, fsb_size = 8;
unsigned num_loads = (fsb_size * 3) / 2;
unsigned line_stride = 2;
unsigned line_size = cpu_info->dcache.linesz;
unsigned perf_counter, perf_event;
unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
/*
* Determine whether this CPU requires an FSB flush, and if so which
* performance counter/event reflect stalls due to a full FSB.
*/
switch (__get_cpu_type(cpu_info->cputype)) {
case CPU_INTERAPTIV:
perf_counter = 1;
perf_event = 51;
break;
case CPU_PROAPTIV:
/* Newer proAptiv cores don't require this workaround */
if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
return 0;
/* On older ones it's unavailable */
return -1;
default:
/* Assume that the CPU does not need this workaround */
return 0;
}
/*
* Ensure that the fill/store buffer (FSB) is not holding the results
* of a prefetch, since if it is then the CPC sequencer may become
* stuck in the D3 (ClrBus) state whilst entering a low power state.
*/
/* Preserve perf counter setup */
uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Setup perf counter to count FSB full pipeline stalls */
uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
/* Base address for loads */
UASM_i_LA(pp, t0, (long)CKSEG0);
/* Start of clear loop */
uasm_build_label(pl, *pp, lbl);
/* Perform some loads to fill the FSB */
for (i = 0; i < num_loads; i++)
uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
/*
* Invalidate the new D-cache entries so that the cache will need
* refilling (via the FSB) if the loop is executed again.
*/
for (i = 0; i < num_loads; i++) {
uasm_i_cache(pp, Hit_Invalidate_D,
i * line_size * line_stride, t0);
uasm_i_cache(pp, Hit_Writeback_Inv_SD,
i * line_size * line_stride, t0);
}
/* Barrier ensuring previous cache invalidates are complete */
uasm_i_sync(pp, __SYNC_full);
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
/* Loop if it didn't */
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
/* Restore perf counter 1. The count may well now be wrong... */
uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
uasm_i_ehb(pp);
uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
uasm_i_ehb(pp);
return 0;
}
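/*
* Generate code which atomically sets bit 31 of the word at r_addr
* using an LL/SC sequence, branching back to retry if the SC fails.
*/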
static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
struct uasm_reloc **pr,
unsigned r_addr, int lbl)
{
uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
uasm_i_ll(pp, t1, 0, r_addr);
uasm_i_or(pp, t1, t1, t0);
uasm_i_sc(pp, t1, 0, r_addr);
uasm_il_beqz(pp, pr, t1, lbl);
uasm_i_nop(pp);
}
static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
u32 *buf, *p;
const unsigned r_online = a0;
const unsigned r_nc_count = a1;
const unsigned r_pcohctl = t7;
const unsigned max_instrs = 256;
unsigned cpc_cmd;
int err;
enum {
lbl_incready = 1,
lbl_poll_cont,
lbl_secondary_hang,
lbl_disable_coherence,
lbl_flush_fsb,
lbl_invicache,
lbl_flushdcache,
lbl_hang,
lbl_set_cont,
lbl_secondary_cont,
lbl_decready,
};
/* Allocate a buffer to hold the generated code */
p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
if (!buf)
return NULL;
/* Clear labels & relocs ready for (re)use */
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
/*
* Save CPU state. Note the non-standard calling convention
* with the return address placed in v0 to avoid clobbering
* the ra register before it is saved.
*/
UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
uasm_i_jalr(&p, v0, t0);
uasm_i_nop(&p);
}
/*
* Load addresses of required CM & CPC registers. This is done early
* because they're needed in both the enable & disable coherence steps
* but in the coupled case the enable step will only run on one VPE.
*/
UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
if (coupled_coherence) {
/* Increment ready_count */
uasm_i_sync(&p, __SYNC_mb);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_incready);
uasm_i_addiu(&p, t1, t1, 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
uasm_i_sync(&p, __SYNC_mb);
/*
* If this is the last VPE to become ready for non-coherence
* then it should branch below.
*/
uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
uasm_i_nop(&p);
if (state < CPS_PM_POWER_GATED) {
/*
* Otherwise this is not the last VPE to become ready
* for non-coherence. It needs to wait until coherence
* has been disabled before proceeding, which it will do
* by polling for the top bit of ready_count being set.
*/
uasm_i_addiu(&p, t1, zero, -1);
uasm_build_label(&l, p, lbl_poll_cont);
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_ehb(&p);
if (cpu_has_mipsmt)
uasm_i_yield(&p, zero, t1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
/*
* The core will lose power & this VPE will not continue
* so it can simply halt here.
*/
if (cpu_has_mipsmt) {
/* Halt the VPE via C0 tchalt register */
uasm_i_addiu(&p, t0, zero, TCHALT_H);
uasm_i_mtc0(&p, t0, 2, 4);
} else if (cpu_has_vp) {
/* Halt the VP via the CPC VP_STOP register */
unsigned int vpe_id;
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
uasm_i_sw(&p, t0, 0, t1);
} else {
BUG();
}
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
}
}
/*
* This is the point of no return - this VPE will now proceed to
* disable coherence. At this point we *must* be sure that no other
* VPE within the core will interfere with the L1 dcache.
*/
uasm_build_label(&l, p, lbl_disable_coherence);
/* Invalidate the L1 icache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
Index_Invalidate_I, lbl_invicache);
/* Writeback & invalidate the L1 dcache */
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
Index_Writeback_Inv_D, lbl_flushdcache);
/* Barrier ensuring previous cache invalidates are complete */
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
if (mips_cm_revision() < CM_REV_CM3) {
/*
* Disable all but self interventions. The load from COHCTL is
* defined by the interAptiv & proAptiv SUMs as ensuring that the
* operation resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
/* Disable coherence */
uasm_i_sw(&p, zero, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
if (state >= CPS_PM_CLOCK_GATED) {
err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
lbl_flush_fsb);
if (err)
goto out_err;
/* Determine the CPC command to issue */
switch (state) {
case CPS_PM_CLOCK_GATED:
cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
break;
case CPS_PM_POWER_GATED:
cpc_cmd = CPC_Cx_CMD_PWRDOWN;
break;
default:
BUG();
goto out_err;
}
/* Issue the CPC command */
UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
uasm_i_addiu(&p, t1, zero, cpc_cmd);
uasm_i_sw(&p, t1, 0, t0);
if (state == CPS_PM_POWER_GATED) {
/* If anything goes wrong just hang */
uasm_build_label(&l, p, lbl_hang);
uasm_il_b(&p, &r, lbl_hang);
uasm_i_nop(&p);
/*
* There's no point generating more code, the core is
* powered down & if powered back up will run from the
* reset vector not from here.
*/
goto gen_done;
}
/* Barrier to ensure write to CPC command is complete */
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
}
if (state == CPS_PM_NC_WAIT) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
if (coupled_coherence)
cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
lbl_set_cont);
/*
* VPEs which did not disable coherence will continue
* executing, after coherence has been disabled, from this
* point.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Now perform our wait */
uasm_i_wait(&p, 0);
}
/*
* Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
* will run this. The first will actually re-enable coherence & the
* rest will just be performing a rather unusual nop.
*/
uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
? CM_GCR_Cx_COHERENCE_COHDOMAINEN
: CM3_GCR_Cx_COHERENCE_COHEN);
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
/* Barrier to ensure write to coherence control is complete */
uasm_i_sync(&p, __SYNC_full);
uasm_i_ehb(&p);
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
uasm_i_sync(&p, __SYNC_mb);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_decready);
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
uasm_i_sync(&p, __SYNC_mb);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
/*
* At this point it is safe for all VPEs to proceed with
* execution. This VPE will set the top bit of ready_count
* to indicate to the other VPEs that they may continue.
*/
cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
/*
* This core will be reliant upon another core sending a
* power-up command to the CPC in order to resume operation.
* Thus an arbitrary VPE can't trigger the core leaving the
* idle state and the one that disables coherence might as well
* be the one to re-enable it. The rest will continue from here
* after that has been done.
*/
uasm_build_label(&l, p, lbl_secondary_cont);
/* Barrier ensuring all CPUs see the updated r_nc_count value */
uasm_i_sync(&p, __SYNC_mb);
}
/* The core is coherent, time to return to C code */
uasm_i_jr(&p, ra);
uasm_i_nop(&p);
gen_done:
/* Ensure the code didn't exceed the resources allocated for it */
BUG_ON((p - buf) > max_instrs);
BUG_ON((l - labels) > ARRAY_SIZE(labels));
BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
/* Patch branch offsets */
uasm_resolve_relocs(relocs, labels);
/* Flush the icache */
local_flush_icache_range((unsigned long)buf, (unsigned long)p);
return buf;
out_err:
kfree(buf);
return NULL;
}
static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_core(&cpu_data[cpu]);
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state])
continue;
if (!test_bit(state, state_support))
continue;
entry_fn = cps_gen_entry_code(cpu, state);
if (!entry_fn) {
pr_err("Failed to generate core %u state %u entry\n",
core, state);
clear_bit(state, state_support);
}
per_cpu(nc_asm_enter, core)[state] = entry_fn;
}
if (!per_cpu(ready_count, core)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
per_cpu(ready_count, core) = core_rc;
}
return 0;
}
static int cps_pm_power_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
unsigned int stat;
switch (event) {
case PM_SUSPEND_PREPARE:
stat = read_cpc_cl_stat_conf();
/*
* If we're attempting to suspend the system and power down all
* of the cores, the JTAG detect bit indicates that the CPC will
* instead put the cores into clock-off state. In this state
* a connected debugger can cause the CPU to attempt
* interactions with the powered down system. At best this will
* fail. At worst, it can hang the NoC, requiring a hard reset.
* To avoid this, just block system suspend if a JTAG probe
* is detected.
*/
if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
pr_warn("JTAG probe is connected - abort suspend\n");
return NOTIFY_BAD;
}
return NOTIFY_DONE;
default:
return NOTIFY_DONE;
}
}
static int __init cps_pm_init(void)
{
/* A CM is required for all non-coherent states */
if (!mips_cm_present()) {
pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
return 0;
}
/*
* If interrupts were enabled whilst running a wait instruction on a
* non-coherent core then the VPE may end up processing interrupts
* whilst non-coherent. That would be bad.
*/
if (cpu_wait == r4k_wait_irqoff)
set_bit(CPS_PM_NC_WAIT, state_support);
else
pr_warn("pm-cps: non-coherent wait unavailable\n");
/* Detect whether a CPC is present */
if (mips_cpc_present()) {
/* Detect whether clock gating is implemented */
if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
set_bit(CPS_PM_CLOCK_GATED, state_support);
else
pr_warn("pm-cps: CPC does not support clock gating\n");
/* Power gating is available with CPS SMP & any CPC */
if (mips_cps_smp_in_use())
set_bit(CPS_PM_POWER_GATED, state_support);
else
pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
} else {
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
pm_notifier(cps_pm_power_notifier, 0);
return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);
| linux-master | arch/mips/kernel/pm-cps.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A gpio chip driver for TXx9 SoCs
*
* Copyright (C) 2008 Atsushi Nemoto <[email protected]>
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/txx9pio.h>
static DEFINE_SPINLOCK(txx9_gpio_lock);
static struct txx9_pio_reg __iomem *txx9_pioptr;
static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
}
static void txx9_gpio_set_raw(unsigned int offset, int value)
{
u32 val;
val = __raw_readl(&txx9_pioptr->dout);
if (value)
val |= 1 << offset;
else
val &= ~(1 << offset);
__raw_writel(val, &txx9_pioptr->dout);
}
static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
txx9_gpio_set_raw(offset, value);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
}
static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
__raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
&txx9_pioptr->dir);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
return 0;
}
static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long flags;
spin_lock_irqsave(&txx9_gpio_lock, flags);
txx9_gpio_set_raw(offset, value);
__raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
&txx9_pioptr->dir);
mmiowb();
spin_unlock_irqrestore(&txx9_gpio_lock, flags);
return 0;
}
static struct gpio_chip txx9_gpio_chip = {
.get = txx9_gpio_get,
.set = txx9_gpio_set,
.direction_input = txx9_gpio_dir_in,
.direction_output = txx9_gpio_dir_out,
.label = "TXx9",
};
int __init txx9_gpio_init(unsigned long baseaddr,
unsigned int base, unsigned int num)
{
txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
if (!txx9_pioptr)
return -ENODEV;
txx9_gpio_chip.base = base;
txx9_gpio_chip.ngpio = num;
return gpiochip_add_data(&txx9_gpio_chip, NULL);
}
| linux-master | arch/mips/kernel/gpio_txx9.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2016, Imagination Technologies Ltd.
*/
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <asm/compat-signal.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include "signal-common.h"
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset)
{
return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t));
}
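/*
* Translate a 32-bit userspace sigaction to and from the native
* k_sigaction: the handler is widened from a 32-bit value to a
* pointer, and only the first word of the compat signal mask is
* significant, so the remaining words are written back as zero.
*/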
SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act,
struct compat_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
int err = 0;
if (act) {
old_sigset_t mask;
s32 handler;
if (!access_ok(act, sizeof(*act)))
return -EFAULT;
err |= __get_user(handler, &act->sa_handler);
new_ka.sa.sa_handler = (void __user *)(s64)handler;
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(oact, sizeof(*oact)))
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
&oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
err |= __put_user(0, &oact->sa_mask.sig[3]);
if (err)
return -EFAULT;
}
return ret;
}
| linux-master | arch/mips/kernel/signal32.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994 - 2001, 2003, 07 Ralf Baechle
*/
#include <linux/clockchips.h>
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/pgtable.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
#include <asm/tlbmisc.h>
static DEFINE_RAW_SPINLOCK(r4030_lock);
static void enable_r4030_irq(struct irq_data *d)
{
unsigned int mask = 1 << (d->irq - JAZZ_IRQ_START);
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask |= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
void disable_r4030_irq(struct irq_data *d)
{
unsigned int mask = ~(1 << (d->irq - JAZZ_IRQ_START));
unsigned long flags;
raw_spin_lock_irqsave(&r4030_lock, flags);
mask &= r4030_read_reg16(JAZZ_IO_IRQ_ENABLE);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, mask);
raw_spin_unlock_irqrestore(&r4030_lock, flags);
}
static struct irq_chip r4030_irq_type = {
.name = "R4030",
.irq_mask = disable_r4030_irq,
.irq_unmask = enable_r4030_irq,
};
void __init init_r4030_ints(void)
{
int i;
for (i = JAZZ_IRQ_START; i <= JAZZ_IRQ_END; i++)
irq_set_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */
r4030_read_reg32(JAZZ_R4030_INVAL_ADDR); /* clear error bits */
}
/*
* On systems with i8259-style interrupt controllers we assume, for
* driver compatibility reasons, that interrupts 0 - 15 are the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init arch_init_irq(void)
{
/*
* this is a hack to get back the still needed wired mapping
* killed by init_mm()
*/
/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
init_i8259_irqs(); /* Integrated i8259 */
mips_cpu_irq_init();
init_r4030_ints();
change_c0_status(ST0_IM, IE_IRQ2 | IE_IRQ1);
}
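/*
* Dispatch in priority order: the R4030 interval timer (IE_IRQ4)
* first, then EISA interrupts acknowledged via the EISA IRQ ack
* register (IE_IRQ2), then local R4030 device interrupts (IE_IRQ1).
*/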
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_cause() & read_c0_status();
unsigned int irq;
if (pending & IE_IRQ4) {
r4030_read_reg32(JAZZ_TIMER_REGISTER);
do_IRQ(JAZZ_TIMER_IRQ);
} else if (pending & IE_IRQ2) {
irq = *(volatile u8 *)JAZZ_EISA_IRQ_ACK;
do_IRQ(irq);
} else if (pending & IE_IRQ1) {
irq = *(volatile u8 *)JAZZ_IO_IRQ_SOURCE >> 2;
if (likely(irq > 0))
do_IRQ(irq + JAZZ_IRQ_START - 1);
else
panic("Unimplemented loc_no_irq handler");
}
}
struct clock_event_device r4030_clockevent = {
.name = "r4030",
.features = CLOCK_EVT_FEAT_PERIODIC,
.rating = 300,
.irq = JAZZ_TIMER_IRQ,
};
static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
void __init plat_time_init(void)
{
struct clock_event_device *cd = &r4030_clockevent;
unsigned int cpu = smp_processor_id();
BUG_ON(HZ != 100);
cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
if (request_irq(JAZZ_TIMER_IRQ, r4030_timer_interrupt, IRQF_TIMER,
"R4030 timer", cd))
pr_err("Failed to register R4030 timer interrupt\n");
/*
* Set clock to 100Hz.
*
* The R4030 timer receives an input clock of 1kHz which is divided by
* a programmable 4-bit divider. This makes it fairly inflexible.
*/
r4030_write_reg32(JAZZ_TIMER_INTERVAL, 9);
setup_pit_timer();
}
| linux-master | arch/mips/jazz/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Mips Jazz DMA controller support
* Copyright (C) 1995, 1996 by Andreas Busse
*
* NOTE: Some of the argument checking could be removed when
* things have settled down. Also, instead of returning 0xffffffff
* on failure of vdma_alloc() one could leave page #0 unused
* and return the more usual NULL pointer as logical address.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/dma-map-ops.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>
/*
* Set this to one to enable additional vdma debug code.
*/
#define CONF_DEBUG_VDMA 0
static VDMA_PGTBL_ENTRY *pgtbl;
static DEFINE_SPINLOCK(vdma_lock);
/*
* Debug stuff
*/
#define vdma_debug ((CONF_DEBUG_VDMA) ? debuglvl : 0)
static int debuglvl = 3;
/*
* Initialize the pagetable with a one-to-one mapping of
* the first 16 Mbytes of main memory and declare all
* entries to be unused. Using this method will at least
* allow some early device driver operations to work.
*/
static inline void vdma_pgtbl_init(void)
{
unsigned long paddr = 0;
int i;
for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
pgtbl[i].frame = paddr;
pgtbl[i].owner = VDMA_PAGE_EMPTY;
paddr += VDMA_PAGESIZE;
}
}
/*
* Initialize the Jazz R4030 dma controller
*/
static int __init vdma_init(void)
{
/*
* Allocate 32k of memory for DMA page tables. This needs to be page
* aligned and should be uncached to avoid cache flushing after every
* update.
*/
pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
return 0;
}
arch_initcall(vdma_init);
/*
* Allocate DMA pagetables using a simple first-fit algorithm
*/
unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
{
int first, last, pages, frame, i;
unsigned long laddr, flags;
/* check arguments */
if (paddr > 0x1fffffff) {
if (vdma_debug)
printk("vdma_alloc: Invalid physical address: %08lx\n",
paddr);
return DMA_MAPPING_ERROR; /* invalid physical address */
}
if (size > 0x400000 || size == 0) {
if (vdma_debug)
printk("vdma_alloc: Invalid size: %08lx\n", size);
return DMA_MAPPING_ERROR; /* invalid size */
}
spin_lock_irqsave(&vdma_lock, flags);
/*
* Find free chunk
*/
pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
first = 0;
while (1) {
/* bounds check before the dereference to avoid reading past the table */
while (first < VDMA_PGTBL_ENTRIES &&
pgtbl[first].owner != VDMA_PAGE_EMPTY)
first++;
if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
spin_unlock_irqrestore(&vdma_lock, flags);
return DMA_MAPPING_ERROR;
}
last = first + 1;
while (last - first < pages &&
pgtbl[last].owner == VDMA_PAGE_EMPTY)
last++;
if (last - first == pages)
break; /* found */
first = last + 1;
}
/*
* Mark pages as allocated
*/
laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
frame = paddr & ~(VDMA_PAGESIZE - 1);
for (i = first; i < last; i++) {
pgtbl[i].frame = frame;
pgtbl[i].owner = laddr;
frame += VDMA_PAGESIZE;
}
/*
* Update translation table and return logical start address
*/
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
if (vdma_debug > 1)
printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
pages, laddr);
if (vdma_debug > 2) {
printk("LADDR: ");
for (i = first; i < last; i++)
printk("%08x ", i << 12);
printk("\nPADDR: ");
for (i = first; i < last; i++)
printk("%08x ", pgtbl[i].frame);
printk("\nOWNER: ");
for (i = first; i < last; i++)
printk("%08x ", pgtbl[i].owner);
printk("\n");
}
spin_unlock_irqrestore(&vdma_lock, flags);
return laddr;
}
EXPORT_SYMBOL(vdma_alloc);
/*
* Free previously allocated dma translation pages
* Note that this does NOT change the translation table,
* it just marks the free'd pages as unused!
*/
int vdma_free(unsigned long laddr)
{
int i;
i = laddr >> 12;
if (pgtbl[i].owner != laddr) {
printk
("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
laddr);
return -1;
}
while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
pgtbl[i].owner = VDMA_PAGE_EMPTY;
i++;
}
if (vdma_debug > 1)
printk("vdma_free: freed %ld pages starting from %08lx\n",
i - (laddr >> 12), laddr);
return 0;
}
EXPORT_SYMBOL(vdma_free);
/*
* Translate a physical address to a logical address.
* This will return the logical address of the first
* match.
*/
unsigned long vdma_phys2log(unsigned long paddr)
{
int i;
int frame;
frame = paddr & ~(VDMA_PAGESIZE - 1);
for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
if (pgtbl[i].frame == frame)
break;
}
if (i == VDMA_PGTBL_ENTRIES)
return ~0UL;
return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
}
EXPORT_SYMBOL(vdma_phys2log);
/*
* Translate a logical DMA address to a physical address
*/
unsigned long vdma_log2phys(unsigned long laddr)
{
return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
}
EXPORT_SYMBOL(vdma_log2phys);
/*
* Print DMA statistics
*/
void vdma_stats(void)
{
int i;
printk("vdma_stats: CONFIG: %08x\n",
r4030_read_reg32(JAZZ_R4030_CONFIG));
printk("R4030 translation table base: %08x\n",
r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
printk("R4030 translation table limit: %08x\n",
r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
printk("vdma_stats: INV_ADDR: %08x\n",
r4030_read_reg32(JAZZ_R4030_INV_ADDR));
printk("vdma_stats: R_FAIL_ADDR: %08x\n",
r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
printk("vdma_stats: M_FAIL_ADDR: %08x\n",
r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
printk("vdma_stats: IRQ_SOURCE: %08x\n",
r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
printk("vdma_stats: I386_ERROR: %08x\n",
r4030_read_reg32(JAZZ_R4030_I386_ERROR));
printk("vdma_chnl_modes: ");
for (i = 0; i < 8; i++)
printk("%04x ",
(unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
(i << 5)));
printk("\n");
printk("vdma_chnl_enables: ");
for (i = 0; i < 8; i++)
printk("%04x ",
(unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(i << 5)));
printk("\n");
}
/*
* DMA transfer functions
*/
/*
* Enable a DMA channel. Also clear any error conditions.
*/
void vdma_enable(int channel)
{
int status;
if (vdma_debug)
printk("vdma_enable: channel %d\n", channel);
/*
* Check error conditions first
*/
status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
if (status & 0x400)
printk("VDMA: Channel %d: Address error!\n", channel);
if (status & 0x200)
printk("VDMA: Channel %d: Memory error!\n", channel);
/*
* Clear all interrupt flags
*/
r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5)) | R4030_TC_INTR
| R4030_MEM_INTR | R4030_ADDR_INTR);
/*
* Enable the desired channel
*/
r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5)) |
R4030_CHNL_ENABLE);
}
EXPORT_SYMBOL(vdma_enable);
/*
* Disable a DMA channel
*/
void vdma_disable(int channel)
{
if (vdma_debug) {
int status =
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5));
printk("vdma_disable: channel %d\n", channel);
printk("VDMA: channel %d status: %04x (%s) mode: "
"%02x addr: %06x count: %06x\n",
channel, status,
((status & 0x600) ? "ERROR" : "OK"),
(unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
(channel << 5)),
(unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
(channel << 5)),
(unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
(channel << 5)));
}
r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5)) &
~R4030_CHNL_ENABLE);
/*
* After disabling a DMA channel a remote bus register should be
* read to ensure that the current DMA acknowledge cycle is completed.
*/
*((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
}
EXPORT_SYMBOL(vdma_disable);
/*
* Set DMA mode. This function accepts the mode values used
* to set a PC-style DMA controller. For the SCSI and FDC
* channels, we also set the default modes each time we're
* called.
* NOTE: The FAST and BURST dma modes are supported by the
* R4030 Rev. 2 and PICA chipsets only. I leave them disabled
* for now.
*/
void vdma_set_mode(int channel, int mode)
{
if (vdma_debug)
printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
mode);
switch (channel) {
case JAZZ_SCSI_DMA: /* scsi */
r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/* R4030_MODE_FAST | */
/* R4030_MODE_BURST | */
R4030_MODE_INTR_EN |
R4030_MODE_WIDTH_16 |
R4030_MODE_ATIME_80);
break;
case JAZZ_FLOPPY_DMA: /* floppy */
r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/* R4030_MODE_FAST | */
/* R4030_MODE_BURST | */
R4030_MODE_INTR_EN |
R4030_MODE_WIDTH_8 |
R4030_MODE_ATIME_120);
break;
case JAZZ_AUDIOL_DMA:
case JAZZ_AUDIOR_DMA:
printk("VDMA: Audio DMA not supported yet.\n");
break;
default:
printk
("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
channel);
}
switch (mode) {
case DMA_MODE_READ:
r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5)) &
~R4030_CHNL_WRITE);
break;
case DMA_MODE_WRITE:
r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
(channel << 5)) |
R4030_CHNL_WRITE);
break;
default:
printk
("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
mode);
}
}
EXPORT_SYMBOL(vdma_set_mode);
/*
* Set Transfer Address
*/
void vdma_set_addr(int channel, long addr)
{
if (vdma_debug)
printk("vdma_set_addr: channel %d, addr %lx\n", channel,
addr);
r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
}
EXPORT_SYMBOL(vdma_set_addr);
/*
* Set Transfer Count
*/
void vdma_set_count(int channel, int count)
{
if (vdma_debug)
printk("vdma_set_count: channel %d, count %08x\n", channel,
(unsigned) count);
r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
}
EXPORT_SYMBOL(vdma_set_count);
/*
* Get Residual
*/
int vdma_get_residue(int channel)
{
int residual;
residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));
if (vdma_debug)
printk("vdma_get_residual: channel %d: residual=%d\n",
channel, residual);
return residual;
}
/*
* Get DMA channel enable register
*/
int vdma_get_enable(int channel)
{
int enable;
enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
if (vdma_debug)
printk("vdma_get_enable: channel %d: enable=%d\n", channel,
enable);
return enable;
}
static void *jazz_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
void *ret;
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
size = PAGE_ALIGN(size);
page = alloc_pages(gfp, get_order(size));
if (!page)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
*dma_handle = vdma_alloc(virt_to_phys(ret), size);
if (*dma_handle == DMA_MAPPING_ERROR)
goto out_free_pages;
arch_dma_prep_coherent(page, size);
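/* Return an uncached alias of the buffer to the caller */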
return (void *)(UNCAC_BASE + __pa(ret));
out_free_pages:
__free_pages(page, get_order(size));
return NULL;
}
static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
vdma_free(dma_handle);
__free_pages(virt_to_page(vaddr), get_order(size));
}
static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(phys, size, dir);
return vdma_alloc(phys, size);
}
static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
vdma_free(dma_addr);
}
static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_device(sg_phys(sg), sg->length,
dir);
sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
if (sg->dma_address == DMA_MAPPING_ERROR)
return -EIO;
sg_dma_len(sg) = sg->length;
}
return nents;
}
static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sglist, sg, nents, i) {
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
vdma_free(sg->dma_address);
}
}
static void jazz_dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
static void jazz_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
const struct dma_map_ops jazz_dma_ops = {
.alloc = jazz_dma_alloc,
.free = jazz_dma_free,
.map_page = jazz_dma_map_page,
.unmap_page = jazz_dma_unmap_page,
.map_sg = jazz_dma_map_sg,
.unmap_sg = jazz_dma_unmap_sg,
.sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
.sync_single_for_device = jazz_dma_sync_single_for_device,
.sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
.sync_sg_for_device = jazz_dma_sync_sg_for_device,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
EXPORT_SYMBOL(jazz_dma_ops);
| linux-master | arch/mips/jazz/jazzdma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Reset a Jazz machine.
*
* We don't trust the firmware so we do it the classic way by poking and
* stabbing at the keyboard controller ...
*/
#include <linux/jiffies.h>
#include <asm/jazz.h>
#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
static void jazz_write_output(unsigned char val)
{
int status;
do {
status = jazz_kh->command;
} while (status & KBD_STAT_IBF);
jazz_kh->data = val;
}
static void jazz_write_command(unsigned char val)
{
int status;
do {
status = jazz_kh->command;
} while (status & KBD_STAT_IBF);
jazz_kh->command = val;
}
static unsigned char jazz_read_status(void)
{
return jazz_kh->command;
}
static inline void kb_wait(void)
{
unsigned long start = jiffies;
unsigned long timeout = start + HZ/2;
do {
		if (!(jazz_read_status() & KBD_STAT_IBF))
return;
} while (time_before_eq(jiffies, timeout));
}
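/*
 * Classic 8042 pulse reset: command 0xd1 ("write output port") followed
 * by a data byte of 0x00 pulls the CPU reset line (output port bit 0) low.
 */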
void jazz_machine_restart(char *command)
{
	while (1) {
kb_wait();
jazz_write_command(0xd1);
kb_wait();
jazz_write_output(0x00);
}
}
| linux-master | arch/mips/jazz/reset.c |
/*
* Setup pointers to hardware-dependent routines.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1997, 1998, 2001, 07, 08 by Ralf Baechle
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2007 by Thomas Bogendoerfer
*/
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
#include <asm/tlbmisc.h>
extern asmlinkage void jazz_handle_int(void);
extern void jazz_machine_restart(char *command);
static struct resource jazz_io_resources[] = {
{
.start = 0x00,
.end = 0x1f,
.name = "dma1",
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0x40,
.end = 0x5f,
.name = "timer",
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0x80,
.end = 0x8f,
.name = "dma page reg",
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}, {
.start = 0xc0,
.end = 0xdf,
.name = "dma2",
.flags = IORESOURCE_IO | IORESOURCE_BUSY
}
};
void __init plat_mem_setup(void)
{
int i;
/* Map 0xe0000000 -> 0x0:800005C0, 0xe0010000 -> 0x1:30000580 */
add_wired_entry(0x02000017, 0x03c00017, 0xe0000000, PM_64K);
/* Map 0xe2000000 -> 0x0:900005C0, 0xe3010000 -> 0x0:910005C0 */
add_wired_entry(0x02400017, 0x02440017, 0xe2000000, PM_16M);
/* Map 0xe4000000 -> 0x0:600005C0, 0xe4100000 -> 400005C0 */
add_wired_entry(0x01800017, 0x01000017, 0xe4000000, PM_4M);
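	/*
	 * A sketch of the EntryLo encoding used above (derived from the
	 * MIPS TLB format, not from the original comments): the PFN
	 * starts at bit 6, bits 5:3 are the cache attribute (2 =
	 * uncached) and bits 2:0 are dirty/valid/global.  0x02000017
	 * therefore maps physical 0x80000000 uncached, writable, global.
	 */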
set_io_port_base(JAZZ_PORT_BASE);
#ifdef CONFIG_EISA
EISA_bus = 1;
#endif
/* request I/O space for devices used on all i[345]86 PCs */
for (i = 0; i < ARRAY_SIZE(jazz_io_resources); i++)
request_resource(&ioport_resource, jazz_io_resources + i);
/* The RTC is outside the port address space */
_machine_restart = jazz_machine_restart;
#ifdef CONFIG_VT
screen_info = (struct screen_info) {
.orig_video_cols = 160,
.orig_video_lines = 64,
.orig_video_points = 16,
};
#endif
add_preferred_console("ttyS", 0, "9600");
}
#ifdef CONFIG_OLIVETTI_M700
#define UART_CLK 1843200
#else
/*
 * Some Jazz machines seem to have an 8 MHz crystal clock but I don't know
 * exactly which ones ... XXX
 */
#define UART_CLK (8000000 / 16) /* ( 3072000 / 16) */
#endif
#define MEMPORT(_base, _irq) \
{ \
.mapbase = (_base), \
.membase = (void *)(_base), \
.irq = (_irq), \
.uartclk = UART_CLK, \
.iotype = UPIO_MEM, \
.flags = UPF_BOOT_AUTOCONF, \
}
static struct plat_serial8250_port jazz_serial_data[] = {
MEMPORT(JAZZ_SERIAL1_BASE, JAZZ_SERIAL1_IRQ),
MEMPORT(JAZZ_SERIAL2_BASE, JAZZ_SERIAL2_IRQ),
{ },
};
static struct platform_device jazz_serial8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = jazz_serial_data,
},
};
static struct resource jazz_esp_rsrc[] = {
{
.start = JAZZ_SCSI_BASE,
.end = JAZZ_SCSI_BASE + 31,
.flags = IORESOURCE_MEM
},
{
.start = JAZZ_SCSI_DMA,
.end = JAZZ_SCSI_DMA,
.flags = IORESOURCE_MEM
},
{
.start = JAZZ_SCSI_IRQ,
.end = JAZZ_SCSI_IRQ,
.flags = IORESOURCE_IRQ
}
};
static u64 jazz_esp_dma_mask = DMA_BIT_MASK(32);
static struct platform_device jazz_esp_pdev = {
.name = "jazz_esp",
.num_resources = ARRAY_SIZE(jazz_esp_rsrc),
.resource = jazz_esp_rsrc,
.dev = {
.dma_mask = &jazz_esp_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
}
};
static struct resource jazz_sonic_rsrc[] = {
{
.start = JAZZ_ETHERNET_BASE,
.end = JAZZ_ETHERNET_BASE + 0xff,
.flags = IORESOURCE_MEM
},
{
.start = JAZZ_ETHERNET_IRQ,
.end = JAZZ_ETHERNET_IRQ,
.flags = IORESOURCE_IRQ
}
};
static u64 jazz_sonic_dma_mask = DMA_BIT_MASK(32);
static struct platform_device jazz_sonic_pdev = {
.name = "jazzsonic",
.num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
.resource = jazz_sonic_rsrc,
.dev = {
.dma_mask = &jazz_sonic_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
}
};
static struct resource jazz_cmos_rsrc[] = {
{
.start = 0x70,
.end = 0x71,
.flags = IORESOURCE_IO
},
{
.start = 8,
.end = 8,
.flags = IORESOURCE_IRQ
}
};
static struct platform_device jazz_cmos_pdev = {
.name = "rtc_cmos",
.num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
.resource = jazz_cmos_rsrc
};
static struct platform_device pcspeaker_pdev = {
.name = "pcspkr",
.id = -1,
};
static int __init jazz_setup_devinit(void)
{
platform_device_register(&jazz_serial8250_device);
platform_device_register(&jazz_esp_pdev);
platform_device_register(&jazz_sonic_pdev);
platform_device_register(&jazz_cmos_pdev);
platform_device_register(&pcspeaker_pdev);
return 0;
}
device_initcall(jazz_setup_devinit);
| linux-master | arch/mips/jazz/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BRIEF MODULE DESCRIPTION
* MyCable XXS1500 board support
*
* Copyright 2003, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
{
return "XXS1500";
}
void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
static void xxs1500_reset(char *c)
{
/* Jump to the reset vector */
__asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000));
}
static void xxs1500_power_off(void)
{
while (1)
asm volatile (
" .set mips32 \n"
" wait \n"
" .set mips0 \n");
}
void __init board_setup(void)
{
u32 pin_func;
pm_power_off = xxs1500_power_off;
_machine_halt = xxs1500_power_off;
_machine_restart = xxs1500_reset;
alchemy_gpio1_input_enable();
alchemy_gpio2_enable();
	/* Set the multiple-use pins (UART3/GPIO) to the UART function */
pin_func = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PF_UR3;
pin_func |= SYS_PF_UR3;
alchemy_wrsys(pin_func, AU1000_SYS_PINFUNC);
/* Enable UART */
alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
/* Enable DTR (MCR bit 0) = USB power up */
__raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18));
wmb();
}
/******************************************************************************/
static struct resource xxs1500_pcmcia_res[] = {
{
.name = "pcmcia-io",
.flags = IORESOURCE_MEM,
.start = AU1000_PCMCIA_IO_PHYS_ADDR,
.end = AU1000_PCMCIA_IO_PHYS_ADDR + 0x000400000 - 1,
},
{
.name = "pcmcia-attr",
.flags = IORESOURCE_MEM,
.start = AU1000_PCMCIA_ATTR_PHYS_ADDR,
.end = AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
},
{
.name = "pcmcia-mem",
.flags = IORESOURCE_MEM,
.start = AU1000_PCMCIA_MEM_PHYS_ADDR,
.end = AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
},
};
static struct platform_device xxs1500_pcmcia_dev = {
.name = "xxs1500_pcmcia",
.id = -1,
.num_resources = ARRAY_SIZE(xxs1500_pcmcia_res),
.resource = xxs1500_pcmcia_res,
};
static struct platform_device *xxs1500_devs[] __initdata = {
&xxs1500_pcmcia_dev,
};
static int __init xxs1500_dev_init(void)
{
irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_HIGH);
irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO207_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO0_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO1_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO2_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO3_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO4_INT, IRQ_TYPE_LEVEL_LOW); /* CF irq */
irq_set_irq_type(AU1500_GPIO5_INT, IRQ_TYPE_LEVEL_LOW);
return platform_add_devices(xxs1500_devs,
ARRAY_SIZE(xxs1500_devs));
}
device_initcall(xxs1500_dev_init);
| linux-master | arch/mips/alchemy/board-xxs1500.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GPR board platform device registration (Au1550)
*
* Copyright (C) 2010 Wolfgang Grandegger <[email protected]>
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-gpio.h>
#include <linux/gpio/machine.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
{
return "GPR";
}
void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
static void gpr_reset(char *c)
{
/* switch System-LED to orange (red# and green# on) */
alchemy_gpio_direction_output(4, 0);
alchemy_gpio_direction_output(5, 0);
/* trigger watchdog to reset board in 200ms */
printk(KERN_EMERG "Triggering watchdog soft reset...\n");
raw_local_irq_disable();
alchemy_gpio_direction_output(1, 0);
udelay(1);
alchemy_gpio_set_value(1, 1);
while (1)
cpu_wait();
}
static void gpr_power_off(void)
{
while (1)
cpu_wait();
}
void __init board_setup(void)
{
printk(KERN_INFO "Trapeze ITS GPR board\n");
pm_power_off = gpr_power_off;
_machine_halt = gpr_power_off;
_machine_restart = gpr_reset;
/* Enable UART1/3 */
alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
alchemy_uart_enable(AU1000_UART1_PHYS_ADDR);
/* Take away Reset of UMTS-card */
alchemy_gpio_direction_output(215, 1);
}
/*
* Watchdog
*/
static struct resource gpr_wdt_resource[] = {
[0] = {
.start = 1,
.end = 1,
.name = "gpr-adm6320-wdt",
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device gpr_wdt_device = {
.name = "adm6320-wdt",
.id = 0,
.num_resources = ARRAY_SIZE(gpr_wdt_resource),
.resource = gpr_wdt_resource,
};
/*
* FLASH
*
* 0x00000000-0x00200000 : "kernel"
* 0x00200000-0x00a00000 : "rootfs"
* 0x01d00000-0x01f00000 : "config"
* 0x01c00000-0x01d00000 : "yamon"
* 0x01d00000-0x01d40000 : "yamon env vars"
* 0x00000000-0x00a00000 : "kernel+rootfs"
*/
static struct mtd_partition gpr_mtd_partitions[] = {
{
.name = "kernel",
.size = 0x00200000,
.offset = 0,
},
{
.name = "rootfs",
.size = 0x00800000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "config",
.size = 0x00200000,
.offset = 0x01d00000,
},
{
.name = "yamon",
.size = 0x00100000,
.offset = 0x01c00000,
},
{
.name = "yamon env vars",
.size = 0x00040000,
.offset = MTDPART_OFS_APPEND,
},
{
.name = "kernel+rootfs",
.size = 0x00a00000,
.offset = 0,
},
};
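/*
 * Note that "config" (0x01d00000-0x01f00000) overlaps "yamon env vars"
 * (0x01d00000-0x01d40000) in the layout above; the MTD core tolerates
 * overlapping partitions, so both windows are exposed as listed.
 */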
static struct physmap_flash_data gpr_flash_data = {
.width = 4,
.nr_parts = ARRAY_SIZE(gpr_mtd_partitions),
.parts = gpr_mtd_partitions,
};
static struct resource gpr_mtd_resource = {
.start = 0x1e000000,
.end = 0x1fffffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device gpr_mtd_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &gpr_flash_data,
},
.num_resources = 1,
.resource = &gpr_mtd_resource,
};
/*
* LEDs
*/
static const struct gpio_led gpr_gpio_leds[] = {
{ /* green */
.name = "gpr:green",
.gpio = 4,
.active_low = 1,
},
{ /* red */
.name = "gpr:red",
.gpio = 5,
.active_low = 1,
}
};
static struct gpio_led_platform_data gpr_led_data = {
.num_leds = ARRAY_SIZE(gpr_gpio_leds),
.leds = gpr_gpio_leds,
};
static struct platform_device gpr_led_devices = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &gpr_led_data,
}
};
/*
* I2C
*/
static struct gpiod_lookup_table gpr_i2c_gpiod_table = {
.dev_id = "i2c-gpio",
.table = {
/*
* This should be on "GPIO2" which has base at 200 so
* the global numbers 209 and 210 should correspond to
* local offsets 9 and 10.
*/
GPIO_LOOKUP_IDX("alchemy-gpio2", 9, NULL, 0,
GPIO_ACTIVE_HIGH),
GPIO_LOOKUP_IDX("alchemy-gpio2", 10, NULL, 1,
GPIO_ACTIVE_HIGH),
},
};
static struct i2c_gpio_platform_data gpr_i2c_data = {
/*
* The open drain mode is hardwired somewhere or an electrical
* property of the alchemy GPIO controller.
*/
.sda_is_open_drain = 1,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.timeout = HZ,
};
static struct platform_device gpr_i2c_device = {
.name = "i2c-gpio",
.id = -1,
.dev.platform_data = &gpr_i2c_data,
};
static struct i2c_board_info gpr_i2c_info[] __initdata = {
{
I2C_BOARD_INFO("lm83", 0x18),
}
};
static struct resource alchemy_pci_host_res[] = {
[0] = {
.start = AU1500_PCI_PHYS_ADDR,
.end = AU1500_PCI_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static int gpr_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
if ((slot == 0) && (pin == 1))
return AU1550_PCI_INTA;
else if ((slot == 0) && (pin == 2))
return AU1550_PCI_INTB;
return 0xff;
}
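/*
 * Pins use standard PCI numbering (1 = INTA ... 4 = INTD); 0xff marks
 * a pin with no interrupt routed (same convention as the MTX-1 table
 * in board-mtx1.c).
 */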
static struct alchemy_pci_platdata gpr_pci_pd = {
.board_map_irq = gpr_map_pci_irq,
.pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H |
PCI_CONFIG_CH |
#if defined(__MIPSEB__)
PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM,
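		       /* (presumably: HW byte swapping of PCI data on BE kernels) */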
#else
0,
#endif
};
static struct platform_device gpr_pci_host_dev = {
.dev.platform_data = &gpr_pci_pd,
.name = "alchemy-pci",
.id = 0,
.num_resources = ARRAY_SIZE(alchemy_pci_host_res),
.resource = alchemy_pci_host_res,
};
static struct platform_device *gpr_devices[] __initdata = {
&gpr_wdt_device,
&gpr_mtd_device,
&gpr_i2c_device,
&gpr_led_devices,
};
static int __init gpr_pci_init(void)
{
return platform_device_register(&gpr_pci_host_dev);
}
/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
arch_initcall(gpr_pci_init);
static int __init gpr_dev_init(void)
{
gpiod_add_lookup_table(&gpr_i2c_gpiod_table);
i2c_register_board_info(0, gpr_i2c_info, ARRAY_SIZE(gpr_i2c_info));
return platform_add_devices(gpr_devices, ARRAY_SIZE(gpr_devices));
}
device_initcall(gpr_dev_init);
| linux-master | arch/mips/alchemy/board-gpr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MTX-1 platform devices registration (Au1500)
*
* Copyright (C) 2007-2009, Florian Fainelli <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <mtd/mtd-abi.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
#include <prom.h>
const char *get_system_type(void)
{
return "MTX-1";
}
void prom_putchar(char c)
{
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
static void mtx1_reset(char *c)
{
/* Jump to the reset vector */
__asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000));
}
static void mtx1_power_off(void)
{
while (1)
asm volatile (
" .set mips32 \n"
" wait \n"
" .set mips0 \n");
}
void __init board_setup(void)
{
#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* Enable USB power switch */
alchemy_gpio_direction_output(204, 0);
#endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */
/* Initialize sys_pinfunc */
alchemy_wrsys(SYS_PF_NI2, AU1000_SYS_PINFUNC);
/* Initialize GPIO */
alchemy_wrsys(~0, AU1000_SYS_TRIOUTCLR);
alchemy_gpio_direction_output(0, 0); /* Disable M66EN (PCI 66MHz) */
alchemy_gpio_direction_output(3, 1); /* Disable PCI CLKRUN# */
alchemy_gpio_direction_output(1, 1); /* Enable EXT_IO3 */
alchemy_gpio_direction_output(5, 0); /* Disable eth PHY TX_ER */
/* Enable LED and set it to green */
alchemy_gpio_direction_output(211, 1); /* green on */
alchemy_gpio_direction_output(212, 0); /* red off */
pm_power_off = mtx1_power_off;
_machine_halt = mtx1_power_off;
_machine_restart = mtx1_reset;
printk(KERN_INFO "4G Systems MTX-1 Board\n");
}
/******************************************************************************/
static struct gpio_keys_button mtx1_gpio_button[] = {
{
.gpio = 207,
.code = BTN_0,
.desc = "System button",
}
};
static struct gpio_keys_platform_data mtx1_buttons_data = {
.buttons = mtx1_gpio_button,
.nbuttons = ARRAY_SIZE(mtx1_gpio_button),
};
static struct platform_device mtx1_button = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &mtx1_buttons_data,
}
};
static struct gpiod_lookup_table mtx1_wdt_gpio_table = {
.dev_id = "mtx1-wdt.0",
.table = {
/* Global number 215 is offset 15 on Alchemy GPIO 2 */
GPIO_LOOKUP("alchemy-gpio2", 15, NULL, GPIO_ACTIVE_HIGH),
{ },
},
};
static struct platform_device mtx1_wdt = {
.name = "mtx1-wdt",
.id = 0,
};
static const struct gpio_led default_leds[] = {
{
.name = "mtx1:green",
.gpio = 211,
}, {
.name = "mtx1:red",
.gpio = 212,
},
};
static struct gpio_led_platform_data mtx1_led_data = {
.num_leds = ARRAY_SIZE(default_leds),
.leds = default_leds,
};
static struct platform_device mtx1_gpio_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &mtx1_led_data,
}
};
static struct mtd_partition mtx1_mtd_partitions[] = {
{
.name = "filesystem",
.size = 0x01C00000,
.offset = 0,
},
{
.name = "yamon",
.size = 0x00100000,
.offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "kernel",
.size = 0x002c0000,
.offset = MTDPART_OFS_APPEND,
},
{
.name = "yamon env",
.size = 0x00040000,
.offset = MTDPART_OFS_APPEND,
},
};
static struct physmap_flash_data mtx1_flash_data = {
.width = 4,
.nr_parts = 4,
.parts = mtx1_mtd_partitions,
};
static struct resource mtx1_mtd_resource = {
.start = 0x1e000000,
.end = 0x1fffffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device mtx1_mtd = {
.name = "physmap-flash",
.dev = {
.platform_data = &mtx1_flash_data,
},
.num_resources = 1,
.resource = &mtx1_mtd_resource,
};
static struct resource alchemy_pci_host_res[] = {
[0] = {
.start = AU1500_PCI_PHYS_ADDR,
.end = AU1500_PCI_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static int mtx1_pci_idsel(unsigned int devsel, int assert)
{
/* This function is only necessary to support a proprietary Cardbus
* adapter on the mtx-1 "singleboard" variant. It triggers a custom
* logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
*/
udelay(1);
if (assert && devsel != 0)
/* Suppress signal to Cardbus */
alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
else
alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */
udelay(1);
return 1;
}
static const char mtx1_irqtab[][5] = {
[0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */
[1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */
[2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */
[3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */
[4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */
[5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */
[6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */
[7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */
};
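/*
 * Column 0 of each row is a placeholder: map_irq() below is handed the
 * PCI pin in its standard 1..4 (INTA..INTD) encoding, so index 0 is
 * never dereferenced.
 */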
static int mtx1_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
return mtx1_irqtab[slot][pin];
}
static struct alchemy_pci_platdata mtx1_pci_pd = {
.board_map_irq = mtx1_map_pci_irq,
.board_pci_idsel = mtx1_pci_idsel,
.pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H |
PCI_CONFIG_CH |
#if defined(__MIPSEB__)
PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM,
#else
0,
#endif
};
static struct platform_device mtx1_pci_host = {
.dev.platform_data = &mtx1_pci_pd,
.name = "alchemy-pci",
.id = 0,
.num_resources = ARRAY_SIZE(alchemy_pci_host_res),
.resource = alchemy_pci_host_res,
};
static struct platform_device *mtx1_devs[] __initdata = {
&mtx1_pci_host,
&mtx1_gpio_leds,
&mtx1_wdt,
&mtx1_button,
&mtx1_mtd,
};
static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
.phy_search_highest_addr = 1,
.phy1_search_mac0 = 1,
};
static int __init mtx1_register_devices(void)
{
int rc;
irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_HIGH);
irq_set_irq_type(AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
rc = gpio_request(mtx1_gpio_button[0].gpio,
mtx1_gpio_button[0].desc);
if (rc < 0) {
printk(KERN_INFO "mtx1: failed to request %d\n",
mtx1_gpio_button[0].gpio);
goto out;
}
gpio_direction_input(mtx1_gpio_button[0].gpio);
out:
gpiod_add_lookup_table(&mtx1_wdt_gpio_table);
return platform_add_devices(mtx1_devs, ARRAY_SIZE(mtx1_devs));
}
arch_initcall(mtx1_register_devices);
| linux-master | arch/mips/alchemy/board-mtx1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alchemy DB/PB1xxx board support.
*/
#include <asm/prom.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
int __init db1000_board_setup(void);
int __init db1000_dev_setup(void);
int __init db1500_pci_setup(void);
int __init db1200_board_setup(void);
int __init db1200_dev_setup(void);
int __init db1300_board_setup(void);
int __init db1300_dev_setup(void);
int __init db1550_board_setup(void);
int __init db1550_dev_setup(void);
int __init db1550_pci_setup(int);
static const char *board_type_str(void)
{
switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
case BCSR_WHOAMI_DB1000:
return "DB1000";
case BCSR_WHOAMI_DB1500:
return "DB1500";
case BCSR_WHOAMI_DB1100:
return "DB1100";
case BCSR_WHOAMI_PB1500:
case BCSR_WHOAMI_PB1500R2:
return "PB1500";
case BCSR_WHOAMI_PB1100:
return "PB1100";
case BCSR_WHOAMI_PB1200_DDR1:
case BCSR_WHOAMI_PB1200_DDR2:
return "PB1200";
case BCSR_WHOAMI_DB1200:
return "DB1200";
case BCSR_WHOAMI_DB1300:
return "DB1300";
case BCSR_WHOAMI_DB1550:
return "DB1550";
case BCSR_WHOAMI_PB1550_SDR:
case BCSR_WHOAMI_PB1550_DDR:
return "PB1550";
default:
return "(unknown)";
}
}
const char *get_system_type(void)
{
return board_type_str();
}
void __init board_setup(void)
{
int ret;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
ret = db1000_board_setup();
break;
case ALCHEMY_CPU_AU1550:
ret = db1550_board_setup();
break;
case ALCHEMY_CPU_AU1200:
ret = db1200_board_setup();
break;
case ALCHEMY_CPU_AU1300:
ret = db1300_board_setup();
break;
default:
pr_err("unsupported CPU on board\n");
ret = -ENODEV;
}
if (ret)
panic("cannot initialize board support");
}
static int __init db1xxx_arch_init(void)
{
int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
if (id == BCSR_WHOAMI_DB1550)
return db1550_pci_setup(0);
else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
(id == BCSR_WHOAMI_PB1550_DDR))
return db1550_pci_setup(1);
else if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
(id == BCSR_WHOAMI_PB1500R2))
return db1500_pci_setup();
return 0;
}
arch_initcall(db1xxx_arch_init);
static int __init db1xxx_dev_init(void)
{
mips_set_machine_name(board_type_str());
switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
case BCSR_WHOAMI_DB1000:
case BCSR_WHOAMI_DB1500:
case BCSR_WHOAMI_DB1100:
case BCSR_WHOAMI_PB1500:
case BCSR_WHOAMI_PB1500R2:
case BCSR_WHOAMI_PB1100:
return db1000_dev_setup();
case BCSR_WHOAMI_PB1200_DDR1:
case BCSR_WHOAMI_PB1200_DDR2:
case BCSR_WHOAMI_DB1200:
return db1200_dev_setup();
case BCSR_WHOAMI_DB1300:
return db1300_dev_setup();
case BCSR_WHOAMI_DB1550:
case BCSR_WHOAMI_PB1550_SDR:
case BCSR_WHOAMI_PB1550_DDR:
return db1550_dev_setup();
}
return 0;
}
device_initcall(db1xxx_dev_init);
| linux-master | arch/mips/alchemy/devboards/db1xxx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DBAu1000/1500/1100 PBAu1100/1500 board support
*
* Copyright 2000, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
#include <linux/spi/ads7846.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-db1x00/bcsr.h>
#include <asm/reboot.h>
#include <prom.h>
#include "platform.h"
#define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
const char *get_system_type(void);
int __init db1000_board_setup(void)
{
/* initialize board register space */
bcsr_init(DB1000_BCSR_PHYS_ADDR,
DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
case BCSR_WHOAMI_DB1000:
case BCSR_WHOAMI_DB1500:
case BCSR_WHOAMI_DB1100:
case BCSR_WHOAMI_PB1500:
case BCSR_WHOAMI_PB1500R2:
case BCSR_WHOAMI_PB1100:
pr_info("AMD Alchemy %s Board\n", get_system_type());
return 0;
}
return -ENODEV;
}
static int db1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
if ((slot < 12) || (slot > 13) || pin == 0)
return -1;
if (slot == 12)
return (pin == 1) ? AU1500_PCI_INTA : 0xff;
if (slot == 13) {
switch (pin) {
case 1: return AU1500_PCI_INTA;
case 2: return AU1500_PCI_INTB;
case 3: return AU1500_PCI_INTC;
case 4: return AU1500_PCI_INTD;
}
}
return -1;
}
static u64 au1xxx_all_dmamask = DMA_BIT_MASK(32);
static struct resource alchemy_pci_host_res[] = {
[0] = {
.start = AU1500_PCI_PHYS_ADDR,
.end = AU1500_PCI_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static struct alchemy_pci_platdata db1500_pci_pd = {
.board_map_irq = db1500_map_pci_irq,
};
static struct platform_device db1500_pci_host_dev = {
.dev.platform_data = &db1500_pci_pd,
.name = "alchemy-pci",
.id = 0,
.num_resources = ARRAY_SIZE(alchemy_pci_host_res),
.resource = alchemy_pci_host_res,
};
int __init db1500_pci_setup(void)
{
return platform_device_register(&db1500_pci_host_dev);
}
static struct resource au1100_lcd_resources[] = {
[0] = {
.start = AU1100_LCD_PHYS_ADDR,
.end = AU1100_LCD_PHYS_ADDR + 0x800 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1100_LCD_INT,
.end = AU1100_LCD_INT,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device au1100_lcd_device = {
.name = "au1100-lcd",
.id = 0,
.dev = {
.dma_mask = &au1xxx_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(au1100_lcd_resources),
.resource = au1100_lcd_resources,
};
static struct resource alchemy_ac97c_res[] = {
[0] = {
.start = AU1000_AC97_PHYS_ADDR,
.end = AU1000_AC97_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DMA_ID_AC97C_TX,
.end = DMA_ID_AC97C_TX,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = DMA_ID_AC97C_RX,
.end = DMA_ID_AC97C_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device alchemy_ac97c_dev = {
.name = "alchemy-ac97c",
.id = -1,
.resource = alchemy_ac97c_res,
.num_resources = ARRAY_SIZE(alchemy_ac97c_res),
};
static struct platform_device alchemy_ac97c_dma_dev = {
.name = "alchemy-pcm-dma",
.id = 0,
};
static struct platform_device db1x00_codec_dev = {
.name = "ac97-codec",
.id = -1,
};
static struct platform_device db1x00_audio_dev = {
.name = "db1000-audio",
.dev = {
.dma_mask = &au1xxx_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/******************************************************************************/
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
return IRQ_HANDLED;
}
static int db1100_mmc_cd_setup(void *mmc_host, int en)
{
int ret = 0, irq;
if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
irq = AU1100_GPIO19_INT;
else
irq = AU1100_GPIO14_INT; /* PB1100 SD0 CD# */
if (en) {
irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
ret = request_irq(irq, db1100_mmc_cd, 0,
"sd0_cd", mmc_host);
	} else {
		free_irq(irq, mmc_host);
	}
return ret;
}
static int db1100_mmc1_cd_setup(void *mmc_host, int en)
{
int ret = 0, irq;
if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
irq = AU1100_GPIO20_INT;
else
irq = AU1100_GPIO15_INT; /* PB1100 SD1 CD# */
if (en) {
irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
ret = request_irq(irq, db1100_mmc_cd, 0,
"sd1_cd", mmc_host);
	} else {
		free_irq(irq, mmc_host);
	}
return ret;
}
static int db1100_mmc_card_readonly(void *mmc_host)
{
/* testing suggests that this bit is inverted */
return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 0 : 1;
}
static int db1100_mmc_card_inserted(void *mmc_host)
{
return !alchemy_gpio_get_value(19);
}
static void db1100_mmc_set_power(void *mmc_host, int state)
{
int bit;
if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
bit = BCSR_BOARD_SD0PWR;
else
bit = BCSR_BOARD_PB1100_SD0PWR;
if (state) {
bcsr_mod(BCSR_BOARD, 0, bit);
msleep(400); /* stabilization time */
	} else {
		bcsr_mod(BCSR_BOARD, bit, 0);
	}
}
static void db1100_mmcled_set(struct led_classdev *led, enum led_brightness b)
{
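	/*
	 * The BCSR LED bits appear to be active-low (assuming
	 * bcsr_mod(reg, clear, set) semantics): clearing a bit
	 * lights the LED.
	 */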
if (b != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}
static struct led_classdev db1100_mmc_led = {
.brightness_set = db1100_mmcled_set,
};
static int db1100_mmc1_card_readonly(void *mmc_host)
{
return (bcsr_read(BCSR_BOARD) & BCSR_BOARD_SD1WP) ? 1 : 0;
}
static int db1100_mmc1_card_inserted(void *mmc_host)
{
return !alchemy_gpio_get_value(20);
}
static void db1100_mmc1_set_power(void *mmc_host, int state)
{
int bit;
if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1100)
bit = BCSR_BOARD_SD1PWR;
else
bit = BCSR_BOARD_PB1100_SD1PWR;
if (state) {
bcsr_mod(BCSR_BOARD, 0, bit);
msleep(400); /* stabilization time */
	} else {
		bcsr_mod(BCSR_BOARD, bit, 0);
	}
}
static void db1100_mmc1led_set(struct led_classdev *led, enum led_brightness b)
{
if (b != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
}
static struct led_classdev db1100_mmc1_led = {
.brightness_set = db1100_mmc1led_set,
};
static struct au1xmmc_platform_data db1100_mmc_platdata[2] = {
[0] = {
.cd_setup = db1100_mmc_cd_setup,
.set_power = db1100_mmc_set_power,
.card_inserted = db1100_mmc_card_inserted,
.card_readonly = db1100_mmc_card_readonly,
.led = &db1100_mmc_led,
},
[1] = {
.cd_setup = db1100_mmc1_cd_setup,
.set_power = db1100_mmc1_set_power,
.card_inserted = db1100_mmc1_card_inserted,
.card_readonly = db1100_mmc1_card_readonly,
.led = &db1100_mmc1_led,
},
};
static struct resource au1100_mmc0_resources[] = {
[0] = {
.start = AU1100_SD0_PHYS_ADDR,
.end = AU1100_SD0_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1100_SD_INT,
.end = AU1100_SD_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = DMA_ID_SD0_TX,
.end = DMA_ID_SD0_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = DMA_ID_SD0_RX,
.end = DMA_ID_SD0_RX,
.flags = IORESOURCE_DMA,
}
};
static struct platform_device db1100_mmc0_dev = {
.name = "au1xxx-mmc",
.id = 0,
.dev = {
.dma_mask = &au1xxx_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1100_mmc_platdata[0],
},
.num_resources = ARRAY_SIZE(au1100_mmc0_resources),
.resource = au1100_mmc0_resources,
};
static struct resource au1100_mmc1_res[] = {
[0] = {
.start = AU1100_SD1_PHYS_ADDR,
.end = AU1100_SD1_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1100_SD_INT,
.end = AU1100_SD_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = DMA_ID_SD1_TX,
.end = DMA_ID_SD1_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = DMA_ID_SD1_RX,
.end = DMA_ID_SD1_RX,
.flags = IORESOURCE_DMA,
}
};
static struct platform_device db1100_mmc1_dev = {
.name = "au1xxx-mmc",
.id = 1,
.dev = {
.dma_mask = &au1xxx_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1100_mmc_platdata[1],
},
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
/******************************************************************************/
static struct ads7846_platform_data db1100_touch_pd = {
.model = 7846,
.vref_mv = 3300,
};
static struct spi_gpio_platform_data db1100_spictl_pd = {
.num_chipselect = 1,
};
static struct gpiod_lookup_table db1100_touch_gpio_table = {
.dev_id = "spi0.0",
.table = {
GPIO_LOOKUP("alchemy-gpio2", 21,
"pendown", GPIO_ACTIVE_LOW),
{ }
},
};
static struct spi_board_info db1100_spi_info[] __initdata = {
[0] = {
.modalias = "ads7846",
.max_speed_hz = 3250000,
.bus_num = 0,
.chip_select = 0,
.mode = 0,
.irq = AU1100_GPIO21_INT,
.platform_data = &db1100_touch_pd,
},
};
static struct platform_device db1100_spi_dev = {
.name = "spi_gpio",
.id = 0,
.dev = {
.platform_data = &db1100_spictl_pd,
.dma_mask = &au1xxx_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/*
 * Alchemy GPIO2 has its base at 200, so the global GPIO lines
 * 207 thru 210 correspond to offsets 7 thru 10 on this chip.
 */
static struct gpiod_lookup_table db1100_spi_gpiod_table = {
.dev_id = "spi_gpio",
.table = {
GPIO_LOOKUP("alchemy-gpio2", 9,
"sck", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("alchemy-gpio2", 8,
"mosi", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("alchemy-gpio2", 7,
"miso", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("alchemy-gpio2", 10,
"cs", GPIO_ACTIVE_HIGH),
{ },
},
};
static struct platform_device *db1x00_devs[] = {
&db1x00_codec_dev,
&alchemy_ac97c_dma_dev,
&alchemy_ac97c_dev,
&db1x00_audio_dev,
};
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
&db1100_mmc0_dev,
&db1100_mmc1_dev,
};
int __init db1000_dev_setup(void)
{
int board = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
int c0, c1, d0, d1, s0, s1, flashsize = 32, twosocks = 1;
unsigned long pfc;
struct clk *c, *p;
if (board == BCSR_WHOAMI_DB1500) {
c0 = AU1500_GPIO2_INT;
c1 = AU1500_GPIO5_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO1_INT;
s1 = AU1500_GPIO4_INT;
} else if (board == BCSR_WHOAMI_DB1100) {
c0 = AU1100_GPIO2_INT;
c1 = AU1100_GPIO5_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
gpio_request(19, "sd0_cd");
gpio_request(20, "sd1_cd");
gpio_direction_input(19); /* sd0 cd# */
gpio_direction_input(20); /* sd1 cd# */
/* spi_gpio on SSI0 pins */
pfc = alchemy_rdsys(AU1000_SYS_PINFUNC);
pfc |= (1 << 0); /* SSI0 pins as GPIOs */
alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
gpiod_add_lookup_table(&db1100_touch_gpio_table);
spi_register_board_info(db1100_spi_info,
ARRAY_SIZE(db1100_spi_info));
/* link LCD clock to AUXPLL */
p = clk_get(NULL, "auxpll_clk");
c = clk_get(NULL, "lcd_intclk");
if (!IS_ERR(c) && !IS_ERR(p)) {
clk_set_parent(c, p);
clk_set_rate(c, clk_get_rate(p));
}
if (!IS_ERR(c))
clk_put(c);
if (!IS_ERR(p))
clk_put(p);
platform_add_devices(db1100_devs, ARRAY_SIZE(db1100_devs));
gpiod_add_lookup_table(&db1100_spi_gpiod_table);
platform_device_register(&db1100_spi_dev);
} else if (board == BCSR_WHOAMI_DB1000) {
c0 = AU1000_GPIO2_INT;
c1 = AU1000_GPIO5_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1000_GPIO1_INT;
s1 = AU1000_GPIO4_INT;
} else if ((board == BCSR_WHOAMI_PB1500) ||
(board == BCSR_WHOAMI_PB1500R2)) {
c0 = AU1500_GPIO203_INT;
d0 = 1; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO202_INT;
twosocks = 0;
flashsize = 64;
/* RTC and daughtercard irqs */
irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
* SRAM 1MB/2MB 0x1a000000
* DS1693 RTC 0x0c000000
*/
} else if (board == BCSR_WHOAMI_PB1100) {
c0 = AU1100_GPIO11_INT;
d0 = 9; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO10_INT;
twosocks = 0;
flashsize = 64;
/* pendown, rtc, daughtercard irqs */
irq_set_irq_type(AU1100_GPIO8_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1100_GPIO12_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1100_GPIO13_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
* SRAM 1MB/2MB 0x1a000000
* DiskOnChip 0x0d000000
* DS1693 RTC 0x0c000000
*/
platform_add_devices(db1100_devs, ARRAY_SIZE(db1100_devs));
	} else {
		return 0;	/* unknown board, no further dev setup to do */
	}
irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
c1, d1, /*s1*/0, 0, 1);
}
platform_add_devices(db1x00_devs, ARRAY_SIZE(db1x00_devs));
db1x_register_norflash(flashsize << 20, 4 /* 32bit */, F_SWAPPED);
return 0;
}
| linux-master | arch/mips/alchemy/devboards/db1000.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * devboard misc stuff.
*/
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/physmap.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
#include <prom.h>
void prom_putchar(char c)
{
if (alchemy_get_cputype() == ALCHEMY_CPU_AU1300)
alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
else
alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
static struct platform_device db1x00_rtc_dev = {
.name = "rtc-au1xxx",
.id = -1,
};
static void db1x_power_off(void)
{
bcsr_write(BCSR_RESETS, 0);
bcsr_write(BCSR_SYSTEM, BCSR_SYSTEM_PWROFF | BCSR_SYSTEM_RESET);
while (1) /* sit and spin */
cpu_wait();
}
static void db1x_reset(char *c)
{
bcsr_write(BCSR_RESETS, 0);
bcsr_write(BCSR_SYSTEM, 0);
}
static int __init db1x_late_setup(void)
{
if (!pm_power_off)
pm_power_off = db1x_power_off;
if (!_machine_halt)
_machine_halt = db1x_power_off;
if (!_machine_restart)
_machine_restart = db1x_reset;
platform_device_register(&db1x00_rtc_dev);
return 0;
}
device_initcall(db1x_late_setup);
/* register a pcmcia socket */
int __init db1x_register_pcmcia_socket(phys_addr_t pcmcia_attr_start,
phys_addr_t pcmcia_attr_end,
phys_addr_t pcmcia_mem_start,
phys_addr_t pcmcia_mem_end,
phys_addr_t pcmcia_io_start,
phys_addr_t pcmcia_io_end,
int card_irq,
int cd_irq,
int stschg_irq,
int eject_irq,
int id)
{
int cnt, i, ret;
struct resource *sr;
struct platform_device *pd;
cnt = 5;
if (eject_irq)
cnt++;
if (stschg_irq)
cnt++;
sr = kcalloc(cnt, sizeof(struct resource), GFP_KERNEL);
if (!sr)
return -ENOMEM;
pd = platform_device_alloc("db1xxx_pcmcia", id);
if (!pd) {
ret = -ENOMEM;
goto out;
}
sr[0].name = "pcmcia-attr";
sr[0].flags = IORESOURCE_MEM;
sr[0].start = pcmcia_attr_start;
sr[0].end = pcmcia_attr_end;
sr[1].name = "pcmcia-mem";
sr[1].flags = IORESOURCE_MEM;
sr[1].start = pcmcia_mem_start;
sr[1].end = pcmcia_mem_end;
sr[2].name = "pcmcia-io";
sr[2].flags = IORESOURCE_MEM;
sr[2].start = pcmcia_io_start;
sr[2].end = pcmcia_io_end;
sr[3].name = "insert";
sr[3].flags = IORESOURCE_IRQ;
sr[3].start = sr[3].end = cd_irq;
sr[4].name = "card";
sr[4].flags = IORESOURCE_IRQ;
sr[4].start = sr[4].end = card_irq;
i = 5;
if (stschg_irq) {
sr[i].name = "stschg";
sr[i].flags = IORESOURCE_IRQ;
sr[i].start = sr[i].end = stschg_irq;
i++;
}
if (eject_irq) {
sr[i].name = "eject";
sr[i].flags = IORESOURCE_IRQ;
sr[i].start = sr[i].end = eject_irq;
}
pd->resource = sr;
pd->num_resources = cnt;
ret = platform_device_add(pd);
if (!ret)
return 0;
platform_device_put(pd);
out:
kfree(sr);
return ret;
}
#define YAMON_SIZE 0x00100000
#define YAMON_ENV_SIZE 0x00040000
int __init db1x_register_norflash(unsigned long size, int width,
int swapped)
{
struct physmap_flash_data *pfd;
struct platform_device *pd;
struct mtd_partition *parts;
struct resource *res;
int ret, i;
if (size < (8 * 1024 * 1024))
return -EINVAL;
ret = -ENOMEM;
parts = kcalloc(5, sizeof(struct mtd_partition), GFP_KERNEL);
if (!parts)
goto out;
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (!res)
goto out1;
pfd = kzalloc(sizeof(struct physmap_flash_data), GFP_KERNEL);
if (!pfd)
goto out2;
pd = platform_device_alloc("physmap-flash", 0);
if (!pd)
goto out3;
/* NOR flash ends at 0x20000000, regardless of size */
res->start = 0x20000000 - size;
res->end = 0x20000000 - 1;
res->flags = IORESOURCE_MEM;
	/* Partition setup. Most develboards have a switch which allows
	 * swapping the physical locations of the two NOR flash banks.
	 */
i = 0;
if (!swapped) {
/* first NOR chip */
parts[i].offset = 0;
parts[i].name = "User FS";
parts[i].size = size / 2;
i++;
}
parts[i].offset = MTDPART_OFS_APPEND;
parts[i].name = "User FS 2";
parts[i].size = (size / 2) - (0x20000000 - 0x1fc00000);
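	/*
	 * 0x20000000 - 0x1fc00000 == 4 MB: the top of the second bank is
	 * given up to the YAMON/raw kernel/YAMON Env partitions laid out
	 * below.
	 */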
i++;
parts[i].offset = MTDPART_OFS_APPEND;
parts[i].name = "YAMON";
parts[i].size = YAMON_SIZE;
parts[i].mask_flags = MTD_WRITEABLE;
i++;
parts[i].offset = MTDPART_OFS_APPEND;
parts[i].name = "raw kernel";
parts[i].size = 0x00400000 - YAMON_SIZE - YAMON_ENV_SIZE;
i++;
parts[i].offset = MTDPART_OFS_APPEND;
parts[i].name = "YAMON Env";
parts[i].size = YAMON_ENV_SIZE;
parts[i].mask_flags = MTD_WRITEABLE;
i++;
if (swapped) {
parts[i].offset = MTDPART_OFS_APPEND;
parts[i].name = "User FS";
parts[i].size = size / 2;
i++;
}
pfd->width = width;
pfd->parts = parts;
pfd->nr_parts = 5;
pd->dev.platform_data = pfd;
pd->resource = res;
pd->num_resources = 1;
ret = platform_device_add(pd);
if (!ret)
return ret;
platform_device_put(pd);
out3:
kfree(pfd);
out2:
kfree(res);
out1:
kfree(parts);
out:
return ret;
}
| linux-master | arch/mips/alchemy/devboards/platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DBAu1200/PBAu1200 board platform device registration
*
* Copyright (C) 2008-2011 Manuel Lauss
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/smc91x.h>
#include <linux/ata_platform.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1200fb.h>
#include <asm/mach-au1x00/au1550_spi.h>
#include <asm/mach-db1x00/bcsr.h>
#include "platform.h"
#define BCSR_INT_IDE 0x0001
#define BCSR_INT_ETH 0x0002
#define BCSR_INT_PC0 0x0004
#define BCSR_INT_PC0STSCHG 0x0008
#define BCSR_INT_PC1 0x0010
#define BCSR_INT_PC1STSCHG 0x0020
#define BCSR_INT_DC 0x0040
#define BCSR_INT_FLASHBUSY 0x0080
#define BCSR_INT_PC0INSERT 0x0100
#define BCSR_INT_PC0EJECT 0x0200
#define BCSR_INT_PC1INSERT 0x0400
#define BCSR_INT_PC1EJECT 0x0800
#define BCSR_INT_SD0INSERT 0x1000
#define BCSR_INT_SD0EJECT 0x2000
#define BCSR_INT_SD1INSERT 0x4000
#define BCSR_INT_SD1EJECT 0x8000
#define DB1200_IDE_PHYS_ADDR 0x18800000
#define DB1200_IDE_REG_SHIFT 5
#define DB1200_IDE_PHYS_LEN (16 << DB1200_IDE_REG_SHIFT)
#define DB1200_ETH_PHYS_ADDR 0x19000300
#define DB1200_NAND_PHYS_ADDR 0x20000000
#define PB1200_IDE_PHYS_ADDR 0x0C800000
#define PB1200_ETH_PHYS_ADDR 0x0D000300
#define PB1200_NAND_PHYS_ADDR 0x1C000000
#define DB1200_INT_BEGIN (AU1000_MAX_INTR + 1)
#define DB1200_IDE_INT (DB1200_INT_BEGIN + 0)
#define DB1200_ETH_INT (DB1200_INT_BEGIN + 1)
#define DB1200_PC0_INT (DB1200_INT_BEGIN + 2)
#define DB1200_PC0_STSCHG_INT (DB1200_INT_BEGIN + 3)
#define DB1200_PC1_INT (DB1200_INT_BEGIN + 4)
#define DB1200_PC1_STSCHG_INT (DB1200_INT_BEGIN + 5)
#define DB1200_DC_INT (DB1200_INT_BEGIN + 6)
#define DB1200_FLASHBUSY_INT (DB1200_INT_BEGIN + 7)
#define DB1200_PC0_INSERT_INT (DB1200_INT_BEGIN + 8)
#define DB1200_PC0_EJECT_INT (DB1200_INT_BEGIN + 9)
#define DB1200_PC1_INSERT_INT (DB1200_INT_BEGIN + 10)
#define DB1200_PC1_EJECT_INT (DB1200_INT_BEGIN + 11)
#define DB1200_SD0_INSERT_INT (DB1200_INT_BEGIN + 12)
#define DB1200_SD0_EJECT_INT (DB1200_INT_BEGIN + 13)
#define PB1200_SD1_INSERT_INT (DB1200_INT_BEGIN + 14)
#define PB1200_SD1_EJECT_INT (DB1200_INT_BEGIN + 15)
#define DB1200_INT_END (DB1200_INT_BEGIN + 15)
const char *get_system_type(void);
static int __init db1200_detect_board(void)
{
int bid;
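	/*
	 * Probe for the BCSR by inverting the HEXLEDS register and
	 * checking whether the change sticks: if the readback differs
	 * from the original value, a writable CPLD is present at that
	 * address, so restore the old value and report success.
	 */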
/* try the DB1200 first */
bcsr_init(DB1200_BCSR_PHYS_ADDR,
DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
	if (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) == BCSR_WHOAMI_DB1200) {
unsigned short t = bcsr_read(BCSR_HEXLEDS);
bcsr_write(BCSR_HEXLEDS, ~t);
if (bcsr_read(BCSR_HEXLEDS) != t) {
bcsr_write(BCSR_HEXLEDS, t);
return 0;
}
}
/* okay, try the PB1200 then */
bcsr_init(PB1200_BCSR_PHYS_ADDR,
PB1200_BCSR_PHYS_ADDR + PB1200_BCSR_HEXLED_OFS);
bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
(bid == BCSR_WHOAMI_PB1200_DDR2)) {
unsigned short t = bcsr_read(BCSR_HEXLEDS);
bcsr_write(BCSR_HEXLEDS, ~t);
if (bcsr_read(BCSR_HEXLEDS) != t) {
bcsr_write(BCSR_HEXLEDS, t);
return 0;
}
}
return 1; /* it's neither */
}
int __init db1200_board_setup(void)
{
unsigned short whoami;
if (db1200_detect_board())
return -ENODEV;
whoami = bcsr_read(BCSR_WHOAMI);
switch (BCSR_WHOAMI_BOARD(whoami)) {
case BCSR_WHOAMI_PB1200_DDR1:
case BCSR_WHOAMI_PB1200_DDR2:
case BCSR_WHOAMI_DB1200:
break;
default:
return -ENODEV;
}
printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
" Board-ID %d Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
return 0;
}
/******************************************************************************/
static u64 au1200_all_dmamask = DMA_BIT_MASK(32);
static struct mtd_partition db1200_spiflash_parts[] = {
{
.name = "spi_flash",
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
static struct flash_platform_data db1200_spiflash_data = {
.name = "s25fl001",
.parts = db1200_spiflash_parts,
.nr_parts = ARRAY_SIZE(db1200_spiflash_parts),
.type = "m25p10",
};
static struct spi_board_info db1200_spi_devs[] __initdata = {
{
/* TI TMP121AIDBVR temp sensor */
.modalias = "tmp121",
.max_speed_hz = 2000000,
.bus_num = 0,
.chip_select = 0,
.mode = 0,
},
{
/* Spansion S25FL001D0FMA SPI flash */
.modalias = "m25p80",
.max_speed_hz = 50000000,
.bus_num = 0,
.chip_select = 1,
.mode = 0,
.platform_data = &db1200_spiflash_data,
},
};
static struct i2c_board_info db1200_i2c_devs[] __initdata = {
{ I2C_BOARD_INFO("24c04", 0x52), }, /* AT24C04-10 I2C eeprom */
{ I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */
{ I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec WM8731 */
};
/**********************************************************************/
static void au1200_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
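	/*
	 * On the Alchemy static bus the NAND CLE/ALE signals are wired to
	 * address lines, so selecting the _CMD/_ADDR/_DATA offsets within
	 * the chip's window picks the cycle type.
	 */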
if (ctrl & NAND_CLE) {
ioaddr += MEM_STNAND_CMD;
} else if (ctrl & NAND_ALE) {
ioaddr += MEM_STNAND_ADDR;
} else {
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
__raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
static int au1200_nand_device_ready(struct nand_chip *this)
{
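	/* bit 0 of MEM_STSTAT mirrors the NAND ready/busy line */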
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
static struct mtd_partition db1200_nand_parts[] = {
{
.name = "NAND FS 0",
.offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
struct platform_nand_data db1200_nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
.nr_partitions = ARRAY_SIZE(db1200_nand_parts),
.partitions = db1200_nand_parts,
.chip_delay = 20,
},
.ctrl = {
.dev_ready = au1200_nand_device_ready,
.cmd_ctrl = au1200_nand_cmd_ctrl,
},
};
static struct resource db1200_nand_res[] = {
[0] = {
.start = DB1200_NAND_PHYS_ADDR,
.end = DB1200_NAND_PHYS_ADDR + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device db1200_nand_dev = {
.name = "gen_nand",
.num_resources = ARRAY_SIZE(db1200_nand_res),
.resource = db1200_nand_res,
.id = -1,
.dev = {
.platform_data = &db1200_nand_platdata,
}
};
/**********************************************************************/
static struct smc91x_platdata db1200_eth_data = {
.flags = SMC91X_NOWAIT | SMC91X_USE_16BIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource db1200_eth_res[] = {
[0] = {
.start = DB1200_ETH_PHYS_ADDR,
.end = DB1200_ETH_PHYS_ADDR + 0xf,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1200_ETH_INT,
.end = DB1200_ETH_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device db1200_eth_dev = {
.dev = {
.platform_data = &db1200_eth_data,
},
.name = "smc91x",
.id = -1,
.num_resources = ARRAY_SIZE(db1200_eth_res),
.resource = db1200_eth_res,
};
/**********************************************************************/
static struct pata_platform_info db1200_ide_info = {
.ioport_shift = DB1200_IDE_REG_SHIFT,
};
#define IDE_ALT_START (14 << DB1200_IDE_REG_SHIFT)
static struct resource db1200_ide_res[] = {
[0] = {
.start = DB1200_IDE_PHYS_ADDR,
.end = DB1200_IDE_PHYS_ADDR + IDE_ALT_START - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1200_IDE_PHYS_ADDR + IDE_ALT_START,
.end = DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = DB1200_IDE_INT,
.end = DB1200_IDE_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device db1200_ide_dev = {
.name = "pata_platform",
.id = 0,
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1200_ide_info,
},
.num_resources = ARRAY_SIZE(db1200_ide_res),
.resource = db1200_ide_res,
};
/**********************************************************************/
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
* because the carddetect usually triggers twice, after debounce.
*/
static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
return IRQ_WAKE_THREAD;
}
static irqreturn_t db1200_mmc_cdfn(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == DB1200_SD0_INSERT_INT)
enable_irq(DB1200_SD0_EJECT_INT);
else
enable_irq(DB1200_SD0_INSERT_INT);
return IRQ_HANDLED;
}
static int db1200_mmc_cd_setup(void *mmc_host, int en)
{
int ret;
if (en) {
ret = request_threaded_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd,
db1200_mmc_cdfn, 0, "sd_insert", mmc_host);
if (ret)
goto out;
ret = request_threaded_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd,
db1200_mmc_cdfn, 0, "sd_eject", mmc_host);
if (ret) {
free_irq(DB1200_SD0_INSERT_INT, mmc_host);
goto out;
}
if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT)
enable_irq(DB1200_SD0_EJECT_INT);
else
enable_irq(DB1200_SD0_INSERT_INT);
} else {
free_irq(DB1200_SD0_INSERT_INT, mmc_host);
free_irq(DB1200_SD0_EJECT_INT, mmc_host);
}
ret = 0;
out:
return ret;
}
static void db1200_mmc_set_power(void *mmc_host, int state)
{
if (state) {
bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
msleep(400); /* stabilization time */
	} else {
		bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
	}
}
static int db1200_mmc_card_readonly(void *mmc_host)
{
return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 1 : 0;
}
static int db1200_mmc_card_inserted(void *mmc_host)
{
return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT) ? 1 : 0;
}
static void db1200_mmcled_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}
static struct led_classdev db1200_mmc_led = {
.brightness_set = db1200_mmcled_set,
};
/* -- */
static irqreturn_t pb1200_mmc1_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
return IRQ_WAKE_THREAD;
}
static irqreturn_t pb1200_mmc1_cdfn(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == PB1200_SD1_INSERT_INT)
enable_irq(PB1200_SD1_EJECT_INT);
else
enable_irq(PB1200_SD1_INSERT_INT);
return IRQ_HANDLED;
}
static int pb1200_mmc1_cd_setup(void *mmc_host, int en)
{
int ret;
if (en) {
ret = request_threaded_irq(PB1200_SD1_INSERT_INT, pb1200_mmc1_cd,
pb1200_mmc1_cdfn, 0, "sd1_insert", mmc_host);
if (ret)
goto out;
ret = request_threaded_irq(PB1200_SD1_EJECT_INT, pb1200_mmc1_cd,
pb1200_mmc1_cdfn, 0, "sd1_eject", mmc_host);
if (ret) {
free_irq(PB1200_SD1_INSERT_INT, mmc_host);
goto out;
}
if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT)
enable_irq(PB1200_SD1_EJECT_INT);
else
enable_irq(PB1200_SD1_INSERT_INT);
} else {
free_irq(PB1200_SD1_INSERT_INT, mmc_host);
free_irq(PB1200_SD1_EJECT_INT, mmc_host);
}
ret = 0;
out:
return ret;
}
static void pb1200_mmc1led_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
}
static struct led_classdev pb1200_mmc1_led = {
.brightness_set = pb1200_mmc1led_set,
};
static void pb1200_mmc1_set_power(void *mmc_host, int state)
{
if (state) {
bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD1PWR);
msleep(400); /* stabilization time */
	} else {
		bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD1PWR, 0);
	}
}
static int pb1200_mmc1_card_readonly(void *mmc_host)
{
return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD1WP) ? 1 : 0;
}
static int pb1200_mmc1_card_inserted(void *mmc_host)
{
return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT) ? 1 : 0;
}
static struct au1xmmc_platform_data db1200_mmc_platdata[2] = {
[0] = {
.cd_setup = db1200_mmc_cd_setup,
.set_power = db1200_mmc_set_power,
.card_inserted = db1200_mmc_card_inserted,
.card_readonly = db1200_mmc_card_readonly,
.led = &db1200_mmc_led,
},
[1] = {
.cd_setup = pb1200_mmc1_cd_setup,
.set_power = pb1200_mmc1_set_power,
.card_inserted = pb1200_mmc1_card_inserted,
.card_readonly = pb1200_mmc1_card_readonly,
.led = &pb1200_mmc1_led,
},
};
static struct resource au1200_mmc0_resources[] = {
[0] = {
.start = AU1100_SD0_PHYS_ADDR,
.end = AU1100_SD0_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1200_SD_INT,
.end = AU1200_SD_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1200_DSCR_CMD0_SDMS_TX0,
.end = AU1200_DSCR_CMD0_SDMS_TX0,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1200_DSCR_CMD0_SDMS_RX0,
.end = AU1200_DSCR_CMD0_SDMS_RX0,
.flags = IORESOURCE_DMA,
}
};
static struct platform_device db1200_mmc0_dev = {
.name = "au1xxx-mmc",
.id = 0,
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1200_mmc_platdata[0],
},
.num_resources = ARRAY_SIZE(au1200_mmc0_resources),
.resource = au1200_mmc0_resources,
};
static struct resource au1200_mmc1_res[] = {
[0] = {
.start = AU1100_SD1_PHYS_ADDR,
.end = AU1100_SD1_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1200_SD_INT,
.end = AU1200_SD_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1200_DSCR_CMD0_SDMS_TX1,
.end = AU1200_DSCR_CMD0_SDMS_TX1,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1200_DSCR_CMD0_SDMS_RX1,
.end = AU1200_DSCR_CMD0_SDMS_RX1,
.flags = IORESOURCE_DMA,
}
};
static struct platform_device pb1200_mmc1_dev = {
.name = "au1xxx-mmc",
.id = 1,
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1200_mmc_platdata[1],
},
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
/**********************************************************************/
static int db1200fb_panel_index(void)
{
return (bcsr_read(BCSR_SWITCHES) >> 8) & 0x0f;
}
static int db1200fb_panel_init(void)
{
/* Apply power */
bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
BCSR_BOARD_LCDBL);
return 0;
}
static int db1200fb_panel_shutdown(void)
{
/* Remove power */
bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
BCSR_BOARD_LCDBL, 0);
return 0;
}
static struct au1200fb_platdata db1200fb_pd = {
.panel_index = db1200fb_panel_index,
.panel_init = db1200fb_panel_init,
.panel_shutdown = db1200fb_panel_shutdown,
};
static struct resource au1200_lcd_res[] = {
[0] = {
.start = AU1200_LCD_PHYS_ADDR,
.end = AU1200_LCD_PHYS_ADDR + 0x800 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1200_LCD_INT,
.end = AU1200_LCD_INT,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device au1200_lcd_dev = {
.name = "au1200-lcd",
.id = 0,
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1200fb_pd,
},
.num_resources = ARRAY_SIZE(au1200_lcd_res),
.resource = au1200_lcd_res,
};
/**********************************************************************/
static struct resource au1200_psc0_res[] = {
[0] = {
.start = AU1550_PSC0_PHYS_ADDR,
.end = AU1550_PSC0_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1200_PSC0_INT,
.end = AU1200_PSC0_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1200_DSCR_CMD0_PSC0_TX,
.end = AU1200_DSCR_CMD0_PSC0_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1200_DSCR_CMD0_PSC0_RX,
.end = AU1200_DSCR_CMD0_PSC0_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1200_i2c_dev = {
.name = "au1xpsc_smbus",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1200_psc0_res),
.resource = au1200_psc0_res,
};
static void db1200_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
{
if (cs)
bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_SPISEL);
else
bcsr_mod(BCSR_RESETS, BCSR_RESETS_SPISEL, 0);
}
static struct au1550_spi_info db1200_spi_platdata = {
.mainclk_hz = 50000000, /* PSC0 clock */
.num_chipselect = 2,
.activate_cs = db1200_spi_cs_en,
};
static struct platform_device db1200_spi_dev = {
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1200_spi_platdata,
},
.name = "au1550-spi",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1200_psc0_res),
.resource = au1200_psc0_res,
};
static struct resource au1200_psc1_res[] = {
[0] = {
.start = AU1550_PSC1_PHYS_ADDR,
.end = AU1550_PSC1_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1200_PSC1_INT,
.end = AU1200_PSC1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1200_DSCR_CMD0_PSC1_TX,
.end = AU1200_DSCR_CMD0_PSC1_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1200_DSCR_CMD0_PSC1_RX,
.end = AU1200_DSCR_CMD0_PSC1_RX,
.flags = IORESOURCE_DMA,
},
};
/* AC97 or I2S device */
static struct platform_device db1200_audio_dev = {
/* name assigned later based on switch setting */
.id = 1, /* PSC ID */
.num_resources = ARRAY_SIZE(au1200_psc1_res),
.resource = au1200_psc1_res,
};
/* DB1200 ASoC card device */
static struct platform_device db1200_sound_dev = {
/* name assigned later based on switch setting */
.id = 1, /* PSC ID */
.dev = {
.dma_mask = &au1200_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct platform_device db1200_stac_dev = {
.name = "ac97-codec",
.id = 1, /* on PSC1 */
};
static struct platform_device db1200_audiodma_dev = {
.name = "au1xpsc-pcm",
.id = 1, /* PSC ID */
};
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
&db1200_mmc0_dev,
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
&db1200_audiodma_dev,
&db1200_audio_dev,
&db1200_stac_dev,
&db1200_sound_dev,
};
static struct platform_device *pb1200_devs[] __initdata = {
&pb1200_mmc1_dev,
};
/* Some peripheral base addresses differ on the PB1200 */
static int __init pb1200_res_fixup(void)
{
/* CPLD Revs earlier than 4 cause problems */
if (BCSR_WHOAMI_CPLD(bcsr_read(BCSR_WHOAMI)) <= 3) {
printk(KERN_ERR "WARNING!!!\n");
printk(KERN_ERR "WARNING!!!\n");
printk(KERN_ERR "PB1200 must be at CPLD rev 4. Please have\n");
printk(KERN_ERR "the board updated to latest revisions.\n");
printk(KERN_ERR "This software will not work reliably\n");
printk(KERN_ERR "on anything older than CPLD rev 4.!\n");
printk(KERN_ERR "WARNING!!!\n");
printk(KERN_ERR "WARNING!!!\n");
return 1;
}
db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR;
db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR;
db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
return 0;
}
int __init db1200_dev_setup(void)
{
unsigned long pfc;
unsigned short sw;
int swapped, bid;
struct clk *c;
bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
(bid == BCSR_WHOAMI_PB1200_DDR2)) {
if (pb1200_res_fixup())
return -ENODEV;
}
/* GPIO7 is low-level triggered CPLD cascade */
irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
/* SMBus/SPI on PSC0, Audio on PSC1 */
pfc = alchemy_rdsys(AU1000_SYS_PINFUNC);
pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
/* get 50MHz for I2C driver on PSC0 */
c = clk_get(NULL, "psc0_intclk");
if (!IS_ERR(c)) {
pfc = clk_round_rate(c, 50000000);
if ((pfc < 1) || (abs(50000000 - pfc) > 2500000))
pr_warn("DB1200: can't get I2C close to 50MHz\n");
else
clk_set_rate(c, pfc);
clk_prepare_enable(c);
clk_put(c);
}
	/* insert/eject IRQ pairs: one of the two lines is always asserted.
	 * To avoid spurious interrupts they must not be automatically
	 * enabled when initially requested.
	 */
irq_set_status_flags(DB1200_SD0_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1200_SD0_EJECT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1200_PC0_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1200_PC0_EJECT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1200_PC1_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1200_PC1_EJECT_INT, IRQ_NOAUTOEN);
i2c_register_board_info(0, db1200_i2c_devs,
ARRAY_SIZE(db1200_i2c_devs));
	spi_register_board_info(db1200_spi_devs,
				ARRAY_SIZE(db1200_spi_devs));
/* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
* or S12 on the PB1200.
*/
	/* NOTE: GPIO215 controls the OTG VBUS supply. In SPI mode however
	 * this pin is claimed by PSC0 (unused there, but the pinmux doesn't
	 * allow freeing it without crippling the SPI interface).
	 * As a result, in SPI mode OTG simply won't work: PSC0 uses the
	 * pin as an input, which is pulled high on the boards.
	 */
pfc = alchemy_rdsys(AU1000_SYS_PINFUNC) & ~SYS_PINFUNC_P0A;
/* switch off OTG VBUS supply */
gpio_request(215, "otg-vbus");
gpio_direction_output(215, 1);
printk(KERN_INFO "%s device configuration:\n", get_system_type());
sw = bcsr_read(BCSR_SWITCHES);
if (sw & BCSR_SWITCHES_DIP_8) {
db1200_devs[0] = &db1200_i2c_dev;
bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);
pfc |= (2 << 17); /* GPIO2 block owns GPIO215 */
printk(KERN_INFO " S6.8 OFF: PSC0 mode I2C\n");
printk(KERN_INFO " OTG port VBUS supply available!\n");
} else {
db1200_devs[0] = &db1200_spi_dev;
bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC0MUX);
pfc |= (1 << 17); /* PSC0 owns GPIO215 */
printk(KERN_INFO " S6.8 ON : PSC0 mode SPI\n");
printk(KERN_INFO " OTG port VBUS supply disabled\n");
}
alchemy_wrsys(pfc, AU1000_SYS_PINFUNC);
	/* Audio: DIP7 selects I2S(0)/AC97(1), but I2S also needs I2C!
	 * Hence: DIP7=1 || DIP8=0 => AC97; DIP7=0 && DIP8=1 => I2S.
	 */
sw &= BCSR_SWITCHES_DIP_8 | BCSR_SWITCHES_DIP_7;
if (sw == BCSR_SWITCHES_DIP_8) {
bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC1MUX);
db1200_audio_dev.name = "au1xpsc_i2s";
db1200_sound_dev.name = "db1200-i2s";
printk(KERN_INFO " S6.7 ON : PSC1 mode I2S\n");
} else {
bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC1MUX, 0);
db1200_audio_dev.name = "au1xpsc_ac97";
db1200_sound_dev.name = "db1200-ac97";
printk(KERN_INFO " S6.7 OFF: PSC1 mode AC97\n");
}
/* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
DB1200_PC0_INT, DB1200_PC0_INSERT_INT,
/*DB1200_PC0_STSCHG_INT*/0, DB1200_PC0_EJECT_INT, 0);
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
DB1200_PC1_INT, DB1200_PC1_INSERT_INT,
/*DB1200_PC1_STSCHG_INT*/0, DB1200_PC1_EJECT_INT, 1);
swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
db1x_register_norflash(64 << 20, 2, swapped);
platform_add_devices(db1200_devs, ARRAY_SIZE(db1200_devs));
/* PB1200 is a DB1200 with a 2nd MMC and Camera connector */
if ((bid == BCSR_WHOAMI_PB1200_DDR1) ||
(bid == BCSR_WHOAMI_PB1200_DDR2))
platform_add_devices(pb1200_devs, ARRAY_SIZE(pb1200_devs));
return 0;
}
| linux-master | arch/mips/alchemy/devboards/db1200.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DBAu1300 init and platform device setup.
*
* (c) 2009 Manuel Lauss <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/init.h>
#include <linux/input.h> /* KEY_* codes */
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/interrupt.h>
#include <linux/ata_platform.h>
#include <linux/mmc/host.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <linux/wm97xx.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1300.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1200fb.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-db1x00/bcsr.h>
#include <asm/mach-au1x00/prom.h>
#include "platform.h"
/* FPGA (external mux) interrupt sources */
#define DB1300_FIRST_INT (ALCHEMY_GPIC_INT_LAST + 1)
#define DB1300_IDE_INT (DB1300_FIRST_INT + 0)
#define DB1300_ETH_INT (DB1300_FIRST_INT + 1)
#define DB1300_CF_INT (DB1300_FIRST_INT + 2)
#define DB1300_VIDEO_INT (DB1300_FIRST_INT + 4)
#define DB1300_HDMI_INT (DB1300_FIRST_INT + 5)
#define DB1300_DC_INT (DB1300_FIRST_INT + 6)
#define DB1300_FLASH_INT (DB1300_FIRST_INT + 7)
#define DB1300_CF_INSERT_INT (DB1300_FIRST_INT + 8)
#define DB1300_CF_EJECT_INT (DB1300_FIRST_INT + 9)
#define DB1300_AC97_INT (DB1300_FIRST_INT + 10)
#define DB1300_AC97_PEN_INT (DB1300_FIRST_INT + 11)
#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
/* SMSC9210 CS */
#define DB1300_ETH_PHYS_ADDR 0x19000000
#define DB1300_ETH_PHYS_END 0x197fffff
/* ATA CS */
#define DB1300_IDE_PHYS_ADDR 0x18800000
#define DB1300_IDE_REG_SHIFT 5
#define DB1300_IDE_PHYS_LEN (16 << DB1300_IDE_REG_SHIFT)
/* NAND CS */
#define DB1300_NAND_PHYS_ADDR 0x20000000
#define DB1300_NAND_PHYS_END 0x20000fff
static struct i2c_board_info db1300_i2c_devs[] __initdata = {
{ I2C_BOARD_INFO("wm8731", 0x1b), }, /* I2S audio codec */
{ I2C_BOARD_INFO("ne1619", 0x2d), }, /* adm1025-compat hwmon */
};
/* multifunction pins to assign to GPIO controller */
static int db1300_gpio_pins[] __initdata = {
AU1300_PIN_LCDPWM0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_WAKE1,
AU1300_PIN_WAKE2, AU1300_PIN_WAKE3, AU1300_PIN_FG3AUX,
AU1300_PIN_EXTCLK1,
-1, /* terminator */
};
/* multifunction pins to assign to device functions */
static int db1300_dev_pins[] __initdata = {
/* wake-from-str pins 0-3 */
AU1300_PIN_WAKE0,
/* external clock sources for PSC0 */
AU1300_PIN_EXTCLK0,
/* 8bit MMC interface on SD0: 6-9 */
AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6,
AU1300_PIN_SD0DAT7,
/* UART1 pins: 11-18 */
AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR,
AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR,
AU1300_PIN_U1RX, AU1300_PIN_U1TX,
/* UART0 pins: 19-24 */
AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR,
AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR,
/* UART2: 25-26 */
AU1300_PIN_U2RX, AU1300_PIN_U2TX,
/* UART3: 27-28 */
AU1300_PIN_U3RX, AU1300_PIN_U3TX,
/* LCD controller PWMs, ext pixclock: 30-31 */
AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN,
/* SD1 interface: 32-37 */
AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2,
AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK,
/* SD2 interface: 38-43 */
AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2,
AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK,
/* PSC0/1 clocks: 44-45 */
AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK,
/* PSCs: 46-49/50-53/54-57/58-61 */
AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0,
AU1300_PIN_PSC0D1,
AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
AU1300_PIN_PSC1D1,
AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
AU1300_PIN_PSC2D1,
AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
AU1300_PIN_PSC3D1,
/* PCMCIA interface: 62-70 */
AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16,
AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT,
AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW,
/* camera interface H/V sync inputs: 71-72 */
AU1300_PIN_CIMLS, AU1300_PIN_CIMFS,
/* PSC2/3 clocks: 73-74 */
AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK,
-1, /* terminator */
};
static void __init db1300_gpio_config(void)
{
int *i;
i = &db1300_dev_pins[0];
while (*i != -1)
au1300_pinfunc_to_dev(*i++);
i = &db1300_gpio_pins[0];
while (*i != -1)
au1300_gpio_direction_input(*i++);/* implies pin_to_gpio */
au1300_set_dbdma_gpio(1, AU1300_PIN_FG3AUX);
}
/**********************************************************************/
static u64 au1300_all_dmamask = DMA_BIT_MASK(32);
static void au1300_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
if (ctrl & NAND_CLE) {
ioaddr += MEM_STNAND_CMD;
} else if (ctrl & NAND_ALE) {
ioaddr += MEM_STNAND_ADDR;
} else {
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
__raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
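/* The static bus controller decodes low address lines into the NAND
 * latch strobes: accesses to base + MEM_STNAND_CMD assert CLE, accesses
 * to base + MEM_STNAND_ADDR assert ALE, and base + MEM_STNAND_DATA is
 * the plain data path. Selecting the target latch is therefore just an
 * address computation; no extra GPIO wiggling is needed.
 */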
static int au1300_nand_device_ready(struct nand_chip *this)
{
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
static struct mtd_partition db1300_nand_parts[] = {
{
.name = "NAND FS 0",
.offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
struct platform_nand_data db1300_nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
.nr_partitions = ARRAY_SIZE(db1300_nand_parts),
.partitions = db1300_nand_parts,
.chip_delay = 20,
},
.ctrl = {
.dev_ready = au1300_nand_device_ready,
.cmd_ctrl = au1300_nand_cmd_ctrl,
},
};
static struct resource db1300_nand_res[] = {
[0] = {
.start = DB1300_NAND_PHYS_ADDR,
.end = DB1300_NAND_PHYS_ADDR + 0xff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device db1300_nand_dev = {
.name = "gen_nand",
.num_resources = ARRAY_SIZE(db1300_nand_res),
.resource = db1300_nand_res,
.id = -1,
.dev = {
.platform_data = &db1300_nand_platdata,
}
};
/**********************************************************************/
static struct resource db1300_eth_res[] = {
[0] = {
.start = DB1300_ETH_PHYS_ADDR,
.end = DB1300_ETH_PHYS_END,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1300_ETH_INT,
.end = DB1300_ETH_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config db1300_eth_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
.flags = SMSC911X_USE_32BIT,
};
static struct platform_device db1300_eth_dev = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(db1300_eth_res),
.resource = db1300_eth_res,
.dev = {
.platform_data = &db1300_eth_config,
},
};
/**********************************************************************/
static struct resource au1300_psc1_res[] = {
[0] = {
.start = AU1300_PSC1_PHYS_ADDR,
.end = AU1300_PSC1_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC1_INT,
.end = AU1300_PSC1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC1_TX,
.end = AU1300_DSCR_CMD0_PSC1_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC1_RX,
.end = AU1300_DSCR_CMD0_PSC1_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_ac97_dev = {
.name = "au1xpsc_ac97",
.id = 1, /* PSC ID. match with AC97 codec ID! */
.num_resources = ARRAY_SIZE(au1300_psc1_res),
.resource = au1300_psc1_res,
};
/**********************************************************************/
static struct resource au1300_psc2_res[] = {
[0] = {
.start = AU1300_PSC2_PHYS_ADDR,
.end = AU1300_PSC2_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC2_INT,
.end = AU1300_PSC2_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC2_TX,
.end = AU1300_DSCR_CMD0_PSC2_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC2_RX,
.end = AU1300_DSCR_CMD0_PSC2_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_i2s_dev = {
.name = "au1xpsc_i2s",
.id = 2, /* PSC ID */
.num_resources = ARRAY_SIZE(au1300_psc2_res),
.resource = au1300_psc2_res,
};
/**********************************************************************/
static struct resource au1300_psc3_res[] = {
[0] = {
.start = AU1300_PSC3_PHYS_ADDR,
.end = AU1300_PSC3_PHYS_ADDR + 0x0fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_PSC3_INT,
.end = AU1300_PSC3_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_PSC3_TX,
.end = AU1300_DSCR_CMD0_PSC3_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_PSC3_RX,
.end = AU1300_DSCR_CMD0_PSC3_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_i2c_dev = {
.name = "au1xpsc_smbus",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1300_psc3_res),
.resource = au1300_psc3_res,
};
/**********************************************************************/
/* Key assignments are correct when facing the LCD panel; for the
 * assignments as drawn in the schematics, swap up with down and left
 * with right. The switch is used to emulate the arrow keys of a
 * keyboard.
 */
static struct gpio_keys_button db1300_5waysw_arrowkeys[] = {
{
.code = KEY_DOWN,
.gpio = AU1300_PIN_LCDPWM0,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-down",
},
{
.code = KEY_UP,
.gpio = AU1300_PIN_PSC2SYNC1,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-up",
},
{
.code = KEY_RIGHT,
.gpio = AU1300_PIN_WAKE3,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-right",
},
{
.code = KEY_LEFT,
.gpio = AU1300_PIN_WAKE2,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-left",
},
{
.code = KEY_ENTER,
.gpio = AU1300_PIN_WAKE1,
.type = EV_KEY,
.debounce_interval = 1,
.active_low = 1,
.desc = "5waysw-push",
},
};
static struct gpio_keys_platform_data db1300_5waysw_data = {
.buttons = db1300_5waysw_arrowkeys,
.nbuttons = ARRAY_SIZE(db1300_5waysw_arrowkeys),
.rep = 1,
.name = "db1300-5wayswitch",
};
static struct platform_device db1300_5waysw_dev = {
.name = "gpio-keys",
.dev = {
.platform_data = &db1300_5waysw_data,
},
};
/**********************************************************************/
static struct pata_platform_info db1300_ide_info = {
.ioport_shift = DB1300_IDE_REG_SHIFT,
};
#define IDE_ALT_START (14 << DB1300_IDE_REG_SHIFT)
static struct resource db1300_ide_res[] = {
[0] = {
.start = DB1300_IDE_PHYS_ADDR,
.end = DB1300_IDE_PHYS_ADDR + IDE_ALT_START - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = DB1300_IDE_PHYS_ADDR + IDE_ALT_START,
.end = DB1300_IDE_PHYS_ADDR + DB1300_IDE_PHYS_LEN - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = DB1300_IDE_INT,
.end = DB1300_IDE_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device db1300_ide_dev = {
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1300_ide_info,
},
.name = "pata_platform",
.resource = db1300_ide_res,
.num_resources = ARRAY_SIZE(db1300_ide_res),
};
/**********************************************************************/
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
return IRQ_WAKE_THREAD;
}
static irqreturn_t db1300_mmc_cdfn(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(200));
msleep(100); /* debounce */
if (irq == DB1300_SD1_INSERT_INT)
enable_irq(DB1300_SD1_EJECT_INT);
else
enable_irq(DB1300_SD1_INSERT_INT);
return IRQ_HANDLED;
}
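/* Card-detect handshake: the hard handler above disables the line that
 * just fired and defers to the threaded handler, which notifies the MMC
 * core, waits out contact bounce and then arms the opposite line. That
 * way exactly one of the insert/eject IRQs is enabled at any time.
 */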
static int db1300_mmc_card_readonly(void *mmc_host)
{
	/* uses the SD1 interface, but the DB1200's SD0 write-protect bit
	 * in the CPLD */
return bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP;
}
static int db1300_mmc_card_inserted(void *mmc_host)
{
return bcsr_read(BCSR_SIGSTAT) & (1 << 12); /* insertion irq signal */
}
static int db1300_mmc_cd_setup(void *mmc_host, int en)
{
int ret;
if (en) {
ret = request_threaded_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd,
db1300_mmc_cdfn, 0, "sd_insert", mmc_host);
if (ret)
goto out;
ret = request_threaded_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd,
db1300_mmc_cdfn, 0, "sd_eject", mmc_host);
if (ret) {
free_irq(DB1300_SD1_INSERT_INT, mmc_host);
goto out;
}
if (db1300_mmc_card_inserted(mmc_host))
enable_irq(DB1300_SD1_EJECT_INT);
else
enable_irq(DB1300_SD1_INSERT_INT);
} else {
free_irq(DB1300_SD1_INSERT_INT, mmc_host);
free_irq(DB1300_SD1_EJECT_INT, mmc_host);
}
ret = 0;
out:
return ret;
}
static void db1300_mmcled_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}
static struct led_classdev db1300_mmc_led = {
.brightness_set = db1300_mmcled_set,
};
struct au1xmmc_platform_data db1300_sd1_platdata = {
.cd_setup = db1300_mmc_cd_setup,
.card_inserted = db1300_mmc_card_inserted,
.card_readonly = db1300_mmc_card_readonly,
.led = &db1300_mmc_led,
};
static struct resource au1300_sd1_res[] = {
[0] = {
.start = AU1300_SD1_PHYS_ADDR,
.end = AU1300_SD1_PHYS_ADDR,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_SD1_INT,
.end = AU1300_SD1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_SDMS_TX1,
.end = AU1300_DSCR_CMD0_SDMS_TX1,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_SDMS_RX1,
.end = AU1300_DSCR_CMD0_SDMS_RX1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_sd1_dev = {
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1300_sd1_platdata,
},
.name = "au1xxx-mmc",
.id = 1,
.resource = au1300_sd1_res,
.num_resources = ARRAY_SIZE(au1300_sd1_res),
};
/**********************************************************************/
static int db1300_movinand_inserted(void *mmc_host)
{
return 0; /* disable for now, it doesn't work yet */
}
static int db1300_movinand_readonly(void *mmc_host)
{
return 0;
}
static void db1300_movinand_led_set(struct led_classdev *led,
enum led_brightness brightness)
{
if (brightness != LED_OFF)
bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
else
bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
}
static struct led_classdev db1300_movinand_led = {
.brightness_set = db1300_movinand_led_set,
};
struct au1xmmc_platform_data db1300_sd0_platdata = {
.card_inserted = db1300_movinand_inserted,
.card_readonly = db1300_movinand_readonly,
.led = &db1300_movinand_led,
.mask_host_caps = MMC_CAP_NEEDS_POLL,
};
static struct resource au1300_sd0_res[] = {
[0] = {
.start = AU1100_SD0_PHYS_ADDR,
.end = AU1100_SD0_PHYS_ADDR,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_SD0_INT,
.end = AU1300_SD0_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1300_DSCR_CMD0_SDMS_TX0,
.end = AU1300_DSCR_CMD0_SDMS_TX0,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1300_DSCR_CMD0_SDMS_RX0,
.end = AU1300_DSCR_CMD0_SDMS_RX0,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1300_sd0_dev = {
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1300_sd0_platdata,
},
.name = "au1xxx-mmc",
.id = 0,
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
/**********************************************************************/
static struct platform_device db1300_wm9715_dev = {
.name = "wm9712-codec",
.id = 1, /* ID of PSC for AC97 audio, see asoc glue! */
};
static struct platform_device db1300_ac97dma_dev = {
.name = "au1xpsc-pcm",
.id = 1, /* PSC ID */
};
static struct platform_device db1300_i2sdma_dev = {
.name = "au1xpsc-pcm",
.id = 2, /* PSC ID */
};
static struct platform_device db1300_sndac97_dev = {
.name = "db1300-ac97",
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct platform_device db1300_sndi2s_dev = {
.name = "db1300-i2s",
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/**********************************************************************/
static int db1300fb_panel_index(void)
{
return 9; /* DB1300_800x480 */
}
static int db1300fb_panel_init(void)
{
/* Apply power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD,
BCSR_BOARD_LCDBL);
return 0;
}
static int db1300fb_panel_shutdown(void)
{
/* Remove power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDBL,
BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD);
return 0;
}
static struct au1200fb_platdata db1300fb_pd = {
.panel_index = db1300fb_panel_index,
.panel_init = db1300fb_panel_init,
.panel_shutdown = db1300fb_panel_shutdown,
};
static struct resource au1300_lcd_res[] = {
[0] = {
.start = AU1200_LCD_PHYS_ADDR,
.end = AU1200_LCD_PHYS_ADDR + 0x800 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1300_LCD_INT,
.end = AU1300_LCD_INT,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device db1300_lcd_dev = {
.name = "au1200-lcd",
.id = 0,
.dev = {
.dma_mask = &au1300_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1300fb_pd,
},
.num_resources = ARRAY_SIZE(au1300_lcd_res),
.resource = au1300_lcd_res,
};
/**********************************************************************/
#if IS_ENABLED(CONFIG_TOUCHSCREEN_WM97XX)
static struct wm97xx_mach_ops db1300_wm97xx_ops = {
.irq_gpio = WM97XX_GPIO_3,
};
static int db1300_wm97xx_probe(struct platform_device *pdev)
{
struct wm97xx *wm = platform_get_drvdata(pdev);
/* external pendown indicator */
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
WM97XX_GPIO_POL_LOW, WM97XX_GPIO_STICKY,
WM97XX_GPIO_WAKE);
/* internal "virtual" pendown gpio */
wm97xx_config_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_OUT,
WM97XX_GPIO_POL_LOW, WM97XX_GPIO_NOTSTICKY,
WM97XX_GPIO_NOWAKE);
wm->pen_irq = DB1300_AC97_PEN_INT;
return wm97xx_register_mach_ops(wm, &db1300_wm97xx_ops);
}
#else
static int db1300_wm97xx_probe(struct platform_device *pdev)
{
return -ENODEV;
}
#endif
static struct platform_driver db1300_wm97xx_driver = {
.driver.name = "wm97xx-touch",
.driver.owner = THIS_MODULE,
.probe = db1300_wm97xx_probe,
};
/**********************************************************************/
static struct platform_device *db1300_dev[] __initdata = {
&db1300_eth_dev,
&db1300_i2c_dev,
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
&db1300_sd0_dev,
&db1300_sd1_dev,
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,
&db1300_wm9715_dev,
&db1300_ac97dma_dev,
&db1300_i2sdma_dev,
&db1300_sndac97_dev,
&db1300_sndi2s_dev,
};
int __init db1300_dev_setup(void)
{
int swapped, cpldirq;
struct clk *c;
/* setup CPLD IRQ muxer */
cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1);
irq_set_irq_type(cpldirq, IRQ_TYPE_LEVEL_HIGH);
bcsr_init_irq(DB1300_FIRST_INT, DB1300_LAST_INT, cpldirq);
	/* insert/eject IRQs: one of each pair is always asserted, so they
	 * must not be enabled automatically by request_irq(). The DB1200
	 * has this quirk too.
	 */
irq_set_status_flags(DB1300_SD1_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_SD1_EJECT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_CF_INSERT_INT, IRQ_NOAUTOEN);
irq_set_status_flags(DB1300_CF_EJECT_INT, IRQ_NOAUTOEN);
/*
* setup board
*/
prom_get_ethernet_addr(&db1300_eth_config.mac[0]);
i2c_register_board_info(0, db1300_i2c_devs,
ARRAY_SIZE(db1300_i2c_devs));
if (platform_driver_register(&db1300_wm97xx_driver))
pr_warn("DB1300: failed to init touch pen irq support!\n");
/* Audio PSC clock is supplied by codecs (PSC1, 2) */
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
/* I2C driver wants 50MHz, get as close as possible */
c = clk_get(NULL, "psc3_intclk");
if (!IS_ERR(c)) {
clk_set_rate(c, 50000000);
clk_prepare_enable(c);
clk_put(c);
}
__raw_writel(PSC_SEL_CLK_INTCLK,
(void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
/* enable power to USB ports */
bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_USBHPWR | BCSR_RESETS_OTGPWR);
/* although it is socket #0, it uses the CPLD bits which previous boards
* have used for socket #1.
*/
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x00400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x00400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x00010000 - 1,
DB1300_CF_INT, DB1300_CF_INSERT_INT, 0, DB1300_CF_EJECT_INT, 1);
swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
db1x_register_norflash(64 << 20, 2, swapped);
return platform_add_devices(db1300_dev, ARRAY_SIZE(db1300_dev));
}
int __init db1300_board_setup(void)
{
unsigned short whoami;
bcsr_init(DB1300_BCSR_PHYS_ADDR,
DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);
whoami = bcsr_read(BCSR_WHOAMI);
if (BCSR_WHOAMI_BOARD(whoami) != BCSR_WHOAMI_DB1300)
return -ENODEV;
db1300_gpio_config();
printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
"BoardID %d CPLD Rev %d DaughtercardID %d\n",
BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami),
BCSR_WHOAMI_DCID(whoami));
/* enable UARTs, YAMON only enables #2 */
alchemy_uart_enable(AU1300_UART0_PHYS_ADDR);
alchemy_uart_enable(AU1300_UART1_PHYS_ADDR);
alchemy_uart_enable(AU1300_UART3_PHYS_ADDR);
return 0;
}
| linux-master | arch/mips/alchemy/devboards/db1300.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alchemy Db1550/Pb1550 board support
*
* (c) 2011 Manuel Lauss <[email protected]>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/platnand.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <asm/bootinfo.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-au1x00/au1550_spi.h>
#include <asm/mach-au1x00/au1550nd.h>
#include <asm/mach-db1x00/bcsr.h>
#include <prom.h>
#include "platform.h"
static void __init db1550_hw_setup(void)
{
void __iomem *base;
unsigned long v;
/* complete pin setup: assign GPIO16 to PSC0_SYNC1 (SPI cs# line)
* as well as PSC1_SYNC for AC97 on PB1550.
*/
v = alchemy_rdsys(AU1000_SYS_PINFUNC);
alchemy_wrsys(v | 1 | SYS_PF_PSC1_S1, AU1000_SYS_PINFUNC);
	/* reset the AC97 codec now; the reset time in the psc-ac97 driver
	 * is apparently still too short, even though it is already
	 * ridiculously long.
	 */
base = (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR);
__raw_writel(PSC_SEL_CLK_SERCLK | PSC_SEL_PS_AC97MODE,
base + PSC_SEL_OFFSET);
__raw_writel(PSC_CTRL_DISABLE, base + PSC_CTRL_OFFSET);
wmb();
__raw_writel(PSC_AC97RST_RST, base + PSC_AC97RST_OFFSET);
wmb();
}
int __init db1550_board_setup(void)
{
unsigned short whoami;
bcsr_init(DB1550_BCSR_PHYS_ADDR,
DB1550_BCSR_PHYS_ADDR + DB1550_BCSR_HEXLED_OFS);
whoami = bcsr_read(BCSR_WHOAMI); /* PB1550 hexled offset differs */
switch (BCSR_WHOAMI_BOARD(whoami)) {
case BCSR_WHOAMI_PB1550_SDR:
case BCSR_WHOAMI_PB1550_DDR:
bcsr_init(PB1550_BCSR_PHYS_ADDR,
PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
break;
case BCSR_WHOAMI_DB1550:
break;
default:
return -ENODEV;
}
pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
"Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
db1550_hw_setup();
return 0;
}
/*****************************************************************************/
static u64 au1550_all_dmamask = DMA_BIT_MASK(32);
static struct mtd_partition db1550_spiflash_parts[] = {
{
.name = "spi_flash",
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
static struct flash_platform_data db1550_spiflash_data = {
.name = "s25fl010",
.parts = db1550_spiflash_parts,
.nr_parts = ARRAY_SIZE(db1550_spiflash_parts),
.type = "m25p10",
};
static struct spi_board_info db1550_spi_devs[] __initdata = {
{
/* TI TMP121AIDBVR temp sensor */
.modalias = "tmp121",
.max_speed_hz = 2400000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
},
{
/* Spansion S25FL001D0FMA SPI flash */
.modalias = "m25p80",
.max_speed_hz = 2400000,
.bus_num = 0,
.chip_select = 1,
.mode = SPI_MODE_0,
.platform_data = &db1550_spiflash_data,
},
};
static struct i2c_board_info db1550_i2c_devs[] __initdata = {
{ I2C_BOARD_INFO("24c04", 0x52),}, /* AT24C04-10 I2C eeprom */
{ I2C_BOARD_INFO("ne1619", 0x2d),}, /* adm1025-compat hwmon */
{ I2C_BOARD_INFO("wm8731", 0x1b),}, /* I2S audio codec WM8731 */
};
/**********************************************************************/
static void au1550_nand_cmd_ctrl(struct nand_chip *this, int cmd,
unsigned int ctrl)
{
unsigned long ioaddr = (unsigned long)this->legacy.IO_ADDR_W;
ioaddr &= 0xffffff00;
if (ctrl & NAND_CLE) {
ioaddr += MEM_STNAND_CMD;
} else if (ctrl & NAND_ALE) {
ioaddr += MEM_STNAND_ADDR;
} else {
/* assume we want to r/w real data by default */
ioaddr += MEM_STNAND_DATA;
}
this->legacy.IO_ADDR_R = this->legacy.IO_ADDR_W = (void __iomem *)ioaddr;
if (cmd != NAND_CMD_NONE) {
__raw_writeb(cmd, this->legacy.IO_ADDR_W);
wmb();
}
}
static int au1550_nand_device_ready(struct nand_chip *this)
{
return alchemy_rdsmem(AU1000_MEM_STSTAT) & 1;
}
static struct mtd_partition db1550_nand_parts[] = {
{
.name = "NAND FS 0",
.offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
struct platform_nand_data db1550_nand_platdata = {
.chip = {
.nr_chips = 1,
.chip_offset = 0,
.nr_partitions = ARRAY_SIZE(db1550_nand_parts),
.partitions = db1550_nand_parts,
.chip_delay = 20,
},
.ctrl = {
.dev_ready = au1550_nand_device_ready,
.cmd_ctrl = au1550_nand_cmd_ctrl,
},
};
static struct resource db1550_nand_res[] = {
[0] = {
.start = 0x20000000,
.end = 0x200000ff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device db1550_nand_dev = {
.name = "gen_nand",
.num_resources = ARRAY_SIZE(db1550_nand_res),
.resource = db1550_nand_res,
.id = -1,
.dev = {
.platform_data = &db1550_nand_platdata,
}
};
static struct au1550nd_platdata pb1550_nand_pd = {
.parts = db1550_nand_parts,
.num_parts = ARRAY_SIZE(db1550_nand_parts),
.devwidth = 0, /* x8 NAND default, needs fixing up */
};
static struct platform_device pb1550_nand_dev = {
.name = "au1550-nand",
.id = -1,
.resource = db1550_nand_res,
.num_resources = ARRAY_SIZE(db1550_nand_res),
.dev = {
.platform_data = &pb1550_nand_pd,
},
};
static void __init pb1550_nand_setup(void)
{
int boot_swapboot = (alchemy_rdsmem(AU1000_MEM_STSTAT) & (0x7 << 1)) |
((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
gpio_direction_input(206); /* de-assert NAND CS# */
switch (boot_swapboot) {
case 0: case 2: case 8: case 0xC: case 0xD:
/* x16 NAND Flash */
pb1550_nand_pd.devwidth = 1;
fallthrough;
case 1: case 3: case 9: case 0xE: case 0xF:
/* x8 NAND, already set up */
platform_device_register(&pb1550_nand_dev);
}
}
/**********************************************************************/
static struct resource au1550_psc0_res[] = {
[0] = {
.start = AU1550_PSC0_PHYS_ADDR,
.end = AU1550_PSC0_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1550_PSC0_INT,
.end = AU1550_PSC0_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1550_DSCR_CMD0_PSC0_TX,
.end = AU1550_DSCR_CMD0_PSC0_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1550_DSCR_CMD0_PSC0_RX,
.end = AU1550_DSCR_CMD0_PSC0_RX,
.flags = IORESOURCE_DMA,
},
};
static void db1550_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
{
if (cs)
bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SPISEL);
else
bcsr_mod(BCSR_BOARD, BCSR_BOARD_SPISEL, 0);
}
static struct au1550_spi_info db1550_spi_platdata = {
.mainclk_hz = 48000000, /* PSC0 clock: max. 2.4MHz SPI clk */
.num_chipselect = 2,
.activate_cs = db1550_spi_cs_en,
};
static struct platform_device db1550_spi_dev = {
.dev = {
.dma_mask = &au1550_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &db1550_spi_platdata,
},
.name = "au1550-spi",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1550_psc0_res),
.resource = au1550_psc0_res,
};
/**********************************************************************/
static struct resource au1550_psc1_res[] = {
[0] = {
.start = AU1550_PSC1_PHYS_ADDR,
.end = AU1550_PSC1_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1550_PSC1_INT,
.end = AU1550_PSC1_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1550_DSCR_CMD0_PSC1_TX,
.end = AU1550_DSCR_CMD0_PSC1_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1550_DSCR_CMD0_PSC1_RX,
.end = AU1550_DSCR_CMD0_PSC1_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1550_ac97_dev = {
.name = "au1xpsc_ac97",
.id = 1, /* PSC ID */
.num_resources = ARRAY_SIZE(au1550_psc1_res),
.resource = au1550_psc1_res,
};
static struct resource au1550_psc2_res[] = {
[0] = {
.start = AU1550_PSC2_PHYS_ADDR,
.end = AU1550_PSC2_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1550_PSC2_INT,
.end = AU1550_PSC2_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1550_DSCR_CMD0_PSC2_TX,
.end = AU1550_DSCR_CMD0_PSC2_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1550_DSCR_CMD0_PSC2_RX,
.end = AU1550_DSCR_CMD0_PSC2_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1550_i2c_dev = {
.name = "au1xpsc_smbus",
.id = 0, /* bus number */
.num_resources = ARRAY_SIZE(au1550_psc2_res),
.resource = au1550_psc2_res,
};
/**********************************************************************/
static struct resource au1550_psc3_res[] = {
[0] = {
.start = AU1550_PSC3_PHYS_ADDR,
.end = AU1550_PSC3_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AU1550_PSC3_INT,
.end = AU1550_PSC3_INT,
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = AU1550_DSCR_CMD0_PSC3_TX,
.end = AU1550_DSCR_CMD0_PSC3_TX,
.flags = IORESOURCE_DMA,
},
[3] = {
.start = AU1550_DSCR_CMD0_PSC3_RX,
.end = AU1550_DSCR_CMD0_PSC3_RX,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device db1550_i2s_dev = {
.name = "au1xpsc_i2s",
.id = 3, /* PSC ID */
.num_resources = ARRAY_SIZE(au1550_psc3_res),
.resource = au1550_psc3_res,
};
/**********************************************************************/
static struct platform_device db1550_stac_dev = {
.name = "ac97-codec",
.id = 1, /* on PSC1 */
};
static struct platform_device db1550_ac97dma_dev = {
.name = "au1xpsc-pcm",
	.id = 1, /* on PSC1 */
};
static struct platform_device db1550_i2sdma_dev = {
.name = "au1xpsc-pcm",
.id = 3, /* on PSC3 */
};
static struct platform_device db1550_sndac97_dev = {
.name = "db1550-ac97",
.dev = {
.dma_mask = &au1550_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
static struct platform_device db1550_sndi2s_dev = {
.name = "db1550-i2s",
.dev = {
.dma_mask = &au1550_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
/**********************************************************************/
static int db1550_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
if ((slot < 11) || (slot > 13) || pin == 0)
return -1;
if (slot == 11)
return (pin == 1) ? AU1550_PCI_INTC : 0xff;
if (slot == 12) {
switch (pin) {
case 1: return AU1550_PCI_INTB;
case 2: return AU1550_PCI_INTC;
case 3: return AU1550_PCI_INTD;
case 4: return AU1550_PCI_INTA;
}
}
if (slot == 13) {
switch (pin) {
case 1: return AU1550_PCI_INTA;
case 2: return AU1550_PCI_INTB;
case 3: return AU1550_PCI_INTC;
case 4: return AU1550_PCI_INTD;
}
}
return -1;
}
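/* In the DB1550 map above, slots 12 and 13 use the usual rotating
 * INTA..INTD swizzle, shifted by one line relative to each other, while
 * slot 11 only has its INTA pin wired up (to the Au1550's INTC input).
 */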
static int pb1550_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
{
if ((slot < 12) || (slot > 13) || pin == 0)
return -1;
if (slot == 12) {
switch (pin) {
case 1: return AU1500_PCI_INTB;
case 2: return AU1500_PCI_INTC;
case 3: return AU1500_PCI_INTD;
case 4: return AU1500_PCI_INTA;
}
}
if (slot == 13) {
switch (pin) {
case 1: return AU1500_PCI_INTA;
case 2: return AU1500_PCI_INTB;
case 3: return AU1500_PCI_INTC;
case 4: return AU1500_PCI_INTD;
}
}
return -1;
}
static struct resource alchemy_pci_host_res[] = {
[0] = {
.start = AU1500_PCI_PHYS_ADDR,
.end = AU1500_PCI_PHYS_ADDR + 0xfff,
.flags = IORESOURCE_MEM,
},
};
static struct alchemy_pci_platdata db1550_pci_pd = {
.board_map_irq = db1550_map_pci_irq,
};
static struct platform_device db1550_pci_host_dev = {
.dev.platform_data = &db1550_pci_pd,
.name = "alchemy-pci",
.id = 0,
.num_resources = ARRAY_SIZE(alchemy_pci_host_res),
.resource = alchemy_pci_host_res,
};
/**********************************************************************/
static struct platform_device *db1550_devs[] __initdata = {
&db1550_i2c_dev,
&db1550_ac97_dev,
&db1550_spi_dev,
&db1550_i2s_dev,
&db1550_stac_dev,
&db1550_ac97dma_dev,
&db1550_i2sdma_dev,
&db1550_sndac97_dev,
&db1550_sndi2s_dev,
};
/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
int __init db1550_pci_setup(int id)
{
if (id)
db1550_pci_pd.board_map_irq = pb1550_map_pci_irq;
return platform_device_register(&db1550_pci_host_dev);
}
static void __init db1550_devices(void)
{
alchemy_gpio_direction_output(203, 0); /* red led on */
irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
irq_set_irq_type(AU1550_GPIO21_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG0# */
irq_set_irq_type(AU1550_GPIO22_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG1# */
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
AU1550_GPIO3_INT, 0,
/*AU1550_GPIO21_INT*/0, 0, 0);
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
AU1550_GPIO5_INT, 1,
/*AU1550_GPIO22_INT*/0, 0, 1);
platform_device_register(&db1550_nand_dev);
alchemy_gpio_direction_output(202, 0); /* green led on */
}
static void __init pb1550_devices(void)
{
irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1550_GPIO201_205_INT, IRQ_TYPE_LEVEL_HIGH);
/* enable both PCMCIA card irqs in the shared line */
alchemy_gpio2_enable_int(201); /* socket 0 card irq */
alchemy_gpio2_enable_int(202); /* socket 1 card irq */
	/* The Pb1550, like all others, also has statuschange irqs; however
	 * they're wired up on the Au1550's shared GPIO201_205 line, which
	 * also services the PCMCIA card interrupts. So we ignore statuschange
	 * and use GPIO201_205 exclusively for card interrupts, since a)
	 * pcmcia drivers are used to shared irqs and b) statuschange isn't
	 * really useful anyway.
	 */
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
AU1550_GPIO201_205_INT, AU1550_GPIO0_INT, 0, 0, 0);
db1x_register_pcmcia_socket(
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008000000,
AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008000000,
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x008000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1,
AU1550_GPIO201_205_INT, AU1550_GPIO1_INT, 0, 0, 1);
pb1550_nand_setup();
}
int __init db1550_dev_setup(void)
{
int swapped, id;
struct clk *c;
id = (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI)) != BCSR_WHOAMI_DB1550);
i2c_register_board_info(0, db1550_i2c_devs,
ARRAY_SIZE(db1550_i2c_devs));
	spi_register_board_info(db1550_spi_devs,
				ARRAY_SIZE(db1550_spi_devs));
c = clk_get(NULL, "psc0_intclk");
if (!IS_ERR(c)) {
clk_set_rate(c, 50000000);
clk_prepare_enable(c);
clk_put(c);
}
c = clk_get(NULL, "psc2_intclk");
if (!IS_ERR(c)) {
clk_set_rate(c, db1550_spi_platdata.mainclk_hz);
clk_prepare_enable(c);
clk_put(c);
}
/* Audio PSC clock is supplied by codecs (PSC1, 3) FIXME: platdata!! */
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
__raw_writel(PSC_SEL_CLK_SERCLK,
(void __iomem *)KSEG1ADDR(AU1550_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
/* SPI/I2C use internally supplied 50MHz source */
__raw_writel(PSC_SEL_CLK_INTCLK,
(void __iomem *)KSEG1ADDR(AU1550_PSC0_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
__raw_writel(PSC_SEL_CLK_INTCLK,
(void __iomem *)KSEG1ADDR(AU1550_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
wmb();
id ? pb1550_devices() : db1550_devices();
swapped = bcsr_read(BCSR_STATUS) &
(id ? BCSR_STATUS_PB1550_SWAPBOOT : BCSR_STATUS_DB1000_SWAPBOOT);
db1x_register_norflash(128 << 20, 4, swapped);
return platform_add_devices(db1550_devs, ARRAY_SIZE(db1550_devs));
}
| linux-master | arch/mips/alchemy/devboards/db1550.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* bcsr.h -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
*
* All Alchemy development boards (except, of course, the weird PB1000)
* have a few registers in a CPLD with standardised layout; they mostly
* only differ in base address.
* All registers are 16bits wide with 32bit spacing.
*/
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/mach-db1x00/bcsr.h>
static struct bcsr_reg {
void __iomem *raddr;
spinlock_t lock;
} bcsr_regs[BCSR_CNT];
static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
static int bcsr_csc_base; /* linux-irq of first cascaded irq */
void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
{
int i;
bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
bcsr_virt = (void __iomem *)bcsr1_phys;
for (i = 0; i < BCSR_CNT; i++) {
if (i >= BCSR_HEXLEDS)
bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
(0x04 * (i - BCSR_HEXLEDS));
else
bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
(0x04 * i);
spin_lock_init(&bcsr_regs[i].lock);
}
}
unsigned short bcsr_read(enum bcsr_id reg)
{
unsigned short r;
unsigned long flags;
spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
r = __raw_readw(bcsr_regs[reg].raddr);
spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
return r;
}
EXPORT_SYMBOL_GPL(bcsr_read);
void bcsr_write(enum bcsr_id reg, unsigned short val)
{
unsigned long flags;
spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
__raw_writew(val, bcsr_regs[reg].raddr);
wmb();
spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_write);
void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
{
unsigned short r;
unsigned long flags;
spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
r = __raw_readw(bcsr_regs[reg].raddr);
r &= ~clr;
r |= set;
__raw_writew(r, bcsr_regs[reg].raddr);
wmb();
spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
}
EXPORT_SYMBOL_GPL(bcsr_mod);
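/* Usage sketch (illustrative only): bcsr_mod() performs a locked
 * read-modify-write, clearing the 'clr' bits before oring in the 'set'
 * bits. The LED bits are active-low on these boards, so clearing a bit
 * lights the LED:
 */
#if 0
static void example_led_swap(void)
{
	/* LED0 on (bit cleared), LED1 off (bit set), in one RMW cycle */
	bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, BCSR_LEDS_LED1);
}
#endif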
/*
* DB1200/PB1200 CPLD IRQ muxer
*/
static void bcsr_csc_handler(struct irq_desc *d)
{
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
struct irq_chip *chip = irq_desc_get_chip(d);
chained_irq_enter(chip, d);
generic_handle_irq(bcsr_csc_base + __ffs(bisr));
chained_irq_exit(chip, d);
}
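/* Only the lowest pending source (__ffs) is dispatched per invocation;
 * since the cascade parent is level-triggered it simply fires again
 * while further INTSTAT bits remain set, so no sources are lost.
 */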
static void bcsr_irq_mask(struct irq_data *d)
{
unsigned short v = 1 << (d->irq - bcsr_csc_base);
__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
wmb();
}
static void bcsr_irq_maskack(struct irq_data *d)
{
unsigned short v = 1 << (d->irq - bcsr_csc_base);
__raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
__raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
wmb();
}
static void bcsr_irq_unmask(struct irq_data *d)
{
unsigned short v = 1 << (d->irq - bcsr_csc_base);
__raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
wmb();
}
static struct irq_chip bcsr_irq_type = {
.name = "CPLD",
.irq_mask = bcsr_irq_mask,
.irq_mask_ack = bcsr_irq_maskack,
.irq_unmask = bcsr_irq_unmask,
};
void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
{
unsigned int irq;
/* mask & enable & ack all */
__raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSET);
__raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
wmb();
bcsr_csc_base = csc_start;
for (irq = csc_start; irq <= csc_end; irq++)
irq_set_chip_and_handler_name(irq, &bcsr_irq_type,
handle_level_irq, "level");
irq_set_chained_handler(hook_irq, bcsr_csc_handler);
}
| linux-master | arch/mips/alchemy/devboards/bcsr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alchemy Development Board example suspend userspace interface.
*
* (c) 2008 Manuel Lauss <[email protected]>
*/
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/suspend.h>
#include <linux/sysfs.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-db1x00/bcsr.h>
/*
* Generic suspend userspace interface for Alchemy development boards.
* This code exports a few sysfs nodes under /sys/power/db1x/ which
* can be used by userspace to en/disable all au1x-provided wakeup
* sources and configure the timeout after which the TOYMATCH2 irq
* is to trigger a wakeup.
*/
static unsigned long db1x_pm_sleep_secs;
static unsigned long db1x_pm_wakemsk;
static unsigned long db1x_pm_last_wakesrc;
static int db1x_pm_enter(suspend_state_t state)
{
unsigned short bcsrs[16];
int i, j, hasint;
/* save CPLD regs */
hasint = bcsr_read(BCSR_WHOAMI);
hasint = BCSR_WHOAMI_BOARD(hasint) >= BCSR_WHOAMI_DB1200;
j = (hasint) ? BCSR_MASKSET : BCSR_SYSTEM;
for (i = BCSR_STATUS; i <= j; i++)
bcsrs[i] = bcsr_read(i);
/* shut off hexleds */
bcsr_write(BCSR_HEXCLEAR, 3);
/* enable GPIO based wakeup */
alchemy_gpio1_input_enable();
/* clear and setup wake cause and source */
alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
alchemy_wrsys(0, AU1000_SYS_WAKESRC);
alchemy_wrsys(db1x_pm_wakemsk, AU1000_SYS_WAKEMSK);
/* setup 1Hz-timer-based wakeup: wait for reg access */
while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
asm volatile ("nop");
alchemy_wrsys(alchemy_rdsys(AU1000_SYS_TOYREAD) + db1x_pm_sleep_secs,
AU1000_SYS_TOYMATCH2);
/* wait for value to really hit the register */
while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
asm volatile ("nop");
/* ...and now the sandman can come! */
au_sleep();
/* restore CPLD regs */
for (i = BCSR_STATUS; i <= BCSR_SYSTEM; i++)
bcsr_write(i, bcsrs[i]);
/* restore CPLD int registers */
if (hasint) {
bcsr_write(BCSR_INTCLR, 0xffff);
bcsr_write(BCSR_MASKCLR, 0xffff);
bcsr_write(BCSR_INTSTAT, 0xffff);
bcsr_write(BCSR_INTSET, bcsrs[BCSR_INTSET]);
bcsr_write(BCSR_MASKSET, bcsrs[BCSR_MASKSET]);
}
/* light up hexleds */
bcsr_write(BCSR_HEXCLEAR, 0);
return 0;
}
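/* The busy-wait loops in db1x_pm_enter() implement the counter-domain
 * write protocol: SYS_CNTRL_M20 stays set while a previous write to the
 * 32kHz-domain TOYMATCH2 register is still being synchronized, so it
 * must read as 0 both before issuing the write and before relying on
 * it. A minimal helper capturing the idiom might look like this (the
 * name is an assumption, not an existing kernel API):
 */
#if 0
static inline void example_sync_toymatch2_write(unsigned long match)
{
	while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
		asm volatile ("nop");	/* previous write still pending */
	alchemy_wrsys(match, AU1000_SYS_TOYMATCH2);
	while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M20)
		asm volatile ("nop");	/* wait until it hit the register */
}
#endif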
static int db1x_pm_begin(suspend_state_t state)
{
if (!db1x_pm_wakemsk) {
printk(KERN_ERR "db1x: no wakeup source activated!\n");
return -EINVAL;
}
return 0;
}
static void db1x_pm_end(void)
{
	/* read and store the wakeup source, then clear the register. To
	 * be able to clear it, WAKEMSK must be cleared first.
	 */
db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC);
alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
alchemy_wrsys(0, AU1000_SYS_WAKESRC);
}
static const struct platform_suspend_ops db1x_pm_ops = {
.valid = suspend_valid_only_mem,
.begin = db1x_pm_begin,
.enter = db1x_pm_enter,
.end = db1x_pm_end,
};
#define ATTRCMP(x) (0 == strcmp(attr->attr.name, #x))
static ssize_t db1x_pmattr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
int idx;
if (ATTRCMP(timer_timeout))
return sprintf(buf, "%lu\n", db1x_pm_sleep_secs);
else if (ATTRCMP(timer))
return sprintf(buf, "%u\n",
!!(db1x_pm_wakemsk & SYS_WAKEMSK_M2));
else if (ATTRCMP(wakesrc))
return sprintf(buf, "%lu\n", db1x_pm_last_wakesrc);
else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
idx = (attr->attr.name)[4] - '0';
return sprintf(buf, "%d\n",
!!(db1x_pm_wakemsk & SYS_WAKEMSK_GPIO(idx)));
} else if (ATTRCMP(wakemsk)) {
return sprintf(buf, "%08lx\n", db1x_pm_wakemsk);
}
return -ENOENT;
}
static ssize_t db1x_pmattr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *instr,
size_t bytes)
{
unsigned long l;
int tmp;
if (ATTRCMP(timer_timeout)) {
tmp = kstrtoul(instr, 0, &l);
if (tmp)
return tmp;
db1x_pm_sleep_secs = l;
} else if (ATTRCMP(timer)) {
if (instr[0] != '0')
db1x_pm_wakemsk |= SYS_WAKEMSK_M2;
else
db1x_pm_wakemsk &= ~SYS_WAKEMSK_M2;
} else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
tmp = (attr->attr.name)[4] - '0';
if (instr[0] != '0') {
db1x_pm_wakemsk |= SYS_WAKEMSK_GPIO(tmp);
} else {
db1x_pm_wakemsk &= ~SYS_WAKEMSK_GPIO(tmp);
}
} else if (ATTRCMP(wakemsk)) {
tmp = kstrtoul(instr, 0, &l);
if (tmp)
return tmp;
db1x_pm_wakemsk = l & 0x0000003f;
} else
bytes = -ENOENT;
return bytes;
}
#define ATTR(x) \
static struct kobj_attribute x##_attribute = \
__ATTR(x, 0664, db1x_pmattr_show, \
db1x_pmattr_store);
ATTR(gpio0) /* GPIO-based wakeup enable */
ATTR(gpio1)
ATTR(gpio2)
ATTR(gpio3)
ATTR(gpio4)
ATTR(gpio5)
ATTR(gpio6)
ATTR(gpio7)
ATTR(timer) /* TOYMATCH2-based wakeup enable */
ATTR(timer_timeout) /* timer-based wakeup timeout value, in seconds */
ATTR(wakesrc) /* contents of SYS_WAKESRC after last wakeup */
ATTR(wakemsk) /* direct access to SYS_WAKEMSK */
#define ATTR_LIST(x) & x ## _attribute.attr
static struct attribute *db1x_pmattrs[] = {
ATTR_LIST(gpio0),
ATTR_LIST(gpio1),
ATTR_LIST(gpio2),
ATTR_LIST(gpio3),
ATTR_LIST(gpio4),
ATTR_LIST(gpio5),
ATTR_LIST(gpio6),
ATTR_LIST(gpio7),
ATTR_LIST(timer),
ATTR_LIST(timer_timeout),
ATTR_LIST(wakesrc),
ATTR_LIST(wakemsk),
NULL, /* terminator */
};
static struct attribute_group db1x_pmattr_group = {
.name = "db1x",
.attrs = db1x_pmattrs,
};
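/* Resulting userspace interface (illustrative shell session; the group
 * is created below the standard power kobject):
 *
 *	echo 1   > /sys/power/db1x/gpio3          arm GPIO3 as wakeup source
 *	echo 120 > /sys/power/db1x/timer_timeout  sleep length in seconds
 *	echo 1   > /sys/power/db1x/timer          arm the TOYMATCH2 timer
 *	echo mem > /sys/power/state               enter suspend
 *	cat /sys/power/db1x/wakesrc               what woke the system up
 */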
/*
* Initialize suspend interface
*/
static int __init pm_init(void)
{
/* init TOY to tick at 1Hz if not already done. No need to wait
* for confirmation since there's plenty of time from here to
* the next suspend cycle.
*/
if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767)
alchemy_wrsys(32767, AU1000_SYS_TOYTRIM);
db1x_pm_last_wakesrc = alchemy_rdsys(AU1000_SYS_WAKESRC);
alchemy_wrsys(0, AU1000_SYS_WAKESRC);
alchemy_wrsys(0, AU1000_SYS_WAKEMSK);
suspend_set_ops(&db1x_pm_ops);
return sysfs_create_group(power_kobj, &db1x_pmattr_group);
}
late_initcall(pm_init);
| linux-master | arch/mips/alchemy/devboards/pm.c |
/*
* Copyright 2001, 2007-2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*
* Copyright (C) 2007 Ralf Baechle ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <asm/irq_cpu.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1300.h>
/* Interrupt Controller register offsets */
#define IC_CFG0RD 0x40
#define IC_CFG0SET 0x40
#define IC_CFG0CLR 0x44
#define IC_CFG1RD 0x48
#define IC_CFG1SET 0x48
#define IC_CFG1CLR 0x4C
#define IC_CFG2RD 0x50
#define IC_CFG2SET 0x50
#define IC_CFG2CLR 0x54
#define IC_REQ0INT 0x54
#define IC_SRCRD 0x58
#define IC_SRCSET 0x58
#define IC_SRCCLR 0x5C
#define IC_REQ1INT 0x5C
#define IC_ASSIGNRD 0x60
#define IC_ASSIGNSET 0x60
#define IC_ASSIGNCLR 0x64
#define IC_WAKERD 0x68
#define IC_WAKESET 0x68
#define IC_WAKECLR 0x6C
#define IC_MASKRD 0x70
#define IC_MASKSET 0x70
#define IC_MASKCLR 0x74
#define IC_RISINGRD 0x78
#define IC_RISINGCLR 0x78
#define IC_FALLINGRD 0x7C
#define IC_FALLINGCLR 0x7C
#define IC_TESTBIT 0x80
/* per-processor fixed function irqs */
struct alchemy_irqmap {
int irq; /* linux IRQ number */
int type; /* IRQ_TYPE_ */
int prio; /* irq priority, 0 highest, 3 lowest */
int internal; /* GPIC: internal source (no ext. pin)? */
};
static int au1x_ic_settype(struct irq_data *d, unsigned int type);
static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
/* NOTE on interrupt priorities: The original writers of this code said:
*
* Because of the tight timing of SETUP token to reply transactions,
* the USB devices-side packet complete interrupt (USB_DEV_REQ_INT)
* needs the highest priority.
*/
struct alchemy_irqmap au1000_irqmap[] __initdata = {
{ AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1500_irqmap[] __initdata = {
{ AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1100_irqmap[] __initdata = {
{ AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1550_irqmap[] __initdata = {
{ AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1200_irqmap[] __initdata = {
{ AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
static struct alchemy_irqmap au1300_irqmap[] __initdata = {
/* multifunction: gpio pin or device */
{ AU1300_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_SD1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_SD2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
{ AU1300_NAND_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0, },
/* au1300 internal */
{ AU1300_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_MMU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_MPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_GPU_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_UDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_RTC_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 1, },
{ AU1300_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_SD0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_BSA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_MPE_INT, IRQ_TYPE_EDGE_RISING, 1, 1, },
{ AU1300_ITE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ AU1300_CIM_INT, IRQ_TYPE_LEVEL_HIGH, 1, 1, },
{ -1, }, /* terminator */
};
/******************************************************************************/
static void au1x_ic0_unmask(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_MASKSET);
__raw_writel(1 << bit, base + IC_WAKESET);
wmb();
}
static void au1x_ic1_unmask(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_MASKSET);
__raw_writel(1 << bit, base + IC_WAKESET);
wmb();
}
static void au1x_ic0_mask(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_MASKCLR);
__raw_writel(1 << bit, base + IC_WAKECLR);
wmb();
}
static void au1x_ic1_mask(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_MASKCLR);
__raw_writel(1 << bit, base + IC_WAKECLR);
wmb();
}
static void au1x_ic0_ack(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
/*
* This may assume that we don't get interrupts from
* both edges at once, or if we do, that we don't care.
*/
__raw_writel(1 << bit, base + IC_FALLINGCLR);
__raw_writel(1 << bit, base + IC_RISINGCLR);
wmb();
}
static void au1x_ic1_ack(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
/*
* This may assume that we don't get interrupts from
* both edges at once, or if we do, that we don't care.
*/
__raw_writel(1 << bit, base + IC_FALLINGCLR);
__raw_writel(1 << bit, base + IC_RISINGCLR);
wmb();
}
static void au1x_ic0_maskack(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC0_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_WAKECLR);
__raw_writel(1 << bit, base + IC_MASKCLR);
__raw_writel(1 << bit, base + IC_RISINGCLR);
__raw_writel(1 << bit, base + IC_FALLINGCLR);
wmb();
}
static void au1x_ic1_maskack(struct irq_data *d)
{
unsigned int bit = d->irq - AU1000_INTC1_INT_BASE;
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
__raw_writel(1 << bit, base + IC_WAKECLR);
__raw_writel(1 << bit, base + IC_MASKCLR);
__raw_writel(1 << bit, base + IC_RISINGCLR);
__raw_writel(1 << bit, base + IC_FALLINGCLR);
wmb();
}
static int au1x_ic1_setwake(struct irq_data *d, unsigned int on)
{
int bit = d->irq - AU1000_INTC1_INT_BASE;
unsigned long wakemsk, flags;
	/* only GPIOs 0-7 can act as wakeup sources. Fortunately these
* are wired up identically on all supported variants.
*/
if ((bit < 0) || (bit > 7))
return -EINVAL;
local_irq_save(flags);
wakemsk = alchemy_rdsys(AU1000_SYS_WAKEMSK);
if (on)
wakemsk |= 1 << bit;
else
wakemsk &= ~(1 << bit);
alchemy_wrsys(wakemsk, AU1000_SYS_WAKEMSK);
local_irq_restore(flags);
return 0;
}
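/*
 * Drivers reach the .irq_set_wake callback above through the generic
 * wakeup machinery. A sketch (the irq number must be one of the INTC1
 * GPIO0-7 sources and is otherwise hypothetical):
 *
 *   if (enable_irq_wake(gpio_irq))
 *       dev_warn(dev, "cannot use GPIO irq as wakeup source\n");
 */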
/*
* irq_chips for both ICs; this way the mask handlers can be
* as short as possible.
*/
static struct irq_chip au1x_ic0_chip = {
.name = "Alchemy-IC0",
.irq_ack = au1x_ic0_ack,
.irq_mask = au1x_ic0_mask,
.irq_mask_ack = au1x_ic0_maskack,
.irq_unmask = au1x_ic0_unmask,
.irq_set_type = au1x_ic_settype,
};
static struct irq_chip au1x_ic1_chip = {
.name = "Alchemy-IC1",
.irq_ack = au1x_ic1_ack,
.irq_mask = au1x_ic1_mask,
.irq_mask_ack = au1x_ic1_maskack,
.irq_unmask = au1x_ic1_unmask,
.irq_set_type = au1x_ic_settype,
.irq_set_wake = au1x_ic1_setwake,
};
static int au1x_ic_settype(struct irq_data *d, unsigned int flow_type)
{
struct irq_chip *chip;
unsigned int bit, irq = d->irq;
irq_flow_handler_t handler = NULL;
	const char *name = NULL;
void __iomem *base;
int ret;
if (irq >= AU1000_INTC1_INT_BASE) {
bit = irq - AU1000_INTC1_INT_BASE;
chip = &au1x_ic1_chip;
base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
} else {
bit = irq - AU1000_INTC0_INT_BASE;
chip = &au1x_ic0_chip;
base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
}
if (bit > 31)
return -EINVAL;
ret = 0;
switch (flow_type) { /* cfgregs 2:1:0 */
case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
__raw_writel(1 << bit, base + IC_CFG2CLR);
__raw_writel(1 << bit, base + IC_CFG1CLR);
__raw_writel(1 << bit, base + IC_CFG0SET);
handler = handle_edge_irq;
name = "riseedge";
break;
case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
__raw_writel(1 << bit, base + IC_CFG2CLR);
__raw_writel(1 << bit, base + IC_CFG1SET);
__raw_writel(1 << bit, base + IC_CFG0CLR);
handler = handle_edge_irq;
name = "falledge";
break;
case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
__raw_writel(1 << bit, base + IC_CFG2CLR);
__raw_writel(1 << bit, base + IC_CFG1SET);
__raw_writel(1 << bit, base + IC_CFG0SET);
handler = handle_edge_irq;
name = "bothedge";
break;
case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
__raw_writel(1 << bit, base + IC_CFG2SET);
__raw_writel(1 << bit, base + IC_CFG1CLR);
__raw_writel(1 << bit, base + IC_CFG0SET);
handler = handle_level_irq;
name = "hilevel";
break;
case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
__raw_writel(1 << bit, base + IC_CFG2SET);
__raw_writel(1 << bit, base + IC_CFG1SET);
__raw_writel(1 << bit, base + IC_CFG0CLR);
handler = handle_level_irq;
name = "lowlevel";
break;
case IRQ_TYPE_NONE: /* 0:0:0 */
__raw_writel(1 << bit, base + IC_CFG2CLR);
__raw_writel(1 << bit, base + IC_CFG1CLR);
__raw_writel(1 << bit, base + IC_CFG0CLR);
break;
default:
ret = -EINVAL;
}
irq_set_chip_handler_name_locked(d, chip, handler, name);
wmb();
return ret;
}
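/*
 * Board or driver code selects the trigger at runtime through the generic
 * irq_set_irq_type() interface, which lands in au1x_ic_settype() above.
 * Sketch (irq number and handler are placeholders):
 *
 *   irq_set_irq_type(irq, IRQ_TYPE_EDGE_FALLING);
 *   if (request_irq(irq, my_handler, 0, "mydev", NULL))
 *       ...
 */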
/******************************************************************************/
/*
* au1300_gpic_chgcfg - change PIN configuration.
* @gpio: pin to change (0-based GPIO number from datasheet).
* @clr: clear all bits set in 'clr'.
* @set: set these bits.
*
 * Modifies a pin's configuration register: bits set in @clr are cleared
 * in the register, bits set in @set are set.
*/
static inline void au1300_gpic_chgcfg(unsigned int gpio,
unsigned long clr,
unsigned long set)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long l;
r += gpio * 4; /* offset into pin config array */
l = __raw_readl(r + AU1300_GPIC_PINCFG);
l &= ~clr;
l |= set;
__raw_writel(l, r + AU1300_GPIC_PINCFG);
wmb();
}
/*
* au1300_pinfunc_to_gpio - assign a pin as GPIO input (GPIO ctrl).
* @pin: pin (0-based GPIO number from datasheet).
*
* Assigns a GPIO pin to the GPIO controller, so its level can either
* be read or set through the generic GPIO functions.
* If you need a GPOUT, use au1300_gpio_set_value(pin, 0/1).
* REVISIT: is this function really necessary?
*/
void au1300_pinfunc_to_gpio(enum au1300_multifunc_pins gpio)
{
au1300_gpio_direction_input(gpio + AU1300_GPIO_BASE);
}
EXPORT_SYMBOL_GPL(au1300_pinfunc_to_gpio);
/*
* au1300_pinfunc_to_dev - assign a pin to the device function.
* @pin: pin (0-based GPIO number from datasheet).
*
* Assigns a GPIO pin to its associated device function; the pin will be
* driven by the device and not through GPIO functions.
*/
void au1300_pinfunc_to_dev(enum au1300_multifunc_pins gpio)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long bit;
r += GPIC_GPIO_BANKOFF(gpio);
bit = GPIC_GPIO_TO_BIT(gpio);
__raw_writel(bit, r + AU1300_GPIC_DEVSEL);
wmb();
}
EXPORT_SYMBOL_GPL(au1300_pinfunc_to_dev);
/*
* au1300_set_irq_priority - set internal priority of IRQ.
* @irq: irq to set priority (linux irq number).
* @p: priority (0 = highest, 3 = lowest).
*/
void au1300_set_irq_priority(unsigned int irq, int p)
{
irq -= ALCHEMY_GPIC_INT_BASE;
au1300_gpic_chgcfg(irq, GPIC_CFG_IL_MASK, GPIC_CFG_IL_SET(p));
}
EXPORT_SYMBOL_GPL(au1300_set_irq_priority);
/*
* au1300_set_dbdma_gpio - assign a gpio to one of the DBDMA triggers.
* @dchan: dbdma trigger select (0, 1).
* @gpio: pin to assign as trigger.
*
* DBDMA controller has 2 external trigger sources; this function
* assigns a GPIO to the selected trigger.
*/
void au1300_set_dbdma_gpio(int dchan, unsigned int gpio)
{
unsigned long r;
if ((dchan >= 0) && (dchan <= 1)) {
r = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
r &= ~(0xff << (8 * dchan));
r |= (gpio & 0x7f) << (8 * dchan);
__raw_writel(r, AU1300_GPIC_ADDR + AU1300_GPIC_DMASEL);
wmb();
}
}
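/*
 * Example (sketch; the GPIO number is hypothetical and board-specific):
 * route GPIO 12 to DBDMA external trigger select 0:
 *
 *   au1300_set_dbdma_gpio(0, 12);
 */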
static inline void gpic_pin_set_idlewake(unsigned int gpio, int allow)
{
au1300_gpic_chgcfg(gpio, GPIC_CFG_IDLEWAKE,
allow ? GPIC_CFG_IDLEWAKE : 0);
}
static void au1300_gpic_mask(struct irq_data *d)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long bit, irq = d->irq;
irq -= ALCHEMY_GPIC_INT_BASE;
r += GPIC_GPIO_BANKOFF(irq);
bit = GPIC_GPIO_TO_BIT(irq);
__raw_writel(bit, r + AU1300_GPIC_IDIS);
wmb();
gpic_pin_set_idlewake(irq, 0);
}
static void au1300_gpic_unmask(struct irq_data *d)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long bit, irq = d->irq;
irq -= ALCHEMY_GPIC_INT_BASE;
gpic_pin_set_idlewake(irq, 1);
r += GPIC_GPIO_BANKOFF(irq);
bit = GPIC_GPIO_TO_BIT(irq);
__raw_writel(bit, r + AU1300_GPIC_IEN);
wmb();
}
static void au1300_gpic_maskack(struct irq_data *d)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long bit, irq = d->irq;
irq -= ALCHEMY_GPIC_INT_BASE;
r += GPIC_GPIO_BANKOFF(irq);
bit = GPIC_GPIO_TO_BIT(irq);
__raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
__raw_writel(bit, r + AU1300_GPIC_IDIS); /* mask */
wmb();
gpic_pin_set_idlewake(irq, 0);
}
static void au1300_gpic_ack(struct irq_data *d)
{
void __iomem *r = AU1300_GPIC_ADDR;
unsigned long bit, irq = d->irq;
irq -= ALCHEMY_GPIC_INT_BASE;
r += GPIC_GPIO_BANKOFF(irq);
bit = GPIC_GPIO_TO_BIT(irq);
__raw_writel(bit, r + AU1300_GPIC_IPEND); /* ack */
wmb();
}
static struct irq_chip au1300_gpic = {
.name = "GPIOINT",
.irq_ack = au1300_gpic_ack,
.irq_mask = au1300_gpic_mask,
.irq_mask_ack = au1300_gpic_maskack,
.irq_unmask = au1300_gpic_unmask,
.irq_set_type = au1300_gpic_settype,
};
static int au1300_gpic_settype(struct irq_data *d, unsigned int type)
{
unsigned long s;
	const char *name = NULL;
irq_flow_handler_t hdl = NULL;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
s = GPIC_CFG_IC_LEVEL_HIGH;
name = "high";
hdl = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
s = GPIC_CFG_IC_LEVEL_LOW;
name = "low";
hdl = handle_level_irq;
break;
case IRQ_TYPE_EDGE_RISING:
s = GPIC_CFG_IC_EDGE_RISE;
name = "posedge";
hdl = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
s = GPIC_CFG_IC_EDGE_FALL;
name = "negedge";
hdl = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
s = GPIC_CFG_IC_EDGE_BOTH;
name = "bothedge";
hdl = handle_edge_irq;
break;
case IRQ_TYPE_NONE:
s = GPIC_CFG_IC_OFF;
name = "disabled";
hdl = handle_level_irq;
break;
default:
return -EINVAL;
}
irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
return 0;
}
/******************************************************************************/
static inline void ic_init(void __iomem *base)
{
/* initialize interrupt controller to a safe state */
__raw_writel(0xffffffff, base + IC_CFG0CLR);
__raw_writel(0xffffffff, base + IC_CFG1CLR);
__raw_writel(0xffffffff, base + IC_CFG2CLR);
__raw_writel(0xffffffff, base + IC_MASKCLR);
__raw_writel(0xffffffff, base + IC_ASSIGNCLR);
__raw_writel(0xffffffff, base + IC_WAKECLR);
__raw_writel(0xffffffff, base + IC_SRCSET);
__raw_writel(0xffffffff, base + IC_FALLINGCLR);
__raw_writel(0xffffffff, base + IC_RISINGCLR);
__raw_writel(0x00000000, base + IC_TESTBIT);
wmb();
}
static unsigned long alchemy_gpic_pmdata[ALCHEMY_GPIC_INT_NUM + 6];
static inline void alchemy_ic_suspend_one(void __iomem *base, unsigned long *d)
{
d[0] = __raw_readl(base + IC_CFG0RD);
d[1] = __raw_readl(base + IC_CFG1RD);
d[2] = __raw_readl(base + IC_CFG2RD);
d[3] = __raw_readl(base + IC_SRCRD);
d[4] = __raw_readl(base + IC_ASSIGNRD);
d[5] = __raw_readl(base + IC_WAKERD);
d[6] = __raw_readl(base + IC_MASKRD);
ic_init(base); /* shut it up too while at it */
}
static inline void alchemy_ic_resume_one(void __iomem *base, unsigned long *d)
{
ic_init(base);
__raw_writel(d[0], base + IC_CFG0SET);
__raw_writel(d[1], base + IC_CFG1SET);
__raw_writel(d[2], base + IC_CFG2SET);
__raw_writel(d[3], base + IC_SRCSET);
__raw_writel(d[4], base + IC_ASSIGNSET);
__raw_writel(d[5], base + IC_WAKESET);
wmb();
__raw_writel(d[6], base + IC_MASKSET);
wmb();
}
static int alchemy_ic_suspend(void)
{
alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
alchemy_gpic_pmdata);
alchemy_ic_suspend_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
&alchemy_gpic_pmdata[7]);
return 0;
}
static void alchemy_ic_resume(void)
{
alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR),
&alchemy_gpic_pmdata[7]);
alchemy_ic_resume_one((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR),
alchemy_gpic_pmdata);
}
static int alchemy_gpic_suspend(void)
{
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
int i;
/* save 4 interrupt mask status registers */
alchemy_gpic_pmdata[0] = __raw_readl(base + AU1300_GPIC_IEN + 0x0);
alchemy_gpic_pmdata[1] = __raw_readl(base + AU1300_GPIC_IEN + 0x4);
alchemy_gpic_pmdata[2] = __raw_readl(base + AU1300_GPIC_IEN + 0x8);
alchemy_gpic_pmdata[3] = __raw_readl(base + AU1300_GPIC_IEN + 0xc);
/* save misc register(s) */
alchemy_gpic_pmdata[4] = __raw_readl(base + AU1300_GPIC_DMASEL);
	/* silence everything: mask all interrupt sources */
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
wmb();
/* save pin/int-type configuration */
base += AU1300_GPIC_PINCFG;
for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
alchemy_gpic_pmdata[i + 5] = __raw_readl(base + (i << 2));
wmb();
return 0;
}
static void alchemy_gpic_resume(void)
{
void __iomem *base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
int i;
/* disable all first */
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x0);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x4);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0x8);
__raw_writel(~0UL, base + AU1300_GPIC_IDIS + 0xc);
wmb();
/* restore pin/int-type configurations */
base += AU1300_GPIC_PINCFG;
for (i = 0; i < ALCHEMY_GPIC_INT_NUM; i++)
__raw_writel(alchemy_gpic_pmdata[i + 5], base + (i << 2));
wmb();
/* restore misc register(s) */
base = (void __iomem *)KSEG1ADDR(AU1300_GPIC_PHYS_ADDR);
__raw_writel(alchemy_gpic_pmdata[4], base + AU1300_GPIC_DMASEL);
wmb();
/* finally restore masks */
__raw_writel(alchemy_gpic_pmdata[0], base + AU1300_GPIC_IEN + 0x0);
__raw_writel(alchemy_gpic_pmdata[1], base + AU1300_GPIC_IEN + 0x4);
__raw_writel(alchemy_gpic_pmdata[2], base + AU1300_GPIC_IEN + 0x8);
__raw_writel(alchemy_gpic_pmdata[3], base + AU1300_GPIC_IEN + 0xc);
wmb();
}
static struct syscore_ops alchemy_ic_pmops = {
.suspend = alchemy_ic_suspend,
.resume = alchemy_ic_resume,
};
static struct syscore_ops alchemy_gpic_pmops = {
.suspend = alchemy_gpic_suspend,
.resume = alchemy_gpic_resume,
};
/******************************************************************************/
/* create chained handlers for the 4 IC requests to the MIPS IRQ ctrl */
#define DISP(name, base, addr) \
static void au1000_##name##_dispatch(struct irq_desc *d) \
{ \
unsigned long r = __raw_readl((void __iomem *)KSEG1ADDR(addr)); \
if (likely(r)) \
generic_handle_irq(base + __ffs(r)); \
else \
spurious_interrupt(); \
}
DISP(ic0r0, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ0INT)
DISP(ic0r1, AU1000_INTC0_INT_BASE, AU1000_IC0_PHYS_ADDR + IC_REQ1INT)
DISP(ic1r0, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ0INT)
DISP(ic1r1, AU1000_INTC1_INT_BASE, AU1000_IC1_PHYS_ADDR + IC_REQ1INT)
static void alchemy_gpic_dispatch(struct irq_desc *d)
{
int i = __raw_readl(AU1300_GPIC_ADDR + AU1300_GPIC_PRIENC);
generic_handle_irq(ALCHEMY_GPIC_INT_BASE + i);
}
/******************************************************************************/
static void __init au1000_init_irq(struct alchemy_irqmap *map)
{
unsigned int bit, irq_nr;
void __iomem *base;
ic_init((void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR));
ic_init((void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR));
register_syscore_ops(&alchemy_ic_pmops);
mips_cpu_irq_init();
/* register all 64 possible IC0+IC1 irq sources as type "none".
	 * Use irq_set_irq_type() to set edge/level behaviour at runtime.
*/
for (irq_nr = AU1000_INTC0_INT_BASE;
(irq_nr < AU1000_INTC0_INT_BASE + 32); irq_nr++)
au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
for (irq_nr = AU1000_INTC1_INT_BASE;
(irq_nr < AU1000_INTC1_INT_BASE + 32); irq_nr++)
au1x_ic_settype(irq_get_irq_data(irq_nr), IRQ_TYPE_NONE);
/*
	 * Initialize the fixed per-processor sources from the map; entries
	 * may belong to either IC0 or IC1.
*/
while (map->irq != -1) {
irq_nr = map->irq;
if (irq_nr >= AU1000_INTC1_INT_BASE) {
bit = irq_nr - AU1000_INTC1_INT_BASE;
base = (void __iomem *)KSEG1ADDR(AU1000_IC1_PHYS_ADDR);
} else {
bit = irq_nr - AU1000_INTC0_INT_BASE;
base = (void __iomem *)KSEG1ADDR(AU1000_IC0_PHYS_ADDR);
}
if (map->prio == 0)
__raw_writel(1 << bit, base + IC_ASSIGNSET);
au1x_ic_settype(irq_get_irq_data(irq_nr), map->type);
++map;
}
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, au1000_ic0r0_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, au1000_ic0r1_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, au1000_ic1r0_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, au1000_ic1r1_dispatch);
}
static void __init alchemy_gpic_init_irq(const struct alchemy_irqmap *dints)
{
int i;
void __iomem *bank_base;
register_syscore_ops(&alchemy_gpic_pmops);
mips_cpu_irq_init();
/* disable & ack all possible interrupt sources */
for (i = 0; i < 4; i++) {
bank_base = AU1300_GPIC_ADDR + (i * 4);
__raw_writel(~0UL, bank_base + AU1300_GPIC_IDIS);
wmb();
__raw_writel(~0UL, bank_base + AU1300_GPIC_IPEND);
wmb();
}
/* register an irq_chip for them, with 2nd highest priority */
for (i = ALCHEMY_GPIC_INT_BASE; i <= ALCHEMY_GPIC_INT_LAST; i++) {
au1300_set_irq_priority(i, 1);
au1300_gpic_settype(irq_get_irq_data(i), IRQ_TYPE_NONE);
}
/* setup known on-chip sources */
while ((i = dints->irq) != -1) {
au1300_gpic_settype(irq_get_irq_data(i), dints->type);
au1300_set_irq_priority(i, dints->prio);
if (dints->internal)
au1300_pinfunc_to_dev(i - ALCHEMY_GPIC_INT_BASE);
dints++;
}
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 2, alchemy_gpic_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 3, alchemy_gpic_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 4, alchemy_gpic_dispatch);
irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 5, alchemy_gpic_dispatch);
}
/******************************************************************************/
void __init arch_init_irq(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
au1000_init_irq(au1000_irqmap);
break;
case ALCHEMY_CPU_AU1500:
au1000_init_irq(au1500_irqmap);
break;
case ALCHEMY_CPU_AU1100:
au1000_init_irq(au1100_irqmap);
break;
case ALCHEMY_CPU_AU1550:
au1000_init_irq(au1550_irqmap);
break;
case ALCHEMY_CPU_AU1200:
au1000_init_irq(au1200_irqmap);
break;
case ALCHEMY_CPU_AU1300:
alchemy_gpic_init_irq(au1300_irqmap);
break;
default:
pr_err("unknown Alchemy IRQ core\n");
break;
}
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned long r = (read_c0_status() & read_c0_cause()) >> 8;
do_IRQ(MIPS_CPU_IRQ_BASE + __ffs(r & 0xff));
}
| linux-master | arch/mips/alchemy/common/irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* USB block power/access management abstraction.
*
* Au1000+: The OHCI block control register is at the far end of the OHCI memory
 * area. The Au1550 has its OHCI block at a different base address. No
 * need to handle UDC here.
* Au1200: one register to control access and clocks to O/EHCI, UDC and OTG
* as well as the PHY for EHCI and UDC.
*
*/
#include <linux/clk.h>
#include <linux/delay.h>	/* udelay() */
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/cpu.h>
#include <asm/mach-au1x00/au1000.h>
/* control register offsets */
#define AU1000_OHCICFG 0x7fffc
#define AU1550_OHCICFG 0x07ffc
#define AU1200_USBCFG 0x04
/* Au1000 USB block config bits */
#define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */
#define USBHEN_CE (1 << 3) /* OHCI block clock enable */
#define USBHEN_E (1 << 2) /* OHCI block enable */
#define USBHEN_C (1 << 1) /* OHCI block coherency bit */
#define USBHEN_BE (1 << 0) /* OHCI Big-Endian */
/* Au1200 USB config bits */
#define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */
#define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */
#define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */
#define USBCFG_SSD (1 << 23) /* serial short detect en */
#define USBCFG_PPE (1 << 19) /* HS PHY PLL */
#define USBCFG_UCE (1 << 18) /* UDC clock enable */
#define USBCFG_ECE (1 << 17) /* EHCI clock enable */
#define USBCFG_OCE (1 << 16) /* OHCI clock enable */
#define USBCFG_FLA(x) (((x) & 0x3f) << 8)
#define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */
#define USBCFG_GME (1 << 6) /* OTG mem access */
#define USBCFG_DBE (1 << 5) /* UDC busmaster enable */
#define USBCFG_DME (1 << 4) /* UDC mem enable */
#define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */
#define USBCFG_EME (1 << 2) /* EHCI mem enable */
#define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */
#define USBCFG_OME (1 << 0) /* OHCI mem enable */
#define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\
USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \
USBCFG_GME | USBCFG_DBE | USBCFG_DME | \
USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
USBCFG_OME)
/* Au1300 USB config registers */
#define USB_DWC_CTRL1 0x00
#define USB_DWC_CTRL2 0x04
#define USB_VBUS_TIMER 0x10
#define USB_SBUS_CTRL 0x14
#define USB_MSR_ERR 0x18
#define USB_DWC_CTRL3 0x1C
#define USB_DWC_CTRL4 0x20
#define USB_OTG_STATUS 0x28
#define USB_DWC_CTRL5 0x2C
#define USB_DWC_CTRL6 0x30
#define USB_DWC_CTRL7 0x34
#define USB_PHY_STATUS 0xC0
#define USB_INT_STATUS 0xC4
#define USB_INT_ENABLE 0xC8
#define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */
#define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */
#define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */
#define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */
#define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */
#define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */
#define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19)
#define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18)
#define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17)
#define USB_DWC_CTRL3_OTG0_CKEN (1 << 16)
#define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */
#define USB_INTEN_FORCE 0x20
#define USB_INTEN_PHY 0x10
#define USB_INTEN_UDC 0x08
#define USB_INTEN_EHCI 0x04
#define USB_INTEN_OHCI1 0x02
#define USB_INTEN_OHCI0 0x01
static DEFINE_SPINLOCK(alchemy_usb_lock);
static inline void __au1300_usb_phyctl(void __iomem *base, int enable)
{
unsigned long r, s;
r = __raw_readl(base + USB_DWC_CTRL2);
s = __raw_readl(base + USB_DWC_CTRL3);
s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN |
USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN;
if (enable) {
/* simply enable all PHYs */
r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
USB_DWC_CTRL2_PHYRS;
__raw_writel(r, base + USB_DWC_CTRL2);
wmb();
} else if (!s) {
/* no USB block active, do disable all PHYs */
r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
USB_DWC_CTRL2_PHYRS);
__raw_writel(r, base + USB_DWC_CTRL2);
wmb();
}
}
static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
{
unsigned long r;
if (enable) {
__raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
wmb();
r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
: USB_DWC_CTRL3_OHCI1_CKEN;
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
__au1300_usb_phyctl(base, enable); /* power up the PHYs */
r = __raw_readl(base + USB_INT_ENABLE);
r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1;
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
/* reset the OHCI start clock bit */
__raw_writel(0, base + USB_DWC_CTRL7);
wmb();
} else {
r = __raw_readl(base + USB_INT_ENABLE);
r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1);
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
r = __raw_readl(base + USB_DWC_CTRL3);
r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
: USB_DWC_CTRL3_OHCI1_CKEN);
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
__au1300_usb_phyctl(base, enable);
}
}
static inline void __au1300_ehci_control(void __iomem *base, int enable)
{
unsigned long r;
if (enable) {
r = __raw_readl(base + USB_DWC_CTRL3);
r |= USB_DWC_CTRL3_EHCI0_CKEN;
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
r = __raw_readl(base + USB_DWC_CTRL1);
r |= USB_DWC_CTRL1_HSTRS;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
__au1300_usb_phyctl(base, enable);
r = __raw_readl(base + USB_INT_ENABLE);
r |= USB_INTEN_EHCI;
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
} else {
r = __raw_readl(base + USB_INT_ENABLE);
r &= ~USB_INTEN_EHCI;
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
r = __raw_readl(base + USB_DWC_CTRL1);
r &= ~USB_DWC_CTRL1_HSTRS;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
r = __raw_readl(base + USB_DWC_CTRL3);
r &= ~USB_DWC_CTRL3_EHCI0_CKEN;
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
__au1300_usb_phyctl(base, enable);
}
}
static inline void __au1300_udc_control(void __iomem *base, int enable)
{
unsigned long r;
if (enable) {
r = __raw_readl(base + USB_DWC_CTRL1);
r |= USB_DWC_CTRL1_DCRS;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
__au1300_usb_phyctl(base, enable);
r = __raw_readl(base + USB_INT_ENABLE);
r |= USB_INTEN_UDC;
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
} else {
r = __raw_readl(base + USB_INT_ENABLE);
r &= ~USB_INTEN_UDC;
__raw_writel(r, base + USB_INT_ENABLE);
wmb();
r = __raw_readl(base + USB_DWC_CTRL1);
r &= ~USB_DWC_CTRL1_DCRS;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
__au1300_usb_phyctl(base, enable);
}
}
static inline void __au1300_otg_control(void __iomem *base, int enable)
{
unsigned long r;
if (enable) {
r = __raw_readl(base + USB_DWC_CTRL3);
r |= USB_DWC_CTRL3_OTG0_CKEN;
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
r = __raw_readl(base + USB_DWC_CTRL1);
r &= ~USB_DWC_CTRL1_OTGD;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
__au1300_usb_phyctl(base, enable);
} else {
r = __raw_readl(base + USB_DWC_CTRL1);
r |= USB_DWC_CTRL1_OTGD;
__raw_writel(r, base + USB_DWC_CTRL1);
wmb();
r = __raw_readl(base + USB_DWC_CTRL3);
r &= ~USB_DWC_CTRL3_OTG0_CKEN;
__raw_writel(r, base + USB_DWC_CTRL3);
wmb();
__au1300_usb_phyctl(base, enable);
}
}
static inline int au1300_usb_control(int block, int enable)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
int ret = 0;
switch (block) {
case ALCHEMY_USB_OHCI0:
__au1300_ohci_control(base, enable, 0);
break;
case ALCHEMY_USB_OHCI1:
__au1300_ohci_control(base, enable, 1);
break;
case ALCHEMY_USB_EHCI0:
__au1300_ehci_control(base, enable);
break;
case ALCHEMY_USB_UDC0:
__au1300_udc_control(base, enable);
break;
case ALCHEMY_USB_OTG0:
__au1300_otg_control(base, enable);
break;
default:
ret = -ENODEV;
}
return ret;
}
static inline void au1300_usb_init(void)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
/* set some sane defaults. Note: we don't fiddle with DWC_CTRL4
* here at all: Port 2 routing (EHCI or UDC) must be set either
* by boot firmware or platform init code; I can't autodetect
* a sane setting.
*/
__raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */
wmb();
__raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */
wmb();
__raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */
wmb();
__raw_writel(~0, base + USB_INT_STATUS); /* clear int status */
wmb();
/* set coherent access bit */
__raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL);
wmb();
}
static inline void __au1200_ohci_control(void __iomem *base, int enable)
{
unsigned long r = __raw_readl(base + AU1200_USBCFG);
if (enable) {
__raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG);
wmb();
udelay(2000);
} else {
__raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG);
wmb();
udelay(1000);
}
}
static inline void __au1200_ehci_control(void __iomem *base, int enable)
{
unsigned long r = __raw_readl(base + AU1200_USBCFG);
if (enable) {
__raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG);
wmb();
udelay(1000);
} else {
if (!(r & USBCFG_UCE)) /* UDC also off? */
r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
__raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG);
wmb();
udelay(1000);
}
}
static inline void __au1200_udc_control(void __iomem *base, int enable)
{
unsigned long r = __raw_readl(base + AU1200_USBCFG);
if (enable) {
__raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG);
wmb();
} else {
if (!(r & USBCFG_ECE)) /* EHCI also off? */
r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */
__raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG);
wmb();
}
}
static inline int au1200_usb_control(int block, int enable)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
switch (block) {
case ALCHEMY_USB_OHCI0:
__au1200_ohci_control(base, enable);
break;
case ALCHEMY_USB_UDC0:
__au1200_udc_control(base, enable);
break;
case ALCHEMY_USB_EHCI0:
__au1200_ehci_control(base, enable);
break;
default:
return -ENODEV;
}
return 0;
}
/* initialize USB block(s) to a known working state */
static inline void au1200_usb_init(void)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR);
__raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG);
wmb();
udelay(1000);
}
static inline int au1000_usb_init(unsigned long rb, int reg)
{
void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg);
unsigned long r = __raw_readl(base);
struct clk *c;
	/* 48MHz check: don't initialize if no clock can provide that rate */
c = clk_get(NULL, "usbh_clk");
if (IS_ERR(c))
return -ENODEV;
if (clk_round_rate(c, 48000000) != 48000000) {
clk_put(c);
return -ENODEV;
}
if (clk_set_rate(c, 48000000)) {
clk_put(c);
return -ENODEV;
}
clk_put(c);
#if defined(__BIG_ENDIAN)
r |= USBHEN_BE;
#endif
r |= USBHEN_C;
__raw_writel(r, base);
wmb();
udelay(1000);
return 0;
}
static inline void __au1xx0_ohci_control(int enable, unsigned long rb, int creg)
{
void __iomem *base = (void __iomem *)KSEG1ADDR(rb);
unsigned long r = __raw_readl(base + creg);
struct clk *c = clk_get(NULL, "usbh_clk");
if (IS_ERR(c))
return;
if (enable) {
if (clk_prepare_enable(c))
goto out;
__raw_writel(r | USBHEN_CE, base + creg);
wmb();
udelay(1000);
__raw_writel(r | USBHEN_CE | USBHEN_E, base + creg);
wmb();
udelay(1000);
/* wait for reset complete (read reg twice: au1500 erratum) */
while (__raw_readl(base + creg),
!(__raw_readl(base + creg) & USBHEN_RD))
udelay(1000);
} else {
__raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg);
wmb();
clk_disable_unprepare(c);
}
out:
clk_put(c);
}
static inline int au1000_usb_control(int block, int enable, unsigned long rb,
int creg)
{
int ret = 0;
switch (block) {
case ALCHEMY_USB_OHCI0:
__au1xx0_ohci_control(enable, rb, creg);
break;
default:
ret = -ENODEV;
}
return ret;
}
/*
* alchemy_usb_control - control Alchemy on-chip USB blocks
* @block: USB block to target
* @enable: set 1 to enable a block, 0 to disable
*/
int alchemy_usb_control(int block, int enable)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&alchemy_usb_lock, flags);
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
ret = au1000_usb_control(block, enable,
AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG);
break;
case ALCHEMY_CPU_AU1550:
ret = au1000_usb_control(block, enable,
AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG);
break;
case ALCHEMY_CPU_AU1200:
ret = au1200_usb_control(block, enable);
break;
case ALCHEMY_CPU_AU1300:
ret = au1300_usb_control(block, enable);
break;
default:
ret = -ENODEV;
}
spin_unlock_irqrestore(&alchemy_usb_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(alchemy_usb_control);
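/*
 * A minimal consumer sketch (error handling trimmed): host/peripheral glue
 * code is expected to bracket its hardware accesses with this call.
 *
 *   if (alchemy_usb_control(ALCHEMY_USB_OHCI0, 1))   // power the block up
 *       return -ENODEV;
 *   ...access the OHCI block...
 *   alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);       // power it back down
 */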
static unsigned long alchemy_usb_pmdata[2];
static void au1000_usb_pm(unsigned long br, int creg, int susp)
{
void __iomem *base = (void __iomem *)KSEG1ADDR(br);
if (susp) {
alchemy_usb_pmdata[0] = __raw_readl(base + creg);
		/* There appears to be some undocumented reset register... */
__raw_writel(0, base + 0x04);
wmb();
__raw_writel(0, base + creg);
wmb();
} else {
__raw_writel(alchemy_usb_pmdata[0], base + creg);
wmb();
}
}
static void au1200_usb_pm(int susp)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR);
if (susp) {
/* save OTG_CAP/MUX registers which indicate port routing */
/* FIXME: write an OTG driver to do that */
alchemy_usb_pmdata[0] = __raw_readl(base + 0x00);
alchemy_usb_pmdata[1] = __raw_readl(base + 0x04);
} else {
/* restore access to all MMIO areas */
au1200_usb_init();
/* restore OTG_CAP/MUX registers */
__raw_writel(alchemy_usb_pmdata[0], base + 0x00);
__raw_writel(alchemy_usb_pmdata[1], base + 0x04);
wmb();
}
}
static void au1300_usb_pm(int susp)
{
void __iomem *base =
(void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
/* remember Port2 routing */
if (susp) {
alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4);
} else {
au1300_usb_init();
__raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4);
wmb();
}
}
static void alchemy_usb_pm(int susp)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp);
break;
case ALCHEMY_CPU_AU1550:
au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp);
break;
case ALCHEMY_CPU_AU1200:
au1200_usb_pm(susp);
break;
case ALCHEMY_CPU_AU1300:
au1300_usb_pm(susp);
break;
}
}
static int alchemy_usb_suspend(void)
{
alchemy_usb_pm(1);
return 0;
}
static void alchemy_usb_resume(void)
{
alchemy_usb_pm(0);
}
static struct syscore_ops alchemy_usb_pm_ops = {
.suspend = alchemy_usb_suspend,
.resume = alchemy_usb_resume,
};
static int __init alchemy_usb_init(void)
{
int ret = 0;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
ret = au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR,
AU1000_OHCICFG);
break;
case ALCHEMY_CPU_AU1550:
ret = au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR,
AU1550_OHCICFG);
break;
case ALCHEMY_CPU_AU1200:
au1200_usb_init();
break;
case ALCHEMY_CPU_AU1300:
au1300_usb_init();
break;
}
if (!ret)
register_syscore_ops(&alchemy_usb_pm_ops);
return ret;
}
arch_initcall(alchemy_usb_init);
| linux-master | arch/mips/alchemy/common/usb.c |
/*
* Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <[email protected]>
* GPIOLIB support for Alchemy chips.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
 * Notes:
 * The Au1000 SoC has only one GPIO block: GPIO1.
 * The Au1100, Au15x0 and Au12x0 have a second one: GPIO2.
 * The Au1300 is entirely different: one block with up to 128 GPIOs.
*/
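/*
 * Consumer sketch: these register as ordinary gpiolib chips, so e.g. the
 * legacy GPIO calls work as usual (pin number and label hypothetical):
 *
 *   gpio_request(ALCHEMY_GPIO2_BASE + 5, "my-led");
 *   gpio_direction_output(ALCHEMY_GPIO2_BASE + 5, 1);
 */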
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gpio/driver.h>
#include <asm/mach-au1x00/gpio-au1000.h>
#include <asm/mach-au1x00/gpio-au1300.h>
static int gpio2_get(struct gpio_chip *chip, unsigned offset)
{
return !!alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
}
static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
{
alchemy_gpio2_set_value(offset + ALCHEMY_GPIO2_BASE, value);
}
static int gpio2_direction_input(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_direction_input(offset + ALCHEMY_GPIO2_BASE);
}
static int gpio2_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
return alchemy_gpio2_direction_output(offset + ALCHEMY_GPIO2_BASE,
value);
}
static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
}
static int gpio1_get(struct gpio_chip *chip, unsigned offset)
{
return !!alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
}
static void gpio1_set(struct gpio_chip *chip,
unsigned offset, int value)
{
alchemy_gpio1_set_value(offset + ALCHEMY_GPIO1_BASE, value);
}
static int gpio1_direction_input(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio1_direction_input(offset + ALCHEMY_GPIO1_BASE);
}
static int gpio1_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
return alchemy_gpio1_direction_output(offset + ALCHEMY_GPIO1_BASE,
value);
}
static int gpio1_to_irq(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio1_to_irq(offset + ALCHEMY_GPIO1_BASE);
}
struct gpio_chip alchemy_gpio_chip[] = {
[0] = {
.label = "alchemy-gpio1",
.direction_input = gpio1_direction_input,
.direction_output = gpio1_direction_output,
.get = gpio1_get,
.set = gpio1_set,
.to_irq = gpio1_to_irq,
.base = ALCHEMY_GPIO1_BASE,
.ngpio = ALCHEMY_GPIO1_NUM,
},
[1] = {
.label = "alchemy-gpio2",
.direction_input = gpio2_direction_input,
.direction_output = gpio2_direction_output,
.get = gpio2_get,
.set = gpio2_set,
.to_irq = gpio2_to_irq,
.base = ALCHEMY_GPIO2_BASE,
.ngpio = ALCHEMY_GPIO2_NUM,
},
};
static int alchemy_gpic_get(struct gpio_chip *chip, unsigned int off)
{
return !!au1300_gpio_get_value(off + AU1300_GPIO_BASE);
}
static void alchemy_gpic_set(struct gpio_chip *chip, unsigned int off, int v)
{
au1300_gpio_set_value(off + AU1300_GPIO_BASE, v);
}
static int alchemy_gpic_dir_input(struct gpio_chip *chip, unsigned int off)
{
return au1300_gpio_direction_input(off + AU1300_GPIO_BASE);
}
static int alchemy_gpic_dir_output(struct gpio_chip *chip, unsigned int off,
int v)
{
return au1300_gpio_direction_output(off + AU1300_GPIO_BASE, v);
}
static int alchemy_gpic_gpio_to_irq(struct gpio_chip *chip, unsigned int off)
{
return au1300_gpio_to_irq(off + AU1300_GPIO_BASE);
}
static struct gpio_chip au1300_gpiochip = {
.label = "alchemy-gpic",
.direction_input = alchemy_gpic_dir_input,
.direction_output = alchemy_gpic_dir_output,
.get = alchemy_gpic_get,
.set = alchemy_gpic_set,
.to_irq = alchemy_gpic_gpio_to_irq,
.base = AU1300_GPIO_BASE,
.ngpio = AU1300_GPIO_NUM,
};
static int __init alchemy_gpiochip_init(void)
{
int ret = 0;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
break;
case ALCHEMY_CPU_AU1500...ALCHEMY_CPU_AU1200:
ret = gpiochip_add_data(&alchemy_gpio_chip[0], NULL);
ret |= gpiochip_add_data(&alchemy_gpio_chip[1], NULL);
break;
case ALCHEMY_CPU_AU1300:
ret = gpiochip_add_data(&au1300_gpiochip, NULL);
break;
}
return ret;
}
arch_initcall(alchemy_gpiochip_init);
| linux-master | arch/mips/alchemy/common/gpiolib.c |
/*
*
* BRIEF MODULE DESCRIPTION
* The Descriptor Based DMA channel manager that first appeared
* on the Au1550. I started with dma.c, but I think all that is
* left is this initial comment :-)
*
* Copyright 2004 Embedded Edge, LLC
* [email protected]
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
/*
* The Descriptor Based DMA supports up to 16 channels.
*
* There are 32 devices defined. We keep an internal structure
* of devices using these channels, along with additional
* information.
*
* We allocate the descriptors and allow access to them through various
* functions. The drivers allocate the data buffers and assign them
* to the descriptors.
*/
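/*
 * Rough driver-side flow (a sketch; the rest of the API follows below):
 *  1. pick source/destination device ids (the DSCR_CMD0_* tables below,
 *     or a custom target registered with au1xxx_ddma_add_device()),
 *  2. allocate a channel with au1xxx_dbdma_chan_alloc(),
 *  3. attach data buffers to the descriptors and kick the channel.
 */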
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
static dbdma_global_t *dbdma_gptr =
(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
static int dbdma_initialized;
static dbdev_tab_t *dbdev_tab;
static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
/* UARTS */
{ AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
{ AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
{ AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
{ AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },
/* EXT DMA */
{ AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },
/* USB DEV */
{ AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
{ AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
{ AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
{ AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
{ AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
{ AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },
/* PSCs */
{ AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },
{ AU1550_DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */
{ AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */
/* MAC 0 */
{ AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
{ AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
/* MAC 1 */
{ AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
{ AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
{ AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
{ AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
{ AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
{ AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },
{ AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
{ AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
{ AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
{ AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
{ AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
{ AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
{ AU1200_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
{ AU1200_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
{ AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
{ AU1200_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
{ AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
{ AU1200_DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
{ AU1200_DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
{ AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
static dbdev_tab_t au1300_dbdev_tab[] __initdata = {
{ AU1300_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x10100004, 0, 0 },
{ AU1300_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x10100000, 0, 0 },
{ AU1300_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x10101004, 0, 0 },
{ AU1300_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x10101000, 0, 0 },
{ AU1300_DSCR_CMD0_UART2_TX, DEV_FLAGS_OUT, 0, 8, 0x10102004, 0, 0 },
{ AU1300_DSCR_CMD0_UART2_RX, DEV_FLAGS_IN, 0, 8, 0x10102000, 0, 0 },
{ AU1300_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x10103004, 0, 0 },
{ AU1300_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x10103000, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 8, 8, 0x10601000, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 8, 8, 0x10601004, 0, 0 },
{ AU1300_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
{ AU1300_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
{ AU1300_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0001c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x10a0001c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0101c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x10a0101c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0201c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 16, 0x10a0201c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 16, 0x10a0301c, 0, 0 },
{ AU1300_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 16, 0x10a0301c, 0, 0 },
{ AU1300_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1300_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_TX2, DEV_FLAGS_OUT, 4, 8, 0x10602000, 0, 0 },
{ AU1300_DSCR_CMD0_SDMS_RX2, DEV_FLAGS_IN, 4, 8, 0x10602004, 0, 0 },
{ AU1300_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ AU1300_DSCR_CMD0_UDMA, DEV_FLAGS_ANYUSE, 0, 32, 0x14001810, 0, 0 },
{ AU1300_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
{ AU1300_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
/* 32 predefined plus 32 custom */
#define DBDEV_TAB_SIZE 64
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
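/*
 * Look up a device table entry by id; returns NULL if not found.
 * Free slots have dev_id == ~0, so find_dbdev_id(~0) returns the
 * first unused slot.
 */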
static dbdev_tab_t *find_dbdev_id(u32 id)
{
int i;
dbdev_tab_t *p;
for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
p = &dbdev_tab[i];
if (p->dev_id == id)
return p;
}
return NULL;
}
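/* Translate a descriptor's physical next-pointer into a virtual address. */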
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);
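/*
 * Add a board-specific device to the first free table slot. Returns
 * the newly assigned custom device id (built from a running counter
 * starting at 0x1000 and the caller's original id), or 0 on failure.
 */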
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
u32 ret = 0;
dbdev_tab_t *p;
static u16 new_id = 0x1000;
p = find_dbdev_id(~0);
	if (p != NULL) {
memcpy(p, dev, sizeof(dbdev_tab_t));
p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
ret = p->dev_id;
new_id++;
#if 0
printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
p->dev_id, p->dev_flags, p->dev_physaddr);
#endif
}
return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);
void au1xxx_ddma_del_device(u32 devid)
{
dbdev_tab_t *p = find_dbdev_id(devid);
if (p != NULL) {
memset(p, 0, sizeof(dbdev_tab_t));
p->dev_id = ~0;
}
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);
/* Allocate a channel and return a non-zero descriptor if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
void (*callback)(int, void *), void *callparam)
{
unsigned long flags;
u32 used, chan;
u32 dcp;
int i;
dbdev_tab_t *stp, *dtp;
chan_tab_t *ctp;
au1x_dma_chan_t *cp;
/*
* We do the initialization on the first channel allocation.
* We have to wait because of the interrupt handler initialization
* which can't be done successfully during board set up.
*/
if (!dbdma_initialized)
return 0;
stp = find_dbdev_id(srcid);
if (stp == NULL)
return 0;
dtp = find_dbdev_id(destid);
if (dtp == NULL)
return 0;
used = 0;
/* Check to see if we can get both channels. */
spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
(stp->dev_flags & DEV_FLAGS_ANYUSE)) {
/* Got source */
stp->dev_flags |= DEV_FLAGS_INUSE;
if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
(dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
/* Got destination */
dtp->dev_flags |= DEV_FLAGS_INUSE;
} else {
/* Can't get dest. Release src. */
stp->dev_flags &= ~DEV_FLAGS_INUSE;
used++;
}
} else
used++;
spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
if (used)
return 0;
/* Let's see if we can allocate a channel for it. */
ctp = NULL;
chan = 0;
spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
for (i = 0; i < NUM_DBDMA_CHANS; i++)
if (chan_tab_ptr[i] == NULL) {
/*
			 * If kmalloc fails, it is handled below the
			 * same as an unavailable channel.
*/
ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
chan_tab_ptr[i] = ctp;
break;
}
spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);
if (ctp != NULL) {
memset(ctp, 0, sizeof(chan_tab_t));
ctp->chan_index = chan = i;
dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
dcp += (0x0100 * chan);
ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
cp = (au1x_dma_chan_t *)dcp;
ctp->chan_src = stp;
ctp->chan_dest = dtp;
ctp->chan_callback = callback;
ctp->chan_callparam = callparam;
/* Initialize channel configuration. */
i = 0;
if (stp->dev_intlevel)
i |= DDMA_CFG_SED;
if (stp->dev_intpolarity)
i |= DDMA_CFG_SP;
if (dtp->dev_intlevel)
i |= DDMA_CFG_DED;
if (dtp->dev_intpolarity)
i |= DDMA_CFG_DP;
if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
(dtp->dev_flags & DEV_FLAGS_SYNC))
i |= DDMA_CFG_SYNC;
cp->ddma_cfg = i;
wmb(); /* drain writebuffer */
/*
* Return a non-zero value that can be used to find the channel
* information in subsequent operations.
*/
return (u32)(&chan_tab_ptr[chan]);
}
/* Release devices */
stp->dev_flags &= ~DEV_FLAGS_INUSE;
dtp->dev_flags &= ~DEV_FLAGS_INUSE;
return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
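/*
 * Typical driver flow (a minimal sketch; the ids, callback and sizes
 * below are made up):
 *
 *	u32 ch = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS, destid,
 *					 my_dma_done_cb, my_dev);
 *	au1xxx_dbdma_ring_alloc(ch, 8);
 *	au1xxx_dbdma_start(ch);
 *	au1xxx_dbdma_put_source(ch, dma_addr, len, DDMA_FLAGS_IE);
 */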
/*
* Set the device width if source or destination is a FIFO.
* Should be 8, 16, or 32 bits.
*/
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
u32 rv;
chan_tab_t *ctp;
dbdev_tab_t *stp, *dtp;
ctp = *((chan_tab_t **)chanid);
stp = ctp->chan_src;
dtp = ctp->chan_dest;
rv = 0;
if (stp->dev_flags & DEV_FLAGS_IN) { /* Source in fifo */
rv = stp->dev_devwidth;
stp->dev_devwidth = bits;
}
if (dtp->dev_flags & DEV_FLAGS_OUT) { /* Destination out fifo */
rv = dtp->dev_devwidth;
dtp->dev_devwidth = bits;
}
return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
int i;
u32 desc_base, srcid, destid;
u32 cmd0, cmd1, src1, dest1;
u32 src0, dest0;
chan_tab_t *ctp;
dbdev_tab_t *stp, *dtp;
au1x_ddma_desc_t *dp;
	/*
	 * We could validate that chanid points into chan_tab_ptr[],
	 * but for now we trust the caller.
	 */
ctp = *((chan_tab_t **)chanid);
stp = ctp->chan_src;
dtp = ctp->chan_dest;
/*
	 * The descriptors must be 32-byte aligned. The allocation may
	 * already hand us a suitably aligned address, so try the exact
	 * size first; only if that is misaligned do we over-allocate
	 * and align by hand.
*/
desc_base = (u32)kmalloc_array(entries, sizeof(au1x_ddma_desc_t),
GFP_KERNEL|GFP_DMA);
if (desc_base == 0)
return 0;
if (desc_base & 0x1f) {
/*
		 * Not aligned: allocate again with extra space and round
		 * the base address up to the required alignment.
*/
kfree((const void *)desc_base);
i = entries * sizeof(au1x_ddma_desc_t);
i += (sizeof(au1x_ddma_desc_t) - 1);
desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
if (desc_base == 0)
return 0;
ctp->cdb_membase = desc_base;
desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
} else
ctp->cdb_membase = desc_base;
dp = (au1x_ddma_desc_t *)desc_base;
/* Keep track of the base descriptor. */
ctp->chan_desc_base = dp;
/* Initialize the rings with as much information as we know. */
srcid = stp->dev_id;
destid = dtp->dev_id;
cmd0 = cmd1 = src1 = dest1 = 0;
src0 = dest0 = 0;
cmd0 |= DSCR_CMD0_SID(srcid);
cmd0 |= DSCR_CMD0_DID(destid);
cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);
/* Is it mem to mem transfer? */
if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
(DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
(DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
cmd0 |= DSCR_CMD0_MEM;
switch (stp->dev_devwidth) {
case 8:
cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
break;
case 16:
cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
break;
case 32:
default:
cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
break;
}
switch (dtp->dev_devwidth) {
case 8:
cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
break;
case 16:
cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
break;
case 32:
default:
cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
break;
}
/*
* If the device is marked as an in/out FIFO, ensure it is
* set non-coherent.
*/
if (stp->dev_flags & DEV_FLAGS_IN)
cmd0 |= DSCR_CMD0_SN; /* Source in FIFO */
if (dtp->dev_flags & DEV_FLAGS_OUT)
cmd0 |= DSCR_CMD0_DN; /* Destination out FIFO */
/*
* Set up source1. For now, assume no stride and increment.
* A channel attribute update can change this later.
*/
switch (stp->dev_tsize) {
case 1:
src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
break;
case 2:
src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
break;
case 4:
src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
break;
case 8:
default:
src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
break;
}
/* If source input is FIFO, set static address. */
if (stp->dev_flags & DEV_FLAGS_IN) {
if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
else
src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
}
if (stp->dev_physaddr)
src0 = stp->dev_physaddr;
/*
* Set up dest1. For now, assume no stride and increment.
* A channel attribute update can change this later.
*/
switch (dtp->dev_tsize) {
case 1:
dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
break;
case 2:
dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
break;
case 4:
dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
break;
case 8:
default:
dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
break;
}
/* If destination output is FIFO, set static address. */
if (dtp->dev_flags & DEV_FLAGS_OUT) {
if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
else
dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
}
if (dtp->dev_physaddr)
dest0 = dtp->dev_physaddr;
#if 0
printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
"source1:%x dest0:%x dest1:%x\n",
dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
src1, dest0, dest1);
#endif
for (i = 0; i < entries; i++) {
dp->dscr_cmd0 = cmd0;
dp->dscr_cmd1 = cmd1;
dp->dscr_source0 = src0;
dp->dscr_source1 = src1;
dp->dscr_dest0 = dest0;
dp->dscr_dest1 = dest1;
dp->dscr_stat = 0;
dp->sw_context = 0;
dp->sw_status = 0;
dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
dp++;
}
/* Make last descriptor point to the first. */
dp--;
dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
/*
* Put a source buffer into the DMA ring.
* This updates the source pointer and byte count. Normally used
* for memory to fifo transfers.
*/
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
	/*
	 * We could validate that chanid points into chan_tab_ptr[],
	 * but for now we trust the caller.
	 */
ctp = *(chan_tab_t **)chanid;
	/*
	 * We shouldn't have multiple callers for a particular channel;
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
dp = ctp->put_ptr;
/*
* If the descriptor is valid, we are way ahead of the DMA
* engine, so just return an error condition.
*/
if (dp->dscr_cmd0 & DSCR_CMD0_V)
return 0;
/* Load up buffer address and byte count. */
dp->dscr_source0 = buf & ~0UL;
dp->dscr_cmd1 = nbytes;
/* Check flags */
if (flags & DDMA_FLAGS_IE)
dp->dscr_cmd0 |= DSCR_CMD0_IE;
if (flags & DDMA_FLAGS_NOIE)
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
/*
* There is an erratum on certain Au1200/Au1550 revisions that could
* result in "stale" data being DMA'ed. It has to do with the snoop
* logic on the cache eviction buffer. dma_default_coherent is set
* to false on these parts.
*/
if (!dma_default_coherent)
dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
wmb(); /* drain writebuffer */
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
wmb(); /* force doorbell write out to dma engine */
/* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);
/*
 * Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count. Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
	/*
	 * We could validate that chanid points into chan_tab_ptr[],
	 * but for now we trust the caller.
	 */
ctp = *((chan_tab_t **)chanid);
	/*
	 * We shouldn't have multiple callers for a particular channel;
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
dp = ctp->put_ptr;
	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
if (dp->dscr_cmd0 & DSCR_CMD0_V)
return 0;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	/* Load up buffer address and byte count. */
	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
/*
* There is an erratum on certain Au1200/Au1550 revisions that could
* result in "stale" data being DMA'ed. It has to do with the snoop
* logic on the cache eviction buffer. dma_default_coherent is set
* to false on these parts.
*/
if (!dma_default_coherent)
dma_cache_inv(KSEG0ADDR(buf), nbytes);
dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
wmb(); /* drain writebuffer */
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
wmb(); /* force doorbell write out to dma engine */
/* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
/*
* Get a destination buffer into the DMA ring.
* Normally used to get a full buffer from the ring during fifo
* to memory transfers. This does not set the valid bit, you will
* have to put another destination buffer to keep the DMA going.
*/
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
u32 rv;
	/*
	 * We could validate that chanid points into chan_tab_ptr[],
	 * but for now we trust the caller.
	 */
ctp = *((chan_tab_t **)chanid);
	/*
	 * We shouldn't have multiple callers for a particular channel;
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
dp = ctp->get_ptr;
/*
* If the descriptor is valid, we are way ahead of the DMA
* engine, so just return an error condition.
*/
if (dp->dscr_cmd0 & DSCR_CMD0_V)
return 0;
/* Return buffer address and byte count. */
*buf = (void *)(phys_to_virt(dp->dscr_dest0));
*nbytes = dp->dscr_cmd1;
rv = dp->dscr_stat;
/* Get next descriptor pointer. */
ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	/* Return the descriptor status. */
return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
void au1xxx_dbdma_stop(u32 chanid)
{
chan_tab_t *ctp;
au1x_dma_chan_t *cp;
int halt_timeout = 0;
ctp = *((chan_tab_t **)chanid);
cp = ctp->chan_ptr;
cp->ddma_cfg &= ~DDMA_CFG_EN; /* Disable channel */
wmb(); /* drain writebuffer */
while (!(cp->ddma_stat & DDMA_STAT_H)) {
udelay(1);
halt_timeout++;
if (halt_timeout > 100) {
printk(KERN_WARNING "warning: DMA channel won't halt\n");
break;
}
}
/* clear current desc valid and doorbell */
cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);
/*
* Start using the current descriptor pointer. If the DBDMA encounters
* a non-valid descriptor, it will stop. In this case, we can just
* continue by adding a buffer to the list and starting again.
*/
void au1xxx_dbdma_start(u32 chanid)
{
chan_tab_t *ctp;
au1x_dma_chan_t *cp;
ctp = *((chan_tab_t **)chanid);
cp = ctp->chan_ptr;
cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */
wmb(); /* drain writebuffer */
cp->ddma_dbell = 0;
wmb(); /* drain writebuffer */
}
EXPORT_SYMBOL(au1xxx_dbdma_start);
void au1xxx_dbdma_reset(u32 chanid)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
au1xxx_dbdma_stop(chanid);
ctp = *((chan_tab_t **)chanid);
ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;
/* Run through the descriptors and reset the valid indicator. */
dp = ctp->chan_desc_base;
do {
dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper-level software, since
		 * posting can reset the 'V' bit.
		 */
dp->sw_status = 0;
dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);
u32 au1xxx_get_dma_residue(u32 chanid)
{
chan_tab_t *ctp;
au1x_dma_chan_t *cp;
u32 rv;
ctp = *((chan_tab_t **)chanid);
cp = ctp->chan_ptr;
/* This is only valid if the channel is stopped. */
rv = cp->ddma_bytecnt;
wmb(); /* drain writebuffer */
return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);
void au1xxx_dbdma_chan_free(u32 chanid)
{
chan_tab_t *ctp;
dbdev_tab_t *stp, *dtp;
ctp = *((chan_tab_t **)chanid);
stp = ctp->chan_src;
dtp = ctp->chan_dest;
au1xxx_dbdma_stop(chanid);
kfree((void *)ctp->cdb_membase);
stp->dev_flags &= ~DEV_FLAGS_INUSE;
dtp->dev_flags &= ~DEV_FLAGS_INUSE;
chan_tab_ptr[ctp->chan_index] = NULL;
kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
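/*
 * All DBDMA channels share one interrupt: ack the lowest pending
 * channel, run its callback and advance cur_ptr to the next ring
 * descriptor.
 */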
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
u32 intstat;
u32 chan_index;
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
au1x_dma_chan_t *cp;
intstat = dbdma_gptr->ddma_intstat;
wmb(); /* drain writebuffer */
chan_index = __ffs(intstat);
ctp = chan_tab_ptr[chan_index];
cp = ctp->chan_ptr;
dp = ctp->cur_ptr;
/* Reset interrupt. */
cp->ddma_irq = 0;
wmb(); /* drain writebuffer */
if (ctp->chan_callback)
ctp->chan_callback(irq, ctp->chan_callparam);
ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
return IRQ_RETVAL(1);
}
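/* Dump channel registers and every ring descriptor for debugging. */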
void au1xxx_dbdma_dump(u32 chanid)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
dbdev_tab_t *stp, *dtp;
au1x_dma_chan_t *cp;
u32 i = 0;
ctp = *((chan_tab_t **)chanid);
stp = ctp->chan_src;
dtp = ctp->chan_dest;
cp = ctp->chan_ptr;
printk(KERN_DEBUG "Chan %x, stp %x (dev %d) dtp %x (dev %d)\n",
(u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
dtp - dbdev_tab);
printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
(u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
(u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));
printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
cp->ddma_bytecnt);
/* Run through the descriptors */
dp = ctp->chan_desc_base;
do {
printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
dp->dscr_source0, dp->dscr_source1,
dp->dscr_dest0, dp->dscr_dest1);
printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
dp->dscr_stat, dp->dscr_nxtptr);
dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
} while (dp != ctp->chan_desc_base);
}
/*
 * Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
u32 nbytes = 0;
	/*
	 * We could validate that chanid points into chan_tab_ptr[],
	 * but for now we trust the caller.
	 */
ctp = *((chan_tab_t **)chanid);
	/*
	 * We shouldn't have multiple callers for a particular channel;
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
dp = ctp->put_ptr;
/*
* If the descriptor is valid, we are way ahead of the DMA
* engine, so just return an error condition.
*/
if (dp->dscr_cmd0 & DSCR_CMD0_V)
return 0;
/* Load up buffer addresses and byte count. */
dp->dscr_dest0 = dscr->dscr_dest0;
dp->dscr_source0 = dscr->dscr_source0;
dp->dscr_dest1 = dscr->dscr_dest1;
dp->dscr_source1 = dscr->dscr_source1;
dp->dscr_cmd1 = dscr->dscr_cmd1;
nbytes = dscr->dscr_cmd1;
/* Allow the caller to specify if an interrupt is generated */
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
ctp->chan_ptr->ddma_dbell = 0;
/* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
return nbytes;
}
static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
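/*
 * System PM: row 0 holds the four global config registers, rows
 * 1..NUM_DBDMA_CHANS hold six registers per channel. Suspend halts
 * each channel before saving it; resume restores in the same order.
 */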
static int alchemy_dbdma_suspend(void)
{
int i;
void __iomem *addr;
addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);
/* save channel configurations */
addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);
/* halt channel */
__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
wmb();
while (!(__raw_readl(addr + 0x14) & 1))
wmb();
addr += 0x100; /* next channel base */
}
/* disable channel interrupts */
addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
__raw_writel(0, addr + 0x0c);
wmb();
return 0;
}
static void alchemy_dbdma_resume(void)
{
int i;
void __iomem *addr;
addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);
/* restore channel configurations */
addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
wmb();
addr += 0x100; /* next channel base */
}
}
static struct syscore_ops alchemy_dbdma_syscore_ops = {
.suspend = alchemy_dbdma_suspend,
.resume = alchemy_dbdma_resume,
};
static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
{
int ret;
dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
if (!dbdev_tab)
return -ENOMEM;
memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
dbdev_tab[ret].dev_id = ~0;
dbdma_gptr->ddma_config = 0;
dbdma_gptr->ddma_throttle = 0;
dbdma_gptr->ddma_inten = 0xffff;
wmb(); /* drain writebuffer */
ret = request_irq(irq, dbdma_interrupt, 0, "dbdma", (void *)dbdma_gptr);
if (ret)
printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
else {
dbdma_initialized = 1;
register_syscore_ops(&alchemy_dbdma_syscore_ops);
}
return ret;
}
static int __init alchemy_dbdma_init(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1550:
return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
case ALCHEMY_CPU_AU1200:
return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);
case ALCHEMY_CPU_AU1300:
return dbdma_setup(AU1300_DDMA_INT, au1300_dbdev_tab);
}
return 0;
}
subsys_initcall(alchemy_dbdma_init);
| linux-master | arch/mips/alchemy/common/dbdma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Au1300 media block power gating (VSS)
*
* This is a stop-gap solution until I have the clock framework integration
* ready. This stuff here really must be handled transparently when clocks
* for various media blocks are enabled/disabled.
*/
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/mach-au1x00/au1000.h>
#define VSS_GATE 0x00 /* gate wait timers */
#define VSS_CLKRST 0x04 /* clock/block control */
#define VSS_FTR 0x08 /* footers */
#define VSS_ADDR(blk) (KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + ((blk) * 0x0c))
static DEFINE_SPINLOCK(au1300_vss_lock);
/* enable a block as outlined in the databook */
static inline void __enable_block(int block)
{
void __iomem *base = (void __iomem *)VSS_ADDR(block);
__raw_writel(3, base + VSS_CLKRST); /* enable clock, assert reset */
wmb();
__raw_writel(0x01fffffe, base + VSS_GATE); /* maximum setup time */
wmb();
/* enable footers in sequence */
__raw_writel(0x01, base + VSS_FTR);
wmb();
__raw_writel(0x03, base + VSS_FTR);
wmb();
__raw_writel(0x07, base + VSS_FTR);
wmb();
__raw_writel(0x0f, base + VSS_FTR);
wmb();
__raw_writel(0x01ffffff, base + VSS_GATE); /* start FSM too */
wmb();
__raw_writel(2, base + VSS_CLKRST); /* deassert reset */
wmb();
__raw_writel(0x1f, base + VSS_FTR); /* enable isolation cells */
wmb();
}
/* disable a block as outlined in the databook */
static inline void __disable_block(int block)
{
void __iomem *base = (void __iomem *)VSS_ADDR(block);
__raw_writel(0x0f, base + VSS_FTR); /* disable isolation cells */
wmb();
__raw_writel(0, base + VSS_GATE); /* disable FSM */
wmb();
__raw_writel(3, base + VSS_CLKRST); /* assert reset */
wmb();
__raw_writel(1, base + VSS_CLKRST); /* disable clock */
wmb();
__raw_writel(0, base + VSS_FTR); /* disable all footers */
wmb();
}
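/*
 * Gate a media block on or off; the up/down sequences must not be
 * interleaved, hence the lock. No-op on anything but the Au1300.
 */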
void au1300_vss_block_control(int block, int enable)
{
unsigned long flags;
if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
return;
/* only one block at a time */
spin_lock_irqsave(&au1300_vss_lock, flags);
if (enable)
__enable_block(block);
else
__disable_block(block);
spin_unlock_irqrestore(&au1300_vss_lock, flags);
}
EXPORT_SYMBOL_GPL(au1300_vss_block_control);
| linux-master | arch/mips/alchemy/common/vss.c |
/*
* Platform device support for Au1x00 SoCs.
*
* Copyright 2004, Matt Porter <[email protected]>
*
* (C) Copyright Embedded Alley Solutions, Inc 2005
* Author: Pantelis Antoniou <[email protected]>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/slab.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1xxx_eth.h>
#include <prom.h>
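/*
 * 8250 PM hook: gate the UART block clock around the standard 8250
 * power transitions (state 0 = full power, 3 = powered off).
 */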
static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
unsigned int old_state)
{
#ifdef CONFIG_SERIAL_8250
switch (state) {
case 0:
alchemy_uart_enable(CPHYSADDR(port->membase));
serial8250_do_pm(port, state, old_state);
break;
case 3: /* power off */
serial8250_do_pm(port, state, old_state);
alchemy_uart_disable(CPHYSADDR(port->membase));
break;
default:
serial8250_do_pm(port, state, old_state);
break;
}
#endif
}
#define PORT(_base, _irq) \
{ \
.mapbase = _base, \
.mapsize = 0x1000, \
.irq = _irq, \
.regshift = 2, \
.flags = UPF_SKIP_TEST | UPF_IOREMAP | \
UPF_FIXED_TYPE, \
.type = PORT_16550A, \
.pm = alchemy_8250_pm, \
}
static struct plat_serial8250_port au1x00_uart_data[][4] __initdata = {
[ALCHEMY_CPU_AU1000] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1000_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1000_UART1_INT),
PORT(AU1000_UART2_PHYS_ADDR, AU1000_UART2_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1000_UART3_INT),
},
[ALCHEMY_CPU_AU1500] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1500_UART0_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1500_UART3_INT),
},
[ALCHEMY_CPU_AU1100] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1100_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1100_UART1_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1100_UART3_INT),
},
[ALCHEMY_CPU_AU1550] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1550_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1550_UART1_INT),
PORT(AU1000_UART3_PHYS_ADDR, AU1550_UART3_INT),
},
[ALCHEMY_CPU_AU1200] = {
PORT(AU1000_UART0_PHYS_ADDR, AU1200_UART0_INT),
PORT(AU1000_UART1_PHYS_ADDR, AU1200_UART1_INT),
},
[ALCHEMY_CPU_AU1300] = {
PORT(AU1300_UART0_PHYS_ADDR, AU1300_UART0_INT),
PORT(AU1300_UART1_PHYS_ADDR, AU1300_UART1_INT),
PORT(AU1300_UART2_PHYS_ADDR, AU1300_UART2_INT),
PORT(AU1300_UART3_PHYS_ADDR, AU1300_UART3_INT),
},
};
static struct platform_device au1xx0_uart_device = {
.name = "serial8250",
.id = PLAT8250_DEV_AU1X00,
};
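/*
 * Copy the per-variant port table, fill in the UART clock rate (the
 * peripheral clock) and register everything with the serial8250
 * platform driver.
 */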
static void __init alchemy_setup_uarts(int ctype)
{
long uartclk;
int s = sizeof(struct plat_serial8250_port);
int c = alchemy_get_uarts(ctype);
struct plat_serial8250_port *ports;
struct clk *clk = clk_get(NULL, ALCHEMY_PERIPH_CLK);
if (IS_ERR(clk))
return;
if (clk_prepare_enable(clk)) {
clk_put(clk);
return;
}
uartclk = clk_get_rate(clk);
clk_put(clk);
	ports = kcalloc(c + 1, s, GFP_KERNEL);
if (!ports) {
printk(KERN_INFO "Alchemy: no memory for UART data\n");
return;
}
memcpy(ports, au1x00_uart_data[ctype], s * c);
au1xx0_uart_device.dev.platform_data = ports;
/* Fill up uartclk. */
for (s = 0; s < c; s++) {
ports[s].uartclk = uartclk;
if (au_platform_setup(&ports[s]) < 0) {
kfree(ports);
printk(KERN_INFO "Alchemy: missing support for UARTs\n");
return;
}
}
if (platform_device_register(&au1xx0_uart_device))
printk(KERN_INFO "Alchemy: failed to register UARTs\n");
}
static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
/* Power on callback for the ehci platform driver */
static int alchemy_ehci_power_on(struct platform_device *pdev)
{
return alchemy_usb_control(ALCHEMY_USB_EHCI0, 1);
}
/* Power off/suspend callback for the ehci platform driver */
static void alchemy_ehci_power_off(struct platform_device *pdev)
{
alchemy_usb_control(ALCHEMY_USB_EHCI0, 0);
}
static struct usb_ehci_pdata alchemy_ehci_pdata = {
.no_io_watchdog = 1,
.power_on = alchemy_ehci_power_on,
.power_off = alchemy_ehci_power_off,
.power_suspend = alchemy_ehci_power_off,
};
/* Power on callback for the ohci platform driver */
static int alchemy_ohci_power_on(struct platform_device *pdev)
{
int unit;
unit = (pdev->id == 1) ?
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
return alchemy_usb_control(unit, 1);
}
/* Power off/suspend callback for the ohci platform driver */
static void alchemy_ohci_power_off(struct platform_device *pdev)
{
int unit;
unit = (pdev->id == 1) ?
ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
alchemy_usb_control(unit, 0);
}
static struct usb_ohci_pdata alchemy_ohci_pdata = {
.power_on = alchemy_ohci_power_on,
.power_off = alchemy_ohci_power_off,
.power_suspend = alchemy_ohci_power_off,
};
static unsigned long alchemy_ohci_data[][2] __initdata = {
[ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT },
[ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT },
[ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT },
[ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT },
[ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT },
[ALCHEMY_CPU_AU1300] = { AU1300_USB_OHCI0_PHYS_ADDR, AU1300_USB_INT },
};
static unsigned long alchemy_ehci_data[][2] __initdata = {
[ALCHEMY_CPU_AU1200] = { AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT },
[ALCHEMY_CPU_AU1300] = { AU1300_USB_EHCI_PHYS_ADDR, AU1300_USB_INT },
};
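/*
 * Allocate a two-entry resource array (MEM + IRQ) plus a platform
 * device with a 32-bit coherent DMA mask; the caller fills in the
 * actual addresses.
 */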
static int __init _new_usbres(struct resource **r, struct platform_device **d)
{
*r = kcalloc(2, sizeof(struct resource), GFP_KERNEL);
if (!*r)
return -ENOMEM;
*d = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
if (!*d) {
kfree(*r);
return -ENOMEM;
}
(*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32);
(*d)->num_resources = 2;
(*d)->resource = *r;
return 0;
}
static void __init alchemy_setup_usb(int ctype)
{
struct resource *res;
struct platform_device *pdev;
/* setup OHCI0. Every variant has one */
if (_new_usbres(&res, &pdev))
return;
res[0].start = alchemy_ohci_data[ctype][0];
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = alchemy_ohci_data[ctype][1];
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 0;
pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n");
/* setup EHCI0: Au1200/Au1300 */
if ((ctype == ALCHEMY_CPU_AU1200) || (ctype == ALCHEMY_CPU_AU1300)) {
if (_new_usbres(&res, &pdev))
return;
res[0].start = alchemy_ehci_data[ctype][0];
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = alchemy_ehci_data[ctype][1];
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ehci-platform";
pdev->id = 0;
pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ehci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n");
}
/* Au1300: OHCI1 */
if (ctype == ALCHEMY_CPU_AU1300) {
if (_new_usbres(&res, &pdev))
return;
res[0].start = AU1300_USB_OHCI1_PHYS_ADDR;
res[0].end = res[0].start + 0x100 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].start = AU1300_USB_INT;
res[1].end = res[1].start;
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 1;
pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
printk(KERN_INFO "Alchemy USB: cannot add OHCI1\n");
}
}
/* Macro to help defining the Ethernet MAC resources */
#define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */
#define MAC_RES(_base, _enable, _irq, _macdma) \
{ \
.start = _base, \
.end = _base + 0xffff, \
.flags = IORESOURCE_MEM, \
}, \
{ \
.start = _enable, \
.end = _enable + 0x3, \
.flags = IORESOURCE_MEM, \
}, \
{ \
.start = _irq, \
.end = _irq, \
.flags = IORESOURCE_IRQ \
}, \
{ \
.start = _macdma, \
.end = _macdma + 0x1ff, \
.flags = IORESOURCE_MEM, \
}
static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = {
[ALCHEMY_CPU_AU1000] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1000_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1500] = {
MAC_RES(AU1500_MAC0_PHYS_ADDR,
AU1500_MACEN_PHYS_ADDR,
AU1500_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1100] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1100_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1550] = {
MAC_RES(AU1000_MAC0_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR,
AU1550_MAC0_DMA_INT,
AU1000_MACDMA0_PHYS_ADDR)
},
};
static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
.phy1_search_mac0 = 1,
};
static struct platform_device au1xxx_eth0_device = {
.name = "au1000-eth",
.id = 0,
.num_resources = MAC_RES_COUNT,
.dev = {
.dma_mask = &alchemy_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &au1xxx_eth0_platform_data,
},
};
static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
[ALCHEMY_CPU_AU1000] = {
MAC_RES(AU1000_MAC1_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR + 4,
AU1000_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1500] = {
MAC_RES(AU1500_MAC1_PHYS_ADDR,
AU1500_MACEN_PHYS_ADDR + 4,
AU1500_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
[ALCHEMY_CPU_AU1550] = {
MAC_RES(AU1000_MAC1_PHYS_ADDR,
AU1000_MACEN_PHYS_ADDR + 4,
AU1550_MAC1_DMA_INT,
AU1000_MACDMA1_PHYS_ADDR)
},
};
static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
.phy1_search_mac0 = 1,
};
static struct platform_device au1xxx_eth1_device = {
.name = "au1000-eth",
.id = 1,
.num_resources = MAC_RES_COUNT,
.dev = {
.dma_mask = &alchemy_all_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &au1xxx_eth1_platform_data,
},
};
void __init au1xxx_override_eth_cfg(unsigned int port,
struct au1000_eth_platform_data *eth_data)
{
if (!eth_data || port > 1)
return;
if (port == 0)
memcpy(&au1xxx_eth0_platform_data, eth_data,
sizeof(struct au1000_eth_platform_data));
else
memcpy(&au1xxx_eth1_platform_data, eth_data,
sizeof(struct au1000_eth_platform_data));
}
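/*
 * Register the on-chip Ethernet MACs. MAC0's address comes from the
 * boot prom unless the board already supplied a valid one; MAC1 uses
 * the prom address plus one.
 */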
static void __init alchemy_setup_macs(int ctype)
{
int ret, i;
unsigned char ethaddr[6];
struct resource *macres;
/* Handle 1st MAC */
if (alchemy_get_macs(ctype) < 1)
return;
macres = kmemdup(au1xxx_eth0_resources[ctype],
sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
return;
}
au1xxx_eth0_device.resource = macres;
i = prom_get_ethernet_addr(ethaddr);
if (!i && !is_valid_ether_addr(au1xxx_eth0_platform_data.mac))
memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
ret = platform_device_register(&au1xxx_eth0_device);
if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC0\n");
/* Handle 2nd MAC */
if (alchemy_get_macs(ctype) < 2)
return;
macres = kmemdup(au1xxx_eth1_resources[ctype],
sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
return;
}
au1xxx_eth1_device.resource = macres;
ethaddr[5] += 1; /* next addr for 2nd MAC */
if (!i && !is_valid_ether_addr(au1xxx_eth1_platform_data.mac))
memcpy(au1xxx_eth1_platform_data.mac, ethaddr, 6);
/* Register second MAC if enabled in pinfunc */
if (!(alchemy_rdsys(AU1000_SYS_PINFUNC) & SYS_PF_NI2)) {
ret = platform_device_register(&au1xxx_eth1_device);
if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC1\n");
}
}
static int __init au1xxx_platform_init(void)
{
int ctype = alchemy_get_cputype();
alchemy_setup_uarts(ctype);
alchemy_setup_macs(ctype);
alchemy_setup_usb(ctype);
return 0;
}
arch_initcall(au1xxx_platform_init);
| linux-master | arch/mips/alchemy/common/platform.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alchemy clocks.
*
* Exposes all configurable internal clock sources to the clk framework.
*
* We have:
* - Root source, usually 12MHz supplied by an external crystal
* - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
*
* Dividers:
* - 6 clock dividers with:
* * selectable source [one of the PLLs],
* * output divided between [2 .. 512 in steps of 2] (!Au1300)
* or [1 .. 256 in steps of 1] (Au1300),
* * can be enabled individually.
*
* - up to 6 "internal" (fixed) consumers which:
* * take either AUXPLL or one of the above 6 dividers as input,
* * divide this input by 1, 2, or 4 (and 3 on Au1300).
* * can be disabled separately.
*
* Misc clocks:
* - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4.
* depends on board design and should be set by bootloader, read-only.
* - peripheral clock: half the rate of sysbus clock, source for a lot
* of peripheral blocks, read-only.
* - memory clock: clk rate to main memory chips, depends on board
* design and is read-only,
* - lrclk: the static bus clock signal for synchronous operation.
* depends on board design, must be set by bootloader,
* but may be required to correctly configure devices attached to
* the static bus. The Au1000/1500/1100 manuals call it LCLK, on
* later models it's called RCLK.
*/
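/*
 * Consumers grab these clocks through the generic clk API using the
 * names defined here, e.g. (a minimal sketch, rate made up):
 *
 *	struct clk *c = clk_get(NULL, "psc0_intclk");
 *	clk_set_rate(c, 48000000);
 *	clk_prepare_enable(c);
 */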
#include <linux/init.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/mach-au1x00/au1000.h>
/* Base clock: 12MHz is the default in all databooks, and I haven't
* found any board yet which uses a different rate.
*/
#define ALCHEMY_ROOTCLK_RATE 12000000
/*
* the internal sources which can be driven by the PLLs and dividers.
* Names taken from the databooks, refer to them for more information,
 * especially which ones share a clock line.
*/
static const char * const alchemy_au1300_intclknames[] = {
"lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
"EXTCLK0", "EXTCLK1"
};
static const char * const alchemy_au1200_intclknames[] = {
"lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
};
static const char * const alchemy_au1550_intclknames[] = {
"usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
"EXTCLK0", "EXTCLK1"
};
static const char * const alchemy_au1100_intclknames[] = {
"usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
};
static const char * const alchemy_au1500_intclknames[] = {
NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
};
static const char * const alchemy_au1000_intclknames[] = {
"irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
"EXTCLK1"
};
/* aliases for a few on-chip sources which are either shared
* or have gone through name changes.
*/
static struct clk_aliastable {
char *alias;
char *base;
int cputype;
} alchemy_clk_aliases[] __initdata = {
{ "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
{ "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
{ "irda_clk", "usb_clk", ALCHEMY_CPU_AU1100 },
{ "usbh_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
{ "usbd_clk", "usb_clk", ALCHEMY_CPU_AU1550 },
{ "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
{ "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
{ "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
{ "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
{ NULL, NULL, 0 },
};
#define IOMEM(x) ((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))
/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);
/* CPU Core clock *****************************************************/
static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
unsigned long t;
/*
* On early Au1000, sys_cpupll was write-only. Since these
* silicon versions of Au1000 are not sold, we don't bend
* over backwards trying to determine the frequency.
*/
if (unlikely(au1xxx_cpu_has_pll_wo()))
t = 396000000;
else {
t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
t &= 0x3f;
t *= parent_rate;
}
return t;
}
void __init alchemy_set_lpj(void)
{
preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
preset_lpj /= 2 * HZ;
}
static const struct clk_ops alchemy_clkops_cpu = {
.recalc_rate = alchemy_clk_cpu_recalc,
};
static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
int ctype)
{
struct clk_init_data id;
struct clk_hw *h;
struct clk *clk;
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
id.name = ALCHEMY_CPU_CLK;
id.parent_names = &parent_name;
id.num_parents = 1;
id.flags = 0;
id.ops = &alchemy_clkops_cpu;
h->init = &id;
clk = clk_register(NULL, h);
if (IS_ERR(clk)) {
pr_err("failed to register clock\n");
kfree(h);
}
return clk;
}
/* AUXPLLs ************************************************************/
struct alchemy_auxpll_clk {
struct clk_hw hw;
unsigned long reg; /* au1300 has also AUXPLL2 */
int maxmult; /* max multiplier */
};
#define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
}
static int alchemy_clk_aux_setr(struct clk_hw *hw,
unsigned long rate,
unsigned long parent_rate)
{
struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
unsigned long d = rate;
if (rate)
d /= parent_rate;
else
d = 0;
/* minimum is 84MHz, max is 756-1032 depending on variant */
if (((d < 7) && (d != 0)) || (d > a->maxmult))
return -EINVAL;
alchemy_wrsys(d, a->reg);
return 0;
}
static long alchemy_clk_aux_roundr(struct clk_hw *hw,
unsigned long rate,
unsigned long *parent_rate)
{
struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
unsigned long mult;
if (!rate || !*parent_rate)
return 0;
mult = rate / (*parent_rate);
if (mult && (mult < 7))
mult = 7;
if (mult > a->maxmult)
mult = a->maxmult;
return (*parent_rate) * mult;
}
static const struct clk_ops alchemy_clkops_aux = {
.recalc_rate = alchemy_clk_aux_recalc,
.set_rate = alchemy_clk_aux_setr,
.round_rate = alchemy_clk_aux_roundr,
};
static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
char *name, int maxmult,
unsigned long reg)
{
struct clk_init_data id;
struct clk *c;
struct alchemy_auxpll_clk *a;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return ERR_PTR(-ENOMEM);
id.name = name;
id.parent_names = &parent_name;
id.num_parents = 1;
id.flags = CLK_GET_RATE_NOCACHE;
id.ops = &alchemy_clkops_aux;
a->reg = reg;
a->maxmult = maxmult;
a->hw.init = &id;
c = clk_register(NULL, &a->hw);
if (!IS_ERR(c))
clk_register_clkdev(c, name, NULL);
else
kfree(a);
return c;
}
/* sysbus_clk *********************************************************/
static struct clk __init *alchemy_clk_setup_sysbus(const char *pn)
{
unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
struct clk *c;
c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
pn, 0, 1, v);
if (!IS_ERR(c))
clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
return c;
}
/* Peripheral Clock ***************************************************/
static struct clk __init *alchemy_clk_setup_periph(const char *pn)
{
/* Peripheral clock runs at half the rate of sysbus clk */
struct clk *c;
c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
pn, 0, 1, 2);
if (!IS_ERR(c))
clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
return c;
}
/* mem clock **********************************************************/
static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
{
void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
unsigned long v;
struct clk *c;
int div;
switch (ct) {
case ALCHEMY_CPU_AU1550:
case ALCHEMY_CPU_AU1200:
v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
div = (v & (1 << 15)) ? 1 : 2;
break;
case ALCHEMY_CPU_AU1300:
v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
div = (v & (1 << 31)) ? 1 : 2;
break;
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
default:
div = 2;
break;
}
c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
0, 1, div);
if (!IS_ERR(c))
clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
return c;
}
/* lrclk: external synchronous static bus clock ***********************/
static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
{
/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
* otherwise lrclk=pclk/4.
* All other variants: MEM_STCFG0[15:13] = divisor.
* L/RCLK = periph_clk / (divisor + 1)
* On Au1000, Au1500, Au1100 it's called LCLK,
* on later models it's called RCLK, but it's the same thing.
*/
struct clk *c;
unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
switch (t) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
v = 4 + ((v >> 11) & 1);
break;
default: /* all other models */
v = ((v >> 13) & 7) + 1;
}
c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
pn, 0, 1, v);
if (!IS_ERR(c))
clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
return c;
}
/* Clock dividers and muxes *******************************************/
/* data for fgen and csrc mux-dividers */
struct alchemy_fgcs_clk {
struct clk_hw hw;
spinlock_t *reglock; /* register lock */
unsigned long reg; /* SYS_FREQCTRL0/1 */
int shift; /* offset in register */
int parent; /* parent before disable [Au1300] */
int isen; /* is it enabled? */
int *dt; /* dividertable for csrc */
};
#define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
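/*
 * Find the smallest hardware divider (clamped to maxdiv) that brings
 * prate down to at most rate. scale is the divider granularity (2 on
 * pre-Au1300 fgens, 1 on Au1300); *rv receives the raw register value
 * (div / scale - 1), and the effective divisor is returned. Worked
 * example: rate=48MHz, prate=396MHz, scale=2: 396/48 = 8 is too fast
 * (49.5MHz), 9 is odd, so the divisor becomes 10 and *rv is 4.
 */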
static long alchemy_calc_div(unsigned long rate, unsigned long prate,
int scale, int maxdiv, unsigned long *rv)
{
long div1, div2;
div1 = prate / rate;
if ((prate / div1) > rate)
div1++;
if (scale == 2) { /* only div-by-multiple-of-2 possible */
if (div1 & 1)
div1++; /* stay <=prate */
}
div2 = (div1 / scale) - 1; /* value to write to register */
if (div2 > maxdiv)
div2 = maxdiv;
if (rv)
*rv = div2;
div1 = ((div2 + 1) * scale);
return div1;
}
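/*
 * Shared determine_rate for the fgen/csrc muxes: try every enabled
 * parent and pick the divided-down rate closest to, but not above,
 * the request; if that isn't exact, also consider one currently
 * unused parent whose rate may be changed freely.
 */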
static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
struct clk_rate_request *req,
int scale, int maxdiv)
{
struct clk_hw *pc, *bpc, *free;
long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
int j;
lastdiff = INT_MAX;
bpr = 0;
bpc = NULL;
br = -EINVAL;
free = NULL;
/* look at the rates each enabled parent supplies and select
* the one that gets closest to but not over the requested rate.
*/
for (j = 0; j < 7; j++) {
pc = clk_hw_get_parent_by_index(hw, j);
if (!pc)
break;
/* if this parent is currently unused, remember it.
* XXX: we would actually want clk_has_active_children()
* but this is a good-enough approximation for now.
*/
if (!clk_hw_is_prepared(pc)) {
if (!free)
free = pc;
}
pr = clk_hw_get_rate(pc);
if (pr < req->rate)
continue;
/* what can hardware actually provide */
tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
nr = pr / tdv;
diff = req->rate - nr;
if (nr > req->rate)
continue;
if (diff < lastdiff) {
lastdiff = diff;
bpr = pr;
bpc = pc;
br = nr;
}
if (diff == 0)
break;
}
/* if we couldn't get the exact rate we wanted from the enabled
* parents, maybe we can tell an available disabled/inactive one
* to give us a rate we can divide down to the requested rate.
*/
if (lastdiff && free) {
for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
tpr = req->rate * j;
if (tpr < 0)
break;
pr = clk_hw_round_rate(free, tpr);
tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
NULL);
nr = pr / tdv;
diff = req->rate - nr;
if (nr > req->rate)
continue;
if (diff < lastdiff) {
lastdiff = diff;
bpr = pr;
bpc = free;
br = nr;
}
if (diff == 0)
break;
}
}
if (br < 0)
return br;
req->best_parent_rate = bpr;
req->best_parent_hw = bpc;
req->rate = br;
return 0;
}
static int alchemy_clk_fgv1_en(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v, flags;
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v |= (1 << 1) << c->shift;
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
return v & 1;
}
static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v, flags;
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~((1 << 1) << c->shift);
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
}
static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v, flags;
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
if (index)
v |= (1 << c->shift);
else
v &= ~(1 << c->shift);
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
return (alchemy_rdsys(c->reg) >> c->shift) & 1;
}
static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long div, v, flags, ret;
int sh = c->shift + 2;
if (!rate || !parent_rate || rate > (parent_rate / 2))
return -EINVAL;
ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~(0xff << sh);
v |= div << sh;
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
v = ((v & 0xff) + 1) * 2;
return parent_rate / v;
}
static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
struct clk_rate_request *req)
{
return alchemy_clk_fgcs_detr(hw, req, 2, 512);
}
/* Au1000, Au1100, Au15x0, Au12x0 */
static const struct clk_ops alchemy_clkops_fgenv1 = {
.recalc_rate = alchemy_clk_fgv1_recalc,
.determine_rate = alchemy_clk_fgv1_detr,
.set_rate = alchemy_clk_fgv1_setr,
.set_parent = alchemy_clk_fgv1_setp,
.get_parent = alchemy_clk_fgv1_getp,
.enable = alchemy_clk_fgv1_en,
.disable = alchemy_clk_fgv1_dis,
.is_enabled = alchemy_clk_fgv1_isen,
};
static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
{
unsigned long v = alchemy_rdsys(c->reg);
v &= ~(3 << c->shift);
v |= (c->parent & 3) << c->shift;
alchemy_wrsys(v, c->reg);
c->isen = 1;
}
static int alchemy_clk_fgv2_en(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long flags;
/* enable by setting the previous parent clock */
spin_lock_irqsave(c->reglock, flags);
__alchemy_clk_fgv2_en(c);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
}
static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v, flags;
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~(3 << c->shift); /* set input mux to "disabled" state */
alchemy_wrsys(v, c->reg);
c->isen = 0;
spin_unlock_irqrestore(c->reglock, flags);
}
static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long flags;
spin_lock_irqsave(c->reglock, flags);
c->parent = index + 1; /* value to write to register */
if (c->isen)
__alchemy_clk_fgv2_en(c);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long flags, v;
spin_lock_irqsave(c->reglock, flags);
v = c->parent - 1;
spin_unlock_irqrestore(c->reglock, flags);
return v;
}
/* fg0-2 and fg3-5 each share a "scale" bit. With this bit cleared, the
 * dividers behave exactly as on previous models (dividers are multiples
* of 2); with the bit set, dividers are multiples of 1, halving their
* range, but making them also much more flexible.
*/
static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
int sh = c->shift + 2;
unsigned long div, v, flags, ret;
if (!rate || !parent_rate || rate > parent_rate)
return -EINVAL;
v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
v ? 256 : 512, &div);
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~(0xff << sh);
v |= (div & 0xff) << sh;
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
int sh = c->shift + 2;
unsigned long v, t;
v = alchemy_rdsys(c->reg);
t = parent_rate / (((v >> sh) & 0xff) + 1);
if ((v & (1 << 30)) == 0) /* test scale bit */
t /= 2;
return t;
}
static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
int scale, maxdiv;
if (alchemy_rdsys(c->reg) & (1 << 30)) {
scale = 1;
maxdiv = 256;
} else {
scale = 2;
maxdiv = 512;
}
return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
}
/* Au1300 larger input mux, no separate disable bit, flexible divider */
static const struct clk_ops alchemy_clkops_fgenv2 = {
.recalc_rate = alchemy_clk_fgv2_recalc,
.determine_rate = alchemy_clk_fgv2_detr,
.set_rate = alchemy_clk_fgv2_setr,
.set_parent = alchemy_clk_fgv2_setp,
.get_parent = alchemy_clk_fgv2_getp,
.enable = alchemy_clk_fgv2_en,
.disable = alchemy_clk_fgv2_dis,
.is_enabled = alchemy_clk_fgv2_isen,
};
static const char * const alchemy_clk_fgv1_parents[] = {
ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};
static const char * const alchemy_clk_fgv2_parents[] = {
ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
};
static const char * const alchemy_clk_fgen_names[] = {
ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
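/*
 * Register the six frequency generators: fg0-fg2 occupy 10-bit fields
 * in SYS_FREQCTRL0, fg3-fg5 in SYS_FREQCTRL1. The Au1300 variant adds
 * AUXPLL2 as input and folds the enable into the parent mux.
 */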
static int __init alchemy_clk_init_fgens(int ctype)
{
struct clk *c;
struct clk_init_data id;
struct alchemy_fgcs_clk *a;
unsigned long v;
int i, ret;
switch (ctype) {
case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
id.ops = &alchemy_clkops_fgenv1;
id.parent_names = alchemy_clk_fgv1_parents;
id.num_parents = 2;
break;
case ALCHEMY_CPU_AU1300:
id.ops = &alchemy_clkops_fgenv2;
id.parent_names = alchemy_clk_fgv2_parents;
id.num_parents = 3;
break;
default:
return -ENODEV;
}
id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
a = kcalloc(6, sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
spin_lock_init(&alchemy_clk_fg0_lock);
spin_lock_init(&alchemy_clk_fg1_lock);
ret = 0;
for (i = 0; i < 6; i++) {
id.name = alchemy_clk_fgen_names[i];
a->shift = 10 * (i < 3 ? i : i - 3);
if (i > 2) {
a->reg = AU1000_SYS_FREQCTRL1;
a->reglock = &alchemy_clk_fg1_lock;
} else {
a->reg = AU1000_SYS_FREQCTRL0;
a->reglock = &alchemy_clk_fg0_lock;
}
/* default to first parent if bootloader has set
* the mux to disabled state.
*/
if (ctype == ALCHEMY_CPU_AU1300) {
v = alchemy_rdsys(a->reg);
a->parent = (v >> a->shift) & 3;
if (!a->parent) {
a->parent = 1;
a->isen = 0;
} else
a->isen = 1;
}
a->hw.init = &id;
c = clk_register(NULL, &a->hw);
if (IS_ERR(c))
ret++;
else
clk_register_clkdev(c, id.name, NULL);
a++;
}
return ret;
}
/* internal sources muxes *********************************************/
static int alchemy_clk_csrc_isen(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v = alchemy_rdsys(c->reg);
return (((v >> c->shift) >> 2) & 7) != 0;
}
static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
{
unsigned long v = alchemy_rdsys(c->reg);
v &= ~((7 << 2) << c->shift);
v |= ((c->parent & 7) << 2) << c->shift;
alchemy_wrsys(v, c->reg);
c->isen = 1;
}
static int alchemy_clk_csrc_en(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long flags;
/* enable by setting the previous parent clock */
spin_lock_irqsave(c->reglock, flags);
__alchemy_clk_csrc_en(c);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static void alchemy_clk_csrc_dis(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v, flags;
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~((3 << 2) << c->shift); /* mux to "disabled" state */
alchemy_wrsys(v, c->reg);
c->isen = 0;
spin_unlock_irqrestore(c->reglock, flags);
}
static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long flags;
spin_lock_irqsave(c->reglock, flags);
c->parent = index + 1; /* value to write to register */
if (c->isen)
__alchemy_clk_csrc_en(c);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
return c->parent - 1;
}
static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
return parent_rate / c->dt[v];
}
static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
unsigned long d, v, flags;
int i;
if (!rate || !parent_rate || rate > parent_rate)
return -EINVAL;
d = (parent_rate + (rate / 2)) / rate;
if (d > 4)
return -EINVAL;
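/* the pre-Au1300 divider table has no /3 entry (dt[2] != 3), so an
 * ideal divider of 3 is rounded up to 4
 */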
if ((d == 3) && (c->dt[2] != 3))
d = 4;
for (i = 0; i < 4; i++)
if (c->dt[i] == d)
break;
if (i >= 4)
return -EINVAL; /* oops */
spin_lock_irqsave(c->reglock, flags);
v = alchemy_rdsys(c->reg);
v &= ~(3 << c->shift);
v |= (i & 3) << c->shift;
alchemy_wrsys(v, c->reg);
spin_unlock_irqrestore(c->reglock, flags);
return 0;
}
static int alchemy_clk_csrc_detr(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
return alchemy_clk_fgcs_detr(hw, req, scale, 4);
}
static const struct clk_ops alchemy_clkops_csrc = {
.recalc_rate = alchemy_clk_csrc_recalc,
.determine_rate = alchemy_clk_csrc_detr,
.set_rate = alchemy_clk_csrc_setr,
.set_parent = alchemy_clk_csrc_setp,
.get_parent = alchemy_clk_csrc_getp,
.enable = alchemy_clk_csrc_en,
.disable = alchemy_clk_csrc_dis,
.is_enabled = alchemy_clk_csrc_isen,
};
static const char * const alchemy_clk_csrc_parents[] = {
/* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
};
/* divider tables */
static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 }; /* rest */
static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 }; /* Au1300 */
static int __init alchemy_clk_setup_imux(int ctype)
{
struct alchemy_fgcs_clk *a;
const char * const *names;
struct clk_init_data id;
unsigned long v;
int i, ret, *dt;
struct clk *c;
id.ops = &alchemy_clkops_csrc;
id.parent_names = alchemy_clk_csrc_parents;
id.num_parents = 7;
id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
dt = alchemy_csrc_dt1;
switch (ctype) {
case ALCHEMY_CPU_AU1000:
names = alchemy_au1000_intclknames;
break;
case ALCHEMY_CPU_AU1500:
names = alchemy_au1500_intclknames;
break;
case ALCHEMY_CPU_AU1100:
names = alchemy_au1100_intclknames;
break;
case ALCHEMY_CPU_AU1550:
names = alchemy_au1550_intclknames;
break;
case ALCHEMY_CPU_AU1200:
names = alchemy_au1200_intclknames;
break;
case ALCHEMY_CPU_AU1300:
dt = alchemy_csrc_dt2;
names = alchemy_au1300_intclknames;
break;
default:
return -ENODEV;
}
a = kcalloc(6, sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
ret = 0;
for (i = 0; i < 6; i++) {
id.name = names[i];
if (!id.name)
goto next;
a->shift = i * 5;
a->reg = AU1000_SYS_CLKSRC;
a->reglock = &alchemy_clk_csrc_lock;
a->dt = dt;
/* default to first parent clock if mux is initially
* set to disabled state.
*/
v = alchemy_rdsys(a->reg);
a->parent = ((v >> a->shift) >> 2) & 7;
if (!a->parent) {
a->parent = 1;
a->isen = 0;
} else
a->isen = 1;
a->hw.init = &id;
c = clk_register(NULL, &a->hw);
if (IS_ERR(c))
ret++;
else
clk_register_clkdev(c, id.name, NULL);
next:
a++;
}
return ret;
}
/**********************************************************************/
#define ERRCK(x) \
if (IS_ERR(x)) { \
ret = PTR_ERR(x); \
goto out; \
}
static int __init alchemy_clk_init(void)
{
int ctype = alchemy_get_cputype(), ret, i;
struct clk_aliastable *t = alchemy_clk_aliases;
struct clk *c;
/* Root of the Alchemy clock tree: external 12MHz crystal osc */
c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
0, ALCHEMY_ROOTCLK_RATE);
ERRCK(c)
/* CPU core clock */
c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
ERRCK(c)
/* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
i, AU1000_SYS_AUXPLL);
ERRCK(c)
if (ctype == ALCHEMY_CPU_AU1300) {
c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
ALCHEMY_AUXPLL2_CLK, i,
AU1300_SYS_AUXPLL2);
ERRCK(c)
}
/* sysbus clock: cpu core clock divided by 2, 3 or 4 */
c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
ERRCK(c)
/* peripheral clock: runs at half rate of sysbus clk */
c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
ERRCK(c)
/* SDR/DDR memory clock */
c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
ERRCK(c)
/* L/RCLK: external static bus clock for synchronous mode */
c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
ERRCK(c)
/* Frequency dividers 0-5 */
ret = alchemy_clk_init_fgens(ctype);
if (ret) {
ret = -ENODEV;
goto out;
}
/* divider muxes for internal sources */
ret = alchemy_clk_setup_imux(ctype);
if (ret) {
ret = -ENODEV;
goto out;
}
/* set up aliases drivers might look for */
while (t->base) {
if (t->cputype == ctype)
clk_add_alias(t->alias, NULL, t->base, NULL);
t++;
}
pr_info("Alchemy clocktree installed\n");
return 0;
out:
return ret;
}
postcore_initcall(alchemy_clk_init);
| linux-master | arch/mips/alchemy/common/clock.c |
/*
* Copyright 2000, 2007-2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*
* Updates to 2.6, Pete Popov, Embedded Alley Solutions, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/mipsregs.h>
#include <au1000.h>
extern void __init board_setup(void);
extern void __init alchemy_set_lpj(void);
static bool alchemy_dma_coherent(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
return false;
case ALCHEMY_CPU_AU1200:
/* Au1200 AB USB does not support coherent memory */
if ((read_c0_prid() & PRID_REV_MASK) == 0)
return false;
return true;
default:
return true;
}
}
void __init plat_mem_setup(void)
{
alchemy_set_lpj();
if (au1xxx_cpu_needs_config_od())
/* Various early Au1xx0 errata corrected by this */
set_c0_config(1 << 19); /* Set Config[OD] */
else
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
dma_default_coherent = alchemy_dma_coherent();
board_setup(); /* board specific setup */
/* IO/MEM resources. */
set_io_port_base(0);
ioport_resource.start = IOPORT_RESOURCE_START;
ioport_resource.end = IOPORT_RESOURCE_END;
iomem_resource.start = IOMEM_RESOURCE_START;
iomem_resource.end = IOMEM_RESOURCE_END;
}
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
/* This routine should be valid for all Au1x based boards */
phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
{
unsigned long start = ALCHEMY_PCI_MEMWIN_START;
unsigned long end = ALCHEMY_PCI_MEMWIN_END;
/* Don't fixup 36-bit addresses */
if ((phys_addr >> 32) != 0)
return phys_addr;
/* Check for PCI memory window */
if (phys_addr >= start && (phys_addr + size - 1) <= end)
return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
/* default nop */
return phys_addr;
}
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
phys_addr_t phys_addr = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
return remap_pfn_range(vma, vaddr, phys_addr >> PAGE_SHIFT, size, prot);
}
EXPORT_SYMBOL(io_remap_pfn_range);
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
| linux-master | arch/mips/alchemy/common/setup.c |
/*
* BRIEF MODULE DESCRIPTION
* Au1xx0 Power Management routines.
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*
* Some of the routines are right out of init/main.c, whose
* copyrights apply here.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/pm.h>
#include <linux/sysctl.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <asm/mach-au1x00/au1000.h>
/*
* We need to save/restore a bunch of core registers that are
* either volatile or reset to some state across a processor sleep.
* If reading a register doesn't provide a proper result for a
* later restore, we have to provide a function for loading that
* register and save a copy.
*
* We only have to save/restore registers that aren't otherwise
* done as part of a driver pm_* function.
*/
static unsigned int sleep_sys_clocks[5];
static unsigned int sleep_sys_pinfunc;
static unsigned int sleep_static_memctlr[4][3];
static void save_core_regs(void)
{
/* Clocks and PLLs. */
sleep_sys_clocks[0] = alchemy_rdsys(AU1000_SYS_FREQCTRL0);
sleep_sys_clocks[1] = alchemy_rdsys(AU1000_SYS_FREQCTRL1);
sleep_sys_clocks[2] = alchemy_rdsys(AU1000_SYS_CLKSRC);
sleep_sys_clocks[3] = alchemy_rdsys(AU1000_SYS_CPUPLL);
sleep_sys_clocks[4] = alchemy_rdsys(AU1000_SYS_AUXPLL);
/* pin mux config */
sleep_sys_pinfunc = alchemy_rdsys(AU1000_SYS_PINFUNC);
/* Save the static memory controller configuration. */
sleep_static_memctlr[0][0] = alchemy_rdsmem(AU1000_MEM_STCFG0);
sleep_static_memctlr[0][1] = alchemy_rdsmem(AU1000_MEM_STTIME0);
sleep_static_memctlr[0][2] = alchemy_rdsmem(AU1000_MEM_STADDR0);
sleep_static_memctlr[1][0] = alchemy_rdsmem(AU1000_MEM_STCFG1);
sleep_static_memctlr[1][1] = alchemy_rdsmem(AU1000_MEM_STTIME1);
sleep_static_memctlr[1][2] = alchemy_rdsmem(AU1000_MEM_STADDR1);
sleep_static_memctlr[2][0] = alchemy_rdsmem(AU1000_MEM_STCFG2);
sleep_static_memctlr[2][1] = alchemy_rdsmem(AU1000_MEM_STTIME2);
sleep_static_memctlr[2][2] = alchemy_rdsmem(AU1000_MEM_STADDR2);
sleep_static_memctlr[3][0] = alchemy_rdsmem(AU1000_MEM_STCFG3);
sleep_static_memctlr[3][1] = alchemy_rdsmem(AU1000_MEM_STTIME3);
sleep_static_memctlr[3][2] = alchemy_rdsmem(AU1000_MEM_STADDR3);
}
static void restore_core_regs(void)
{
/* restore clock configuration. Writing CPUPLL last will
* stall a bit and stabilize other clocks (unless this is
* one of those Au1000 with a write-only PLL, where we don't
* have a valid value)
*/
alchemy_wrsys(sleep_sys_clocks[0], AU1000_SYS_FREQCTRL0);
alchemy_wrsys(sleep_sys_clocks[1], AU1000_SYS_FREQCTRL1);
alchemy_wrsys(sleep_sys_clocks[2], AU1000_SYS_CLKSRC);
alchemy_wrsys(sleep_sys_clocks[4], AU1000_SYS_AUXPLL);
if (!au1xxx_cpu_has_pll_wo())
alchemy_wrsys(sleep_sys_clocks[3], AU1000_SYS_CPUPLL);
alchemy_wrsys(sleep_sys_pinfunc, AU1000_SYS_PINFUNC);
/* Restore the static memory controller configuration. */
alchemy_wrsmem(sleep_static_memctlr[0][0], AU1000_MEM_STCFG0);
alchemy_wrsmem(sleep_static_memctlr[0][1], AU1000_MEM_STTIME0);
alchemy_wrsmem(sleep_static_memctlr[0][2], AU1000_MEM_STADDR0);
alchemy_wrsmem(sleep_static_memctlr[1][0], AU1000_MEM_STCFG1);
alchemy_wrsmem(sleep_static_memctlr[1][1], AU1000_MEM_STTIME1);
alchemy_wrsmem(sleep_static_memctlr[1][2], AU1000_MEM_STADDR1);
alchemy_wrsmem(sleep_static_memctlr[2][0], AU1000_MEM_STCFG2);
alchemy_wrsmem(sleep_static_memctlr[2][1], AU1000_MEM_STTIME2);
alchemy_wrsmem(sleep_static_memctlr[2][2], AU1000_MEM_STADDR2);
alchemy_wrsmem(sleep_static_memctlr[3][0], AU1000_MEM_STCFG3);
alchemy_wrsmem(sleep_static_memctlr[3][1], AU1000_MEM_STTIME3);
alchemy_wrsmem(sleep_static_memctlr[3][2], AU1000_MEM_STADDR3);
}
void au_sleep(void)
{
save_core_regs();
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
alchemy_sleep_au1000();
break;
case ALCHEMY_CPU_AU1550:
case ALCHEMY_CPU_AU1200:
alchemy_sleep_au1550();
break;
case ALCHEMY_CPU_AU1300:
alchemy_sleep_au1300();
break;
}
restore_core_regs();
}
| linux-master | arch/mips/alchemy/common/power.c |
/*
*
* BRIEF MODULE DESCRIPTION
* A DMA channel allocator for Au1x00. API is modeled loosely off of
* linux/kernel/dma.c.
*
* Copyright 2000, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
* Copyright (C) 2005 Ralf Baechle ([email protected])
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1000_dma.h>
/*
* A note on resource allocation:
*
* All drivers needing DMA channels, should allocate and release them
* through the public routines `request_dma()' and `free_dma()'.
*
* In order to avoid problems, all processes should allocate resources in
* the same sequence and release them in the reverse order.
*
* So, when allocating DMAs and IRQs, first allocate the DMA, then the IRQ.
* When releasing them, first release the IRQ, then release the DMA. The
* main reason for this order is that, if you are requesting the DMA buffer
* done interrupt, you won't know the irq number until the DMA channel is
* returned from request_dma.
*/
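/*
 * Hedged usage sketch (illustration only, not part of this driver):
 * a client following the ordering described above. The handler and
 * function names are made-up placeholders; dev_id 0 corresponds to
 * UART0_TX in dma_dev_table below.
 */
#if 0
static irqreturn_t my_dma_done(int irq, void *dev_id)
{
/* acknowledge the buffer-done condition, queue the next buffer, ... */
return IRQ_HANDLED;
}
static int my_client_setup(void)
{
/* allocating the channel also hooks the DMA "buffer done" IRQ */
int ch = request_au1000_dma(0, "my-uart-tx", my_dma_done, 0, NULL);
if (ch < 0)
return ch;
/* ... program and use the channel ... */
free_au1000_dma(ch); /* releases the IRQ before the channel */
return 0;
}
#endif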
/* DMA Channel register block spacing */
#define DMA_CHANNEL_LEN 0x00000100
DEFINE_SPINLOCK(au1000_dma_spin_lock);
struct dma_chan au1000_dma_table[NUM_AU1000_DMA_CHANNELS] = {
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,},
{.dev_id = -1,}
};
EXPORT_SYMBOL(au1000_dma_table);
/* Device FIFO addresses and default DMA modes */
static const struct dma_dev {
unsigned int fifo_addr;
unsigned int dma_mode;
} dma_dev_table[DMA_NUM_DEV] = {
{ AU1000_UART0_PHYS_ADDR + 0x04, DMA_DW8 }, /* UART0_TX */
{ AU1000_UART0_PHYS_ADDR + 0x00, DMA_DW8 | DMA_DR }, /* UART0_RX */
{ 0, 0 }, /* DMA_REQ0 */
{ 0, 0 }, /* DMA_REQ1 */
{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 }, /* AC97 TX c */
{ AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */
{ AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */
{ AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */
{ AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */
{ AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */
{ AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */
{ AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */
{ AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */
{ AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */
/* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! */
{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */
{ AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */
};
int au1000_dma_read_proc(char *buf, char **start, off_t fpos,
int length, int *eof, void *data)
{
int i, len = 0;
struct dma_chan *chan;
for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) {
chan = get_dma_chan(i);
if (chan != NULL)
len += sprintf(buf + len, "%2d: %s\n",
i, chan->dev_str);
}
if (fpos >= len) {
*start = buf;
*eof = 1;
return 0;
}
*start = buf + fpos;
len -= fpos;
if (len > length)
return length;
*eof = 1;
return len;
}
/* Device FIFO addresses and default DMA modes - 2nd bank */
static const struct dma_dev dma_dev_table_bank2[DMA_NUM_DEV_BANK2] = {
{ AU1100_SD0_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
{ AU1100_SD0_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR }, /* coherent */
{ AU1100_SD1_PHYS_ADDR + 0x00, DMA_DS | DMA_DW8 }, /* coherent */
{ AU1100_SD1_PHYS_ADDR + 0x04, DMA_DS | DMA_DW8 | DMA_DR } /* coherent */
};
void dump_au1000_dma_channel(unsigned int dmanr)
{
struct dma_chan *chan;
if (dmanr >= NUM_AU1000_DMA_CHANNELS)
return;
chan = &au1000_dma_table[dmanr];
printk(KERN_INFO "Au1000 DMA%d Register Dump:\n", dmanr);
printk(KERN_INFO " mode = 0x%08x\n",
__raw_readl(chan->io + DMA_MODE_SET));
printk(KERN_INFO " addr = 0x%08x\n",
__raw_readl(chan->io + DMA_PERIPHERAL_ADDR));
printk(KERN_INFO " start0 = 0x%08x\n",
__raw_readl(chan->io + DMA_BUFFER0_START));
printk(KERN_INFO " start1 = 0x%08x\n",
__raw_readl(chan->io + DMA_BUFFER1_START));
printk(KERN_INFO " count0 = 0x%08x\n",
__raw_readl(chan->io + DMA_BUFFER0_COUNT));
printk(KERN_INFO " count1 = 0x%08x\n",
__raw_readl(chan->io + DMA_BUFFER1_COUNT));
}
/*
* Finds a free channel, and binds the requested device to it.
* Returns the allocated channel number, or negative on error.
* Requests the DMA done IRQ if irqhandler != NULL.
*/
int request_au1000_dma(int dev_id, const char *dev_str,
irq_handler_t irqhandler,
unsigned long irqflags,
void *irq_dev_id)
{
struct dma_chan *chan;
const struct dma_dev *dev;
int i, ret;
if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) {
if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2))
return -EINVAL;
} else {
if (dev_id < 0 || dev_id >= DMA_NUM_DEV)
return -EINVAL;
}
for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
if (au1000_dma_table[i].dev_id < 0)
break;
if (i == NUM_AU1000_DMA_CHANNELS)
return -ENODEV;
chan = &au1000_dma_table[i];
if (dev_id >= DMA_NUM_DEV) {
dev_id -= DMA_NUM_DEV;
dev = &dma_dev_table_bank2[dev_id];
} else
dev = &dma_dev_table[dev_id];
if (irqhandler) {
chan->irq_dev = irq_dev_id;
ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
chan->irq_dev);
if (ret) {
chan->irq_dev = NULL;
return ret;
}
} else {
chan->irq_dev = NULL;
}
/* fill it in */
chan->io = (void __iomem *)(KSEG1ADDR(AU1000_DMA_PHYS_ADDR) +
i * DMA_CHANNEL_LEN);
chan->dev_id = dev_id;
chan->dev_str = dev_str;
chan->fifo_addr = dev->fifo_addr;
chan->mode = dev->dma_mode;
/* initialize the channel before returning */
init_dma(i);
return i;
}
EXPORT_SYMBOL(request_au1000_dma);
void free_au1000_dma(unsigned int dmanr)
{
struct dma_chan *chan = get_dma_chan(dmanr);
if (!chan) {
printk(KERN_ERR "Error trying to free DMA%d\n", dmanr);
return;
}
disable_dma(dmanr);
if (chan->irq_dev)
free_irq(chan->irq, chan->irq_dev);
chan->irq_dev = NULL;
chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);
static int __init au1000_dma_init(void)
{
int base, i;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
base = AU1000_DMA_INT_BASE;
break;
case ALCHEMY_CPU_AU1500:
base = AU1500_DMA_INT_BASE;
break;
case ALCHEMY_CPU_AU1100:
base = AU1100_DMA_INT_BASE;
break;
default:
goto out;
}
for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
au1000_dma_table[i].irq = base + i;
printk(KERN_INFO "Alchemy DMA initialized\n");
out:
return 0;
}
arch_initcall(au1000_dma_init);
| linux-master | arch/mips/alchemy/common/dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008-2009 Manuel Lauss <[email protected]>
*
* Previous incarnations were:
* Copyright (C) 2001, 2006, 2008 MontaVista Software, <[email protected]>
* Copied and modified Carsten Langgaard's time.c
*
* Carsten Langgaard, [email protected]
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* ########################################################################
*
* ########################################################################
*
* Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
* databooks). Firmware/Board init code must enable the counters in the
* counter control register, otherwise the CP0 counter clocksource/event
* will be installed instead (and use of 'wait' instruction is prohibited).
*/
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/idle.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
/* 32kHz clock enabled and detected */
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
static u64 au1x_counter1_read(struct clocksource *cs)
{
return alchemy_rdsys(AU1000_SYS_RTCREAD);
}
static struct clocksource au1x_counter1_clocksource = {
.name = "alchemy-counter1",
.read = au1x_counter1_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.rating = 1500,
};
static int au1x_rtcmatch2_set_next_event(unsigned long delta,
struct clock_event_device *cd)
{
delta += alchemy_rdsys(AU1000_SYS_RTCREAD);
/* wait for register access */
while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_M21)
;
alchemy_wrsys(delta, AU1000_SYS_RTCMATCH2);
return 0;
}
static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 1500,
.set_next_event = au1x_rtcmatch2_set_next_event,
.cpumask = cpu_possible_mask,
};
static int __init alchemy_time_init(unsigned int m2int)
{
struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
unsigned long t;
au1x_rtcmatch2_clockdev.irq = m2int;
/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
* has been detected. If so install the rtcmatch2 clocksource,
* otherwise don't bother. Note that both bits being set is by
* no means a definite guarantee that the counters actually work
* (the 32S bit seems to be stuck set to 1 once a single clock-
* edge is detected, hence the timeouts).
*/
if (CNTR_OK != (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & CNTR_OK))
goto cntr_err;
/*
* setup counter 1 (RTC) to tick at full speed
*/
t = 0xffffff;
while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
alchemy_wrsys(0, AU1000_SYS_RTCTRIM); /* 32.768 kHz */
t = 0xffffff;
while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
alchemy_wrsys(0, AU1000_SYS_RTCWRITE);
t = 0xffffff;
while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C1S) && --t)
asm volatile ("nop");
if (!t)
goto cntr_err;
/* register counter1 clocksource and event device */
clocksource_register_hz(&au1x_counter1_clocksource, 32768);
cd->shift = 32;
cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
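/* for a clock event device, mult/shift convert nanoseconds to ticks:
 * ticks ~= (ns * mult) >> shift; one 32.768 kHz tick is ~30518 ns
 */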
cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
cd->max_delta_ticks = 0xffffffff;
cd->min_delta_ns = clockevent_delta2ns(9, cd);
cd->min_delta_ticks = 9; /* ~0.28ms */
clockevents_register_device(cd);
if (request_irq(m2int, au1x_rtcmatch2_irq, IRQF_TIMER, "timer",
&au1x_rtcmatch2_clockdev))
pr_err("Failed to register timer interrupt\n");
printk(KERN_INFO "Alchemy clocksource installed\n");
return 0;
cntr_err:
return -1;
}
static int alchemy_m2inttab[] __initdata = {
AU1000_RTC_MATCH2_INT,
AU1500_RTC_MATCH2_INT,
AU1100_RTC_MATCH2_INT,
AU1550_RTC_MATCH2_INT,
AU1200_RTC_MATCH2_INT,
AU1300_RTC_MATCH2_INT,
};
void __init plat_time_init(void)
{
int t;
t = alchemy_get_cputype();
if (t == ALCHEMY_CPU_UNKNOWN ||
alchemy_time_init(alchemy_m2inttab[t]))
cpu_wait = NULL; /* wait doesn't work with r4k timer */
}
| linux-master | arch/mips/alchemy/common/time.c |
/*
*
* BRIEF MODULE DESCRIPTION
* PROM library initialisation code, supports YAMON and U-Boot.
*
* Copyright 2000-2001, 2006, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <[email protected]>
*
* This file was derived from Carsten Langgaard's
* arch/mips/mips-boards/xx files.
*
* Carsten Langgaard, [email protected]
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
int prom_argc;
char **prom_argv;
char **prom_envp;
void __init prom_init_cmdline(void)
{
int i;
for (i = 1; i < prom_argc; i++) {
strlcat(arcs_cmdline, prom_argv[i], COMMAND_LINE_SIZE);
if (i < (prom_argc - 1))
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
}
}
char *prom_getenv(char *envname)
{
/*
* Return a pointer to the given environment variable.
* YAMON uses "name", "value" pairs, while U-Boot uses "name=value".
*/
char **env = prom_envp;
int i = strlen(envname);
int yamon = (*env && strchr(*env, '=') == NULL);
while (*env) {
if (yamon) {
if (strcmp(envname, *env++) == 0)
return *env;
} else if (strncmp(envname, *env, i) == 0 && (*env)[i] == '=')
return *env + i + 1;
env++;
}
return NULL;
}
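/* Illustrative environment layouts (hypothetical values):
 * YAMON: prom_envp = { "memsize", "0x04000000", NULL }
 * U-Boot: prom_envp = { "memsize=0x04000000", NULL }
 * Either way, prom_getenv("memsize") returns "0x04000000".
 */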
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = (int)fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = SZ_64M; /* minimum memsize is 64MB RAM */
memblock_add(0, memsize);
}
static inline unsigned char str2hexnum(unsigned char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return 0; /* foo */
}
static inline void str2eaddr(unsigned char *ea, unsigned char *str)
{
int i;
for (i = 0; i < 6; i++) {
unsigned char num;
if ((*str == '.') || (*str == ':'))
str++;
num = str2hexnum(*str++) << 4;
num |= str2hexnum(*str++);
ea[i] = num;
}
}
int __init prom_get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
/* Check the environment variables first */
ethaddr_str = prom_getenv("ethaddr");
if (!ethaddr_str) {
/* Check command line */
ethaddr_str = strstr(arcs_cmdline, "ethaddr=");
if (!ethaddr_str)
return -1;
ethaddr_str += strlen("ethaddr=");
}
str2eaddr(ethernet_addr, ethaddr_str);
return 0;
}
| linux-master | arch/mips/alchemy/common/prom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 YAEGASHI Takeshi
* Hitachi HD64461 companion chip support
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/hd64461.h>
/* This belongs in cpu specific */
#define INTC_ICR1 0xA4140010UL
static void hd64461_mask_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
nimr = __raw_readw(HD64461_NIMR);
nimr |= mask;
__raw_writew(nimr, HD64461_NIMR);
}
static void hd64461_unmask_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned short nimr;
unsigned short mask = 1 << (irq - HD64461_IRQBASE);
nimr = __raw_readw(HD64461_NIMR);
nimr &= ~mask;
__raw_writew(nimr, HD64461_NIMR);
}
static void hd64461_mask_and_ack_irq(struct irq_data *data)
{
hd64461_mask_irq(data);
#ifdef CONFIG_HD64461_ENABLER
if (data->irq == HD64461_IRQBASE + 13)
__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif
}
static struct irq_chip hd64461_irq_chip = {
.name = "HD64461-IRQ",
.irq_mask = hd64461_mask_irq,
.irq_mask_ack = hd64461_mask_and_ack_irq,
.irq_unmask = hd64461_unmask_irq,
};
static void hd64461_irq_demux(struct irq_desc *desc)
{
unsigned short intv = __raw_readw(HD64461_NIRR);
unsigned int ext_irq = HD64461_IRQBASE;
intv &= (1 << HD64461_IRQ_NUM) - 1;
for (; intv; intv >>= 1, ext_irq++) {
if (!(intv & 1))
continue;
generic_handle_irq(ext_irq);
}
}
int __init setup_hd64461(void)
{
int irq_base, i;
printk(KERN_INFO
"HD64461 configured at 0x%x on irq %d(mapped into %d to %d)\n",
HD64461_IOBASE, CONFIG_HD64461_IRQ, HD64461_IRQBASE,
HD64461_IRQBASE + 15);
/* Should be at processor specific part.. */
#if defined(CONFIG_CPU_SUBTYPE_SH7709)
__raw_writew(0x2240, INTC_ICR1);
#endif
__raw_writew(0xffff, HD64461_NIMR);
irq_base = irq_alloc_descs(HD64461_IRQBASE, HD64461_IRQBASE, 16, -1);
if (IS_ERR_VALUE(irq_base)) {
pr_err("%s: failed hooking irqs for HD64461\n", __func__);
return irq_base;
}
for (i = 0; i < 16; i++)
irq_set_chip_and_handler(irq_base + i, &hd64461_irq_chip,
handle_level_irq);
irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);
#ifdef CONFIG_HD64461_ENABLER
printk(KERN_INFO "HD64461: enabling PCMCIA devices\n");
__raw_writeb(0x4c, HD64461_PCC1CSCIER);
__raw_writeb(0x00, HD64461_PCC1CSCR);
#endif
return 0;
}
module_init(setup_hd64461);
| linux-master | arch/sh/cchips/hd6446x/hd64461.c |
/*
* arch/sh/math-emu/math.c
*
* Copyright (C) 2006 Takashi YOSHII <[email protected]>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include "sfp-util.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#define FPUL (fregs->fpul)
#define FPSCR (fregs->fpscr)
#define FPSCR_RM (FPSCR&3)
#define FPSCR_DN ((FPSCR>>18)&1)
#define FPSCR_PR ((FPSCR>>19)&1)
#define FPSCR_SZ ((FPSCR>>20)&1)
#define FPSCR_FR ((FPSCR>>21)&1)
#define FPSCR_MASK 0x003fffffUL
#define BANK(n) (n^(FPSCR_FR?16:0))
#define FR ((unsigned long*)(fregs->fp_regs))
#define FR0 (FR[BANK(0)])
#define FRn (FR[BANK(n)])
#define FRm (FR[BANK(m)])
#define DR ((unsigned long long*)(fregs->fp_regs))
#define DRn (DR[BANK(n)/2])
#define DRm (DR[BANK(m)/2])
#define XREG(n) (n^16)
#define XFn (FR[BANK(XREG(n))])
#define XFm (FR[BANK(XREG(m))])
#define XDn (DR[BANK(XREG(n))/2])
#define XDm (DR[BANK(XREG(m))/2])
#define R0 (regs->regs[0])
#define Rn (regs->regs[n])
#define Rm (regs->regs[m])
#define MWRITE(d,a) ({if(put_user(d, (typeof (d) __user *)a)) return -EFAULT;})
#define MREAD(d,a) ({if(get_user(d, (typeof (d) __user *)a)) return -EFAULT;})
#define PACK_S(r,f) FP_PACK_SP(&r,f)
#define UNPACK_S(f,r) FP_UNPACK_SP(f,&r)
#define PACK_D(r,f) \
{u32 t[2]; FP_PACK_DP(t,f); ((u32*)&r)[0]=t[1]; ((u32*)&r)[1]=t[0];}
#define UNPACK_D(f,r) \
{u32 t[2]; t[0]=((u32*)&r)[1]; t[1]=((u32*)&r)[0]; FP_UNPACK_DP(f,t);}
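/* Note the crossed indices in PACK_D/UNPACK_D above: soft-fp's two-word
 * image and the emulated DR register image keep their 32-bit halves in
 * opposite order, so the halves are swapped while packing/unpacking.
 */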
// 2 args instructions.
#define BOTH_PRmn(op,x) \
FP_DECL_EX; if(FPSCR_PR) op(D,x,DRm,DRn); else op(S,x,FRm,FRn);
#define CMP_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_##SZ(R, Fn, Fm, 2); }while(0)
#define EQ_X(SZ,R,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_CMP_EQ_##SZ(R, Fn, Fm); }while(0)
#define CMP(OP) ({ int r; BOTH_PRmn(OP##_X,r); r; })
static int
fcmp_gt(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP) > 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
static int
fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP /*EQ*/) == 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
#define ARITH_X(SZ,OP,M,N) do{ \
FP_DECL_##SZ(Fm); FP_DECL_##SZ(Fn); FP_DECL_##SZ(Fr); \
UNPACK_##SZ(Fm, M); UNPACK_##SZ(Fn, N); \
FP_##OP##_##SZ(Fr, Fn, Fm); \
PACK_##SZ(N, Fr); }while(0)
static int
fadd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, ADD);
return 0;
}
static int
fsub(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, SUB);
return 0;
}
static int
fmul(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, MUL);
return 0;
}
static int
fdiv(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
BOTH_PRmn(ARITH_X, DIV);
return 0;
}
static int
fmac(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
FP_DECL_EX;
FP_DECL_S(Fr);
FP_DECL_S(Ft);
FP_DECL_S(F0);
FP_DECL_S(Fm);
FP_DECL_S(Fn);
UNPACK_S(F0, FR0);
UNPACK_S(Fm, FRm);
UNPACK_S(Fn, FRn);
FP_MUL_S(Ft, Fm, F0);
FP_ADD_S(Fr, Fn, Ft);
PACK_S(FRn, Fr);
return 0;
}
// to process fmov's extension (odd n for DR access XD).
#define FMOV_EXT(x) if(x&1) x+=16-1
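/* e.g. n = 1 -> 16, n = 3 -> 18: odd indices redirect to the XD bank */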
static int
fmov_idx_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
MREAD(FRn, Rm + R0 + 4);
n++;
MREAD(FRn, Rm + R0);
} else {
MREAD(FRn, Rm + R0);
}
return 0;
}
static int
fmov_mem_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
MREAD(FRn, Rm + 4);
n++;
MREAD(FRn, Rm);
} else {
MREAD(FRn, Rm);
}
return 0;
}
static int
fmov_inc_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(n);
MREAD(FRn, Rm + 4);
n++;
MREAD(FRn, Rm);
Rm += 8;
} else {
MREAD(FRn, Rm);
Rm += 4;
}
return 0;
}
static int
fmov_reg_idx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
MWRITE(FRm, Rn + R0 + 4);
m++;
MWRITE(FRm, Rn + R0);
} else {
MWRITE(FRm, Rn + R0);
}
return 0;
}
static int
fmov_reg_mem(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
MWRITE(FRm, Rn + 4);
m++;
MWRITE(FRm, Rn);
} else {
MWRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_dec(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
Rn -= 8;
MWRITE(FRm, Rn + 4);
m++;
MWRITE(FRm, Rn);
} else {
Rn -= 4;
MWRITE(FRm, Rn);
}
return 0;
}
static int
fmov_reg_reg(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m,
int n)
{
if (FPSCR_SZ) {
FMOV_EXT(m);
FMOV_EXT(n);
DRn = DRm;
} else {
FRn = FRm;
}
return 0;
}
static int
fnop_mn(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
return -EINVAL;
}
// 1 arg instructions.
#define NOTYETn(i) static int i(struct sh_fpu_soft_struct *fregs, int n) \
{ printk( #i " not yet done.\n"); return 0; }
NOTYETn(ftrv)
NOTYETn(fsqrt)
NOTYETn(fipr)
NOTYETn(fsca)
NOTYETn(fsrra)
#define EMU_FLOAT_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
FP_FROM_INT_##SZ(Fn, FPUL, 32, int); \
PACK_##SZ(N, Fn); }while(0)
static int ffloat(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FLOAT_X(D, DRn);
else
EMU_FLOAT_X(S, FRn);
return 0;
}
#define EMU_FTRC_X(SZ,N) do { \
FP_DECL_##SZ(Fn); \
UNPACK_##SZ(Fn, N); \
FP_TO_INT_##SZ(FPUL, Fn, 32, 1); }while(0)
static int ftrc(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
if (FPSCR_PR)
EMU_FTRC_X(D, DRn);
else
EMU_FTRC_X(S, FRn);
return 0;
}
static int fcnvsd(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_S(Fn);
FP_DECL_D(Fr);
UNPACK_S(Fn, FPUL);
FP_CONV(D, S, 2, 1, Fr, Fn);
PACK_D(DRn, Fr);
return 0;
}
static int fcnvds(struct sh_fpu_soft_struct *fregs, int n)
{
FP_DECL_EX;
FP_DECL_D(Fn);
FP_DECL_S(Fr);
UNPACK_D(Fn, DRn);
FP_CONV(S, D, 1, 2, Fr, Fn);
PACK_S(FPUL, Fr);
return 0;
}
static int fxchg(struct sh_fpu_soft_struct *fregs, int flag)
{
FPSCR ^= flag;
return 0;
}
static int fsts(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = FPUL;
return 0;
}
static int flds(struct sh_fpu_soft_struct *fregs, int n)
{
FPUL = FRn;
return 0;
}
static int fneg(struct sh_fpu_soft_struct *fregs, int n)
{
FRn ^= (1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fabs(struct sh_fpu_soft_struct *fregs, int n)
{
FRn &= ~(1 << (_FP_W_TYPE_SIZE - 1));
return 0;
}
static int fld0(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = 0;
return 0;
}
static int fld1(struct sh_fpu_soft_struct *fregs, int n)
{
FRn = (_FP_EXPBIAS_S << (_FP_FRACBITS_S - 1));
return 0;
}
static int fnop_n(struct sh_fpu_soft_struct *fregs, int n)
{
return -EINVAL;
}
/// Instruction decoders.
static int id_fxfd(struct sh_fpu_soft_struct *, int);
static int id_fnxd(struct sh_fpu_soft_struct *, struct pt_regs *, int, int);
static int (*fnxd[])(struct sh_fpu_soft_struct *, int) = {
fsts, flds, ffloat, ftrc, fneg, fabs, fsqrt, fsrra,
fld0, fld1, fcnvsd, fcnvds, fnop_n, fnop_n, fipr, id_fxfd
};
static int (*fnmx[])(struct sh_fpu_soft_struct *, struct pt_regs *, int, int) = {
fadd, fsub, fmul, fdiv, fcmp_eq, fcmp_gt, fmov_idx_reg, fmov_reg_idx,
fmov_mem_reg, fmov_inc_reg, fmov_reg_mem, fmov_reg_dec,
fmov_reg_reg, id_fnxd, fmac, fnop_mn};
static int id_fxfd(struct sh_fpu_soft_struct *fregs, int x)
{
/* FPSCR bit masks to toggle: SZ (bit 20), PR (bit 19), FR (bit 21);
 * the FPSCR_* macros above extract bit values, not masks
 */
const int flag[] = { 1 << 20, 1 << 19, 1 << 21, 0 };
switch (x & 3) {
case 3:
fxchg(fregs, flag[x >> 2]);
break;
case 1:
ftrv(fregs, x - 1);
break;
default:
fsca(fregs, x);
}
return 0;
}
static int
id_fnxd(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int x, int n)
{
return (fnxd[x])(fregs, n);
}
static int
id_fnmx(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = (code >> 8) & 0xf, m = (code >> 4) & 0xf, x = code & 0xf;
return (fnmx[x])(fregs, regs, m, n);
}
static int
id_sys(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, u16 code)
{
int n = ((code >> 8) & 0xf);
unsigned long *reg = (code & 0x0010) ? &FPUL : &FPSCR;
switch (code & 0xf0ff) {
case 0x005a:
case 0x006a:
Rn = *reg;
break;
case 0x405a:
case 0x406a:
*reg = Rn;
break;
case 0x4052:
case 0x4062:
Rn -= 4;
MWRITE(*reg, Rn);
break;
case 0x4056:
case 0x4066:
MREAD(*reg, Rn);
Rn += 4;
break;
default:
return -EINVAL;
}
return 0;
}
static int fpu_emulate(u16 code, struct sh_fpu_soft_struct *fregs, struct pt_regs *regs)
{
if ((code & 0xf000) == 0xf000)
return id_fnmx(fregs, regs, code);
else
return id_sys(fregs, regs, code);
}
/**
* fpu_init - Initialize FPU registers
* @fpu: Pointer to software emulated FPU registers.
*/
static void fpu_init(struct sh_fpu_soft_struct *fpu)
{
int i;
fpu->fpscr = FPSCR_INIT;
fpu->fpul = 0;
for (i = 0; i < 16; i++) {
fpu->fp_regs[i] = 0;
fpu->xfp_regs[i]= 0;
}
}
/**
* do_fpu_inst - Handle reserved instructions for FPU emulation
* @inst: instruction code.
* @regs: registers on stack.
*/
int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */
fpu_init(fpu);
task_thread_info(tsk)->status |= TS_USEDFPU;
}
return fpu_emulate(inst, fpu, regs);
}
| linux-master | arch/sh/math-emu/math.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic push-switch framework
*
* Copyright (C) 2006 Paul Mundt
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <asm/push-switch.h>
#define DRV_NAME "push-switch"
#define DRV_VERSION "0.1.1"
static ssize_t switch_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct push_switch_platform_info *psw_info = dev->platform_data;
return sprintf(buf, "%s\n", psw_info->name);
}
static DEVICE_ATTR_RO(switch);
static void switch_timer(struct timer_list *t)
{
struct push_switch *psw = from_timer(psw, t, debounce);
schedule_work(&psw->work);
}
static void switch_work_handler(struct work_struct *work)
{
struct push_switch *psw = container_of(work, struct push_switch, work);
struct platform_device *pdev = psw->pdev;
psw->state = 0;
kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE);
}
static int switch_drv_probe(struct platform_device *pdev)
{
struct push_switch_platform_info *psw_info;
struct push_switch *psw;
int ret, irq;
psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL);
if (unlikely(!psw))
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (unlikely(irq < 0)) {
ret = -ENODEV;
goto err;
}
psw_info = pdev->dev.platform_data;
BUG_ON(!psw_info);
ret = request_irq(irq, psw_info->irq_handler,
psw_info->irq_flags,
psw_info->name ? psw_info->name : DRV_NAME, pdev);
if (unlikely(ret < 0))
goto err;
if (psw_info->name) {
ret = device_create_file(&pdev->dev, &dev_attr_switch);
if (unlikely(ret)) {
dev_err(&pdev->dev, "Failed creating device attrs\n");
ret = -EINVAL;
goto err_irq;
}
}
INIT_WORK(&psw->work, switch_work_handler);
timer_setup(&psw->debounce, switch_timer, 0);
/* Workqueue API brain-damage */
psw->pdev = pdev;
platform_set_drvdata(pdev, psw);
return 0;
err_irq:
free_irq(irq, pdev);
err:
kfree(psw);
return ret;
}
static int switch_drv_remove(struct platform_device *pdev)
{
struct push_switch *psw = platform_get_drvdata(pdev);
struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
int irq = platform_get_irq(pdev, 0);
if (psw_info->name)
device_remove_file(&pdev->dev, &dev_attr_switch);
platform_set_drvdata(pdev, NULL);
timer_shutdown_sync(&psw->debounce);
flush_work(&psw->work);
free_irq(irq, pdev);
kfree(psw);
return 0;
}
static struct platform_driver switch_driver = {
.probe = switch_drv_probe,
.remove = switch_drv_remove,
.driver = {
.name = DRV_NAME,
},
};
static int __init switch_init(void)
{
printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
return platform_driver_register(&switch_driver);
}
static void __exit switch_exit(void)
{
platform_driver_unregister(&switch_driver);
}
module_init(switch_init);
module_exit(switch_exit);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt");
MODULE_LICENSE("GPL v2");
| linux-master | arch/sh/drivers/push-switch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic heartbeat driver for regular LED banks
*
* Copyright (C) 2007 - 2010 Paul Mundt
*
* Most SH reference boards include a number of individual LEDs that can
* be independently controlled (either via a pre-defined hardware
* function or via the LED class, if desired -- the hardware tends to
* encapsulate some of the same "triggers" that the LED class supports,
* so there's not too much value in it).
*
* Additionally, most of these boards also have a LED bank that we've
* traditionally used for strobing the load average. This use case is
* handled by this driver, rather than giving each LED bit position its
* own struct device.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/heartbeat.h>
#define DRV_NAME "heartbeat"
#define DRV_VERSION "0.1.2"
static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
unsigned bit, unsigned int inverted)
{
unsigned int new;
new = (1 << hd->bit_pos[bit]);
if (inverted)
new = ~new;
new &= hd->mask;
switch (hd->regsize) {
case 32:
new |= ioread32(hd->base) & ~hd->mask;
iowrite32(new, hd->base);
break;
case 16:
new |= ioread16(hd->base) & ~hd->mask;
iowrite16(new, hd->base);
break;
default:
new |= ioread8(hd->base) & ~hd->mask;
iowrite8(new, hd->base);
break;
}
}
static void heartbeat_timer(struct timer_list *t)
{
struct heartbeat_data *hd = from_timer(hd, t, timer);
static unsigned bit = 0, up = 1;
heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
bit += up;
if ((bit == 0) || (bit == (hd->nr_bits)-1))
up = -up;
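/* retrigger interval scales with the load average: ~10 jiffies when
 * avenrun[0] is zero, approaching 110 jiffies under heavy load
 */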
mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) /
((avenrun[0] / 5) + (3 << FSHIFT)))));
}
static int heartbeat_drv_probe(struct platform_device *pdev)
{
struct resource *res;
struct heartbeat_data *hd;
int i;
if (unlikely(pdev->num_resources != 1)) {
dev_err(&pdev->dev, "invalid number of resources\n");
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(res == NULL)) {
dev_err(&pdev->dev, "invalid resource\n");
return -EINVAL;
}
if (pdev->dev.platform_data) {
hd = pdev->dev.platform_data;
} else {
hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL);
if (unlikely(!hd))
return -ENOMEM;
}
hd->base = ioremap(res->start, resource_size(res));
if (unlikely(!hd->base)) {
dev_err(&pdev->dev, "ioremap failed\n");
if (!pdev->dev.platform_data)
kfree(hd);
return -ENXIO;
}
if (!hd->nr_bits) {
hd->bit_pos = default_bit_pos;
hd->nr_bits = ARRAY_SIZE(default_bit_pos);
}
hd->mask = 0;
for (i = 0; i < hd->nr_bits; i++)
hd->mask |= (1 << hd->bit_pos[i]);
if (!hd->regsize) {
switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
hd->regsize = 32;
break;
case IORESOURCE_MEM_16BIT:
hd->regsize = 16;
break;
case IORESOURCE_MEM_8BIT:
default:
hd->regsize = 8;
break;
}
}
timer_setup(&hd->timer, heartbeat_timer, 0);
platform_set_drvdata(pdev, hd);
return mod_timer(&hd->timer, jiffies + 1);
}
static struct platform_driver heartbeat_driver = {
.probe = heartbeat_drv_probe,
.driver = {
.name = DRV_NAME,
.suppress_bind_attrs = true,
},
};
static int __init heartbeat_init(void)
{
printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
return platform_driver_register(&heartbeat_driver);
}
device_initcall(heartbeat_init);
| linux-master | arch/sh/drivers/heartbeat.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/platform_early.h>
#include <linux/mod_devicetable.h>
#include <linux/pm.h>
static __initdata LIST_HEAD(sh_early_platform_driver_list);
static __initdata LIST_HEAD(sh_early_platform_device_list);
static const struct platform_device_id *
platform_match_id(const struct platform_device_id *id,
struct platform_device *pdev)
{
while (id->name[0]) {
if (strcmp(pdev->name, id->name) == 0) {
pdev->id_entry = id;
return id;
}
id++;
}
return NULL;
}
static int platform_match(struct device *dev, struct device_driver *drv)
{
struct platform_device *pdev = to_platform_device(dev);
struct platform_driver *pdrv = to_platform_driver(drv);
/* When driver_override is set, only bind to the matching driver */
if (pdev->driver_override)
return !strcmp(pdev->driver_override, drv->name);
/* Then try to match against the id table */
if (pdrv->id_table)
return platform_match_id(pdrv->id_table, pdev) != NULL;
/* fall-back to driver name match */
return (strcmp(pdev->name, drv->name) == 0);
}
#ifdef CONFIG_PM
static void device_pm_init_common(struct device *dev)
{
if (!dev->power.early_init) {
spin_lock_init(&dev->power.lock);
dev->power.qos = NULL;
dev->power.early_init = true;
}
}
static void pm_runtime_early_init(struct device *dev)
{
dev->power.disable_depth = 1;
device_pm_init_common(dev);
}
#else
static void pm_runtime_early_init(struct device *dev) {}
#endif
/**
* sh_early_platform_driver_register - register early platform driver
* @epdrv: sh_early_platform driver structure
* @buf: string passed from early_param()
*
* Helper function for sh_early_platform_init() / sh_early_platform_init_buffer()
*/
int __init sh_early_platform_driver_register(struct sh_early_platform_driver *epdrv,
char *buf)
{
char *tmp;
int n;
/* Simply add the driver to the end of the global list.
* Drivers will by default be put on the list in compiled-in order.
*/
if (!epdrv->list.next) {
INIT_LIST_HEAD(&epdrv->list);
list_add_tail(&epdrv->list, &sh_early_platform_driver_list);
}
/* If the user has specified device then make sure the driver
* gets prioritized. The driver of the last device specified on
* command line will be put first on the list.
*/
n = strlen(epdrv->pdrv->driver.name);
if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
list_move(&epdrv->list, &sh_early_platform_driver_list);
/* Allow passing parameters after device name */
if (buf[n] == '\0' || buf[n] == ',')
epdrv->requested_id = -1;
else {
epdrv->requested_id = simple_strtoul(&buf[n + 1],
&tmp, 10);
if (buf[n] != '.' || (tmp == &buf[n + 1])) {
epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
n = 0;
} else
n += strcspn(&buf[n + 1], ",") + 1;
}
if (buf[n] == ',')
n++;
if (epdrv->bufsize) {
memcpy(epdrv->buffer, &buf[n],
min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
epdrv->buffer[epdrv->bufsize - 1] = '\0';
}
}
return 0;
}
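/* Example (hypothetical command line): with buf = "sh-sci.1,115200",
 * the code above matches driver name "sh-sci", parses requested_id = 1,
 * and copies "115200" into epdrv->buffer for the driver to interpret.
 */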
/**
* sh_early_platform_add_devices - adds a number of early platform devices
* @devs: array of early platform devices to add
* @num: number of early platform devices in array
*
* Used by early architecture code to register early platform devices and
* their platform data.
*/
void __init sh_early_platform_add_devices(struct platform_device **devs, int num)
{
struct device *dev;
int i;
/* simply add the devices to list */
for (i = 0; i < num; i++) {
dev = &devs[i]->dev;
if (!dev->devres_head.next) {
pm_runtime_early_init(dev);
INIT_LIST_HEAD(&dev->devres_head);
list_add_tail(&dev->devres_head,
&sh_early_platform_device_list);
}
}
}
/**
* sh_early_platform_driver_register_all - register early platform drivers
* @class_str: string to identify early platform driver class
*
* Used by architecture code to register all early platform drivers
* for a certain class. If omitted then only early platform drivers
* with matching kernel command line class parameters will be registered.
*/
void __init sh_early_platform_driver_register_all(char *class_str)
{
/* The "class_str" parameter may or may not be present on the kernel
* command line. If it is present then there may be more than one
* matching parameter.
*
* Since we register our early platform drivers using early_param()
* we need to make sure that they also get registered in the case
* when the parameter is missing from the kernel command line.
*
* We use parse_early_options() to make sure the early_param() gets
* called at least once. The early_param() may be called more than
* once since the name of the preferred device may be specified on
* the kernel command line. sh_early_platform_driver_register() handles
* this case for us.
*/
parse_early_options(class_str);
}
/**
* sh_early_platform_match - find early platform device matching driver
* @epdrv: early platform driver structure
* @id: id to match against
*/
static struct platform_device * __init
sh_early_platform_match(struct sh_early_platform_driver *epdrv, int id)
{
struct platform_device *pd;
list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head)
if (platform_match(&pd->dev, &epdrv->pdrv->driver))
if (pd->id == id)
return pd;
return NULL;
}
/**
* sh_early_platform_left - check if early platform driver has matching devices
* @epdrv: early platform driver structure
* @id: return true if id or above exists
*/
static int __init sh_early_platform_left(struct sh_early_platform_driver *epdrv,
int id)
{
struct platform_device *pd;
list_for_each_entry(pd, &sh_early_platform_device_list, dev.devres_head)
if (platform_match(&pd->dev, &epdrv->pdrv->driver))
if (pd->id >= id)
return 1;
return 0;
}
/**
* sh_early_platform_driver_probe_id - probe drivers matching class_str and id
* @class_str: string to identify early platform driver class
* @id: id to match against
* @nr_probe: number of platform devices to successfully probe before exiting
*/
static int __init sh_early_platform_driver_probe_id(char *class_str,
int id,
int nr_probe)
{
struct sh_early_platform_driver *epdrv;
struct platform_device *match;
int match_id;
int n = 0;
int left = 0;
list_for_each_entry(epdrv, &sh_early_platform_driver_list, list) {
/* only use drivers matching our class_str */
if (strcmp(class_str, epdrv->class_str))
continue;
if (id == -2) {
match_id = epdrv->requested_id;
left = 1;
} else {
match_id = id;
left += sh_early_platform_left(epdrv, id);
/* skip requested id */
switch (epdrv->requested_id) {
case EARLY_PLATFORM_ID_ERROR:
case EARLY_PLATFORM_ID_UNSET:
break;
default:
if (epdrv->requested_id == id)
match_id = EARLY_PLATFORM_ID_UNSET;
}
}
switch (match_id) {
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
default:
match = sh_early_platform_match(epdrv, match_id);
}
if (match) {
/*
* Set up a sensible init_name to enable
* dev_name() and others to be used before the
* rest of the driver core is initialized.
*/
if (!match->dev.init_name && slab_is_available()) {
if (match->id != -1)
match->dev.init_name =
kasprintf(GFP_KERNEL, "%s.%d",
match->name,
match->id);
else
match->dev.init_name =
kasprintf(GFP_KERNEL, "%s",
match->name);
if (!match->dev.init_name)
return -ENOMEM;
}
if (epdrv->pdrv->probe(match))
pr_warn("%s: unable to probe %s early.\n",
class_str, match->name);
else
n++;
}
if (n >= nr_probe)
break;
}
if (left)
return n;
else
return -ENODEV;
}
/**
* sh_early_platform_driver_probe - probe a class of registered drivers
* @class_str: string to identify early platform driver class
* @nr_probe: number of platform devices to successfully probe before exiting
* @user_only: only probe user specified early platform devices
*
* Used by architecture code to probe registered early platform drivers
* within a certain class. For probe to happen a registered early platform
* device matching a registered early platform driver is needed.
*/
int __init sh_early_platform_driver_probe(char *class_str,
int nr_probe,
int user_only)
{
int k, n, i;
n = 0;
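	/*
	 * The first pass (id == -2) probes devices whose id the user
	 * requested on the command line; subsequent passes walk the
	 * remaining device ids from -1 upwards until nothing is left.
	 */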
for (i = -2; n < nr_probe; i++) {
k = sh_early_platform_driver_probe_id(class_str, i, nr_probe - n);
if (k < 0)
break;
n += k;
if (user_only)
break;
}
return n;
}
/**
* early_platform_cleanup - clean up early platform code
*/
void __init early_platform_cleanup(void)
{
struct platform_device *pd, *pd2;
/* clean up the devres list used to chain devices */
list_for_each_entry_safe(pd, pd2, &sh_early_platform_device_list,
dev.devres_head) {
list_del(&pd->dev.devres_head);
memset(&pd->dev.devres_head, 0, sizeof(pd->dev.devres_head));
}
}
| linux-master | arch/sh/drivers/platform_early.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic SH-4 / SH-4A PCIC operations (SH7751, SH7780).
*
* Copyright (C) 2002 - 2009 Paul Mundt
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <asm/addrspace.h>
#include "pci-sh4.h"
/*
* Direct access to PCI hardware...
*/
#define CONFIG_CMD(bus, devfn, where) \
(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
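/*
 * e.g. bus 0, devfn 0x08, where 0x04 encodes to 0x80000804: the enable
 * bit, the bus number in bits 23:16, devfn in bits 15:8, and the
 * dword-aligned register offset in the low byte.
 */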
/*
* Functions for accessing PCI configuration space with type 1 accesses
*/
static int sh4_pci_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct pci_channel *chan = bus->sysdata;
unsigned long flags;
u32 data;
/*
* PCIPDR may only be accessed as 32 bit words,
* so we must do byte alignment by hand
*/
raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR);
data = pci_read_reg(chan, SH4_PCIPDR);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
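	/*
	 * Extract the requested bytes from the 32-bit word, e.g. a 2-byte
	 * read at where = 0x6 shifts by (0x6 & 2) << 3 = 16 and returns
	 * the upper half-word.
	 */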
switch (size) {
case 1:
*val = (data >> ((where & 3) << 3)) & 0xff;
break;
case 2:
*val = (data >> ((where & 2) << 3)) & 0xffff;
break;
case 4:
*val = data;
break;
default:
return PCIBIOS_FUNC_NOT_SUPPORTED;
}
return PCIBIOS_SUCCESSFUL;
}
/*
 * Since the SH4 only does 32-bit accesses we'll have to do a
 * read-modify-write operation.
 * We'll allow an odd byte offset, though it should arguably be rejected.
*/
static int sh4_pci_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct pci_channel *chan = bus->sysdata;
unsigned long flags;
int shift;
u32 data;
raw_spin_lock_irqsave(&pci_config_lock, flags);
pci_write_reg(chan, CONFIG_CMD(bus, devfn, where), SH4_PCIPAR);
data = pci_read_reg(chan, SH4_PCIPDR);
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
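	/*
	 * Merge the new bytes into the word just read, e.g. a 1-byte write
	 * at where = 0x3 replaces only bits 31:24 before the word is
	 * written back below.
	 */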
switch (size) {
case 1:
shift = (where & 3) << 3;
data &= ~(0xff << shift);
data |= ((val & 0xff) << shift);
break;
case 2:
shift = (where & 2) << 3;
data &= ~(0xffff << shift);
data |= ((val & 0xffff) << shift);
break;
case 4:
data = val;
break;
default:
return PCIBIOS_FUNC_NOT_SUPPORTED;
}
pci_write_reg(chan, data, SH4_PCIPDR);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sh4_pci_ops = {
.read = sh4_pci_read,
.write = sh4_pci_write,
};
int __attribute__((weak)) pci_fixup_pcic(struct pci_channel *chan)
{
/* Nothing to do. */
return 0;
}
| linux-master | arch/sh/drivers/pci/ops-sh4.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI operations for the Sega Dreamcast
*
* Copyright (C) 2001, 2002 M. R. Brown
* Copyright (C) 2002, 2003 Paul Mundt
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
#include <mach/pci.h>
/*
* The !gapspci_config_access case really shouldn't happen, ever, unless
* someone implicitly messes around with the last devfn value.. otherwise we
* only support a single device anyways, and if we didn't have a BBA, we
* wouldn't make it terribly far through the PCI setup anyways.
*
* Also, we could very easily support both Type 0 and Type 1 configurations
* here, but since it doesn't seem that there is any such implementation in
* existence, we don't bother.
*
* I suppose if someone actually gets around to ripping the chip out of
* the BBA and hanging some more devices off of it, then this might be
* something to take into consideration. However, due to the cost of the BBA,
* and the general lack of activity by DC hardware hackers, this doesn't seem
* likely to happen anytime soon.
*/
static int gapspci_config_access(unsigned char bus, unsigned int devfn)
{
return (bus == 0) && (devfn == 0);
}
/*
* We can also actually read and write in b/w/l sizes! Thankfully this part
* was at least done right, and we don't have to do the stupid masking and
* shifting that we do on the 7751! Small wonders never cease to amaze.
*/
static int gapspci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
{
*val = 0xffffffff;
if (!gapspci_config_access(bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1: *val = inb(GAPSPCI_BBA_CONFIG+where); break;
case 2: *val = inw(GAPSPCI_BBA_CONFIG+where); break;
case 4: *val = inl(GAPSPCI_BBA_CONFIG+where); break;
}
return PCIBIOS_SUCCESSFUL;
}
static int gapspci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
{
if (!gapspci_config_access(bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1: outb(( u8)val, GAPSPCI_BBA_CONFIG+where); break;
case 2: outw((u16)val, GAPSPCI_BBA_CONFIG+where); break;
case 4: outl((u32)val, GAPSPCI_BBA_CONFIG+where); break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops gapspci_pci_ops = {
.read = gapspci_read,
.write = gapspci_write,
};
| linux-master | arch/sh/drivers/pci/ops-dreamcast.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/kernel.h>
/*
* These functions are used early on before PCI scanning is done
* and all of the pci_dev and pci_bus structures have been created.
*/
static struct pci_dev *fake_pci_dev(struct pci_channel *hose,
int top_bus, int busnr, int devfn)
{
static struct pci_dev dev;
static struct pci_bus bus;
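	/*
	 * The static storage makes this non-reentrant, which is fine for
	 * the single-threaded early boot path it serves.
	 */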
dev.bus = &bus;
dev.sysdata = hose;
dev.devfn = devfn;
bus.number = busnr;
bus.sysdata = hose;
bus.ops = hose->pci_ops;
	if (busnr != top_bus)
/* Fake a parent bus structure. */
bus.parent = &bus;
else
bus.parent = NULL;
return &dev;
}
#define EARLY_PCI_OP(rw, size, type) \
int __init early_##rw##_config_##size(struct pci_channel *hose, \
int top_bus, int bus, int devfn, int offset, type value) \
{ \
return pci_##rw##_config_##size( \
fake_pci_dev(hose, top_bus, bus, devfn), \
offset, value); \
}
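/*
 * Each EARLY_PCI_OP() expansion below generates a wrapper such as
 * early_read_config_byte(hose, top_bus, bus, devfn, offset, u8 *value)
 * that funnels the access through a fake device into the regular
 * pci_read_config_byte() and friends.
 */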
EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
int __init pci_is_66mhz_capable(struct pci_channel *hose,
int top_bus, int current_bus)
{
u32 pci_devfn;
unsigned short vid;
int cap66 = -1;
u16 stat;
pr_info("PCI: Checking 66MHz capabilities...\n");
for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
if (PCI_FUNC(pci_devfn))
continue;
if (early_read_config_word(hose, top_bus, current_bus,
pci_devfn, PCI_VENDOR_ID, &vid) !=
PCIBIOS_SUCCESSFUL)
continue;
if (vid == 0xffff)
continue;
		/*
		 * The first responding device flips cap66 from "unknown"
		 * (-1) to "assumed capable"; any device below without the
		 * 66MHz status bit then vetoes it.
		 */
		if (cap66 < 0)
			cap66 = 1;
if (cap66) {
early_read_config_word(hose, top_bus, current_bus,
pci_devfn, PCI_STATUS, &stat);
if (!(stat & PCI_STATUS_66MHZ)) {
printk(KERN_DEBUG
"PCI: %02x:%02x not 66MHz capable.\n",
current_bus, pci_devfn);
cap66 = 0;
break;
}
}
}
return cap66 > 0;
}
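/*
 * IRQ backoff helpers: when a PCI error storm is detected the offending
 * IRQ is disabled (see pcibios_handle_status_errors() below) and one of
 * these timer callbacks re-enables it roughly a second later.
 */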
static void pcibios_enable_err(struct timer_list *t)
{
struct pci_channel *hose = from_timer(hose, t, err_timer);
del_timer(&hose->err_timer);
printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
enable_irq(hose->err_irq);
}
static void pcibios_enable_serr(struct timer_list *t)
{
struct pci_channel *hose = from_timer(hose, t, serr_timer);
del_timer(&hose->serr_timer);
printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
enable_irq(hose->serr_irq);
}
void pcibios_enable_timers(struct pci_channel *hose)
{
if (hose->err_irq) {
timer_setup(&hose->err_timer, pcibios_enable_err, 0);
}
if (hose->serr_irq) {
timer_setup(&hose->serr_timer, pcibios_enable_serr, 0);
}
}
/*
* A simple handler for the regular PCI status errors, called from IRQ
* context.
*/
unsigned int pcibios_handle_status_errors(unsigned long addr,
unsigned int status,
struct pci_channel *hose)
{
unsigned int cmd = 0;
if (status & PCI_STATUS_REC_MASTER_ABORT) {
printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", addr);
cmd |= PCI_STATUS_REC_MASTER_ABORT;
}
if (status & PCI_STATUS_REC_TARGET_ABORT) {
printk(KERN_DEBUG "PCI: target abort: ");
pcibios_report_status(PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT, 1);
pr_cont("\n");
cmd |= PCI_STATUS_REC_TARGET_ABORT;
}
if (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)) {
printk(KERN_DEBUG "PCI: parity error detected: ");
pcibios_report_status(PCI_STATUS_PARITY |
PCI_STATUS_DETECTED_PARITY, 1);
pr_cont("\n");
cmd |= PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY;
/* Now back off of the IRQ for awhile */
if (hose->err_irq) {
disable_irq_nosync(hose->err_irq);
hose->err_timer.expires = jiffies + HZ;
add_timer(&hose->err_timer);
}
}
return cmd;
}
| linux-master | arch/sh/drivers/pci/common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PCI support for the Sega Dreamcast
*
* Copyright (C) 2001, 2002 M. R. Brown
* Copyright (C) 2002, 2003 Paul Mundt
*
* This file originally bore the message (with enclosed-$):
* Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp
* Dreamcast PCI: Supports SEGA Broadband Adaptor only.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/pci.h>
static struct resource gapspci_resources[] = {
{
.name = "GAPSPCI IO",
.start = GAPSPCI_BBA_CONFIG,
.end = GAPSPCI_BBA_CONFIG + GAPSPCI_BBA_CONFIG_SIZE - 1,
.flags = IORESOURCE_IO,
}, {
.name = "GAPSPCI mem",
.start = GAPSPCI_DMA_BASE,
.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct pci_channel dreamcast_pci_controller = {
.pci_ops = &gapspci_pci_ops,
.resources = gapspci_resources,
.nr_resources = ARRAY_SIZE(gapspci_resources),
.io_offset = 0x00000000,
.mem_offset = 0x00000000,
};
/*
* gapspci init
*/
static int __init gapspci_init(void)
{
char idbuf[16];
int i;
/*
* FIXME: All of this wants documenting to some degree,
* even some basic register definitions would be nice.
*
* I haven't seen anything this ugly since.. maple.
*/
	for (i = 0; i < 16; i++)
idbuf[i] = inb(GAPSPCI_REGS+i);
if (strncmp(idbuf, "GAPSPCI_BRIDGE_2", 16))
return -ENODEV;
outl(0x5a14a501, GAPSPCI_REGS+0x18);
	for (i = 0; i < 1000000; i++)
cpu_relax();
if (inl(GAPSPCI_REGS+0x18) != 1)
return -EINVAL;
outl(0x01000000, GAPSPCI_REGS+0x20);
outl(0x01000000, GAPSPCI_REGS+0x24);
outl(GAPSPCI_DMA_BASE, GAPSPCI_REGS+0x28);
outl(GAPSPCI_DMA_BASE+GAPSPCI_DMA_SIZE, GAPSPCI_REGS+0x2c);
outl(1, GAPSPCI_REGS+0x14);
outl(1, GAPSPCI_REGS+0x34);
/* Setting Broadband Adapter */
outw(0xf900, GAPSPCI_BBA_CONFIG+0x06);
outl(0x00000000, GAPSPCI_BBA_CONFIG+0x30);
outb(0x00, GAPSPCI_BBA_CONFIG+0x3c);
outb(0xf0, GAPSPCI_BBA_CONFIG+0x0d);
outw(0x0006, GAPSPCI_BBA_CONFIG+0x04);
outl(0x00002001, GAPSPCI_BBA_CONFIG+0x10);
outl(0x01000000, GAPSPCI_BBA_CONFIG+0x14);
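	/*
	 * The offsets above are standard PCI config space registers:
	 * 0x06 status (write-1-to-clear), 0x30 expansion ROM, 0x3c
	 * interrupt line, 0x0d latency timer, 0x04 command (memory +
	 * bus master), 0x10 BAR0 and 0x14 BAR1.
	 */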
return register_pci_controller(&dreamcast_pci_controller);
}
arch_initcall(gapspci_init);
| linux-master | arch/sh/drivers/pci/pci-dreamcast.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Low-Level PCI Support for the SH7751
*
* Copyright (C) 2003 - 2009 Paul Mundt
* Copyright (C) 2001 Dustin McIntire
*
* With cleanup by Paul van Gool <[email protected]>, 2003.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include "pci-sh4.h"
#include <asm/addrspace.h>
#include <linux/sizes.h>
static int __init __area_sdram_check(struct pci_channel *chan,
unsigned int area)
{
unsigned long word;
word = __raw_readl(SH7751_BCR1);
/* check BCR for SDRAM in area */
if (((word >> area) & 1) == 0) {
printk("PCI: Area %d is not configured for SDRAM. BCR1=0x%lx\n",
area, word);
return 0;
}
pci_write_reg(chan, word, SH4_PCIBCR1);
word = __raw_readw(SH7751_BCR2);
/* check BCR2 for 32bit SDRAM interface*/
if (((word >> (area << 1)) & 0x3) != 0x3) {
printk("PCI: Area %d is not 32 bit SDRAM. BCR2=0x%lx\n",
area, word);
return 0;
}
pci_write_reg(chan, word, SH4_PCIBCR2);
return 1;
}
static struct resource sh7751_pci_resources[] = {
{
.name = "SH7751_IO",
.start = 0x1000,
.end = SZ_4M - 1,
.flags = IORESOURCE_IO
}, {
.name = "SH7751_mem",
.start = SH7751_PCI_MEMORY_BASE,
.end = SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1,
.flags = IORESOURCE_MEM
},
};
static struct pci_channel sh7751_pci_controller = {
.pci_ops = &sh4_pci_ops,
.resources = sh7751_pci_resources,
.nr_resources = ARRAY_SIZE(sh7751_pci_resources),
.mem_offset = 0x00000000,
.io_offset = 0x00000000,
.io_map_base = SH7751_PCI_IO_BASE,
};
static struct sh4_pci_address_map sh7751_pci_map = {
.window0 = {
.base = SH7751_CS3_BASE_ADDR,
.size = 0x04000000,
},
};
static int __init sh7751_pci_init(void)
{
struct pci_channel *chan = &sh7751_pci_controller;
unsigned int id;
u32 word, reg;
printk(KERN_NOTICE "PCI: Starting initialization.\n");
chan->reg_base = 0xfe200000;
/* check for SH7751/SH7751R hardware */
id = pci_read_reg(chan, SH7751_PCICONF0);
if (id != ((SH7751_DEVICE_ID << 16) | SH7751_VENDOR_ID) &&
id != ((SH7751R_DEVICE_ID << 16) | SH7751_VENDOR_ID)) {
pr_debug("PCI: This is not an SH7751(R) (%x)\n", id);
return -ENODEV;
}
/* Set the BCR's to enable PCI access */
reg = __raw_readl(SH7751_BCR1);
reg |= 0x80000;
__raw_writel(reg, SH7751_BCR1);
/* Turn the clocks back on (not done in reset)*/
pci_write_reg(chan, 0, SH4_PCICLKR);
/* Clear Powerdown IRQ's (not done in reset) */
word = SH4_PCIPINT_D3 | SH4_PCIPINT_D0;
pci_write_reg(chan, word, SH4_PCIPINT);
/* set the command/status bits to:
* Wait Cycle Control + Parity Enable + Bus Master +
* Mem space enable
*/
word = SH7751_PCICONF1_WCC | SH7751_PCICONF1_PER |
SH7751_PCICONF1_BUM | SH7751_PCICONF1_MES;
pci_write_reg(chan, word, SH7751_PCICONF1);
/* define this host as the host bridge */
word = PCI_BASE_CLASS_BRIDGE << 24;
pci_write_reg(chan, word, SH7751_PCICONF2);
/* Set IO and Mem windows to local address
* Make PCI and local address the same for easy 1 to 1 mapping
*/
word = sh7751_pci_map.window0.size - 1;
pci_write_reg(chan, word, SH4_PCILSR0);
/* Set the values on window 0 PCI config registers */
word = P2SEGADDR(sh7751_pci_map.window0.base);
pci_write_reg(chan, word, SH4_PCILAR0);
pci_write_reg(chan, word, SH7751_PCICONF5);
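	/*
	 * With the 64MB window defined above this programs SH4_PCILSR0
	 * with 0x03ffffff and mirrors the P2SEG CS3 base into both the
	 * local and PCI-visible window registers for a 1:1 mapping.
	 */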
/* Set the local 16MB PCI memory space window to
* the lowest PCI mapped address
*/
word = chan->resources[1].start & SH4_PCIMBR_MASK;
pr_debug("PCI: Setting upper bits of Memory window to 0x%x\n", word);
	pci_write_reg(chan, word, SH4_PCIMBR);
/* Make sure the MSB's of IO window are set to access PCI space
* correctly */
word = chan->resources[0].start & SH4_PCIIOBR_MASK;
pr_debug("PCI: Setting upper bits of IO window to 0x%x\n", word);
pci_write_reg(chan, word, SH4_PCIIOBR);
/* Set PCI WCRx, BCRx's, copy from BSC locations */
/* check BCR for SDRAM in specified area */
switch (sh7751_pci_map.window0.base) {
case SH7751_CS0_BASE_ADDR: word = __area_sdram_check(chan, 0); break;
case SH7751_CS1_BASE_ADDR: word = __area_sdram_check(chan, 1); break;
case SH7751_CS2_BASE_ADDR: word = __area_sdram_check(chan, 2); break;
case SH7751_CS3_BASE_ADDR: word = __area_sdram_check(chan, 3); break;
case SH7751_CS4_BASE_ADDR: word = __area_sdram_check(chan, 4); break;
case SH7751_CS5_BASE_ADDR: word = __area_sdram_check(chan, 5); break;
case SH7751_CS6_BASE_ADDR: word = __area_sdram_check(chan, 6); break;
}
if (!word)
return -1;
/* configure the wait control registers */
word = __raw_readl(SH7751_WCR1);
pci_write_reg(chan, word, SH4_PCIWCR1);
word = __raw_readl(SH7751_WCR2);
pci_write_reg(chan, word, SH4_PCIWCR2);
word = __raw_readl(SH7751_WCR3);
pci_write_reg(chan, word, SH4_PCIWCR3);
word = __raw_readl(SH7751_MCR);
pci_write_reg(chan, word, SH4_PCIMCR);
/* NOTE: I'm ignoring the PCI error IRQs for now..
* TODO: add support for the internal error interrupts and
* DMA interrupts...
*/
pci_fixup_pcic(chan);
/* SH7751 init done, set central function init complete */
	/* use round robin mode to stop a device starving/overrunning */
word = SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_ARBM;
pci_write_reg(chan, word, SH4_PCICR);
return register_pci_controller(chan);
}
arch_initcall(sh7751_pci_init);
| linux-master | arch/sh/drivers/pci/pci-sh7751.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/fixups-dreamcast.c
*
* PCI fixups for the Sega Dreamcast
*
* Copyright (C) 2001, 2002 M. R. Brown
* Copyright (C) 2002, 2003, 2006 Paul Mundt
*
* This file originally bore the message (with enclosed-$):
* Id: pci.c,v 1.3 2003/05/04 19:29:46 lethal Exp
* Dreamcast PCI: Supports SEGA Broadband Adaptor only.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/dma-map-ops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/pci.h>
static void gapspci_fixup_resources(struct pci_dev *dev)
{
struct pci_channel *p = dev->sysdata;
struct resource res;
struct pci_bus_region region;
printk(KERN_NOTICE "PCI: Fixing up device %s\n", pci_name(dev));
switch (dev->device) {
case PCI_DEVICE_ID_SEGA_BBA:
/*
* We also assume that dev->devfn == 0
*/
dev->resource[1].start = p->resources[0].start + 0x100;
dev->resource[1].end = dev->resource[1].start + 0x200 - 1;
/*
* This is not a normal BAR, prevent any attempts to move
* the BAR, as this will result in a bus lock.
*/
dev->resource[1].flags |= IORESOURCE_PCI_FIXED;
/*
* Redirect dma memory allocations to special memory window.
*
* If this GAPSPCI region were mapped by a BAR, the CPU
* phys_addr_t would be pci_resource_start(), and the bus
* address would be pci_bus_address(pci_resource_start()).
* But apparently there's no BAR mapping it, so we just
* "know" its CPU address is GAPSPCI_DMA_BASE.
*/
res.start = GAPSPCI_DMA_BASE;
res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1;
res.flags = IORESOURCE_MEM;
pcibios_resource_to_bus(dev->bus, ®ion, &res);
BUG_ON(dma_declare_coherent_memory(&dev->dev,
res.start,
region.start,
resource_size(&res)));
break;
default:
printk("PCI: Failed resource fixup\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, gapspci_fixup_resources);
int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
* The interrupt routing semantics here are quite trivial.
*
* We basically only support one interrupt, so we only bother
* updating a device's interrupt line with this single shared
* interrupt. Keeps routing quite simple, doesn't it?
*/
return GAPSPCI_IRQ;
}
| linux-master | arch/sh/drivers/pci/fixups-dreamcast.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sh_intc.h>
int pcibios_map_platform_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
if (dev->bus->number == 0) {
switch (slot) {
case 4: return evt2irq(0x2a0); /* eth0 */
case 8: return evt2irq(0x2a0); /* eth1 */
case 6: return evt2irq(0x240); /* PCI bridge */
default:
printk(KERN_ERR "PCI: Bad IRQ mapping request "
"for slot %d\n", slot);
return evt2irq(0x240);
}
} else {
switch (pin) {
case 0: irq = evt2irq(0x240); break;
case 1: irq = evt2irq(0x240); break;
case 2: irq = evt2irq(0x240); break;
case 3: irq = evt2irq(0x240); break;
case 4: irq = evt2irq(0x240); break;
default: irq = -1; break;
}
}
return irq;
}
| linux-master | arch/sh/drivers/pci/fixups-sh03.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/ops-snapgear.c
*
* Author: David McCullough <[email protected]>
*
* Ported to new API by Paul Mundt <[email protected]>
*
* Highly leveraged from pci-bigsur.c, written by Dustin McIntire.
*
* PCI initialization for the SnapGear boards
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/sh_intc.h>
#include "pci-sh4.h"
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
int irq = -1;
switch (slot) {
case 8: /* the PCI bridge */ break;
case 11: irq = evt2irq(0x300); break; /* USB */
case 12: irq = evt2irq(0x360); break; /* PCMCIA */
case 13: irq = evt2irq(0x2a0); break; /* eth0 */
case 14: irq = evt2irq(0x300); break; /* eth1 */
case 15: irq = evt2irq(0x360); break; /* safenet (unused) */
}
printk("PCI: Mapping SnapGear IRQ for slot %d, pin %c to irq %d\n",
slot, pin - 1 + 'A', irq);
return irq;
}
| linux-master | arch/sh/drivers/pci/fixups-snapgear.c |
// SPDX-License-Identifier: GPL-2.0
/*
* New-style PCI core.
*
* Copyright (c) 2004 - 2009 Paul Mundt
* Copyright (c) 2002 M. R. Brown
*
* Modelled after arch/mips/pci/pci.c:
* Copyright (C) 2003, 04 Ralf Baechle ([email protected])
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/export.h>
unsigned long PCIBIOS_MIN_IO = 0x0000;
unsigned long PCIBIOS_MIN_MEM = 0;
/*
* The PCI controller list.
*/
static struct pci_channel *hose_head, **hose_tail = &hose_head;
static int pci_initialized;
static void pcibios_scanbus(struct pci_channel *hose)
{
static int next_busno;
static int need_domain_info;
LIST_HEAD(resources);
struct resource *res;
resource_size_t offset;
int i, ret;
struct pci_host_bridge *bridge;
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return;
for (i = 0; i < hose->nr_resources; i++) {
res = hose->resources + i;
offset = 0;
if (res->flags & IORESOURCE_DISABLED)
continue;
if (res->flags & IORESOURCE_IO)
offset = hose->io_offset;
else if (res->flags & IORESOURCE_MEM)
offset = hose->mem_offset;
pci_add_resource_offset(&resources, res, offset);
}
list_splice_init(&resources, &bridge->windows);
bridge->dev.parent = NULL;
bridge->sysdata = hose;
bridge->busnr = next_busno;
bridge->ops = hose->pci_ops;
bridge->swizzle_irq = pci_common_swizzle;
bridge->map_irq = pcibios_map_platform_irq;
ret = pci_scan_root_bus_bridge(bridge);
if (ret) {
pci_free_host_bridge(bridge);
return;
}
hose->bus = bridge->bus;
need_domain_info = need_domain_info || hose->index;
hose->need_domain_info = need_domain_info;
next_busno = hose->bus->busn_res.end + 1;
	/*
	 * Don't allow 8-bit bus number overflow inside the hose -
	 * reserve some space for bridges.
	 */
if (next_busno > 224) {
next_busno = 0;
need_domain_info = 1;
}
pci_bus_size_bridges(hose->bus);
pci_bus_assign_resources(hose->bus);
pci_bus_add_devices(hose->bus);
}
/*
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
*/
DEFINE_RAW_SPINLOCK(pci_config_lock);
static DEFINE_MUTEX(pci_scan_mutex);
int register_pci_controller(struct pci_channel *hose)
{
int i;
for (i = 0; i < hose->nr_resources; i++) {
struct resource *res = hose->resources + i;
if (res->flags & IORESOURCE_DISABLED)
continue;
if (res->flags & IORESOURCE_IO) {
if (request_resource(&ioport_resource, res) < 0)
goto out;
} else {
if (request_resource(&iomem_resource, res) < 0)
goto out;
}
}
*hose_tail = hose;
hose_tail = &hose->next;
/*
* Do not panic here but later - this might happen before console init.
*/
if (!hose->io_map_base) {
pr_warn("registering PCI controller with io_map_base unset\n");
}
/*
* Setup the ERR/PERR and SERR timers, if available.
*/
pcibios_enable_timers(hose);
/*
	 * Scan the bus now if the controller was registered after the PCI
	 * subsystem was initialized.
*/
if (pci_initialized) {
mutex_lock(&pci_scan_mutex);
pcibios_scanbus(hose);
mutex_unlock(&pci_scan_mutex);
}
return 0;
out:
for (--i; i >= 0; i--)
release_resource(&hose->resources[i]);
pr_warn("Skipping PCI bus scan due to resource conflict\n");
return -1;
}
static int __init pcibios_init(void)
{
struct pci_channel *hose;
/* Scan all of the recorded PCI controllers. */
for (hose = hose_head; hose; hose = hose->next)
pcibios_scanbus(hose);
pci_initialized = 1;
return 0;
}
subsys_initcall(pcibios_init);
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*/
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pci_dev *dev = data;
struct pci_channel *hose = dev->sysdata;
resource_size_t start = res->start;
if (res->flags & IORESOURCE_IO) {
if (start < PCIBIOS_MIN_IO + hose->resources[0].start)
start = PCIBIOS_MIN_IO + hose->resources[0].start;
/*
* Put everything into 0x00-0xff region modulo 0x400.
*/
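		/*
		 * e.g. a start of 0x3650 is bumped to the next 0x400
		 * boundary, 0x3800, which lies back inside that region.
		 */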
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
}
return start;
}
static void __init
pcibios_bus_report_status_early(struct pci_channel *hose,
int top_bus, int current_bus,
unsigned int status_mask, int warn)
{
unsigned int pci_devfn;
u16 status;
int ret;
for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
if (PCI_FUNC(pci_devfn))
continue;
ret = early_read_config_word(hose, top_bus, current_bus,
pci_devfn, PCI_STATUS, &status);
if (ret != PCIBIOS_SUCCESSFUL)
continue;
if (status == 0xffff)
continue;
early_write_config_word(hose, top_bus, current_bus,
pci_devfn, PCI_STATUS,
status & status_mask);
if (warn)
pr_cont("(%02x:%02x: %04X) ", current_bus, pci_devfn,
status);
}
}
/*
* We can't use pci_find_device() here since we are
* called from interrupt context.
*/
static void __ref
pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
int warn)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 status;
/*
* ignore host bridge - we handle
* that separately
*/
if (dev->bus->number == 0 && dev->devfn == 0)
continue;
pci_read_config_word(dev, PCI_STATUS, &status);
if (status == 0xffff)
continue;
if ((status & status_mask) == 0)
continue;
/* clear the status errors */
pci_write_config_word(dev, PCI_STATUS, status & status_mask);
if (warn)
pr_cont("(%s: %04X) ", pci_name(dev), status);
}
list_for_each_entry(dev, &bus->devices, bus_list)
if (dev->subordinate)
pcibios_bus_report_status(dev->subordinate, status_mask, warn);
}
void __ref pcibios_report_status(unsigned int status_mask, int warn)
{
struct pci_channel *hose;
for (hose = hose_head; hose; hose = hose->next) {
if (unlikely(!hose->bus))
pcibios_bus_report_status_early(hose, hose_head->index,
hose->index, status_mask, warn);
else
pcibios_bus_report_status(hose->bus, status_mask, warn);
}
}
#ifndef CONFIG_GENERIC_IOMAP
void __iomem *__pci_ioport_map(struct pci_dev *dev,
unsigned long port, unsigned int nr)
{
struct pci_channel *chan = dev->sysdata;
if (unlikely(!chan->io_map_base)) {
chan->io_map_base = sh_io_port_base;
if (pci_domains_supported)
panic("To avoid data corruption io_map_base MUST be "
"set with multiple PCI domains.");
}
return (void __iomem *)(chan->io_map_base + port);
}
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_GENERIC_IOMAP */
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
| linux-master | arch/sh/drivers/pci/pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/fixups-rts7751r2d.c
*
* RTS7751R2D / LBOXRE2 PCI fixups
*
* Copyright (C) 2003 Lineo uSolutions, Inc.
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*/
#include <linux/pci.h>
#include <mach/lboxre2.h>
#include <mach/r2d.h>
#include "pci-sh4.h"
#include <generated/machtypes.h>
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
static u8 rts7751r2d_irq_tab[] = {
IRQ_PCI_INTA,
IRQ_PCI_INTB,
IRQ_PCI_INTC,
IRQ_PCI_INTD,
};
static char lboxre2_irq_tab[] = {
IRQ_ETH0, IRQ_ETH1, IRQ_INTA, IRQ_INTD,
};
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
if (mach_is_lboxre2())
return lboxre2_irq_tab[slot];
else
return rts7751r2d_irq_tab[slot];
}
int pci_fixup_pcic(struct pci_channel *chan)
{
unsigned long bcr1, mcr;
bcr1 = __raw_readl(SH7751_BCR1);
bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
pci_write_reg(chan, bcr1, SH4_PCIBCR1);
	/* Enable all interrupts, so we know what to fix */
pci_write_reg(chan, 0x0000c3ff, SH4_PCIINTM);
pci_write_reg(chan, 0x0000380f, SH4_PCIAINTM);
pci_write_reg(chan, 0xfb900047, SH7751_PCICONF1);
pci_write_reg(chan, 0xab000001, SH7751_PCICONF4);
mcr = __raw_readl(SH7751_MCR);
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
pci_write_reg(chan, mcr, SH4_PCIMCR);
pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5);
pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6);
pci_write_reg(chan, 0x0c000000, SH4_PCILAR0);
pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
return 0;
}
| linux-master | arch/sh/drivers/pci/fixups-rts7751r2d.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Low-Level PCI Express Support for the SH7786
*
* Copyright (C) 2009 - 2011 Paul Mundt
*/
#define pr_fmt(fmt) "PCI: " fmt
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
#include <linux/sh_intc.h>
#include <cpu/sh7786.h>
#include "pcie-sh7786.h"
#include <linux/sizes.h>
struct sh7786_pcie_port {
struct pci_channel *hose;
	struct clk *fclk;	/* port function clock, from clk_get() */
	struct clk phy_clk;	/* PHY clock, constructed in pcie_clk_init() */
unsigned int index;
int endpoint;
int link;
};
static struct sh7786_pcie_port *sh7786_pcie_ports;
static unsigned int nr_ports;
size_t memsize;
u64 memstart;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
async_func_t port_init_hw;
} *sh7786_pcie_hwops;
static struct resource sh7786_pci0_resources[] = {
{
.name = "PCIe0 MEM 0",
.start = 0xfd000000,
.end = 0xfd000000 + SZ_8M - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "PCIe0 MEM 1",
.start = 0xc0000000,
.end = 0xc0000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
.name = "PCIe0 MEM 2",
.start = 0x10000000,
.end = 0x10000000 + SZ_64M - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "PCIe0 IO",
.start = 0xfe100000,
.end = 0xfe100000 + SZ_1M - 1,
.flags = IORESOURCE_IO,
},
};
static struct resource sh7786_pci1_resources[] = {
{
.name = "PCIe1 MEM 0",
.start = 0xfd800000,
.end = 0xfd800000 + SZ_8M - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "PCIe1 MEM 1",
.start = 0xa0000000,
.end = 0xa0000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
.name = "PCIe1 MEM 2",
.start = 0x30000000,
.end = 0x30000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
.name = "PCIe1 IO",
.start = 0xfe300000,
.end = 0xfe300000 + SZ_1M - 1,
.flags = IORESOURCE_IO,
},
};
static struct resource sh7786_pci2_resources[] = {
{
.name = "PCIe2 MEM 0",
.start = 0xfc800000,
.end = 0xfc800000 + SZ_4M - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "PCIe2 MEM 1",
.start = 0x80000000,
.end = 0x80000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
.name = "PCIe2 MEM 2",
.start = 0x20000000,
.end = 0x20000000 + SZ_256M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
}, {
.name = "PCIe2 IO",
.start = 0xfcd00000,
.end = 0xfcd00000 + SZ_1M - 1,
.flags = IORESOURCE_IO,
},
};
extern struct pci_ops sh7786_pci_ops;
#define DEFINE_CONTROLLER(start, idx) \
{ \
.pci_ops = &sh7786_pci_ops, \
.resources = sh7786_pci##idx##_resources, \
.nr_resources = ARRAY_SIZE(sh7786_pci##idx##_resources), \
.reg_base = start, \
.mem_offset = 0, \
.io_offset = 0, \
}
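/*
 * e.g. DEFINE_CONTROLLER(0xfe000000, 0) produces a channel wired to
 * sh7786_pci0_resources with its register block at 0xfe000000.
 */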
static struct pci_channel sh7786_pci_channels[] = {
DEFINE_CONTROLLER(0xfe000000, 0),
DEFINE_CONTROLLER(0xfe200000, 1),
DEFINE_CONTROLLER(0xfcc00000, 2),
};
static struct clk fixed_pciexclkp = {
.rate = 100000000, /* 100 MHz reference clock */
};
static void sh7786_pci_fixup(struct pci_dev *dev)
{
/*
* Prevent enumeration of root complex resources.
*/
if (pci_is_root_bus(dev->bus) && dev->devfn == 0) {
struct resource *r;
pci_dev_for_each_resource(dev, r) {
r->start = 0;
r->end = 0;
r->flags = 0;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_SH7786,
sh7786_pci_fixup);
static int __init phy_wait_for_ack(struct pci_channel *chan)
{
unsigned int timeout = 100;
while (timeout--) {
if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
return 0;
udelay(100);
}
return -ETIMEDOUT;
}
static int __init pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
{
unsigned int timeout = 100;
while (timeout--) {
if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
return 0;
udelay(100);
}
return -ETIMEDOUT;
}
static void __init phy_write_reg(struct pci_channel *chan, unsigned int addr,
unsigned int lane, unsigned int data)
{
unsigned long phyaddr;
phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
((addr & 0xff) << BITS_ADR);
/* Set write data */
pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);
phy_wait_for_ack(chan);
/* Clear command */
pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);
phy_wait_for_ack(chan);
}
static int __init pcie_clk_init(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
struct clk *clk;
char fclk_name[16];
int ret;
/*
* First register the fixed clock
*/
ret = clk_register(&fixed_pciexclkp);
if (unlikely(ret != 0))
return ret;
/*
* Grab the port's function clock, which the PHY clock depends
* on. clock lookups don't help us much at this point, since no
* dev_id is available this early. Lame.
*/
snprintf(fclk_name, sizeof(fclk_name), "pcie%d_fck", port->index);
port->fclk = clk_get(NULL, fclk_name);
if (IS_ERR(port->fclk)) {
ret = PTR_ERR(port->fclk);
goto err_fclk;
}
clk_enable(port->fclk);
/*
* And now, set up the PHY clock
*/
clk = &port->phy_clk;
memset(clk, 0, sizeof(struct clk));
clk->parent = &fixed_pciexclkp;
clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
clk->enable_bit = BITS_CKE;
ret = sh_clk_mstp_register(clk, 1);
if (unlikely(ret < 0))
goto err_phy;
return 0;
err_phy:
clk_disable(port->fclk);
clk_put(port->fclk);
err_fclk:
clk_unregister(&fixed_pciexclkp);
return ret;
}
static int __init phy_init(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
unsigned int timeout = 100;
clk_enable(&port->phy_clk);
/* Initialize the phy */
phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
phy_write_reg(chan, 0x65, 0xf, 0x09070907);
phy_write_reg(chan, 0x66, 0xf, 0x00000010);
phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
phy_write_reg(chan, 0xb0, 0xf, 0x00000610);
/* Deassert Standby */
phy_write_reg(chan, 0x67, 0x1, 0x00000400);
/* Disable clock */
clk_disable(&port->phy_clk);
while (timeout--) {
if (pci_read_reg(chan, SH4A_PCIEPHYSR))
return 0;
udelay(100);
}
return -ETIMEDOUT;
}
static void __init pcie_reset(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
pci_write_reg(chan, 1, SH4A_PCIESRSTR);
pci_write_reg(chan, 0, SH4A_PCIETCTLR);
pci_write_reg(chan, 0, SH4A_PCIESRSTR);
pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);
}
static int __init pcie_init(struct sh7786_pcie_port *port)
{
struct pci_channel *chan = port->hose;
unsigned int data;
phys_addr_t memstart, memend;
int ret, i, win;
/* Begin initialization */
pcie_reset(port);
/*
* Initial header for port config space is type 1, set the device
* class to match. Hardware takes care of propagating the IDSETR
* settings, so there is no need to bother with a quirk.
*/
pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI_NORMAL << 8, SH4A_PCIEIDSETR1);
/* Initialize default capabilities. */
data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
data &= ~(PCI_EXP_FLAGS_TYPE << 16);
if (port->endpoint)
data |= PCI_EXP_TYPE_ENDPOINT << 20;
else
data |= PCI_EXP_TYPE_ROOT_PORT << 20;
data |= PCI_CAP_ID_EXP;
pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);
/* Enable data link layer active state reporting */
pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);
/* Enable extended sync and ASPM L0s support */
data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
data &= ~PCI_EXP_LNKCTL_ASPMC;
data |= PCI_EXP_LNKCTL_ES | 1;
pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);
/* Write out the physical slot number */
data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
data &= ~PCI_EXP_SLTCAP_PSN;
data |= (port->index + 1) << 19;
pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);
/* Set the completion timer timeout to the maximum 32ms. */
data = pci_read_reg(chan, SH4A_PCIETLCTLR);
data &= ~0x3f00;
data |= 0x32 << 8;
pci_write_reg(chan, data, SH4A_PCIETLCTLR);
/*
* Set fast training sequences to the maximum 255,
* and enable MAC data scrambling.
*/
data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
data &= ~PCIEMACCTLR_SCR_DIS;
data |= (0xff << 16);
pci_write_reg(chan, data, SH4A_PCIEMACCTLR);
memstart = __pa(memory_start);
memend = __pa(memory_end);
memsize = roundup_pow_of_two(memend - memstart);
/*
* The start address must be aligned on its size. So we round
* it down, and then recalculate the size so that it covers
* the entire memory.
*/
memstart = ALIGN_DOWN(memstart, memsize);
memsize = roundup_pow_of_two(memend - memstart);
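	/*
	 * e.g. RAM at 0x48000000-0x50000000: the first pass gives a 128MB
	 * window; 0x48000000 is already 128MB-aligned, so the second pass
	 * leaves both values unchanged. If the region had straddled an
	 * alignment boundary, the window would grow to cover it.
	 */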
/*
* If there's more than 512MB of memory, we need to roll over to
* LAR1/LAMR1.
*/
if (memsize > SZ_512M) {
pci_write_reg(chan, memstart + SZ_512M, SH4A_PCIELAR1);
pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1,
SH4A_PCIELAMR1);
memsize = SZ_512M;
} else {
/*
* Otherwise just zero it out and disable it.
*/
pci_write_reg(chan, 0, SH4A_PCIELAR1);
pci_write_reg(chan, 0, SH4A_PCIELAMR1);
}
/*
* LAR0/LAMR0 covers up to the first 512MB, which is enough to
* cover all of lowmem on most platforms.
*/
pci_write_reg(chan, memstart, SH4A_PCIELAR0);
pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0);
/* Finish initialization */
data = pci_read_reg(chan, SH4A_PCIETCTLR);
data |= 0x1;
pci_write_reg(chan, data, SH4A_PCIETCTLR);
/* Let things settle down a bit.. */
mdelay(100);
/* Enable DL_Active Interrupt generation */
data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
data |= PCIEDLINTENR_DLL_ACT_ENABLE;
pci_write_reg(chan, data, SH4A_PCIEDLINTENR);
/* Disable MAC data scrambling. */
data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
pci_write_reg(chan, data, SH4A_PCIEMACCTLR);
/*
* This will timeout if we don't have a link, but we permit the
* port to register anyways in order to support hotplug on future
* hardware.
*/
ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);
data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
pci_write_reg(chan, data, SH4A_PCIEPCICONF1);
pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);
wmb();
if (ret == 0) {
data = pci_read_reg(chan, SH4A_PCIEMACSR);
printk(KERN_NOTICE "PCI: PCIe#%d x%d link detected\n",
port->index, (data >> 20) & 0x3f);
} else
printk(KERN_NOTICE "PCI: PCIe#%d link down\n",
port->index);
for (i = win = 0; i < chan->nr_resources; i++) {
struct resource *res = chan->resources + i;
resource_size_t size;
u32 mask;
/*
* We can't use the 32-bit mode windows in legacy 29-bit
* mode, so just skip them entirely.
*/
if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
res->flags |= IORESOURCE_DISABLED;
if (res->flags & IORESOURCE_DISABLED)
continue;
pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));
/*
* The PAMR mask is calculated in units of 256kB, which
* keeps things pretty simple.
*/
size = resource_size(res);
mask = (roundup_pow_of_two(size) / SZ_256K) - 1;
pci_write_reg(chan, mask << 18, SH4A_PCIEPAMR(win));
pci_write_reg(chan, upper_32_bits(res->start),
SH4A_PCIEPARH(win));
pci_write_reg(chan, lower_32_bits(res->start),
SH4A_PCIEPARL(win));
mask = MASK_PARE;
if (res->flags & IORESOURCE_IO)
mask |= MASK_SPC;
pci_write_reg(chan, mask, SH4A_PCIEPTCTLR(win));
win++;
}
return 0;
}
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
return evt2irq(0xae0);
}
void pcibios_bus_add_device(struct pci_dev *pdev)
{
dma_direct_set_offset(&pdev->dev, __pa(memory_start),
__pa(memory_start) - memstart, memsize);
}
static int __init sh7786_pcie_core_init(void)
{
/* Return the number of ports */
return test_mode_pin(MODE_PIN12) ? 3 : 2;
}
static void __init sh7786_pcie_init_hw(void *data, async_cookie_t cookie)
{
struct sh7786_pcie_port *port = data;
int ret;
/*
* Check if we are configured in endpoint or root complex mode,
* this is a fixed pin setting that applies to all PCIe ports.
*/
port->endpoint = test_mode_pin(MODE_PIN11);
/*
* Setup clocks, needed both for PHY and PCIe registers.
*/
ret = pcie_clk_init(port);
if (unlikely(ret < 0)) {
pr_err("clock initialization failed for port#%d\n",
port->index);
return;
}
ret = phy_init(port);
if (unlikely(ret < 0)) {
pr_err("phy initialization failed for port#%d\n",
port->index);
return;
}
ret = pcie_init(port);
if (unlikely(ret < 0)) {
pr_err("core initialization failed for port#%d\n",
port->index);
return;
}
/* In the interest of preserving device ordering, synchronize */
async_synchronize_cookie(cookie);
register_pci_controller(port->hose);
}
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
.core_init = sh7786_pcie_core_init,
.port_init_hw = sh7786_pcie_init_hw,
};
static int __init sh7786_pcie_init(void)
{
struct clk *platclk;
u32 mm_sel;
int i;
printk(KERN_NOTICE "PCI: Starting initialization.\n");
sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;
nr_ports = sh7786_pcie_hwops->core_init();
BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));
if (unlikely(nr_ports == 0))
return -ENODEV;
sh7786_pcie_ports = kcalloc(nr_ports, sizeof(struct sh7786_pcie_port),
GFP_KERNEL);
if (unlikely(!sh7786_pcie_ports))
return -ENOMEM;
/*
* Fetch any optional platform clock associated with this block.
*
* This is a rather nasty hack for boards with spec-mocking FPGAs
* that have a secondary set of clocks outside of the on-chip
* ones that need to be accounted for before there is any chance
* of touching the existing MSTP bits or CPG clocks.
*/
platclk = clk_get(NULL, "pcie_plat_clk");
if (IS_ERR(platclk)) {
/* Sane hardware should probably get a WARN_ON.. */
platclk = NULL;
}
clk_enable(platclk);
mm_sel = sh7786_mm_sel();
/*
* Depending on the MMSELR register value, the PCIe0 MEM 1
* area may not be available. See Table 13.11 of the SH7786
* datasheet.
*/
if (mm_sel != 1 && mm_sel != 2 && mm_sel != 5 && mm_sel != 6)
sh7786_pci0_resources[2].flags |= IORESOURCE_DISABLED;
printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);
for (i = 0; i < nr_ports; i++) {
struct sh7786_pcie_port *port = sh7786_pcie_ports + i;
port->index = i;
port->hose = sh7786_pci_channels + i;
port->hose->io_map_base = port->hose->resources[0].start;
async_schedule(sh7786_pcie_hwops->port_init_hw, port);
}
async_synchronize_full();
return 0;
}
arch_initcall(sh7786_pcie_init);
| linux-master | arch/sh/drivers/pci/pcie-sh7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Low-Level PCI Support for the SH7780
*
* Copyright (C) 2005 - 2010 Paul Mundt
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include "pci-sh4.h"
#include <asm/mmu.h>
#include <linux/sizes.h>
#if defined(CONFIG_CPU_BIG_ENDIAN)
# define PCICR_ENDIANNESS SH4_PCICR_BSWP
#else
# define PCICR_ENDIANNESS 0
#endif
static struct resource sh7785_pci_resources[] = {
{
.name = "PCI IO",
.start = 0x1000,
.end = SZ_4M - 1,
.flags = IORESOURCE_IO,
}, {
.name = "PCI MEM 0",
.start = 0xfd000000,
.end = 0xfd000000 + SZ_16M - 1,
.flags = IORESOURCE_MEM,
}, {
.name = "PCI MEM 1",
.start = 0x10000000,
.end = 0x10000000 + SZ_64M - 1,
.flags = IORESOURCE_MEM,
}, {
/*
* 32-bit only resources must be last.
*/
.name = "PCI MEM 2",
.start = 0xc0000000,
.end = 0xc0000000 + SZ_512M - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
},
};
static struct pci_channel sh7780_pci_controller = {
.pci_ops = &sh4_pci_ops,
.resources = sh7785_pci_resources,
.nr_resources = ARRAY_SIZE(sh7785_pci_resources),
.io_offset = 0,
.mem_offset = 0,
.io_map_base = 0xfe200000,
.serr_irq = evt2irq(0xa00),
.err_irq = evt2irq(0xaa0),
};
struct pci_errors {
unsigned int mask;
const char *str;
} pci_arbiter_errors[] = {
{ SH4_PCIAINT_MBKN, "master broken" },
{ SH4_PCIAINT_TBTO, "target bus time out" },
{ SH4_PCIAINT_MBTO, "master bus time out" },
{ SH4_PCIAINT_TABT, "target abort" },
{ SH4_PCIAINT_MABT, "master abort" },
{ SH4_PCIAINT_RDPE, "read data parity error" },
{ SH4_PCIAINT_WDPE, "write data parity error" },
}, pci_interrupt_errors[] = {
{ SH4_PCIINT_MLCK, "master lock error" },
{ SH4_PCIINT_TABT, "target-target abort" },
{ SH4_PCIINT_TRET, "target retry time out" },
{ SH4_PCIINT_MFDE, "master function disable error" },
{ SH4_PCIINT_PRTY, "address parity error" },
{ SH4_PCIINT_SERR, "SERR" },
{ SH4_PCIINT_TWDP, "data parity error for target write" },
{ SH4_PCIINT_TRDP, "PERR detected for target read" },
{ SH4_PCIINT_MTABT, "target abort for master" },
{ SH4_PCIINT_MMABT, "master abort for master" },
{ SH4_PCIINT_MWPD, "master write data parity error" },
{ SH4_PCIINT_MRPD, "master read data parity error" },
};
static irqreturn_t sh7780_pci_err_irq(int irq, void *dev_id)
{
struct pci_channel *hose = dev_id;
unsigned long addr;
unsigned int status;
unsigned int cmd;
int i;
addr = __raw_readl(hose->reg_base + SH4_PCIALR);
/*
* Handle status errors.
*/
status = __raw_readw(hose->reg_base + PCI_STATUS);
if (status & (PCI_STATUS_PARITY |
PCI_STATUS_DETECTED_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT)) {
cmd = pcibios_handle_status_errors(addr, status, hose);
if (likely(cmd))
__raw_writew(cmd, hose->reg_base + PCI_STATUS);
}
/*
* Handle arbiter errors.
*/
status = __raw_readl(hose->reg_base + SH4_PCIAINT);
for (i = cmd = 0; i < ARRAY_SIZE(pci_arbiter_errors); i++) {
if (status & pci_arbiter_errors[i].mask) {
printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
pci_arbiter_errors[i].str, addr);
cmd |= pci_arbiter_errors[i].mask;
}
}
__raw_writel(cmd, hose->reg_base + SH4_PCIAINT);
/*
* Handle the remaining PCI errors.
*/
status = __raw_readl(hose->reg_base + SH4_PCIINT);
for (i = cmd = 0; i < ARRAY_SIZE(pci_interrupt_errors); i++) {
if (status & pci_interrupt_errors[i].mask) {
printk(KERN_DEBUG "PCI: %s, addr=%08lx\n",
pci_interrupt_errors[i].str, addr);
cmd |= pci_interrupt_errors[i].mask;
}
}
__raw_writel(cmd, hose->reg_base + SH4_PCIINT);
return IRQ_HANDLED;
}
static irqreturn_t sh7780_pci_serr_irq(int irq, void *dev_id)
{
struct pci_channel *hose = dev_id;
printk(KERN_DEBUG "PCI: system error received: ");
pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1);
pr_cont("\n");
/* Deassert SERR */
__raw_writel(SH4_PCIINTM_SDIM, hose->reg_base + SH4_PCIINTM);
/* Back off the IRQ for awhile */
disable_irq_nosync(irq);
hose->serr_timer.expires = jiffies + HZ;
add_timer(&hose->serr_timer);
return IRQ_HANDLED;
}
static int __init sh7780_pci_setup_irqs(struct pci_channel *hose)
{
int ret;
/* Clear out PCI arbiter IRQs */
__raw_writel(0, hose->reg_base + SH4_PCIAINT);
/* Clear all error conditions */
__raw_writew(PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | \
PCI_STATUS_REC_MASTER_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | \
PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_PARITY, hose->reg_base + PCI_STATUS);
ret = request_irq(hose->serr_irq, sh7780_pci_serr_irq, 0,
"PCI SERR interrupt", hose);
if (unlikely(ret)) {
pr_err("PCI: Failed hooking SERR IRQ\n");
return ret;
}
/*
* The PCI ERR IRQ needs to be IRQF_SHARED since all of the power
* down IRQ vectors are routed through the ERR IRQ vector. We
* only request_irq() once as there is only a single masking
* source for multiple events.
*/
ret = request_irq(hose->err_irq, sh7780_pci_err_irq, IRQF_SHARED,
"PCI ERR interrupt", hose);
if (unlikely(ret)) {
free_irq(hose->serr_irq, hose);
return ret;
}
/* Unmask all of the arbiter IRQs. */
__raw_writel(SH4_PCIAINT_MBKN | SH4_PCIAINT_TBTO | SH4_PCIAINT_MBTO | \
SH4_PCIAINT_TABT | SH4_PCIAINT_MABT | SH4_PCIAINT_RDPE | \
SH4_PCIAINT_WDPE, hose->reg_base + SH4_PCIAINTM);
/* Unmask all of the PCI IRQs */
__raw_writel(SH4_PCIINTM_TTADIM | SH4_PCIINTM_TMTOIM | \
SH4_PCIINTM_MDEIM | SH4_PCIINTM_APEDIM | \
SH4_PCIINTM_SDIM | SH4_PCIINTM_DPEITWM | \
SH4_PCIINTM_PEDITRM | SH4_PCIINTM_TADIMM | \
SH4_PCIINTM_MADIMM | SH4_PCIINTM_MWPDIM | \
SH4_PCIINTM_MRDPEIM, hose->reg_base + SH4_PCIINTM);
return ret;
}
static inline void __init sh7780_pci_teardown_irqs(struct pci_channel *hose)
{
free_irq(hose->err_irq, hose);
free_irq(hose->serr_irq, hose);
}
static void __init sh7780_pci66_init(struct pci_channel *hose)
{
unsigned int tmp;
if (!pci_is_66mhz_capable(hose, 0, 0))
return;
/* Enable register access */
tmp = __raw_readl(hose->reg_base + SH4_PCICR);
tmp |= SH4_PCICR_PREFIX;
__raw_writel(tmp, hose->reg_base + SH4_PCICR);
/* Enable 66MHz operation */
tmp = __raw_readw(hose->reg_base + PCI_STATUS);
tmp |= PCI_STATUS_66MHZ;
__raw_writew(tmp, hose->reg_base + PCI_STATUS);
/* Done */
tmp = __raw_readl(hose->reg_base + SH4_PCICR);
tmp |= SH4_PCICR_PREFIX | SH4_PCICR_CFIN;
__raw_writel(tmp, hose->reg_base + SH4_PCICR);
}
static int __init sh7780_pci_init(void)
{
struct pci_channel *chan = &sh7780_pci_controller;
phys_addr_t memphys;
size_t memsize;
unsigned int id;
const char *type;
int ret, i;
pr_notice("PCI: Starting initialization.\n");
chan->reg_base = 0xfe040000;
/* Enable CPU access to the PCIC registers. */
__raw_writel(PCIECR_ENBL, PCIECR);
/* Reset */
__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_PRST | PCICR_ENDIANNESS,
chan->reg_base + SH4_PCICR);
/*
* Wait for it to come back up. The spec says to allow for up to
* 1 second after toggling the reset pin, but in practice 100ms
* is more than enough.
*/
mdelay(100);
id = __raw_readw(chan->reg_base + PCI_VENDOR_ID);
if (id != PCI_VENDOR_ID_RENESAS) {
pr_err("PCI: Unknown vendor ID 0x%04x.\n", id);
return -ENODEV;
}
id = __raw_readw(chan->reg_base + PCI_DEVICE_ID);
type = (id == PCI_DEVICE_ID_RENESAS_SH7763) ? "SH7763" :
(id == PCI_DEVICE_ID_RENESAS_SH7780) ? "SH7780" :
(id == PCI_DEVICE_ID_RENESAS_SH7781) ? "SH7781" :
(id == PCI_DEVICE_ID_RENESAS_SH7785) ? "SH7785" :
NULL;
if (unlikely(!type)) {
pr_err("PCI: Found an unsupported Renesas host controller, device id 0x%04x.\n",
id);
return -EINVAL;
}
pr_notice("PCI: Found a Renesas %s host controller, revision %d.\n",
type, __raw_readb(chan->reg_base + PCI_REVISION_ID));
/*
* Now throw it in to register initialization mode and
* start the real work.
*/
__raw_writel(SH4_PCICR_PREFIX | PCICR_ENDIANNESS,
chan->reg_base + SH4_PCICR);
memphys = __pa(memory_start);
memsize = roundup_pow_of_two(memory_end - memory_start);
/*
* If there's more than 512MB of memory, we need to roll over to
* LAR1/LSR1.
*/
if (memsize > SZ_512M) {
__raw_writel(memphys + SZ_512M, chan->reg_base + SH4_PCILAR1);
__raw_writel((((memsize - SZ_512M) - SZ_1M) & 0x1ff00000) | 1,
chan->reg_base + SH4_PCILSR1);
memsize = SZ_512M;
} else {
/*
* Otherwise just zero it out and disable it.
*/
__raw_writel(0, chan->reg_base + SH4_PCILAR1);
__raw_writel(0, chan->reg_base + SH4_PCILSR1);
}
/*
* LAR0/LSR0 covers up to the first 512MB, which is enough to
* cover all of lowmem on most platforms.
*/
__raw_writel(memphys, chan->reg_base + SH4_PCILAR0);
__raw_writel(((memsize - SZ_1M) & 0x1ff00000) | 1,
chan->reg_base + SH4_PCILSR0);
/*
* Hook up the ERR and SERR IRQs.
*/
ret = sh7780_pci_setup_irqs(chan);
if (unlikely(ret))
return ret;
/*
* Disable the cache snoop controller for non-coherent DMA.
*/
__raw_writel(0, chan->reg_base + SH7780_PCICSCR0);
__raw_writel(0, chan->reg_base + SH7780_PCICSAR0);
__raw_writel(0, chan->reg_base + SH7780_PCICSCR1);
__raw_writel(0, chan->reg_base + SH7780_PCICSAR1);
/*
* Setup the memory BARs
*/
for (i = 1; i < chan->nr_resources; i++) {
struct resource *res = chan->resources + i;
resource_size_t size;
if (unlikely(res->flags & IORESOURCE_IO))
continue;
/*
* Make sure we're in the right physical addressing mode
* for dealing with the resource.
*/
if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) {
chan->nr_resources--;
continue;
}
size = resource_size(res);
/*
* The MBMR mask is calculated in units of 256kB, which
* keeps things pretty simple.
*/
__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
chan->reg_base + SH7780_PCIMBMR(i - 1));
__raw_writel(res->start, chan->reg_base + SH7780_PCIMBR(i - 1));
}
/*
* And I/O.
*/
__raw_writel(0, chan->reg_base + PCI_BASE_ADDRESS_0);
__raw_writel(0, chan->reg_base + SH7780_PCIIOBR);
__raw_writel(0, chan->reg_base + SH7780_PCIIOBMR);
__raw_writew(PCI_COMMAND_SERR | PCI_COMMAND_WAIT | \
PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | \
PCI_COMMAND_MEMORY, chan->reg_base + PCI_COMMAND);
/*
* Initialization mode complete, release the control register and
* enable round robin mode to stop device overruns/starvation.
*/
__raw_writel(SH4_PCICR_PREFIX | SH4_PCICR_CFIN | SH4_PCICR_FTO |
PCICR_ENDIANNESS,
chan->reg_base + SH4_PCICR);
ret = register_pci_controller(chan);
if (unlikely(ret))
goto err;
sh7780_pci66_init(chan);
pr_notice("PCI: Running at %dMHz.\n",
(__raw_readw(chan->reg_base + PCI_STATUS) & PCI_STATUS_66MHZ)
? 66 : 33);
return 0;
err:
sh7780_pci_teardown_irqs(chan);
return ret;
}
arch_initcall(sh7780_pci_init);
| linux-master | arch/sh/drivers/pci/pci-sh7780.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SDK7786 FPGA PCIe mux handling
*
* Copyright (C) 2010 Paul Mundt
*/
#define pr_fmt(fmt) "PCI: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <mach/fpga.h>
/*
* The SDK7786 FPGA supports mangling of most of the slots in some way or
* another. Slots 3/4 are special in that only one can be supported at a
* time, and both appear on port 3 to the PCI bus scan. Enabling slot 4
* (the horizontal edge connector) will disable slot 3 entirely.
*
* Misconfigurations can be detected through the FPGA via the slot
* resistors to determine card presence. Hotplug remains unsupported.
*/
static unsigned int slot4en __initdata;
char *__init pcibios_setup(char *str)
{
if (strcmp(str, "slot4en") == 0) {
slot4en = 1;
return NULL;
}
return str;
}
static int __init sdk7786_pci_init(void)
{
u16 data = fpga_read_reg(PCIECR);
/*
* Enable slot #4 if it's been specified on the command line.
*
* Optionally reroute if slot #4 has a card present while slot #3
* does not, regardless of command line value.
*
* Card presence is logically inverted.
*/
	slot4en = slot4en ?: (!(data & PCIECR_PRST4) && (data & PCIECR_PRST3));
if (slot4en) {
pr_info("Activating PCIe slot#4 (disabling slot#3)\n");
data &= ~PCIECR_PCIEMUX1;
fpga_write_reg(data, PCIECR);
/* Warn about forced rerouting if slot#3 is occupied */
if ((data & PCIECR_PRST3) == 0) {
pr_warn("Unreachable card detected in slot#3\n");
return -EBUSY;
}
} else
pr_info("PCIe slot#4 disabled\n");
return 0;
}
postcore_initcall(sdk7786_pci_init);
| linux-master | arch/sh/drivers/pci/fixups-sdk7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/fixups-r7780rp.c
*
* Highlander R7780RP-1 PCI fixups
*
* Copyright (C) 2003 Lineo uSolutions, Inc.
* Copyright (C) 2004 - 2006 Paul Mundt
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/sh_intc.h>
#include "pci-sh4.h"
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
return evt2irq(0xa20) + slot;
}
| linux-master | arch/sh/drivers/pci/fixups-r7780rp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/sh_intc.h>
#include "pci-sh4.h"
int pcibios_map_platform_irq(const struct pci_dev *, u8 slot, u8 pin)
{
switch (slot) {
case 0: return evt2irq(0x3a0);
case 1: return evt2irq(0x3a0); /* AMD Ethernet controller */
case 2: return -1;
case 3: return -1;
case 4: return -1;
default:
printk("PCI: Bad IRQ mapping request for slot %d\n", slot);
return -1;
}
}
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
/*
* Only long word accesses of the PCIC's internal local registers and the
 * configuration registers from the CPU are supported.
*/
#define PCIC_WRITE(x,v) writel((v), PCI_REG(x))
#define PCIC_READ(x) readl(PCI_REG(x))
/*
* Description: This function sets up and initializes the pcic, sets
* up the BARS, maps the DRAM into the address space etc, etc.
*/
int pci_fixup_pcic(struct pci_channel *chan)
{
unsigned long bcr1, wcr1, wcr2, wcr3, mcr;
unsigned short bcr2;
/*
* Initialize the slave bus controller on the pcic. The values used
* here should not be hardcoded, but they should be taken from the bsc
* on the processor, to make this function as generic as possible.
 * (i.e. another SBC may use different SDRAM timing settings -- in order
* for the pcic to work, its settings need to be exactly the same.)
*/
bcr1 = (*(volatile unsigned long*)(SH7751_BCR1));
bcr2 = (*(volatile unsigned short*)(SH7751_BCR2));
wcr1 = (*(volatile unsigned long*)(SH7751_WCR1));
wcr2 = (*(volatile unsigned long*)(SH7751_WCR2));
wcr3 = (*(volatile unsigned long*)(SH7751_WCR3));
mcr = (*(volatile unsigned long*)(SH7751_MCR));
bcr1 = bcr1 | 0x00080000; /* Enable Bit 19, BREQEN */
(*(volatile unsigned long*)(SH7751_BCR1)) = bcr1;
bcr1 = bcr1 | 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
PCIC_WRITE(SH7751_PCIBCR1, bcr1); /* PCIC BCR1 */
PCIC_WRITE(SH7751_PCIBCR2, bcr2); /* PCIC BCR2 */
PCIC_WRITE(SH7751_PCIWCR1, wcr1); /* PCIC WCR1 */
PCIC_WRITE(SH7751_PCIWCR2, wcr2); /* PCIC WCR2 */
PCIC_WRITE(SH7751_PCIWCR3, wcr3); /* PCIC WCR3 */
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
PCIC_WRITE(SH7751_PCIMCR, mcr); /* PCIC MCR */
/* Enable all interrupts, so we know what to fix */
PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff);
PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f);
/* Set up standard PCI config registers */
PCIC_WRITE(SH7751_PCICONF1, 0xF39000C7); /* Bus Master, Mem & I/O access */
PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */
PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */
PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM) */
PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */
PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */
PCIC_WRITE(SH7751_PCILSR0, 0x03f00000); /* MEM (full 64M exposed) */
PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */
PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */
PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */
/* Now turn it on... */
PCIC_WRITE(SH7751_PCICR, 0xa5000001);
/*
* Set PCIMBR and PCIIOBR here, assuming a single window
* (16M MEM, 256K IO) is enough. If a larger space is
* needed, the readx/writex and inx/outx functions will
* have to do more (e.g. setting registers for each call).
*/
/*
* Set the MBR so PCI address is one-to-one with window,
* meaning all calls go straight through... use BUG_ON to
* catch erroneous assumption.
*/
BUG_ON(chan->resources[1].start != SH7751_PCI_MEMORY_BASE);
PCIC_WRITE(SH7751_PCIMBR, chan->resources[1].start);
/* Set IOBR for window containing area specified in pci.h */
PCIC_WRITE(SH7751_PCIIOBR, (chan->resources[0].start & SH7751_PCIIOBR_MASK));
/* All done, may as well say so... */
printk("SH7751 PCI: Finished initialization of the PCI controller\n");
return 1;
}
| linux-master | arch/sh/drivers/pci/fixups-se7751.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/fixups-landisk.c
*
* PCI initialization for the I-O DATA Device, Inc. LANDISK board
*
* Copyright (C) 2006 kogiidena
* Copyright (C) 2010 Nobuhiro Iwamatsu
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/sh_intc.h>
#include "pci-sh4.h"
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
/*
* slot0: pin1-4 = irq5,6,7,8
* slot1: pin1-4 = irq6,7,8,5
* slot2: pin1-4 = irq7,8,5,6
* slot3: pin1-4 = irq8,5,6,7
*/
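	/*
	 * Worked example (illustrative): slot 2, pin 2 (INTB) gives
	 * ((2 + 2 - 1) & 0x3) = 3, i.e. evt2irq(0x2a0) + 3, which is
	 * irq8 in the table above.
	 */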
int irq = ((slot + pin - 1) & 0x3) + evt2irq(0x2a0);
if ((slot | (pin - 1)) > 0x3) {
printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n",
slot, pin - 1 + 'A');
return -1;
}
return irq;
}
int pci_fixup_pcic(struct pci_channel *chan)
{
unsigned long bcr1, mcr;
bcr1 = __raw_readl(SH7751_BCR1);
bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
pci_write_reg(chan, bcr1, SH4_PCIBCR1);
mcr = __raw_readl(SH7751_MCR);
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
pci_write_reg(chan, mcr, SH4_PCIMCR);
pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5);
pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6);
pci_write_reg(chan, 0x0c000000, SH4_PCILAR0);
pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
return 0;
}
| linux-master | arch/sh/drivers/pci/fixups-landisk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Generic SH7786 PCI-Express operations.
*
* Copyright (C) 2009 - 2010 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include "pcie-sh7786.h"
enum {
PCI_ACCESS_READ,
PCI_ACCESS_WRITE,
};
static int sh7786_pcie_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 *data)
{
struct pci_channel *chan = bus->sysdata;
int dev, func, type, reg;
dev = PCI_SLOT(devfn);
func = PCI_FUNC(devfn);
type = !!bus->parent;
reg = where & ~3;
if (bus->number > 255 || dev > 31 || func > 7)
return PCIBIOS_FUNC_NOT_SUPPORTED;
/*
* While each channel has its own memory-mapped extended config
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
* controller initiated target transfer to its own config space
* result in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
* value, we cheat a bit here and bind the controller's config
* space to devfn 0 in order to enable self-enumeration. In this
* case the regular PAR/PDR path is sidelined and the mangled
* config access itself is initiated as a SuperHyway transaction.
*/
if (pci_is_root_bus(bus)) {
if (dev == 0) {
if (access_type == PCI_ACCESS_READ)
*data = pci_read_reg(chan, PCI_REG(reg));
else
pci_write_reg(chan, *data, PCI_REG(reg));
return PCIBIOS_SUCCESSFUL;
} else if (dev > 1)
return PCIBIOS_DEVICE_NOT_FOUND;
}
/* Clear errors */
pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR);
/* Set the PIO address */
pci_write_reg(chan, (bus->number << 24) | (dev << 19) |
(func << 16) | reg, SH4A_PCIEPAR);
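	/*
	 * Illustrative PAR encoding: bus 1, dev 0, func 0, reg 0x10
	 * yields (1 << 24) | 0x10 = 0x01000010.
	 */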
/* Enable the configuration access */
pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR);
/* Check for errors */
if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10)
return PCIBIOS_DEVICE_NOT_FOUND;
/* Check for master and target aborts */
if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (access_type == PCI_ACCESS_READ)
*data = pci_read_reg(chan, SH4A_PCIEPDR);
else
pci_write_reg(chan, *data, SH4A_PCIEPDR);
/* Disable the configuration access */
pci_write_reg(chan, 0, SH4A_PCIEPCTLR);
return PCIBIOS_SUCCESSFUL;
}
static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
unsigned long flags;
int ret;
u32 data;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
raw_spin_lock_irqsave(&pci_config_lock, flags);
ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
devfn, where, &data);
if (ret != PCIBIOS_SUCCESSFUL) {
*val = 0xffffffff;
goto out;
}
if (size == 1)
*val = (data >> ((where & 3) << 3)) & 0xff;
else if (size == 2)
*val = (data >> ((where & 2) << 3)) & 0xffff;
else
*val = data;
dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x "
"where=0x%04x size=%d val=0x%08lx\n", bus->number,
devfn, where, size, (unsigned long)*val);
out:
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return ret;
}
static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
unsigned long flags;
int shift, ret;
u32 data;
if ((size == 2) && (where & 1))
return PCIBIOS_BAD_REGISTER_NUMBER;
else if ((size == 4) && (where & 3))
return PCIBIOS_BAD_REGISTER_NUMBER;
raw_spin_lock_irqsave(&pci_config_lock, flags);
ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus,
devfn, where, &data);
if (ret != PCIBIOS_SUCCESSFUL)
goto out;
dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x "
"where=0x%04x size=%d val=%08lx\n", bus->number,
devfn, where, size, (unsigned long)val);
if (size == 1) {
shift = (where & 3) << 3;
data &= ~(0xff << shift);
data |= ((val & 0xff) << shift);
} else if (size == 2) {
shift = (where & 2) << 3;
data &= ~(0xffff << shift);
data |= ((val & 0xffff) << shift);
} else
data = val;
ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus,
devfn, where, &data);
out:
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
return ret;
}
struct pci_ops sh7786_pci_ops = {
.read = sh7786_pcie_read,
.write = sh7786_pcie_write,
};
| linux-master | arch/sh/drivers/pci/ops-sh7786.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/drivers/pci/fixups-titan.c
*
* Ported to new API by Paul Mundt <[email protected]>
*
* Modified from ops-snapgear.c written by David McCullough
* Highly leveraged from pci-bigsur.c, written by Dustin McIntire.
*
* PCI initialization for the Titan boards
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <mach/titan.h>
#include "pci-sh4.h"
static char titan_irq_tab[] = {
TITAN_IRQ_WAN,
TITAN_IRQ_LAN,
TITAN_IRQ_MPCIA,
TITAN_IRQ_MPCIB,
TITAN_IRQ_USB,
};
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
int irq = titan_irq_tab[slot];
printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n",
slot, pin - 1 + 'A', irq);
return irq;
}
| linux-master | arch/sh/drivers/pci/fixups-titan.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/pci/fixups-sdk7780.c
*
* PCI fixups for the SDK7780SE03
*
* Copyright (C) 2003 Lineo uSolutions, Inc.
* Copyright (C) 2004 - 2006 Paul Mundt
* Copyright (C) 2006 Nobuhiro Iwamatsu
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/sh_intc.h>
#include "pci-sh4.h"
#define IRQ_INTA evt2irq(0xa20)
#define IRQ_INTB evt2irq(0xa40)
#define IRQ_INTC evt2irq(0xa60)
#define IRQ_INTD evt2irq(0xa80)
/* IDSEL [16][17][18][19][20][21][22][23][24][25][26][27][28][29][30][31] */
static char sdk7780_irq_tab[4][16] = {
/* INTA */
{ IRQ_INTA, IRQ_INTD, IRQ_INTC, IRQ_INTD, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1 },
/* INTB */
{ IRQ_INTB, IRQ_INTA, -1, IRQ_INTA, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1 },
/* INTC */
{ IRQ_INTC, IRQ_INTB, -1, IRQ_INTB, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1 },
/* INTD */
{ IRQ_INTD, IRQ_INTC, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1 },
};
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
return sdk7780_irq_tab[pin-1][slot];
}
| linux-master | arch/sh/drivers/pci/fixups-sdk7780.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/superhyway/ops-sh4-202.c
*
* SuperHyway bus support for SH4-202
*
* Copyright (C) 2005 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/superhyway.h>
#include <linux/string.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#define PHYS_EMI_CBLOCK P4SEGADDR(0x1ec00000)
#define PHYS_EMI_DBLOCK P4SEGADDR(0x08000000)
#define PHYS_FEMI_CBLOCK P4SEGADDR(0x1f800000)
#define PHYS_FEMI_DBLOCK P4SEGADDR(0x00000000)
#define PHYS_EPBR_BLOCK P4SEGADDR(0x1de00000)
#define PHYS_DMAC_BLOCK P4SEGADDR(0x1fa00000)
#define PHYS_PBR_BLOCK P4SEGADDR(0x1fc00000)
static struct resource emi_resources[] = {
[0] = {
.start = PHYS_EMI_CBLOCK,
.end = PHYS_EMI_CBLOCK + 0x00300000 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PHYS_EMI_DBLOCK,
.end = PHYS_EMI_DBLOCK + 0x08000000 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct superhyway_device emi_device = {
.name = "emi",
.num_resources = ARRAY_SIZE(emi_resources),
.resource = emi_resources,
};
static struct resource femi_resources[] = {
[0] = {
.start = PHYS_FEMI_CBLOCK,
.end = PHYS_FEMI_CBLOCK + 0x00100000 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PHYS_FEMI_DBLOCK,
.end = PHYS_FEMI_DBLOCK + 0x08000000 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct superhyway_device femi_device = {
.name = "femi",
.num_resources = ARRAY_SIZE(femi_resources),
.resource = femi_resources,
};
static struct resource epbr_resources[] = {
[0] = {
.start = P4SEGADDR(0x1e7ffff8),
.end = P4SEGADDR(0x1e7ffff8 + (sizeof(u32) * 2) - 1),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PHYS_EPBR_BLOCK,
.end = PHYS_EPBR_BLOCK + 0x00a00000 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct superhyway_device epbr_device = {
.name = "epbr",
.num_resources = ARRAY_SIZE(epbr_resources),
.resource = epbr_resources,
};
static struct resource dmac_resource = {
.start = PHYS_DMAC_BLOCK,
.end = PHYS_DMAC_BLOCK + 0x00100000 - 1,
.flags = IORESOURCE_MEM,
};
static struct superhyway_device dmac_device = {
.name = "dmac",
.num_resources = 1,
.resource = &dmac_resource,
};
static struct resource pbr_resources[] = {
[0] = {
.start = P4SEGADDR(0x1ffffff8),
.end = P4SEGADDR(0x1ffffff8 + (sizeof(u32) * 2) - 1),
.flags = IORESOURCE_MEM,
},
[1] = {
.start = PHYS_PBR_BLOCK,
.end = PHYS_PBR_BLOCK + 0x00400000 - (sizeof(u32) * 2) - 1,
.flags = IORESOURCE_MEM,
},
};
static struct superhyway_device pbr_device = {
.name = "pbr",
.num_resources = ARRAY_SIZE(pbr_resources),
.resource = pbr_resources,
};
static struct superhyway_device *sh4202_devices[] __initdata = {
&emi_device, &femi_device, &epbr_device, &dmac_device, &pbr_device,
};
static int sh4202_read_vcr(unsigned long base, struct superhyway_vcr_info *vcr)
{
u32 vcrh, vcrl;
u64 tmp;
/*
* XXX: Even though the SH4-202 Evaluation Device documentation
* indicates that VCRL is mapped first with VCRH at a + 0x04
* offset, the opposite seems to be true.
*
* Some modules (PBR and ePBR for instance) also appear to have
* VCRL/VCRH flipped in the documentation, but on the SH4-202
* itself it appears that these are all consistently mapped with
* VCRH preceding VCRL.
*
* Do not trust the documentation, for it is evil.
*/
vcrh = __raw_readl(base);
vcrl = __raw_readl(base + sizeof(u32));
tmp = ((u64)vcrh << 32) | vcrl;
memcpy(vcr, &tmp, sizeof(u64));
return 0;
}
static int sh4202_write_vcr(unsigned long base, struct superhyway_vcr_info vcr)
{
u64 tmp = *(u64 *)&vcr;
__raw_writel((tmp >> 32) & 0xffffffff, base);
__raw_writel(tmp & 0xffffffff, base + sizeof(u32));
return 0;
}
static struct superhyway_ops sh4202_superhyway_ops = {
.read_vcr = sh4202_read_vcr,
.write_vcr = sh4202_write_vcr,
};
struct superhyway_bus superhyway_channels[] = {
{ &sh4202_superhyway_ops, },
{ 0, },
};
int __init superhyway_scan_bus(struct superhyway_bus *bus)
{
return superhyway_add_devices(bus, sh4202_devices,
ARRAY_SIZE(sh4202_devices));
}
| linux-master | arch/sh/drivers/superhyway/ops-sh4-202.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH7760 DMABRG IRQ handling
*
* (c) 2007 MSC Vertriebsges.m.b.H, Manuel Lauss <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/dma.h>
#include <asm/dmabrg.h>
#include <asm/io.h>
/*
* The DMABRG is a special DMA unit within the SH7760. It does transfers
* from USB-SRAM/Audio units to main memory (and also the LCDC; but that
* part is sensibly placed in the LCDC registers and requires no irqs)
* It has 3 IRQ lines which trigger 10 events, and works independently
* from the traditional SH DMAC (although it blocks usage of DMAC 0)
*
* BRGIRQID | component | dir | meaning | source
* -----------------------------------------------------
* 0 | USB-DMA | ... | xfer done | DMABRGI1
* 1 | USB-UAE | ... | USB addr err.| DMABRGI0
* 2 | HAC0/SSI0 | play| all done | DMABRGI1
* 3 | HAC0/SSI0 | play| half done | DMABRGI2
* 4 | HAC0/SSI0 | rec | all done | DMABRGI1
* 5 | HAC0/SSI0 | rec | half done | DMABRGI2
* 6 | HAC1/SSI1 | play| all done | DMABRGI1
* 7 | HAC1/SSI1 | play| half done | DMABRGI2
* 8 | HAC1/SSI1 | rec | all done | DMABRGI1
* 9 | HAC1/SSI1 | rec | half done | DMABRGI2
*
* all can be enabled/disabled in the DMABRGCR register,
* as well as checked if they occurred.
*
* DMABRGI0 services USB DMA Address errors, but it still must be
* enabled/acked in the DMABRGCR register. USB-DMA complete indicator
* is grouped together with the audio buffer end indicators, too bad...
*
* DMABRGCR: Bits 31-24: audio-dma ENABLE flags,
* Bits 23-16: audio-dma STATUS flags,
* Bits 9-8: USB error/xfer ENABLE,
* Bits 1-0: USB error/xfer STATUS.
* Ack an IRQ by writing 0 to the STATUS flag.
* Mask IRQ by writing 0 to ENABLE flag.
*
* Usage is almost like with any other IRQ:
* dmabrg_request_irq(BRGIRQID, handler, data)
* dmabrg_free_irq(BRGIRQID)
*
* handler prototype: void brgirqhandler(void *data)
*/
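/*
 * Illustrative usage sketch (not part of this driver; the handler and
 * completion below are hypothetical):
 *
 *	static void my_play_done(void *data)
 *	{
 *		complete((struct completion *)data);
 *	}
 *
 *	ret = dmabrg_request_irq(DMABRGIRQ_A0TXF, my_play_done, &done);
 *	if (ret)
 *		return ret;
 *	...
 *	dmabrg_free_irq(DMABRGIRQ_A0TXF);
 */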
#define DMARSRA 0xfe090000
#define DMAOR 0xffa00040
#define DMACHCR0 0xffa0000c
#define DMABRGCR 0xfe3c0000
#define DMAOR_BRG 0x0000c000
#define DMAOR_DMEN 0x00000001
#define DMABRGI0 68
#define DMABRGI1 69
#define DMABRGI2 70
static struct dmabrg_handler {
	void (*handler)(void *);
	void *data;
} *dmabrg_handlers;
static inline void dmabrg_call_handler(int i)
{
dmabrg_handlers[i].handler(dmabrg_handlers[i].data);
}
/*
* main DMABRG irq handler. It acks irqs and then
* handles every set and unmasked bit sequentially.
* No locking and no validity checks; it should be
* as fast as possible (audio!)
*/
static irqreturn_t dmabrg_irq(int irq, void *data)
{
unsigned long dcr;
unsigned int i;
dcr = __raw_readl(DMABRGCR);
__raw_writel(dcr & ~0x00ff0003, DMABRGCR); /* ack all */
dcr &= dcr >> 8; /* ignore masked */
/* USB stuff, get it out of the way first */
if (dcr & 1)
dmabrg_call_handler(DMABRGIRQ_USBDMA);
if (dcr & 2)
dmabrg_call_handler(DMABRGIRQ_USBDMAERR);
/* Audio */
dcr >>= 16;
while (dcr) {
i = __ffs(dcr);
dcr &= dcr - 1;
dmabrg_call_handler(i + DMABRGIRQ_A0TXF);
}
return IRQ_HANDLED;
}
static void dmabrg_disable_irq(unsigned int dmairq)
{
unsigned long dcr;
dcr = __raw_readl(DMABRGCR);
dcr &= ~(1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
__raw_writel(dcr, DMABRGCR);
}
static void dmabrg_enable_irq(unsigned int dmairq)
{
unsigned long dcr;
dcr = __raw_readl(DMABRGCR);
dcr |= (1 << ((dmairq > 1) ? dmairq + 22 : dmairq + 8));
__raw_writel(dcr, DMABRGCR);
}
int dmabrg_request_irq(unsigned int dmairq, void(*handler)(void*),
void *data)
{
if ((dmairq > 9) || !handler)
return -ENOENT;
if (dmabrg_handlers[dmairq].handler)
return -EBUSY;
dmabrg_handlers[dmairq].handler = handler;
dmabrg_handlers[dmairq].data = data;
dmabrg_enable_irq(dmairq);
return 0;
}
EXPORT_SYMBOL_GPL(dmabrg_request_irq);
void dmabrg_free_irq(unsigned int dmairq)
{
if (likely(dmairq < 10)) {
dmabrg_disable_irq(dmairq);
dmabrg_handlers[dmairq].handler = NULL;
dmabrg_handlers[dmairq].data = NULL;
}
}
EXPORT_SYMBOL_GPL(dmabrg_free_irq);
static int __init dmabrg_init(void)
{
unsigned long or;
int ret;
dmabrg_handlers = kcalloc(10, sizeof(struct dmabrg_handler),
GFP_KERNEL);
if (!dmabrg_handlers)
return -ENOMEM;
#ifdef CONFIG_SH_DMA
/* request DMAC channel 0 before anyone else can get it */
ret = request_dma(0, "DMAC 0 (DMABRG)");
if (ret < 0)
printk(KERN_INFO "DMABRG: DMAC ch0 not reserved!\n");
#endif
__raw_writel(0, DMABRGCR);
__raw_writel(0, DMACHCR0);
__raw_writel(0x94000000, DMARSRA); /* enable DMABRG in DMAC 0 */
/* enable DMABRG mode, enable the DMAC */
or = __raw_readl(DMAOR);
__raw_writel(or | DMAOR_BRG | DMAOR_DMEN, DMAOR);
ret = request_irq(DMABRGI0, dmabrg_irq, 0,
"DMABRG USB address error", NULL);
if (ret)
goto out0;
ret = request_irq(DMABRGI1, dmabrg_irq, 0,
"DMABRG Transfer End", NULL);
if (ret)
goto out1;
ret = request_irq(DMABRGI2, dmabrg_irq, 0,
"DMABRG Transfer Half", NULL);
if (ret == 0)
return ret;
free_irq(DMABRGI1, NULL);
out1: free_irq(DMABRGI0, NULL);
out0: kfree(dmabrg_handlers);
return ret;
}
subsys_initcall(dmabrg_init);
| linux-master | arch/sh/drivers/dma/dmabrg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/dma/dma-sh.c
*
* SuperH On-chip DMAC Support
*
* Copyright (C) 2000 Takashi YOSHII
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2005 Andriy Skulysh
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/io.h>
#include <mach-dreamcast/mach/dma.h>
#include <asm/dma.h>
#include <asm/dma-register.h>
#include <cpu/dma-register.h>
#include <cpu/dma.h>
/*
* Some of the SoCs feature two DMAC modules. In such a case, the channels are
* distributed equally among them.
*/
#ifdef SH_DMAC_BASE1
#define SH_DMAC_NR_MD_CH (CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
#else
#define SH_DMAC_NR_MD_CH CONFIG_NR_ONCHIP_DMA_CHANNELS
#endif
#define SH_DMAC_CH_SZ 0x10
/*
* Define the default configuration for dual address memory-memory transfer.
* The 0x400 value represents auto-request, external->external.
*/
#define RS_DUAL (DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
static unsigned long dma_find_base(unsigned int chan)
{
unsigned long base = SH_DMAC_BASE0;
#ifdef SH_DMAC_BASE1
if (chan >= SH_DMAC_NR_MD_CH)
base = SH_DMAC_BASE1;
#endif
return base;
}
static unsigned long dma_base_addr(unsigned int chan)
{
unsigned long base = dma_find_base(chan);
chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;
/* DMAOR is placed inside the channel register space. Step over it. */
if (chan >= DMAOR)
base += SH_DMAC_CH_SZ;
return base + chan;
}
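/*
 * Illustrative layout, assuming DMAOR sits at offset 0x40 as on the
 * SH-4 parts: channels 0-3 map to base + 0x00/0x10/0x20/0x30, while
 * channel 4 is stepped past DMAOR to base + 0x50.
 */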
#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dmte_irq(unsigned int chan)
{
return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else
static unsigned int dmte_irq_map[] = {
DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,
#ifdef DMTE4_IRQ
DMTE4_IRQ, DMTE4_IRQ + 1,
#endif
#ifdef DMTE6_IRQ
DMTE6_IRQ, DMTE6_IRQ + 1,
#endif
#ifdef DMTE8_IRQ
DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};
static inline unsigned int get_dmte_irq(unsigned int chan)
{
return dmte_irq_map[chan];
}
#endif
/*
* We determine the correct shift size based off of the CHCR transmit size
* for the given channel. Since we know that it will take:
*
* info->count >> ts_shift[transmit_size]
*
* iterations to complete the transfer.
*/
static unsigned int ts_shift[] = TS_SHIFT;
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
{
u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
return ts_shift[cnt];
}
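/*
 * Worked example (illustrative, assuming the usual TS_SHIFT table in
 * which XMIT_SZ_32BIT maps to a shift of 2): a 64kB transfer with a
 * 32-bit transmit size programs TCR with 0x10000 >> 2 = 0x4000
 * iterations.
 */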
/*
* The transfer end interrupt must read the chcr register to end the
* hardware interrupt active condition.
 * Besides that, it needs to wake any waiting process, which should handle
* setting up the next transfer.
*/
static irqreturn_t dma_tei(int irq, void *dev_id)
{
struct dma_channel *chan = dev_id;
u32 chcr;
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
if (!(chcr & CHCR_TE))
return IRQ_NONE;
chcr &= ~(CHCR_IE | CHCR_DE);
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
wake_up(&chan->wait_queue);
return IRQ_HANDLED;
}
static int sh_dmac_request_dma(struct dma_channel *chan)
{
if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
return 0;
return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
chan->dev_id, chan);
}
static void sh_dmac_free_dma(struct dma_channel *chan)
{
free_irq(get_dmte_irq(chan->chan), chan);
}
static int
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
{
if (!chcr)
chcr = RS_DUAL | CHCR_IE;
if (chcr & CHCR_IE) {
chcr &= ~CHCR_IE;
chan->flags |= DMA_TEI_CAPABLE;
} else {
chan->flags &= ~DMA_TEI_CAPABLE;
}
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
chan->flags |= DMA_CONFIGURED;
return 0;
}
static void sh_dmac_enable_dma(struct dma_channel *chan)
{
int irq;
u32 chcr;
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
chcr |= CHCR_DE;
if (chan->flags & DMA_TEI_CAPABLE)
chcr |= CHCR_IE;
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
if (chan->flags & DMA_TEI_CAPABLE) {
irq = get_dmte_irq(chan->chan);
enable_irq(irq);
}
}
static void sh_dmac_disable_dma(struct dma_channel *chan)
{
int irq;
u32 chcr;
if (chan->flags & DMA_TEI_CAPABLE) {
irq = get_dmte_irq(chan->chan);
disable_irq(irq);
}
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
}
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
/*
* If we haven't pre-configured the channel with special flags, use
* the defaults.
*/
if (unlikely(!(chan->flags & DMA_CONFIGURED)))
sh_dmac_configure_channel(chan, 0);
sh_dmac_disable_dma(chan);
/*
* Single-address mode usage note!
*
* It's important that we don't accidentally write any value to SAR/DAR
* (this includes 0) that hasn't been directly specified by the user if
* we're in single-address mode.
*
* In this case, only one address can be defined, anything else will
* result in a DMA address error interrupt (at least on the SH-4),
* which will subsequently halt the transfer.
*
* Channel 2 on the Dreamcast is a special case, as this is used for
* cascading to the PVR2 DMAC. In this case, we still need to write
* SAR and DAR, regardless of value, in order for cascading to work.
*/
if (chan->sar || (mach_is_dreamcast() &&
chan->chan == PVR2_CASCADE_CHAN))
__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
if (chan->dar || (mach_is_dreamcast() &&
chan->chan == PVR2_CASCADE_CHAN))
__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));
__raw_writel(chan->count >> calc_xmit_shift(chan),
(dma_base_addr(chan->chan) + TCR));
sh_dmac_enable_dma(chan);
return 0;
}
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
{
if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
return 0;
return __raw_readl(dma_base_addr(chan->chan) + TCR)
<< calc_xmit_shift(chan);
}
/*
* DMAOR handling
*/
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
defined(CONFIG_CPU_SUBTYPE_SH7724) || \
defined(CONFIG_CPU_SUBTYPE_SH7780) || \
defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR 2
#else
#define NR_DMAOR 1
#endif
#define dmaor_read_reg(n) __raw_readw(dma_find_base((n) * \
SH_DMAC_NR_MD_CH) + DMAOR)
#define dmaor_write_reg(n, data) __raw_writew(data, \
dma_find_base((n) * \
SH_DMAC_NR_MD_CH) + DMAOR)
static inline int dmaor_reset(int no)
{
unsigned long dmaor = dmaor_read_reg(no);
	/* Try to clear the error flags first, in case they are set */
dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
dmaor_write_reg(no, dmaor);
dmaor |= DMAOR_INIT;
dmaor_write_reg(no, dmaor);
/* See if we got an error again */
if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
return -EINVAL;
}
return 0;
}
/*
* DMAE handling
*/
#ifdef CONFIG_CPU_SH4
#if defined(DMAE1_IRQ)
#define NR_DMAE 2
#else
#define NR_DMAE 1
#endif
static const char *dmae_name[] = {
"DMAC Address Error0",
"DMAC Address Error1"
};
#ifdef CONFIG_SH_DMA_IRQ_MULTI
static inline unsigned int get_dma_error_irq(int n)
{
return get_dmte_irq(n * 6);
}
#else
static unsigned int dmae_irq_map[] = {
DMAE0_IRQ,
#ifdef DMAE1_IRQ
DMAE1_IRQ,
#endif
};
static inline unsigned int get_dma_error_irq(int n)
{
return dmae_irq_map[n];
}
#endif
static irqreturn_t dma_err(int irq, void *dummy)
{
int i;
for (i = 0; i < NR_DMAOR; i++)
dmaor_reset(i);
disable_irq(irq);
return IRQ_HANDLED;
}
static int dmae_irq_init(void)
{
int n;
for (n = 0; n < NR_DMAE; n++) {
int i = request_irq(get_dma_error_irq(n), dma_err,
IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
if (unlikely(i < 0)) {
printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
return i;
}
}
return 0;
}
static void dmae_irq_free(void)
{
int n;
for (n = 0; n < NR_DMAE; n++)
free_irq(get_dma_error_irq(n), NULL);
}
#else
static inline int dmae_irq_init(void)
{
return 0;
}
static void dmae_irq_free(void)
{
}
#endif
static struct dma_ops sh_dmac_ops = {
.request = sh_dmac_request_dma,
.free = sh_dmac_free_dma,
.get_residue = sh_dmac_get_dma_residue,
.xfer = sh_dmac_xfer_dma,
.configure = sh_dmac_configure_channel,
};
static struct dma_info sh_dmac_info = {
.name = "sh_dmac",
.nr_channels = CONFIG_NR_ONCHIP_DMA_CHANNELS,
.ops = &sh_dmac_ops,
.flags = DMAC_CHANNELS_TEI_CAPABLE,
};
static int __init sh_dmac_init(void)
{
struct dma_info *info = &sh_dmac_info;
int i, rc;
/*
* Initialize DMAE, for parts that support it.
*/
rc = dmae_irq_init();
if (unlikely(rc != 0))
return rc;
/*
* Initialize DMAOR, and clean up any error flags that may have
* been set.
*/
for (i = 0; i < NR_DMAOR; i++) {
rc = dmaor_reset(i);
if (unlikely(rc != 0))
return rc;
}
return register_dmac(info);
}
static void __exit sh_dmac_exit(void)
{
dmae_irq_free();
unregister_dmac(&sh_dmac_info);
}
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);
MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL v2");
| linux-master | arch/sh/drivers/dma/dma-sh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/dma/dma-api.c
*
* SuperH-specific DMA management API
*
* Copyright (C) 2003, 2004, 2005 Paul Mundt
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/dma.h>
DEFINE_SPINLOCK(dma_spin_lock);
static LIST_HEAD(registered_dmac_list);
struct dma_info *get_dma_info(unsigned int chan)
{
struct dma_info *info;
/*
* Look for each DMAC's range to determine who the owner of
* the channel is.
*/
list_for_each_entry(info, ®istered_dmac_list, list) {
if ((chan < info->first_vchannel_nr) ||
(chan >= info->first_vchannel_nr + info->nr_channels))
continue;
return info;
}
return NULL;
}
EXPORT_SYMBOL(get_dma_info);
struct dma_info *get_dma_info_by_name(const char *dmac_name)
{
struct dma_info *info;
list_for_each_entry(info, ®istered_dmac_list, list) {
if (dmac_name && (strcmp(dmac_name, info->name) != 0))
continue;
else
return info;
}
return NULL;
}
EXPORT_SYMBOL(get_dma_info_by_name);
static unsigned int get_nr_channels(void)
{
struct dma_info *info;
unsigned int nr = 0;
if (unlikely(list_empty(®istered_dmac_list)))
return nr;
list_for_each_entry(info, ®istered_dmac_list, list)
nr += info->nr_channels;
return nr;
}
struct dma_channel *get_dma_channel(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel;
int i;
if (unlikely(!info))
return ERR_PTR(-EINVAL);
for (i = 0; i < info->nr_channels; i++) {
channel = &info->channels[i];
if (channel->vchan == chan)
return channel;
}
return NULL;
}
EXPORT_SYMBOL(get_dma_channel);
int get_dma_residue(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->get_residue)
return info->ops->get_residue(channel);
return 0;
}
EXPORT_SYMBOL(get_dma_residue);
static int search_cap(const char **haystack, const char *needle)
{
const char **p;
for (p = haystack; *p; p++)
if (strcmp(*p, needle) == 0)
return 1;
return 0;
}
/**
* request_dma_bycap - Allocate a DMA channel based on its capabilities
* @dmac: List of DMA controllers to search
 * @caps: List of capabilities
 * @dev_id: dev_id to bind the allocated channel to
*
* Search all channels of all DMA controllers to find a channel which
* matches the requested capabilities. The result is the channel
* number if a match is found, or %-ENODEV if no match is found.
*
* Note that not all DMA controllers export capabilities, in which
* case they can never be allocated using this API, and so
* request_dma() must be used specifying the channel number.
*/
int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id)
{
unsigned int found = 0;
struct dma_info *info;
const char **p;
int i;
BUG_ON(!dmac || !caps);
list_for_each_entry(info, ®istered_dmac_list, list)
if (strcmp(*dmac, info->name) == 0) {
found = 1;
break;
}
if (!found)
return -ENODEV;
for (i = 0; i < info->nr_channels; i++) {
struct dma_channel *channel = &info->channels[i];
if (unlikely(!channel->caps))
continue;
for (p = caps; *p; p++) {
if (!search_cap(channel->caps, *p))
break;
if (request_dma(channel->chan, dev_id) == 0)
return channel->chan;
}
}
return -EINVAL;
}
EXPORT_SYMBOL(request_dma_bycap);
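/*
 * Illustrative call (the capability strings and dev_id are
 * hypothetical; real names depend on what the DMAC exports):
 *
 *	static const char *dmac[] = { "sh_dmac", NULL };
 *	static const char *caps[] = { "mem-to-mem", NULL };
 *
 *	int chan = request_dma_bycap(dmac, caps, "my-driver");
 *	if (chan < 0)
 *		return chan;
 */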
int dmac_search_free_channel(const char *dev_id)
{
	struct dma_channel *channel = NULL;
struct dma_info *info = get_dma_info(0);
int i;
for (i = 0; i < info->nr_channels; i++) {
channel = &info->channels[i];
if (unlikely(!channel))
return -ENODEV;
if (atomic_read(&channel->busy) == 0)
break;
}
if (info->ops->request) {
int result = info->ops->request(channel);
if (result)
return result;
atomic_set(&channel->busy, 1);
return channel->chan;
}
return -ENOSYS;
}
int request_dma(unsigned int chan, const char *dev_id)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = get_dma_channel(chan);
	int result;
if (atomic_xchg(&channel->busy, 1))
return -EBUSY;
strscpy(channel->dev_id, dev_id, sizeof(channel->dev_id));
if (info->ops->request) {
result = info->ops->request(channel);
if (result)
atomic_set(&channel->busy, 0);
return result;
}
return 0;
}
EXPORT_SYMBOL(request_dma);
void free_dma(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->free)
info->ops->free(channel);
atomic_set(&channel->busy, 0);
}
EXPORT_SYMBOL(free_dma);
void dma_wait_for_completion(unsigned int chan)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
if (channel->flags & DMA_TEI_CAPABLE) {
wait_event(channel->wait_queue,
(info->ops->get_residue(channel) == 0));
return;
}
while (info->ops->get_residue(channel))
cpu_relax();
}
EXPORT_SYMBOL(dma_wait_for_completion);
int register_chan_caps(const char *dmac, struct dma_chan_caps *caps)
{
struct dma_info *info;
unsigned int found = 0;
int i;
list_for_each_entry(info, ®istered_dmac_list, list)
if (strcmp(dmac, info->name) == 0) {
found = 1;
break;
}
if (unlikely(!found))
return -ENODEV;
for (i = 0; i < info->nr_channels; i++, caps++) {
struct dma_channel *channel;
if ((info->first_channel_nr + i) != caps->ch_num)
return -EINVAL;
channel = &info->channels[i];
channel->caps = caps->caplist;
}
return 0;
}
EXPORT_SYMBOL(register_chan_caps);
void dma_configure_channel(unsigned int chan, unsigned long flags)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->configure)
info->ops->configure(channel, flags);
}
EXPORT_SYMBOL(dma_configure_channel);
int dma_xfer(unsigned int chan, unsigned long from,
unsigned long to, size_t size, unsigned int mode)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
channel->sar = from;
channel->dar = to;
channel->count = size;
channel->mode = mode;
return info->ops->xfer(channel);
}
EXPORT_SYMBOL(dma_xfer);
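/*
 * Typical one-shot transfer sequence using the calls above
 * (illustrative sketch; the channel number, physical addresses and
 * mode value are hypothetical and controller-specific):
 *
 *	if (request_dma(0, "my-driver") == 0) {
 *		dma_xfer(0, src_phys, dst_phys, len, mode);
 *		dma_wait_for_completion(0);
 *		free_dma(0);
 *	}
 */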
int dma_extend(unsigned int chan, unsigned long op, void *param)
{
struct dma_info *info = get_dma_info(chan);
struct dma_channel *channel = get_dma_channel(chan);
if (info->ops->extend)
return info->ops->extend(channel, op, param);
return -ENOSYS;
}
EXPORT_SYMBOL(dma_extend);
static int dma_proc_show(struct seq_file *m, void *v)
{
struct dma_info *info = v;
if (list_empty(®istered_dmac_list))
return 0;
/*
* Iterate over each registered DMAC
*/
list_for_each_entry(info, ®istered_dmac_list, list) {
int i;
/*
* Iterate over each channel
*/
for (i = 0; i < info->nr_channels; i++) {
struct dma_channel *channel = info->channels + i;
if (!(channel->flags & DMA_CONFIGURED))
continue;
seq_printf(m, "%2d: %14s %s\n", i,
info->name, channel->dev_id);
}
}
return 0;
}
int register_dmac(struct dma_info *info)
{
unsigned int total_channels, i;
INIT_LIST_HEAD(&info->list);
printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n",
info->name, info->nr_channels, info->nr_channels > 1 ? "s" : "");
BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels);
info->pdev = platform_device_register_simple(info->name, -1,
NULL, 0);
if (IS_ERR(info->pdev))
return PTR_ERR(info->pdev);
/*
* Don't touch pre-configured channels
*/
if (!(info->flags & DMAC_CHANNELS_CONFIGURED)) {
unsigned int size;
size = sizeof(struct dma_channel) * info->nr_channels;
info->channels = kzalloc(size, GFP_KERNEL);
if (!info->channels)
return -ENOMEM;
}
total_channels = get_nr_channels();
info->first_vchannel_nr = total_channels;
for (i = 0; i < info->nr_channels; i++) {
struct dma_channel *chan = &info->channels[i];
atomic_set(&chan->busy, 0);
chan->chan = info->first_channel_nr + i;
chan->vchan = info->first_channel_nr + i + total_channels;
memcpy(chan->dev_id, "Unused", 7);
if (info->flags & DMAC_CHANNELS_TEI_CAPABLE)
chan->flags |= DMA_TEI_CAPABLE;
init_waitqueue_head(&chan->wait_queue);
dma_create_sysfs_files(chan, info);
}
list_add(&info->list, ®istered_dmac_list);
return 0;
}
EXPORT_SYMBOL(register_dmac);
void unregister_dmac(struct dma_info *info)
{
unsigned int i;
for (i = 0; i < info->nr_channels; i++)
dma_remove_sysfs_files(info->channels + i, info);
if (!(info->flags & DMAC_CHANNELS_CONFIGURED))
kfree(info->channels);
list_del(&info->list);
platform_device_unregister(info->pdev);
}
EXPORT_SYMBOL(unregister_dmac);
static int __init dma_api_init(void)
{
printk(KERN_NOTICE "DMA: Registering DMA API.\n");
return proc_create_single("dma", 0, NULL, dma_proc_show) ? 0 : -ENOMEM;
}
subsys_initcall(dma_api_init);
MODULE_AUTHOR("Paul Mundt <[email protected]>");
MODULE_DESCRIPTION("DMA API for SuperH");
MODULE_LICENSE("GPL v2");
| linux-master | arch/sh/drivers/dma/dma-api.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/dma/dma-sysfs.c
*
* sysfs interface for SH DMA API
*
* Copyright (C) 2004 - 2006 Paul Mundt
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/string.h>
#include <asm/dma.h>
static struct bus_type dma_subsys = {
.name = "dma",
.dev_name = "dma",
};
static ssize_t dma_show_devices(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t len = 0;
int i;
for (i = 0; i < 16; i++) {
struct dma_info *info = get_dma_info(i);
struct dma_channel *channel = get_dma_channel(i);
if (unlikely(!info) || !channel)
continue;
len += sprintf(buf + len, "%2d: %14s %s\n",
channel->chan, info->name,
channel->dev_id);
}
return len;
}
static DEVICE_ATTR(devices, S_IRUGO, dma_show_devices, NULL);
static int __init dma_subsys_init(void)
{
struct device *dev_root;
int ret;
ret = subsys_system_register(&dma_subsys, NULL);
if (unlikely(ret))
return ret;
dev_root = bus_get_dev_root(&dma_subsys);
if (dev_root) {
ret = device_create_file(dev_root, &dev_attr_devices);
put_device(dev_root);
}
return ret;
}
postcore_initcall(dma_subsys_init);
static ssize_t dma_show_dev_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dma_channel *channel = to_dma_channel(dev);
return sprintf(buf, "%s\n", channel->dev_id);
}
static ssize_t dma_store_dev_id(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
	strscpy(channel->dev_id, buf, sizeof(channel->dev_id));
return count;
}
static DEVICE_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id);
static ssize_t dma_store_config(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
unsigned long config;
config = simple_strtoul(buf, NULL, 0);
dma_configure_channel(channel->vchan, config);
return count;
}
static DEVICE_ATTR(config, S_IWUSR, NULL, dma_store_config);
static ssize_t dma_show_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dma_channel *channel = to_dma_channel(dev);
return sprintf(buf, "0x%08x\n", channel->mode);
}
static ssize_t dma_store_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dma_channel *channel = to_dma_channel(dev);
channel->mode = simple_strtoul(buf, NULL, 0);
return count;
}
static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode);
#define dma_ro_attr(field, fmt) \
static ssize_t dma_show_##field(struct device *dev, \
struct device_attribute *attr, char *buf)\
{ \
struct dma_channel *channel = to_dma_channel(dev); \
return sprintf(buf, fmt, channel->field); \
} \
static DEVICE_ATTR(field, S_IRUGO, dma_show_##field, NULL);
dma_ro_attr(count, "0x%08x\n");
dma_ro_attr(flags, "0x%08lx\n");
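/*
 * The attributes above surface under the dma subsystem, e.g.
 * (illustrative shell session; channel numbering depends on the
 * registered DMACs):
 *
 *	# cat /sys/devices/system/dma/dma0/dev_id
 *	# echo 0x1000 > /sys/devices/system/dma/dma0/mode
 */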
int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
struct device *dev = &chan->dev;
char name[16];
int ret;
dev->id = chan->vchan;
dev->bus = &dma_subsys;
ret = device_register(dev);
if (ret)
return ret;
ret |= device_create_file(dev, &dev_attr_dev_id);
ret |= device_create_file(dev, &dev_attr_count);
ret |= device_create_file(dev, &dev_attr_mode);
ret |= device_create_file(dev, &dev_attr_flags);
ret |= device_create_file(dev, &dev_attr_config);
if (unlikely(ret)) {
dev_err(&info->pdev->dev, "Failed creating attrs\n");
return ret;
}
snprintf(name, sizeof(name), "dma%d", chan->chan);
return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name);
}
void dma_remove_sysfs_files(struct dma_channel *chan, struct dma_info *info)
{
struct device *dev = &chan->dev;
char name[16];
device_remove_file(dev, &dev_attr_dev_id);
device_remove_file(dev, &dev_attr_count);
device_remove_file(dev, &dev_attr_mode);
device_remove_file(dev, &dev_attr_flags);
device_remove_file(dev, &dev_attr_config);
snprintf(name, sizeof(name), "dma%d", chan->chan);
sysfs_remove_link(&info->pdev->dev.kobj, name);
device_unregister(dev);
}
| linux-master | arch/sh/drivers/dma/dma-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/dma/dma-pvr2.c
*
* NEC PowerVR 2 (Dreamcast) DMA support
*
* Copyright (C) 2003, 2004 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <mach/sysasic.h>
#include <mach/dma.h>
#include <asm/dma.h>
#include <asm/io.h>
static unsigned int xfer_complete;
static int count;
static irqreturn_t pvr2_dma_interrupt(int irq, void *dev_id)
{
if (get_dma_residue(PVR2_CASCADE_CHAN)) {
printk(KERN_WARNING "DMA: SH DMAC did not complete transfer "
"on channel %d, waiting..\n", PVR2_CASCADE_CHAN);
dma_wait_for_completion(PVR2_CASCADE_CHAN);
}
if (count++ < 10)
pr_debug("Got a pvr2 dma interrupt for channel %d\n",
irq - HW_EVENT_PVR2_DMA);
xfer_complete = 1;
return IRQ_HANDLED;
}
static int pvr2_request_dma(struct dma_channel *chan)
{
if (__raw_readl(PVR2_DMA_MODE) != 0)
return -EBUSY;
__raw_writel(0, PVR2_DMA_LMMODE0);
return 0;
}
static int pvr2_get_dma_residue(struct dma_channel *chan)
{
return xfer_complete == 0;
}
static int pvr2_xfer_dma(struct dma_channel *chan)
{
if (chan->sar || !chan->dar)
return -EINVAL;
xfer_complete = 0;
__raw_writel(chan->dar, PVR2_DMA_ADDR);
__raw_writel(chan->count, PVR2_DMA_COUNT);
__raw_writel(chan->mode & DMA_MODE_MASK, PVR2_DMA_MODE);
return 0;
}
static struct dma_ops pvr2_dma_ops = {
.request = pvr2_request_dma,
.get_residue = pvr2_get_dma_residue,
.xfer = pvr2_xfer_dma,
};
static struct dma_info pvr2_dma_info = {
.name = "pvr2_dmac",
.nr_channels = 1,
.ops = &pvr2_dma_ops,
.flags = DMAC_CHANNELS_TEI_CAPABLE,
};
static int __init pvr2_dma_init(void)
{
if (request_irq(HW_EVENT_PVR2_DMA, pvr2_dma_interrupt, 0,
"pvr2 DMA handler", NULL))
pr_err("Failed to register pvr2 DMA handler interrupt\n");
request_dma(PVR2_CASCADE_CHAN, "pvr2 cascade");
return register_dmac(&pvr2_dma_info);
}
static void __exit pvr2_dma_exit(void)
{
free_dma(PVR2_CASCADE_CHAN);
free_irq(HW_EVENT_PVR2_DMA, 0);
unregister_dmac(&pvr2_dma_info);
}
subsys_initcall(pvr2_dma_init);
module_exit(pvr2_dma_exit);
MODULE_AUTHOR("Paul Mundt <[email protected]>");
MODULE_DESCRIPTION("NEC PowerVR 2 DMA driver");
MODULE_LICENSE("GPL v2");
| linux-master | arch/sh/drivers/dma/dma-pvr2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/drivers/dma/dma-g2.c
*
* G2 bus DMA support
*
* Copyright (C) 2003 - 2006 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/cacheflush.h>
#include <mach/sysasic.h>
#include <mach/dma.h>
#include <asm/dma.h>
struct g2_channel {
unsigned long g2_addr; /* G2 bus address */
unsigned long root_addr; /* Root bus (SH-4) address */
unsigned long size; /* Size (in bytes), 32-byte aligned */
unsigned long direction; /* Transfer direction */
unsigned long ctrl; /* Transfer control */
unsigned long chan_enable; /* Channel enable */
unsigned long xfer_enable; /* Transfer enable */
unsigned long xfer_stat; /* Transfer status */
} __attribute__ ((aligned(32)));
struct g2_status {
unsigned long g2_addr;
unsigned long root_addr;
unsigned long size;
unsigned long status;
} __attribute__ ((aligned(16)));
struct g2_dma_info {
struct g2_channel channel[G2_NR_DMA_CHANNELS];
unsigned long pad1[G2_NR_DMA_CHANNELS];
unsigned long wait_state;
unsigned long pad2[10];
unsigned long magic;
struct g2_status status[G2_NR_DMA_CHANNELS];
} __attribute__ ((aligned(256)));
static volatile struct g2_dma_info *g2_dma = (volatile struct g2_dma_info *)0xa05f7800;
#define g2_bytes_remaining(i) \
((g2_dma->channel[i].size - \
g2_dma->status[i].size) & 0x0fffffff)
static irqreturn_t g2_dma_interrupt(int irq, void *dev_id)
{
int i;
for (i = 0; i < G2_NR_DMA_CHANNELS; i++) {
if (g2_dma->status[i].status & 0x20000000) {
unsigned int bytes = g2_bytes_remaining(i);
if (likely(bytes == 0)) {
struct dma_info *info = dev_id;
struct dma_channel *chan = info->channels + i;
wake_up(&chan->wait_queue);
return IRQ_HANDLED;
}
}
}
return IRQ_NONE;
}
static int g2_enable_dma(struct dma_channel *chan)
{
unsigned int chan_nr = chan->chan;
g2_dma->channel[chan_nr].chan_enable = 1;
g2_dma->channel[chan_nr].xfer_enable = 1;
return 0;
}
static int g2_disable_dma(struct dma_channel *chan)
{
unsigned int chan_nr = chan->chan;
g2_dma->channel[chan_nr].chan_enable = 0;
g2_dma->channel[chan_nr].xfer_enable = 0;
return 0;
}
static int g2_xfer_dma(struct dma_channel *chan)
{
unsigned int chan_nr = chan->chan;
if (chan->sar & 31) {
printk("g2dma: unaligned source 0x%lx\n", chan->sar);
return -EINVAL;
}
if (chan->dar & 31) {
printk("g2dma: unaligned dest 0x%lx\n", chan->dar);
return -EINVAL;
}
/* Align the count */
if (chan->count & 31)
chan->count = (chan->count + (32 - 1)) & ~(32 - 1);
/* Fixup destination */
chan->dar += 0xa0800000;
/* Fixup direction */
chan->mode = !chan->mode;
	flush_icache_range((unsigned long)chan->sar,
			   (unsigned long)chan->sar + chan->count);
g2_disable_dma(chan);
g2_dma->channel[chan_nr].g2_addr = chan->dar & 0x1fffffe0;
g2_dma->channel[chan_nr].root_addr = chan->sar & 0x1fffffe0;
g2_dma->channel[chan_nr].size = (chan->count & ~31) | 0x80000000;
g2_dma->channel[chan_nr].direction = chan->mode;
/*
* bit 0 - ???
* bit 1 - if set, generate a hardware event on transfer completion
* bit 2 - ??? something to do with suspend?
*/
g2_dma->channel[chan_nr].ctrl = 5; /* ?? */
g2_enable_dma(chan);
/* debug cruft */
pr_debug("count, sar, dar, mode, ctrl, chan, xfer: %ld, 0x%08lx, "
"0x%08lx, %ld, %ld, %ld, %ld\n",
g2_dma->channel[chan_nr].size,
g2_dma->channel[chan_nr].root_addr,
g2_dma->channel[chan_nr].g2_addr,
g2_dma->channel[chan_nr].direction,
g2_dma->channel[chan_nr].ctrl,
g2_dma->channel[chan_nr].chan_enable,
g2_dma->channel[chan_nr].xfer_enable);
return 0;
}
static int g2_get_residue(struct dma_channel *chan)
{
return g2_bytes_remaining(chan->chan);
}
static struct dma_ops g2_dma_ops = {
.xfer = g2_xfer_dma,
.get_residue = g2_get_residue,
};
static struct dma_info g2_dma_info = {
.name = "g2_dmac",
.nr_channels = 4,
.ops = &g2_dma_ops,
.flags = DMAC_CHANNELS_TEI_CAPABLE,
};
static int __init g2_dma_init(void)
{
int ret;
ret = request_irq(HW_EVENT_G2_DMA, g2_dma_interrupt, 0,
"g2 DMA handler", &g2_dma_info);
if (unlikely(ret))
return -EINVAL;
/* Magic */
g2_dma->wait_state = 27;
g2_dma->magic = 0x4659404f;
ret = register_dmac(&g2_dma_info);
if (unlikely(ret != 0))
free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
return ret;
}
static void __exit g2_dma_exit(void)
{
free_irq(HW_EVENT_G2_DMA, &g2_dma_info);
unregister_dmac(&g2_dma_info);
}
subsys_initcall(g2_dma_init);
module_exit(g2_dma_exit);
MODULE_AUTHOR("Paul Mundt <[email protected]>");
MODULE_DESCRIPTION("G2 bus DMA driver");
MODULE_LICENSE("GPL v2");
| linux-master | arch/sh/drivers/dma/dma-g2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include "../../../../lib/ashldi3.c"
| linux-master | arch/sh/boot/compressed/ashldi3.c |
// SPDX-License-Identifier: GPL-2.0
int cache_control(unsigned int command)
{
	volatile unsigned int *p = (volatile unsigned int *) 0x80000000;
	int i;

	/*
	 * Touch one word in each 32-byte cache line across a 32kB window
	 * of the cached P1 segment, forcing line fills that push out any
	 * stale (possibly dirty) cache contents before decompression.
	 */
	for (i = 0; i < (32 * 1024); i += 32) {
		(void)*p;
		p += (32 / sizeof(int));
	}

	return 0;
}
| linux-master | arch/sh/boot/compressed/cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boot/compressed/misc.c
*
* This is a collection of several routines from gzip-1.0.3
* adapted for Linux.
*
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*
* Adapted for SH by Stuart Menefy, Aug 1999
*
* Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000
*/
#include <linux/uaccess.h>
#include <asm/addrspace.h>
#include <asm/page.h>
/*
* gzip declarations
*/
#define STATIC static
#undef memset
#undef memcpy
#define memzero(s, n) memset ((s), 0, (n))
/* cache.c */
#define CACHE_ENABLE 0
#define CACHE_DISABLE 1
int cache_control(unsigned int command);
extern char input_data[];
extern int input_len;
static unsigned char *output;
static void error(char *m);
int puts(const char *);
extern int _text; /* Defined in vmlinux.lds.S */
extern int _end;
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_HAVE_KERNEL_BZIP2
#define HEAP_SIZE 0x400000
#else
#define HEAP_SIZE 0x10000
#endif
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
int puts(const char *s)
{
/* This should be updated to use the sh-sci routines */
return 0;
}
void *memset(void *s, int c, size_t n)
{
	int i;
	char *ss = (char *)s;

	for (i = 0; i < n; i++)
		ss[i] = c;
	return s;
}

void *memcpy(void *__dest, __const void *__src, size_t __n)
{
	int i;
	char *d = (char *)__dest, *s = (char *)__src;

	for (i = 0; i < __n; i++)
		d[i] = s[i];
	return __dest;
}
static void error(char *x)
{
puts("\n\n");
puts(x);
puts("\n\n -- System halted");
while(1); /* Halt */
}
const unsigned long __stack_chk_guard = 0x000a0dff;
void __stack_chk_fail(void)
{
error("stack-protector: Kernel stack is corrupted\n");
}
/* Needed because vmlinux.lds.h references this */
void ftrace_stub(void)
{
}
void arch_ftrace_ops_list_func(void)
{
}
#define stackalign 4
#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
long *stack_start = &user_stack[STACK_SIZE];
void decompress_kernel(void)
{
unsigned long output_addr;
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
output = (unsigned char *)output_addr;
free_mem_ptr = (unsigned long)&_end;
free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
puts("Uncompressing Linux... ");
cache_control(CACHE_ENABLE);
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
cache_control(CACHE_DISABLE);
puts("Ok, booting the kernel.\n");
}
| linux-master | arch/sh/boot/compressed/misc.c |
/*
* sh7724 MMCIF loader
*
* Copyright (C) 2010 Magnus Damm
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_data/sh_mmcif.h>
#include <mach/romimage.h>
#define MMCIF_BASE (void __iomem *)0xa4ca0000
#define MSTPCR2 0xa4150038
#define PTWCR 0xa4050146
#define PTXCR 0xa4050148
#define PSELA 0xa405014e
#define PSELE 0xa4050156
#define HIZCRC 0xa405015c
#define DRVCRA 0xa405018a
enum {
MMCIF_PROGRESS_ENTER,
MMCIF_PROGRESS_INIT,
MMCIF_PROGRESS_LOAD,
MMCIF_PROGRESS_DONE
};
/* SH7724 specific MMCIF loader
*
* loads the romImage from an MMC card starting from block 512
* use the following line to write the romImage to an MMC card
* # dd if=arch/sh/boot/romImage of=/dev/sdx bs=512 seek=512
*/
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long no_bytes)
{
mmcif_update_progress(MMCIF_PROGRESS_ENTER);
/* enable clock to the MMCIF hardware block */
__raw_writel(__raw_readl(MSTPCR2) & ~0x20000000, MSTPCR2);
/* setup pins D7-D0 */
__raw_writew(0x0000, PTWCR);
/* setup pins MMC_CLK, MMC_CMD */
__raw_writew(__raw_readw(PTXCR) & ~0x000f, PTXCR);
/* select D3-D0 pin function */
__raw_writew(__raw_readw(PSELA) & ~0x2000, PSELA);
/* select D7-D4 pin function */
__raw_writew(__raw_readw(PSELE) & ~0x3000, PSELE);
/* disable Hi-Z for the MMC pins */
__raw_writew(__raw_readw(HIZCRC) & ~0x0620, HIZCRC);
/* high drive capability for MMC pins */
__raw_writew(__raw_readw(DRVCRA) | 0x3000, DRVCRA);
mmcif_update_progress(MMCIF_PROGRESS_INIT);
/* setup MMCIF hardware */
sh_mmcif_boot_init(MMCIF_BASE);
mmcif_update_progress(MMCIF_PROGRESS_LOAD);
/* load kernel via MMCIF interface */
sh_mmcif_boot_do_read(MMCIF_BASE, 512,
(no_bytes + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS,
buf);
/* disable clock to the MMCIF hardware block */
__raw_writel(__raw_readl(MSTPCR2) | 0x20000000, MSTPCR2);
mmcif_update_progress(MMCIF_PROGRESS_DONE);
}
| linux-master | arch/sh/boot/romimage/mmcif-sh7724.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boards/renesas/edosk7705/setup.c
*
* Copyright (C) 2000 Kazumoto Kojima
*
* Hitachi SolutionEngine Support.
*
* Modified for edosk7705 development
* board by S. Dunn, 2003.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/smc91x.h>
#include <linux/sh_intc.h>
#include <asm/machvec.h>
#include <linux/sizes.h>
#define SMC_IOBASE 0xA2000000
#define SMC_IO_OFFSET 0x300
#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
#define ETHERNET_IRQ evt2irq(0x320)
static void __init sh_edosk7705_init_irq(void)
{
make_imask_irq(ETHERNET_IRQ);
}
/* eth initialization functions */
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
};
static struct resource smc91x_res[] = {
[0] = {
.start = SMC_IOADDR,
.end = SMC_IOADDR + SZ_32 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = ETHERNET_IRQ,
.end = ETHERNET_IRQ,
.flags = IORESOURCE_IRQ ,
}
};
static struct platform_device smc91x_dev = {
.name = "smc91x",
.id = -1,
.num_resources = ARRAY_SIZE(smc91x_res),
.resource = smc91x_res,
.dev = {
.platform_data = &smc91x_info,
},
};
/* platform init code */
static struct platform_device *edosk7705_devices[] __initdata = {
&smc91x_dev,
};
static int __init init_edosk7705_devices(void)
{
return platform_add_devices(edosk7705_devices,
ARRAY_SIZE(edosk7705_devices));
}
device_initcall(init_edosk7705_devices);
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_edosk7705 __initmv = {
.mv_name = "EDOSK7705",
.mv_init_irq = sh_edosk7705_init_irq,
};
| linux-master | arch/sh/boards/board-edosk7705.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ALPHAPROJECT AP-SH4AD-0A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2010 Matt Fleming
* Copyright (C) 2010 Paul Mundt
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <linux/sizes.h>
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
REGULATOR_SUPPLY("vdd33a", "smsc911x"),
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4ad0a_devices[] __initdata = {
&smsc911x_device,
};
static int __init apsh4ad0a_devices_setup(void)
{
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
return platform_add_devices(apsh4ad0a_devices,
ARRAY_SIZE(apsh4ad0a_devices));
}
device_initcall(apsh4ad0a_devices_setup);
static int apsh4ad0a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value |= MODE_PIN0; /* Clock Mode 3 */
value |= MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value &= ~MODE_PIN4; /* 16-bit Area0 bus width */
value |= MODE_PIN5;
value |= MODE_PIN6;
value |= MODE_PIN7; /* Normal mode */
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Crystal resonator */
value &= ~MODE_PIN10; /* 29-bit address mode */
value &= ~MODE_PIN11; /* PCI-E Root port */
value &= ~MODE_PIN12; /* 4 lane + 1 lane */
value |= MODE_PIN13; /* AUD Enable */
value &= ~MODE_PIN14; /* Normal Operation */
return value;
}
static int apsh4ad0a_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4ad0a_setup(char **cmdline_p)
{
pr_info("Alpha Project AP-SH4AD-0A support:\n");
}
static void __init apsh4ad0a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4ad0a __initmv = {
.mv_name = "AP-SH4AD-0A",
.mv_setup = apsh4ad0a_setup,
.mv_mode_pins = apsh4ad0a_mode_pins,
.mv_clk_init = apsh4ad0a_clk_init,
.mv_init_irq = apsh4ad0a_init_irq,
};
| linux-master | arch/sh/boards/board-apsh4ad0a.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas Europe EDOSK7760 Board Support
*
* Copyright (C) 2008 SPES Societa' Progettazione Elettronica e Software Ltd.
* Author: Luca Santini <[email protected]>
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/i2c.h>
#include <linux/mtd/physmap.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/addrspace.h>
#include <asm/delay.h>
#include <asm/i2c-sh7760.h>
#include <linux/sizes.h>
/* Bus state controller registers for CS4 area */
#define BSC_CS4BCR 0xA4FD0010
#define BSC_CS4WCR 0xA4FD0030
#define SMC_IOBASE 0xA2000000
#define SMC_IO_OFFSET 0x300
#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
/* NOR flash */
static struct mtd_partition edosk7760_nor_flash_partitions[] = {
{
.name = "bootloader",
.offset = 0,
.size = SZ_256K,
.mask_flags = MTD_WRITEABLE, /* Read-only */
}, {
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_2M,
}, {
.name = "fs",
.offset = MTDPART_OFS_APPEND,
.size = (26 << 20),
}, {
.name = "other",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data edosk7760_nor_flash_data = {
.width = 4,
.parts = edosk7760_nor_flash_partitions,
.nr_parts = ARRAY_SIZE(edosk7760_nor_flash_partitions),
};
static struct resource edosk7760_nor_flash_resources[] = {
[0] = {
.name = "NOR Flash",
.start = 0x00000000,
.end = 0x00000000 + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device edosk7760_nor_flash_device = {
.name = "physmap-flash",
.resource = edosk7760_nor_flash_resources,
.num_resources = ARRAY_SIZE(edosk7760_nor_flash_resources),
.dev = {
.platform_data = &edosk7760_nor_flash_data,
},
};
/* i2c initialization functions */
static struct sh7760_i2c_platdata i2c_pd = {
.speed_khz = 400,
};
static struct resource sh7760_i2c1_res[] = {
{
.start = SH7760_I2C1_MMIO,
.end = SH7760_I2C1_MMIOEND,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x9e0),
.end = evt2irq(0x9e0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sh7760_i2c1_dev = {
.dev = {
.platform_data = &i2c_pd,
},
.name = SH7760_I2C_DEVNAME,
.id = 1,
.resource = sh7760_i2c1_res,
.num_resources = ARRAY_SIZE(sh7760_i2c1_res),
};
static struct resource sh7760_i2c0_res[] = {
{
.start = SH7760_I2C0_MMIO,
.end = SH7760_I2C0_MMIOEND,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x9c0),
.end = evt2irq(0x9c0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sh7760_i2c0_dev = {
.dev = {
.platform_data = &i2c_pd,
},
.name = SH7760_I2C_DEVNAME,
.id = 0,
.resource = sh7760_i2c0_res,
.num_resources = ARRAY_SIZE(sh7760_i2c0_res),
};
/* eth initialization functions */
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
};
static struct resource smc91x_res[] = {
[0] = {
.start = SMC_IOADDR,
.end = SMC_IOADDR + SZ_32 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x2a0),
.end = evt2irq(0x2a0),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device smc91x_dev = {
.name = "smc91x",
.id = -1,
.num_resources = ARRAY_SIZE(smc91x_res),
.resource = smc91x_res,
.dev = {
.platform_data = &smc91x_info,
},
};
/* platform init code */
static struct platform_device *edosk7760_devices[] __initdata = {
&smc91x_dev,
&edosk7760_nor_flash_device,
&sh7760_i2c0_dev,
&sh7760_i2c1_dev,
};
static int __init init_edosk7760_devices(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ);
return platform_add_devices(edosk7760_devices,
ARRAY_SIZE(edosk7760_devices));
}
device_initcall(init_edosk7760_devices);
/*
* The Machine Vector
*/
struct sh_machine_vector mv_edosk7760 __initmv = {
.mv_name = "EDOSK7760",
};
| linux-master | arch/sh/boards/board-edosk7760.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Technology Corp. SH7786 Urquell Support.
*
* Copyright (C) 2008 Kuninori Morimoto <[email protected]>
* Copyright (C) 2009, 2010 Paul Mundt
*
* Based on board-sh7785lcr.c
* Copyright (C) 2008 Yoshihiro Shimoda
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/smc91x.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/sh_intc.h>
#include <mach/urquell.h>
#include <cpu/sh7786.h>
#include <asm/heartbeat.h>
#include <linux/sizes.h>
#include <asm/smp-ops.h>
/*
* bit 1234 5678
*----------------------------
* SW1 0101 0010 -> Pck 33MHz version
* (1101 0010) Pck 66MHz version
* SW2 0x1x xxxx -> little endian
* 29bit mode
* SW47 0001 1000 -> CS0 : on-board flash
* CS1 : SRAM, registers, LAN, PCMCIA
* 38400 bps for SCIF1
*
* Address
* 0x00000000 - 0x04000000 (CS0) Nor Flash
* 0x04000000 - 0x04200000 (CS1) SRAM
* 0x05000000 - 0x05800000 (CS1) on board register
* 0x05800000 - 0x06000000 (CS1) LAN91C111
* 0x06000000 - 0x06400000 (CS1) PCMCIA
* 0x08000000 - 0x10000000 (CS2-CS3) DDR3
* 0x10000000 - 0x14000000 (CS4) PCIe
* 0x14000000 - 0x14800000 (CS5) Core0 LRAM/URAM
* 0x14800000 - 0x15000000 (CS5) Core1 LRAM/URAM
* 0x18000000 - 0x1C000000 (CS6) ATA/NAND-Flash
* 0x1C000000 - (CS7) SH7786 Control register
*/
/* HeartBeat */
static struct resource heartbeat_resource = {
.start = BOARDREG(SLEDR),
.end = BOARDREG(SLEDR),
.flags = IORESOURCE_MEM | IORESOURCE_MEM_16BIT,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.num_resources = 1,
.resource = &heartbeat_resource,
};
/* LAN91C111 */
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
static struct resource smc91x_eth_resources[] = {
[0] = {
.name = "SMC91C111" ,
.start = 0x05800300,
.end = 0x0580030f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x360),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device smc91x_eth_device = {
.name = "smc91x",
.num_resources = ARRAY_SIZE(smc91x_eth_resources),
.resource = smc91x_eth_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
/* Nor Flash */
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
.offset = 0x00000000,
.size = SZ_512K,
.mask_flags = MTD_WRITEABLE, /* Read-only */
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = SZ_512K,
.mask_flags = MTD_WRITEABLE, /* Read-only */
},
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = SZ_4M,
},
{
.name = "data",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data nor_flash_data = {
.width = 2,
.parts = nor_flash_partitions,
.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
[0] = {
.start = NOR_FLASH_ADDR,
.end = NOR_FLASH_ADDR + NOR_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device nor_flash_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &nor_flash_data,
},
.num_resources = ARRAY_SIZE(nor_flash_resources),
.resource = nor_flash_resources,
};
static struct platform_device *urquell_devices[] __initdata = {
&heartbeat_device,
&smc91x_eth_device,
&nor_flash_device,
};
static int __init urquell_devices_setup(void)
{
/* USB */
gpio_request(GPIO_FN_USB_OVC0, NULL);
gpio_request(GPIO_FN_USB_PENC0, NULL);
/* enable LAN */
__raw_writew(__raw_readw(UBOARDREG(IRL2MSKR)) & ~0x00000001,
UBOARDREG(IRL2MSKR));
return platform_add_devices(urquell_devices,
ARRAY_SIZE(urquell_devices));
}
device_initcall(urquell_devices_setup);
static void urquell_power_off(void)
{
__raw_writew(0xa5a5, UBOARDREG(SRSTR));
}
static void __init urquell_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRL3210_MASK);
}
static int urquell_mode_pins(void)
{
return __raw_readw(UBOARDREG(MDSWMR));
}
static int urquell_clk_init(void)
{
struct clk *clk;
int ret;
/*
* Only handle the EXTAL case; anyone interfacing a crystal
* resonator will need to provide their own input clock.
*/
if (test_mode_pin(MODE_PIN9))
return -EINVAL;
clk = clk_get(NULL, "extal");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init urquell_setup(char **cmdline_p)
{
printk(KERN_INFO "Renesas Technology Corp. Urquell support.\n");
pm_power_off = urquell_power_off;
register_smp_ops(&shx3_smp_ops);
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_urquell __initmv = {
.mv_name = "Urquell",
.mv_setup = urquell_setup,
.mv_init_irq = urquell_init_irq,
.mv_mode_pins = urquell_mode_pins,
.mv_clk_init = urquell_clk_init,
};
| linux-master | arch/sh/boards/board-urquell.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH-2007 board support.
*
* Copyright (C) 2003, 2004 SUGIOKA Toshinobu
* Copyright (C) 2010 Hitoshi Mitake <[email protected]>
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/io.h>
#include <asm/machvec.h>
#include <mach/sh2007.h>
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
REGULATOR_SUPPLY("vddvario", "smsc911x.1"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.1"),
};
static struct smsc911x_platform_config smc911x_info = {
.flags = SMSC911X_USE_32BIT,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
};
static struct resource smsc9118_0_resources[] = {
[0] = {
.start = SMC0_BASE,
.end = SMC0_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x240),
.end = evt2irq(0x240),
.flags = IORESOURCE_IRQ,
}
};
static struct resource smsc9118_1_resources[] = {
[0] = {
.start = SMC1_BASE,
.end = SMC1_BASE + 0xff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x280),
.end = evt2irq(0x280),
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device smsc9118_0_device = {
.name = "smsc911x",
.id = 0,
.num_resources = ARRAY_SIZE(smsc9118_0_resources),
.resource = smsc9118_0_resources,
.dev = {
.platform_data = &smc911x_info,
},
};
static struct platform_device smsc9118_1_device = {
.name = "smsc911x",
.id = 1,
.num_resources = ARRAY_SIZE(smsc9118_1_resources),
.resource = smsc9118_1_resources,
.dev = {
.platform_data = &smc911x_info,
},
};
static struct resource cf_resources[] = {
[0] = {
.start = CF_BASE + CF_OFFSET,
.end = CF_BASE + CF_OFFSET + 0x0f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CF_BASE + CF_OFFSET + 0x206,
.end = CF_BASE + CF_OFFSET + 0x20f,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = evt2irq(0x2c0),
.end = evt2irq(0x2c0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cf_device = {
.name = "pata_platform",
.id = 0,
.num_resources = ARRAY_SIZE(cf_resources),
.resource = cf_resources,
};
static struct platform_device *sh2007_devices[] __initdata = {
&smsc9118_0_device,
&smsc9118_1_device,
&cf_device,
};
static int __init sh2007_io_init(void)
{
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
platform_add_devices(sh2007_devices, ARRAY_SIZE(sh2007_devices));
return 0;
}
subsys_initcall(sh2007_io_init);
static void __init sh2007_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
/*
* Initialize the board
*/
static void __init sh2007_setup(char **cmdline_p)
{
pr_info("SH-2007 Setup...");
/* setup wait control registers for area 5 */
__raw_writel(CS5BCR_D, CS5BCR);
__raw_writel(CS5WCR_D, CS5WCR);
__raw_writel(CS5PCR_D, CS5PCR);
pr_cont(" done.\n");
}
/*
* The Machine Vector
*/
struct sh_machine_vector mv_sh2007 __initmv = {
.mv_setup = sh2007_setup,
.mv_name = "sh2007",
.mv_init_irq = sh2007_init_irq,
};
| linux-master | arch/sh/boards/board-sh2007.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SH generic board support, using device tree
*
* Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
*/
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/clocksource.h>
#include <linux/irqchip.h>
#include <asm/machvec.h>
#include <asm/rtc.h>
#ifdef CONFIG_SMP
static void dummy_smp_setup(void)
{
}
static void dummy_prepare_cpus(unsigned int max_cpus)
{
}
static void dummy_start_cpu(unsigned int cpu, unsigned long entry_point)
{
}
static unsigned int dummy_smp_processor_id(void)
{
return 0;
}
static void dummy_send_ipi(unsigned int cpu, unsigned int message)
{
}
static struct plat_smp_ops dummy_smp_ops = {
.smp_setup = dummy_smp_setup,
.prepare_cpus = dummy_prepare_cpus,
.start_cpu = dummy_start_cpu,
.smp_processor_id = dummy_smp_processor_id,
.send_ipi = dummy_send_ipi,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
};
extern const struct of_cpu_method __cpu_method_of_table[];
const struct of_cpu_method __cpu_method_of_table_sentinel
__section("__cpu_method_of_table_end");
static void sh_of_smp_probe(void)
{
struct device_node *np;
const char *method = NULL;
const struct of_cpu_method *m = __cpu_method_of_table;
pr_info("SH generic board support: scanning for cpus\n");
init_cpu_possible(cpumask_of(0));
for_each_of_cpu_node(np) {
u64 id = of_get_cpu_hwid(np, 0);
if (id < NR_CPUS) {
if (!method)
of_property_read_string(np, "enable-method", &method);
set_cpu_possible(id, true);
set_cpu_present(id, true);
__cpu_number_map[id] = id;
__cpu_logical_map[id] = id;
}
}
if (!method) {
np = of_find_node_by_name(NULL, "cpus");
of_property_read_string(np, "enable-method", &method);
of_node_put(np);
}
pr_info("CPU enable method: %s\n", method);
if (method)
for (; m->method; m++)
if (!strcmp(m->method, method)) {
register_smp_ops(m->ops);
return;
}
register_smp_ops(&dummy_smp_ops);
}
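/*
 * A minimal sketch of the device tree input this scan expects; the node
 * layout is illustrative only, and the enable-method string below is a
 * hypothetical placeholder, not a real binding:
 *
 *	cpus {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		enable-method = "vendor,sh-boot-method";
 *
 *		cpu@0 {
 *			device_type = "cpu";
 *			reg = <0>;
 *		};
 *	};
 *
 * Each cpu node's reg value feeds of_get_cpu_hwid(), and the
 * enable-method string is matched against the __cpu_method_of_table
 * entries to select the plat_smp_ops.
 */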
#else
static void sh_of_smp_probe(void)
{
}
#endif
static void noop(void)
{
}
static int noopi(void)
{
return 0;
}
static void __init sh_of_mem_reserve(void)
{
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
}
static void __init sh_of_setup(char **cmdline_p)
{
struct device_node *root;
sh_mv.mv_name = "Unknown SH model";
root = of_find_node_by_path("/");
if (root) {
of_property_read_string(root, "model", &sh_mv.mv_name);
of_node_put(root);
}
sh_of_smp_probe();
}
static int sh_of_irq_demux(int irq)
{
/* FIXME: eventually this should not be used at all;
 * the interrupt controller should use set_handle_irq(). */
return irq;
}
static void __init sh_of_init_irq(void)
{
pr_info("SH generic board support: scanning for interrupt controllers\n");
irqchip_init();
}
static int __init sh_of_clk_init(void)
{
#ifdef CONFIG_COMMON_CLK
/* Only built once the platform has moved to the COMMON_CLK framework. */
pr_info("SH generic board support: scanning for clk providers\n");
of_clk_init(NULL);
#endif
return 0;
}
static struct sh_machine_vector __initmv sh_of_generic_mv = {
.mv_setup = sh_of_setup,
.mv_name = "devicetree", /* replaced by DT root's model */
.mv_irq_demux = sh_of_irq_demux,
.mv_init_irq = sh_of_init_irq,
.mv_clk_init = sh_of_clk_init,
.mv_mode_pins = noopi,
.mv_mem_init = noop,
.mv_mem_reserve = sh_of_mem_reserve,
};
struct sh_clk_ops;
void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
}
void __init __weak plat_irq_setup(void)
{
}
| linux-master | arch/sh/boards/of-generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/boards/board-magicpanelr2.c
*
* Copyright (C) 2007 Markus Brunner, Mark Jonas
*
* Magic Panel Release 2 board setup
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/map.h>
#include <linux/sh_intc.h>
#include <mach/magicpanelr2.h>
#include <asm/heartbeat.h>
#include <cpu/gpio.h>
#include <cpu/sh7720.h>
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
REGULATOR_SUPPLY("vdd33a", "smsc911x"),
};
#define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL)
/* Wait until reset finished. Timeout is 100ms. */
static int __init ethernet_reset_finished(void)
{
int i;
if (LAN9115_READY)
return 1;
for (i = 0; i < 10; ++i) {
mdelay(10);
if (LAN9115_READY)
return 1;
}
return 0;
}
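/*
 * The poll above could also be written with the iopoll helpers. This is
 * a sketch only -- it assumes the status register has been ioremap()ed
 * to a hypothetical "lan9115_base" pointer instead of the raw address
 * used by LAN9115_READY:
 *
 *	u32 val;
 *
 *	return readl_poll_timeout(lan9115_base + 0x84, val,
 *				  val & 0x1, 1000, 100000) == 0;
 *
 * readl_poll_timeout() is declared in <linux/iopoll.h>; the last two
 * arguments are the sleep and timeout periods in microseconds, so this
 * matches the 100 ms budget of the loop above.
 */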
static void __init reset_ethernet(void)
{
/* PMDR: LAN_RESET=on */
CLRBITS_OUTB(0x10, PORT_PMDR);
udelay(200);
/* PMDR: LAN_RESET=off */
SETBITS_OUTB(0x10, PORT_PMDR);
}
static void __init setup_chip_select(void)
{
/* CS2: LAN (0x08000000 - 0x0bffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x36db0400, CS2BCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x000003c0, CS2WCR);
/* CS4: CAN1 (0xb0000000 - 0xb3ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS4BCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS4WCR);
/* CS5a: CAN2 (0xb4000000 - 0xb5ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS5ABCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS5AWCR);
/* CS5b: CAN3 (0xb6000000 - 0xb7ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS5BBCR);
/* (SW:1.5 WR:3 HW:1.5), ext. wait */
__raw_writel(0x00100981, CS5BWCR);
/* CS6a: Rotary (0xb8000000 - 0xb9ffffff) */
/* no idle cycles, normal space, 8 bit data bus */
__raw_writel(0x00000200, CS6ABCR);
/* (SW:1.5 WR:3 HW:1.5), no ext. wait */
__raw_writel(0x001009C1, CS6AWCR);
}
static void __init setup_port_multiplexing(void)
{
/* A7 GPO(LED8); A6 GPO(LED7); A5 GPO(LED6); A4 GPO(LED5);
* A3 GPO(LED4); A2 GPO(LED3); A1 GPO(LED2); A0 GPO(LED1);
*/
__raw_writew(0x5555, PORT_PACR); /* 01 01 01 01 01 01 01 01 */
/* B7 GPO(RST4); B6 GPO(RST3); B5 GPO(RST2); B4 GPO(RST1);
* B3 GPO(PB3); B2 GPO(PB2); B1 GPO(PB1); B0 GPO(PB0);
*/
__raw_writew(0x5555, PORT_PBCR); /* 01 01 01 01 01 01 01 01 */
/* C7 GPO(PC7); C6 GPO(PC6); C5 GPO(PC5); C4 GPO(PC4);
* C3 LCD_DATA3; C2 LCD_DATA2; C1 LCD_DATA1; C0 LCD_DATA0;
*/
__raw_writew(0x5500, PORT_PCCR); /* 01 01 01 01 00 00 00 00 */
/* D7 GPO(PD7); D6 GPO(PD6); D5 GPO(PD5); D4 GPO(PD4);
* D3 GPO(PD3); D2 GPO(PD2); D1 GPO(PD1); D0 GPO(PD0);
*/
__raw_writew(0x5555, PORT_PDCR); /* 01 01 01 01 01 01 01 01 */
/* E7 (x); E6 GPI(nu); E5 GPI(nu); E4 LCD_M_DISP;
* E3 LCD_CL1; E2 LCD_CL2; E1 LCD_DON; E0 LCD_FLM;
*/
__raw_writew(0x3C00, PORT_PECR); /* 00 11 11 00 00 00 00 00 */
/* F7 (x); F6 DA1(VLCD); F5 DA0(nc); F4 AN3;
* F3 AN2(MID_AD); F2 AN1(EARTH_AD); F1 AN0(TEMP); F0 GPI+(nc);
*/
__raw_writew(0x0002, PORT_PFCR); /* 00 00 00 00 00 00 00 10 */
/* G7 (x); G6 IRQ5(TOUCH_BUSY); G5 IRQ4(TOUCH_IRQ); G4 GPI(KEY2);
* G3 GPI(KEY1); G2 GPO(LED11); G1 GPO(LED10); G0 GPO(LED9);
*/
__raw_writew(0x03D5, PORT_PGCR); /* 00 00 00 11 11 01 01 01 */
/* H7 (x); H6 /RAS(BRAS); H5 /CAS(BCAS); H4 CKE(BCKE);
* H3 GPO(EARTH_OFF); H2 GPO(EARTH_TEST); H1 USB2_PWR; H0 USB1_PWR;
*/
__raw_writew(0x0050, PORT_PHCR); /* 00 00 00 00 01 01 00 00 */
/* J7 (x); J6 AUDCK; J5 ASEBRKAK; J4 AUDATA3;
* J3 AUDATA2; J2 AUDATA1; J1 AUDATA0; J0 AUDSYNC;
*/
__raw_writew(0x0000, PORT_PJCR); /* 00 00 00 00 00 00 00 00 */
/* K7 (x); K6 (x); K5 (x); K4 (x);
* K3 PINT7(/PWR2); K2 PINT6(/PWR1); K1 PINT5(nu); K0 PINT4(FLASH_READY)
*/
__raw_writew(0x00FF, PORT_PKCR); /* 00 00 00 00 11 11 11 11 */
/* L7 TRST; L6 TMS; L5 TDO; L4 TDI;
* L3 TCK; L2 (x); L1 (x); L0 (x);
*/
__raw_writew(0x0000, PORT_PLCR); /* 00 00 00 00 00 00 00 00 */
/* M7 GPO(CURRENT_SINK); M6 GPO(PWR_SWITCH); M5 GPO(LAN_SPEED);
* M4 GPO(LAN_RESET); M3 GPO(BUZZER); M2 GPO(LCD_BL);
* M1 CS5B(CAN3_CS); M0 GPI+(nc);
*/
__raw_writew(0x5552, PORT_PMCR); /* 01 01 01 01 01 01 00 10 */
/* CURRENT_SINK=off, PWR_SWITCH=off, LAN_SPEED=100MBit,
* LAN_RESET=off, BUZZER=off, LCD_BL=off
*/
#if CONFIG_SH_MAGIC_PANEL_R2_VERSION == 2
__raw_writeb(0x30, PORT_PMDR);
#elif CONFIG_SH_MAGIC_PANEL_R2_VERSION == 3
__raw_writeb(0xF0, PORT_PMDR);
#else
#error Unknown revision of PLATFORM_MP_R2
#endif
/* P7 (x); P6 (x); P5 (x);
* P4 GPO(nu); P3 IRQ3(LAN_IRQ); P2 IRQ2(CAN3_IRQ);
* P1 IRQ1(CAN2_IRQ); P0 IRQ0(CAN1_IRQ)
*/
__raw_writew(0x0100, PORT_PPCR); /* 00 00 00 01 00 00 00 00 */
__raw_writeb(0x10, PORT_PPDR);
/* R7 A25; R6 A24; R5 A23; R4 A22;
* R3 A21; R2 A20; R1 A19; R0 A0;
*/
gpio_request(GPIO_FN_A25, NULL);
gpio_request(GPIO_FN_A24, NULL);
gpio_request(GPIO_FN_A23, NULL);
gpio_request(GPIO_FN_A22, NULL);
gpio_request(GPIO_FN_A21, NULL);
gpio_request(GPIO_FN_A20, NULL);
gpio_request(GPIO_FN_A19, NULL);
gpio_request(GPIO_FN_A0, NULL);
/* S7 (x); S6 (x); S5 (x); S4 GPO(EEPROM_CS2);
* S3 GPO(EEPROM_CS1); S2 SIOF0_TXD; S1 SIOF0_RXD; S0 SIOF0_SCK;
*/
__raw_writew(0x0140, PORT_PSCR); /* 00 00 00 01 01 00 00 00 */
/* T7 (x); T6 (x); T5 (x); T4 COM1_CTS;
* T3 COM1_RTS; T2 COM1_TXD; T1 COM1_RXD; T0 GPO(WDOG)
*/
__raw_writew(0x0001, PORT_PTCR); /* 00 00 00 00 00 00 00 01 */
/* U7 (x); U6 (x); U5 (x); U4 GPI+(/AC_FAULT);
* U3 GPO(TOUCH_CS); U2 TOUCH_TXD; U1 TOUCH_RXD; U0 TOUCH_SCK;
*/
__raw_writew(0x0240, PORT_PUCR); /* 00 00 00 10 01 00 00 00 */
/* V7 (x); V6 (x); V5 (x); V4 GPO(MID2);
* V3 GPO(MID1); V2 CARD_TxD; V1 CARD_RxD; V0 GPI+(/BAT_FAULT);
*/
__raw_writew(0x0142, PORT_PVCR); /* 00 00 00 01 01 00 00 10 */
}
static void __init mpr2_setup(char **cmdline_p)
{
/* set Pin Select Register A:
* /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2,
* /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND
*/
__raw_writew(0xAABC, PORT_PSELA);
/* set Pin Select Register B:
* /SCIF0_RTS, /SCIF0_CTS, LCD_VCPWC,
* LCD_VEPWC, IIC_SDA, IIC_SCL, Reserved
*/
__raw_writew(0x3C00, PORT_PSELB);
/* set Pin Select Register C:
* SIOF1_SCK, SIOF1_RxD, SCIF1_RxD, SCIF1_TxD, Reserved
*/
__raw_writew(0x0000, PORT_PSELC);
/* set Pin Select Register D: Reserved, SIOF1_TxD, Reserved, SIOF1_MCLK,
* Reserved, SIOF1_SYNC, Reserved, SCIF1_SCK, Reserved
*/
__raw_writew(0x0000, PORT_PSELD);
/* set USB TxRx Control: Reserved, DRV, Reserved, USB_TRANS, USB_SEL */
__raw_writew(0x0101, PORT_UTRCTL);
/* set USB Clock Control: USSCS, USSTB, Reserved (HighByte always A5) */
__raw_writew(0xA5C0, PORT_UCLKCR_W);
setup_chip_select();
setup_port_multiplexing();
reset_ethernet();
printk(KERN_INFO "Magic Panel Release 2 A.%i\n",
CONFIG_SH_MAGIC_PANEL_R2_VERSION);
if (ethernet_reset_finished() == 0)
printk(KERN_WARNING "Ethernet not ready\n");
}
static struct resource smsc911x_resources[] = {
[0] = {
.start = 0xa8000000,
.end = 0xabffffff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x660),
.end = evt2irq(0x660),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_32BIT,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct resource heartbeat_resources[] = {
[0] = {
.start = PA_LED,
.end = PA_LED,
.flags = IORESOURCE_MEM,
},
};
static struct heartbeat_data heartbeat_data = {
.flags = HEARTBEAT_INVERTED,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.dev = {
.platform_data = &heartbeat_data,
},
.num_resources = ARRAY_SIZE(heartbeat_resources),
.resource = heartbeat_resources,
};
static struct mtd_partition mpr2_partitions[] = {
/* Reserved for bootloader, read-only */
{
.name = "Bootloader",
.offset = 0x00000000UL,
.size = MPR2_MTD_BOOTLOADER_SIZE,
.mask_flags = MTD_WRITEABLE,
},
/* Reserved for kernel image */
{
.name = "Kernel",
.offset = MTDPART_OFS_NXTBLK,
.size = MPR2_MTD_KERNEL_SIZE,
},
/* Rest is used for Flash FS */
{
.name = "Flash_FS",
.offset = MTDPART_OFS_NXTBLK,
.size = MTDPART_SIZ_FULL,
}
};
static struct physmap_flash_data flash_data = {
.parts = mpr2_partitions,
.nr_parts = ARRAY_SIZE(mpr2_partitions),
.width = 2,
};
static struct resource flash_resource = {
.start = 0x00000000,
.end = 0x2000000UL - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device = {
.name = "physmap-flash",
.id = -1,
.resource = &flash_resource,
.num_resources = 1,
.dev = {
.platform_data = &flash_data,
},
};
/*
* Add all resources to the platform_device
*/
static struct platform_device *mpr2_devices[] __initdata = {
&heartbeat_device,
&smsc911x_device,
&flash_device,
};
static int __init mpr2_devices_setup(void)
{
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices));
}
device_initcall(mpr2_devices_setup);
/*
* Initialize IRQ setting
*/
static void __init init_mpr2_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ); /* install handlers for IRQ0-5 */
irq_set_irq_type(evt2irq(0x600), IRQ_TYPE_LEVEL_LOW); /* IRQ0 CAN1 */
irq_set_irq_type(evt2irq(0x620), IRQ_TYPE_LEVEL_LOW); /* IRQ1 CAN2 */
irq_set_irq_type(evt2irq(0x640), IRQ_TYPE_LEVEL_LOW); /* IRQ2 CAN3 */
irq_set_irq_type(evt2irq(0x660), IRQ_TYPE_LEVEL_LOW); /* IRQ3 SMSC9115 */
irq_set_irq_type(evt2irq(0x680), IRQ_TYPE_EDGE_RISING); /* IRQ4 touchscreen */
irq_set_irq_type(evt2irq(0x6a0), IRQ_TYPE_EDGE_FALLING); /* IRQ5 touchscreen */
intc_set_priority(evt2irq(0x600), 13); /* IRQ0 CAN1 */
intc_set_priority(evt2irq(0x620), 13); /* IRQ1 CAN2 */
intc_set_priority(evt2irq(0x640), 13); /* IRQ2 CAN3 */
intc_set_priority(evt2irq(0x660), 6); /* IRQ3 SMSC9115 */
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_mpr2 __initmv = {
.mv_name = "mpr2",
.mv_setup = mpr2_setup,
.mv_init_irq = init_mpr2_IRQ,
};
| linux-master | arch/sh/boards/board-magicpanelr2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2002 David McCullough <[email protected]>
* Copyright (C) 2003 Paul Mundt <[email protected]>
*
* Based on files with the following comments:
*
* Copyright (C) 2000 Kazumoto Kojima
*
* Modified for 7751 Solution Engine by
* Ian da Silva and Jeremy Siegel, 2001.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <asm/machvec.h>
#include <mach/secureedge5410.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <cpu/timer.h>
unsigned short secureedge5410_ioport;
/*
* EraseConfig handling functions
*/
static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
{
printk("SnapGear: erase switch interrupt!\n");
return IRQ_HANDLED;
}
static int __init eraseconfig_init(void)
{
unsigned int irq = evt2irq(0x240);
printk("SnapGear: EraseConfig init\n");
/* Setup "EraseConfig" switch on external IRQ 0 */
if (request_irq(irq, eraseconfig_interrupt, 0, "Erase Config", NULL))
printk("SnapGear: failed to register IRQ%d for Reset witch\n",
irq);
else
printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
irq);
return 0;
}
device_initcall(eraseconfig_init);
/*
* Initialize IRQ setting
*
* IRL0 = erase switch
* IRL1 = eth0
* IRL2 = eth1
* IRL3 = crypto
*/
static void __init init_snapgear_IRQ(void)
{
printk("Setup SnapGear IRQ/IPR ...\n");
/* enable individual interrupt mode for externals */
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_snapgear __initmv = {
.mv_name = "SnapGear SecureEdge5410",
.mv_init_irq = init_snapgear_IRQ,
};
| linux-master | arch/sh/boards/board-secureedge5410.c |
// SPDX-License-Identifier: GPL-2.0
/*
* June 2006 Steve Glendinning <[email protected]>
*
* Polaris-specific resource declaration
*
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/heartbeat.h>
#include <cpu/gpio.h>
#include <mach-se/mach/se.h>
#define BCR2 (0xFFFFFF62)
#define WCR2 (0xFFFFFF66)
#define AREA5_WAIT_CTRL (0x1C00)
#define WAIT_STATES_10 (0x7)
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = PA_EXT5,
.end = PA_EXT5 + 0x1fff,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = IRQ0_IRQ,
.end = IRQ0_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_32BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = 0,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 };
static struct heartbeat_data heartbeat_data = {
.bit_pos = heartbeat_bit_pos,
.nr_bits = ARRAY_SIZE(heartbeat_bit_pos),
};
static struct resource heartbeat_resource = {
.start = PORT_PCDR,
.end = PORT_PCDR,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.dev = {
.platform_data = &heartbeat_data,
},
.num_resources = 1,
.resource = &heartbeat_resource,
};
static struct platform_device *polaris_devices[] __initdata = {
&smsc911x_device,
&heartbeat_device,
};
static int __init polaris_initialise(void)
{
u16 wcr, bcr;
printk(KERN_INFO "Configuring Polaris external bus\n");
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
/* Configure area 5 with 10 wait states (WAIT_STATES_10) */
wcr = __raw_readw(WCR2);
wcr &= (~AREA5_WAIT_CTRL);
wcr |= (WAIT_STATES_10 << 10);
__raw_writew(wcr, WCR2);
/* Configure area 5 for 32-bit access */
bcr = __raw_readw(BCR2);
bcr |= 1 << 10;
__raw_writew(bcr, BCR2);
return platform_add_devices(polaris_devices,
ARRAY_SIZE(polaris_devices));
}
arch_initcall(polaris_initialise);
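/*
 * A worked example of the WCR2 update in polaris_initialise() above.
 * AREA5_WAIT_CTRL (0x1C00) covers bits 12:10 of WCR2, so with a
 * hypothetical starting value:
 *
 *	wcr = 0xABCD
 *	wcr &= ~0x1C00    ->  0xA3CD  (area 5 wait field cleared)
 *	wcr |= 0x7 << 10  ->  0xBFCD  (field set to WAIT_STATES_10)
 *
 * All other bit fields of WCR2 are preserved.
 */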
static struct ipr_data ipr_irq_table[] = {
/* External IRQs */
{ IRQ0_IRQ, 0, 0, 1, }, /* IRQ0 */
{ IRQ1_IRQ, 0, 4, 1, }, /* IRQ1 */
};
static unsigned long ipr_offsets[] = {
INTC_IPRC
};
static struct ipr_desc ipr_irq_desc = {
.ipr_offsets = ipr_offsets,
.nr_offsets = ARRAY_SIZE(ipr_offsets),
.ipr_data = ipr_irq_table,
.nr_irqs = ARRAY_SIZE(ipr_irq_table),
.chip = {
.name = "sh7709-ext",
},
};
static void __init init_polaris_irq(void)
{
/* Disable all interrupts */
__raw_writew(0, BCR_ILCRA);
__raw_writew(0, BCR_ILCRB);
__raw_writew(0, BCR_ILCRC);
__raw_writew(0, BCR_ILCRD);
__raw_writew(0, BCR_ILCRE);
__raw_writew(0, BCR_ILCRF);
__raw_writew(0, BCR_ILCRG);
register_ipr_controller(&ipr_irq_desc);
}
static struct sh_machine_vector mv_polaris __initmv = {
.mv_name = "Polaris",
.mv_init_irq = init_polaris_irq,
};
| linux-master | arch/sh/boards/board-polaris.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Data Technology Inc. ESPT-GIGA board support
*
* Copyright (C) 2008, 2009 Renesas Solutions Corp.
* Copyright (C) 2008, 2009 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mtd/physmap.h>
#include <linux/io.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
#include <asm/machvec.h>
#include <linux/sizes.h>
/* NOR Flash */
static struct mtd_partition espt_nor_flash_partitions[] = {
{
.name = "U-Boot",
.offset = 0,
.size = (2 * SZ_128K),
.mask_flags = MTD_WRITEABLE, /* Read-only */
}, {
.name = "Linux-Kernel",
.offset = MTDPART_OFS_APPEND,
.size = (20 * SZ_128K),
}, {
.name = "Root Filesystem",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data espt_nor_flash_data = {
.width = 2,
.parts = espt_nor_flash_partitions,
.nr_parts = ARRAY_SIZE(espt_nor_flash_partitions),
};
static struct resource espt_nor_flash_resources[] = {
[0] = {
.name = "NOR Flash",
.start = 0,
.end = SZ_8M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device espt_nor_flash_device = {
.name = "physmap-flash",
.resource = espt_nor_flash_resources,
.num_resources = ARRAY_SIZE(espt_nor_flash_resources),
.dev = {
.platform_data = &espt_nor_flash_data,
},
};
/* SH-Ether */
static struct resource sh_eth_resources[] = {
{
.start = 0xFEE00800, /* use eth1 */
.end = 0xFEE00F7C - 1,
.flags = IORESOURCE_MEM,
}, {
.start = 0xFEE01800, /* TSU */
.end = 0xFEE01FFF,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x920), /* irq number */
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device espt_eth_device = {
.name = "sh7763-gether",
.resource = sh_eth_resources,
.num_resources = ARRAY_SIZE(sh_eth_resources),
.dev = {
.platform_data = &sh7763_eth_pdata,
},
};
static struct platform_device *espt_devices[] __initdata = {
&espt_nor_flash_device,
&espt_eth_device,
};
static int __init espt_devices_setup(void)
{
return platform_add_devices(espt_devices,
ARRAY_SIZE(espt_devices));
}
device_initcall(espt_devices_setup);
static struct sh_machine_vector mv_espt __initmv = {
.mv_name = "ESPT-GIGA",
};
| linux-master | arch/sh/boards/board-espt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Technology Corp. R0P7785LC0011RL Support.
*
* Copyright (C) 2008 Yoshihiro Shimoda
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
#include <linux/fb.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/platform_data/i2c-pca-platform.h>
#include <linux/i2c-algo-pca.h>
#include <linux/usb/r8a66597.h>
#include <linux/sh_intc.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/gpio/machine.h>
#include <mach/sh7785lcr.h>
#include <cpu/sh7785.h>
#include <asm/heartbeat.h>
#include <asm/clock.h>
#include <asm/bl_bit.h>
/*
* NOTE: This board has 2 physical memory maps.
* Please look at mach/sh7785lcr.h or the hardware manual.
*/
static struct resource heartbeat_resource = {
.start = PLD_LEDCR,
.end = PLD_LEDCR,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.num_resources = 1,
.resource = &heartbeat_resource,
};
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
},
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "data",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data nor_flash_data = {
.width = 4,
.parts = nor_flash_partitions,
.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
[0] = {
.start = NOR_FLASH_ADDR,
.end = NOR_FLASH_ADDR + NOR_FLASH_SIZE - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device nor_flash_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &nor_flash_data,
},
.num_resources = ARRAY_SIZE(nor_flash_resources),
.resource = nor_flash_resources,
};
static struct r8a66597_platdata r8a66597_data = {
.xtal = R8A66597_PLATDATA_XTAL_12MHZ,
.vif = 1,
};
static struct resource r8a66597_usb_host_resources[] = {
[0] = {
.start = R8A66597_ADDR,
.end = R8A66597_ADDR + R8A66597_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x240),
.end = evt2irq(0x240),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
static struct platform_device r8a66597_usb_host_device = {
.name = "r8a66597_hcd",
.id = -1,
.dev = {
.dma_mask = NULL,
.coherent_dma_mask = 0xffffffff,
.platform_data = &r8a66597_data,
},
.num_resources = ARRAY_SIZE(r8a66597_usb_host_resources),
.resource = r8a66597_usb_host_resources,
};
static struct resource sm501_resources[] = {
[0] = {
.start = SM107_MEM_ADDR,
.end = SM107_MEM_ADDR + SM107_MEM_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = SM107_REG_ADDR,
.end = SM107_REG_ADDR + SM107_REG_SIZE - 1,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = evt2irq(0x340),
.flags = IORESOURCE_IRQ,
},
};
static struct fb_videomode sm501_default_mode_crt = {
.pixclock = 35714, /* 28MHz */
.xres = 640,
.yres = 480,
.left_margin = 105,
.right_margin = 16,
.upper_margin = 33,
.lower_margin = 10,
.hsync_len = 39,
.vsync_len = 2,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};
static struct fb_videomode sm501_default_mode_pnl = {
.pixclock = 40000, /* 25MHz */
.xres = 640,
.yres = 480,
.left_margin = 2,
.right_margin = 16,
.upper_margin = 33,
.lower_margin = 10,
.hsync_len = 39,
.vsync_len = 2,
.sync = 0,
};
static struct sm501_platdata_fbsub sm501_pdata_fbsub_pnl = {
.def_bpp = 16,
.def_mode = &sm501_default_mode_pnl,
.flags = SM501FB_FLAG_USE_INIT_MODE |
SM501FB_FLAG_USE_HWCURSOR |
SM501FB_FLAG_USE_HWACCEL |
SM501FB_FLAG_DISABLE_AT_EXIT |
SM501FB_FLAG_PANEL_NO_VBIASEN,
};
static struct sm501_platdata_fbsub sm501_pdata_fbsub_crt = {
.def_bpp = 16,
.def_mode = &sm501_default_mode_crt,
.flags = SM501FB_FLAG_USE_INIT_MODE |
SM501FB_FLAG_USE_HWCURSOR |
SM501FB_FLAG_USE_HWACCEL |
SM501FB_FLAG_DISABLE_AT_EXIT,
};
static struct sm501_platdata_fb sm501_fb_pdata = {
.fb_route = SM501_FB_OWN,
.fb_crt = &sm501_pdata_fbsub_crt,
.fb_pnl = &sm501_pdata_fbsub_pnl,
};
static struct sm501_initdata sm501_initdata = {
.gpio_high = {
.set = 0x00001fe0,
.mask = 0x0,
},
.devices = 0,
.mclk = 84 * 1000000,
.m1xclk = 112 * 1000000,
};
static struct sm501_platdata sm501_platform_data = {
.init = &sm501_initdata,
.fb = &sm501_fb_pdata,
};
static struct platform_device sm501_device = {
.name = "sm501",
.id = -1,
.dev = {
.platform_data = &sm501_platform_data,
},
.num_resources = ARRAY_SIZE(sm501_resources),
.resource = sm501_resources,
};
static struct resource i2c_proto_resources[] = {
[0] = {
.start = PCA9564_PROTO_32BIT_ADDR,
.end = PCA9564_PROTO_32BIT_ADDR + PCA9564_SIZE - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
},
[1] = {
.start = evt2irq(0x380),
.end = evt2irq(0x380),
.flags = IORESOURCE_IRQ,
},
};
static struct resource i2c_resources[] = {
[0] = {
.start = PCA9564_ADDR,
.end = PCA9564_ADDR + PCA9564_SIZE - 1,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
},
[1] = {
.start = evt2irq(0x380),
.end = evt2irq(0x380),
.flags = IORESOURCE_IRQ,
},
};
static struct gpiod_lookup_table i2c_gpio_table = {
.dev_id = "i2c.0",
.table = {
GPIO_LOOKUP("pfc-sh7757", 0, "reset-gpios", GPIO_ACTIVE_LOW),
{ },
},
};
static struct i2c_pca9564_pf_platform_data i2c_platform_data = {
.i2c_clock_speed = I2C_PCA_CON_330kHz,
.timeout = HZ,
};
static struct platform_device i2c_device = {
.name = "i2c-pca-platform",
.id = -1,
.dev = {
.platform_data = &i2c_platform_data,
},
.num_resources = ARRAY_SIZE(i2c_resources),
.resource = i2c_resources,
};
static struct platform_device *sh7785lcr_devices[] __initdata = {
&heartbeat_device,
&nor_flash_device,
&r8a66597_usb_host_device,
&sm501_device,
&i2c_device,
};
static struct i2c_board_info __initdata sh7785lcr_i2c_devices[] = {
{
I2C_BOARD_INFO("r2025sd", 0x32),
},
};
static int __init sh7785lcr_devices_setup(void)
{
i2c_register_board_info(0, sh7785lcr_i2c_devices,
ARRAY_SIZE(sh7785lcr_i2c_devices));
if (mach_is_sh7785lcr_pt()) {
i2c_device.resource = i2c_proto_resources;
i2c_device.num_resources = ARRAY_SIZE(i2c_proto_resources);
}
gpiod_add_lookup_table(&i2c_gpio_table);
return platform_add_devices(sh7785lcr_devices,
ARRAY_SIZE(sh7785lcr_devices));
}
device_initcall(sh7785lcr_devices_setup);
/* Initialize IRQ setting */
void __init init_sh7785lcr_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}
static int sh7785lcr_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333333);
clk_put(clk);
return ret;
}
static void sh7785lcr_power_off(void)
{
unsigned char *p;
p = ioremap(PLD_POFCR, 1);
if (!p) {
printk(KERN_ERR "%s: ioremap error.\n", __func__);
return;
}
*p = 0x01;
iounmap(p);
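/* Set the SR.BL bit to block exceptions and spin until power is cut. */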
set_bl_bit();
while (1)
cpu_relax();
}
/* Initialize the board */
static void __init sh7785lcr_setup(char **cmdline_p)
{
void __iomem *sm501_reg;
printk(KERN_INFO "Renesas Technology Corp. R0P7785LC0011RL support.\n");
pm_power_off = sh7785lcr_power_off;
/* sm501 DRAM configuration */
sm501_reg = ioremap(SM107_REG_ADDR, SM501_DRAM_CONTROL);
if (!sm501_reg) {
printk(KERN_ERR "%s: ioremap error.\n", __func__);
return;
}
writel(0x000307c2, sm501_reg + SM501_DRAM_CONTROL);
iounmap(sm501_reg);
}
/* Return the board specific boot mode pin configuration */
static int sh7785lcr_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of S1 and S2.
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value |= MODE_PIN4; /* Clock Mode 16 */
value |= MODE_PIN5; /* 32-bit Area0 bus width */
value |= MODE_PIN6; /* 32-bit Area0 bus width */
value |= MODE_PIN7; /* Area 0 SRAM interface [fixed] */
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Master Mode */
value |= MODE_PIN14; /* No PLL step-up */
return value;
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_sh7785lcr __initmv = {
.mv_name = "SH7785LCR",
.mv_setup = sh7785lcr_setup,
.mv_clk_init = sh7785lcr_clk_init,
.mv_init_irq = init_sh7785lcr_IRQ,
.mv_mode_pins = sh7785lcr_mode_pins,
};
| linux-master | arch/sh/boards/board-sh7785lcr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R0P7757LC0012RL Support.
*
* Copyright (C) 2009 - 2010 Renesas Solutions Corp.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/io.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/sh_mmcif.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
#include <linux/usb/renesas_usbhs.h>
#include <cpu/sh7757.h>
#include <asm/heartbeat.h>
static struct resource heartbeat_resource = {
.start = 0xffec005c, /* PUDR */
.end = 0xffec005c,
.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
};
static unsigned char heartbeat_bit_pos[] = { 0, 1, 2, 3 };
static struct heartbeat_data heartbeat_data = {
.bit_pos = heartbeat_bit_pos,
.nr_bits = ARRAY_SIZE(heartbeat_bit_pos),
.flags = HEARTBEAT_INVERTED,
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.dev = {
.platform_data = &heartbeat_data,
},
.num_resources = 1,
.resource = &heartbeat_resource,
};
/* Fast Ethernet */
#define GBECONT 0xffc10100
#define GBECONT_RMII1 BIT(17)
#define GBECONT_RMII0 BIT(16)
static void sh7757_eth_set_mdio_gate(void *addr)
{
if (((unsigned long)addr & 0x00000fff) < 0x0800)
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
else
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
}
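/*
 * A worked example of the address check above, using the register bases
 * from sh_eth0_resources and sh_eth1_resources:
 *
 *	0xfef00000 & 0xfff = 0x000  (< 0x800  -> eth0, RMII0 gate)
 *	0xfef00800 & 0xfff = 0x800  (>= 0x800 -> eth1, RMII1 gate)
 *
 * i.e. the low 12 bits of the MDIO register address select which RMII
 * gate bit in GBECONT is opened.
 */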
static struct resource sh_eth0_resources[] = {
{
.start = 0xfef00000,
.end = 0xfef001ff,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0xc80),
.end = evt2irq(0xc80),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct platform_device sh7757_eth0_device = {
.name = "sh7757-ether",
.resource = sh_eth0_resources,
.id = 0,
.num_resources = ARRAY_SIZE(sh_eth0_resources),
.dev = {
.platform_data = &sh7757_eth0_pdata,
},
};
static struct resource sh_eth1_resources[] = {
{
.start = 0xfef00800,
.end = 0xfef009ff,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0xc80),
.end = evt2irq(0xc80),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct platform_device sh7757_eth1_device = {
.name = "sh7757-ether",
.resource = sh_eth1_resources,
.id = 1,
.num_resources = ARRAY_SIZE(sh_eth1_resources),
.dev = {
.platform_data = &sh7757_eth1_pdata,
},
};
static void sh7757_eth_giga_set_mdio_gate(void *addr)
{
if (((unsigned long)addr & 0x00000fff) < 0x0800) {
gpio_set_value(GPIO_PTT4, 1);
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
} else {
gpio_set_value(GPIO_PTT4, 0);
writel(readl(GBECONT) & ~GBECONT_RMII1, GBECONT);
}
}
static struct resource sh_eth_giga0_resources[] = {
{
.start = 0xfee00000,
.end = 0xfee007ff,
.flags = IORESOURCE_MEM,
}, {
/* TSU */
.start = 0xfee01800,
.end = 0xfee01fff,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x2960),
.end = evt2irq(0x2960),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
.phy = 18,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
static struct platform_device sh7757_eth_giga0_device = {
.name = "sh7757-gether",
.resource = sh_eth_giga0_resources,
.id = 2,
.num_resources = ARRAY_SIZE(sh_eth_giga0_resources),
.dev = {
.platform_data = &sh7757_eth_giga0_pdata,
},
};
static struct resource sh_eth_giga1_resources[] = {
{
.start = 0xfee00800,
.end = 0xfee00fff,
.flags = IORESOURCE_MEM,
}, {
/* TSU */
.start = 0xfee01800,
.end = 0xfee01fff,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x2980),
.end = evt2irq(0x2980),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
.phy = 19,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
static struct platform_device sh7757_eth_giga1_device = {
.name = "sh7757-gether",
.resource = sh_eth_giga1_resources,
.id = 3,
.num_resources = ARRAY_SIZE(sh_eth_giga1_resources),
.dev = {
.platform_data = &sh7757_eth_giga1_pdata,
},
};
/* Fixed 3.3V regulator to be used by SDHI0, MMCIF */
static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
};
/* SH_MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
.start = 0xffcb0000,
.end = 0xffcb00ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x1c60),
.flags = IORESOURCE_IRQ,
},
[2] = {
.start = evt2irq(0x1c80),
.flags = IORESOURCE_IRQ,
},
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
.sup_pclk = 0x0f,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
MMC_CAP_NONREMOVABLE,
.ocr = MMC_VDD_32_33 | MMC_VDD_33_34,
.slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
.slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
};
static struct platform_device sh_mmcif_device = {
.name = "sh_mmcif",
.id = 0,
.dev = {
.platform_data = &sh_mmcif_plat,
},
.num_resources = ARRAY_SIZE(sh_mmcif_resources),
.resource = sh_mmcif_resources,
};
/* SDHI0 */
static struct tmio_mmc_data sdhi_info = {
.chan_priv_tx = (void *)SHDMA_SLAVE_SDHI_TX,
.chan_priv_rx = (void *)SHDMA_SLAVE_SDHI_RX,
.capabilities = MMC_CAP_SD_HIGHSPEED,
};
static struct resource sdhi_resources[] = {
[0] = {
.start = 0xffe50000,
.end = 0xffe500ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x480),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sdhi_device = {
.name = "sh_mobile_sdhi",
.num_resources = ARRAY_SIZE(sdhi_resources),
.resource = sdhi_resources,
.id = 0,
.dev = {
.platform_data = &sdhi_info,
},
};
static int usbhs0_get_id(struct platform_device *pdev)
{
return USBHS_GADGET;
}
static struct renesas_usbhs_platform_info usb0_data = {
.platform_callback = {
.get_id = usbhs0_get_id,
},
.driver_param = {
.buswait_bwait = 5,
}
};
static struct resource usb0_resources[] = {
[0] = {
.start = 0xfe450000,
.end = 0xfe4501ff,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = evt2irq(0x840),
.end = evt2irq(0x840),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device usb0_device = {
.name = "renesas_usbhs",
.id = 0,
.dev = {
.platform_data = &usb0_data,
},
.num_resources = ARRAY_SIZE(usb0_resources),
.resource = usb0_resources,
};
static struct platform_device *sh7757lcr_devices[] __initdata = {
&heartbeat_device,
&sh7757_eth0_device,
&sh7757_eth1_device,
&sh7757_eth_giga0_device,
&sh7757_eth_giga1_device,
&sh_mmcif_device,
&sdhi_device,
&usb0_device,
};
static struct flash_platform_data spi_flash_data = {
.name = "m25p80",
.type = "m25px64",
};
static struct spi_board_info spi_board_info[] = {
{
.modalias = "m25p80",
.max_speed_hz = 25000000,
.bus_num = 0,
.chip_select = 1,
.platform_data = &spi_flash_data,
},
};
static int __init sh7757lcr_devices_setup(void)
{
regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
/* RGMII (PTA) */
gpio_request(GPIO_FN_ET0_MDC, NULL);
gpio_request(GPIO_FN_ET0_MDIO, NULL);
gpio_request(GPIO_FN_ET1_MDC, NULL);
gpio_request(GPIO_FN_ET1_MDIO, NULL);
/* ONFI (PTB, PTZ) */
gpio_request(GPIO_FN_ON_NRE, NULL);
gpio_request(GPIO_FN_ON_NWE, NULL);
gpio_request(GPIO_FN_ON_NWP, NULL);
gpio_request(GPIO_FN_ON_NCE0, NULL);
gpio_request(GPIO_FN_ON_R_B0, NULL);
gpio_request(GPIO_FN_ON_ALE, NULL);
gpio_request(GPIO_FN_ON_CLE, NULL);
gpio_request(GPIO_FN_ON_DQ7, NULL);
gpio_request(GPIO_FN_ON_DQ6, NULL);
gpio_request(GPIO_FN_ON_DQ5, NULL);
gpio_request(GPIO_FN_ON_DQ4, NULL);
gpio_request(GPIO_FN_ON_DQ3, NULL);
gpio_request(GPIO_FN_ON_DQ2, NULL);
gpio_request(GPIO_FN_ON_DQ1, NULL);
gpio_request(GPIO_FN_ON_DQ0, NULL);
/* IRQ8 to 0 (PTB, PTC) */
gpio_request(GPIO_FN_IRQ8, NULL);
gpio_request(GPIO_FN_IRQ7, NULL);
gpio_request(GPIO_FN_IRQ6, NULL);
gpio_request(GPIO_FN_IRQ5, NULL);
gpio_request(GPIO_FN_IRQ4, NULL);
gpio_request(GPIO_FN_IRQ3, NULL);
gpio_request(GPIO_FN_IRQ2, NULL);
gpio_request(GPIO_FN_IRQ1, NULL);
gpio_request(GPIO_FN_IRQ0, NULL);
/* SPI0 (PTD) */
gpio_request(GPIO_FN_SP0_MOSI, NULL);
gpio_request(GPIO_FN_SP0_MISO, NULL);
gpio_request(GPIO_FN_SP0_SCK, NULL);
gpio_request(GPIO_FN_SP0_SCK_FB, NULL);
gpio_request(GPIO_FN_SP0_SS0, NULL);
gpio_request(GPIO_FN_SP0_SS1, NULL);
gpio_request(GPIO_FN_SP0_SS2, NULL);
gpio_request(GPIO_FN_SP0_SS3, NULL);
/* RMII 0/1 (PTE, PTF) */
gpio_request(GPIO_FN_RMII0_CRS_DV, NULL);
gpio_request(GPIO_FN_RMII0_TXD1, NULL);
gpio_request(GPIO_FN_RMII0_TXD0, NULL);
gpio_request(GPIO_FN_RMII0_TXEN, NULL);
gpio_request(GPIO_FN_RMII0_REFCLK, NULL);
gpio_request(GPIO_FN_RMII0_RXD1, NULL);
gpio_request(GPIO_FN_RMII0_RXD0, NULL);
gpio_request(GPIO_FN_RMII0_RX_ER, NULL);
gpio_request(GPIO_FN_RMII1_CRS_DV, NULL);
gpio_request(GPIO_FN_RMII1_TXD1, NULL);
gpio_request(GPIO_FN_RMII1_TXD0, NULL);
gpio_request(GPIO_FN_RMII1_TXEN, NULL);
gpio_request(GPIO_FN_RMII1_REFCLK, NULL);
gpio_request(GPIO_FN_RMII1_RXD1, NULL);
gpio_request(GPIO_FN_RMII1_RXD0, NULL);
gpio_request(GPIO_FN_RMII1_RX_ER, NULL);
/* eMMC (PTG) */
gpio_request(GPIO_FN_MMCCLK, NULL);
gpio_request(GPIO_FN_MMCCMD, NULL);
gpio_request(GPIO_FN_MMCDAT7, NULL);
gpio_request(GPIO_FN_MMCDAT6, NULL);
gpio_request(GPIO_FN_MMCDAT5, NULL);
gpio_request(GPIO_FN_MMCDAT4, NULL);
gpio_request(GPIO_FN_MMCDAT3, NULL);
gpio_request(GPIO_FN_MMCDAT2, NULL);
gpio_request(GPIO_FN_MMCDAT1, NULL);
gpio_request(GPIO_FN_MMCDAT0, NULL);
/* LPC (PTG, PTH, PTQ, PTU) */
gpio_request(GPIO_FN_SERIRQ, NULL);
gpio_request(GPIO_FN_LPCPD, NULL);
gpio_request(GPIO_FN_LDRQ, NULL);
gpio_request(GPIO_FN_WP, NULL);
gpio_request(GPIO_FN_FMS0, NULL);
gpio_request(GPIO_FN_LAD3, NULL);
gpio_request(GPIO_FN_LAD2, NULL);
gpio_request(GPIO_FN_LAD1, NULL);
gpio_request(GPIO_FN_LAD0, NULL);
gpio_request(GPIO_FN_LFRAME, NULL);
gpio_request(GPIO_FN_LRESET, NULL);
gpio_request(GPIO_FN_LCLK, NULL);
gpio_request(GPIO_FN_LGPIO7, NULL);
gpio_request(GPIO_FN_LGPIO6, NULL);
gpio_request(GPIO_FN_LGPIO5, NULL);
gpio_request(GPIO_FN_LGPIO4, NULL);
/* SPI1 (PTH) */
gpio_request(GPIO_FN_SP1_MOSI, NULL);
gpio_request(GPIO_FN_SP1_MISO, NULL);
gpio_request(GPIO_FN_SP1_SCK, NULL);
gpio_request(GPIO_FN_SP1_SCK_FB, NULL);
gpio_request(GPIO_FN_SP1_SS0, NULL);
gpio_request(GPIO_FN_SP1_SS1, NULL);
/* SDHI (PTI) */
gpio_request(GPIO_FN_SD_WP, NULL);
gpio_request(GPIO_FN_SD_CD, NULL);
gpio_request(GPIO_FN_SD_CLK, NULL);
gpio_request(GPIO_FN_SD_CMD, NULL);
gpio_request(GPIO_FN_SD_D3, NULL);
gpio_request(GPIO_FN_SD_D2, NULL);
gpio_request(GPIO_FN_SD_D1, NULL);
gpio_request(GPIO_FN_SD_D0, NULL);
/* SCIF3/4 (PTJ, PTW) */
gpio_request(GPIO_FN_RTS3, NULL);
gpio_request(GPIO_FN_CTS3, NULL);
gpio_request(GPIO_FN_TXD3, NULL);
gpio_request(GPIO_FN_RXD3, NULL);
gpio_request(GPIO_FN_RTS4, NULL);
gpio_request(GPIO_FN_RXD4, NULL);
gpio_request(GPIO_FN_TXD4, NULL);
gpio_request(GPIO_FN_CTS4, NULL);
/* SERMUX (PTK, PTL, PTO, PTV) */
gpio_request(GPIO_FN_COM2_TXD, NULL);
gpio_request(GPIO_FN_COM2_RXD, NULL);
gpio_request(GPIO_FN_COM2_RTS, NULL);
gpio_request(GPIO_FN_COM2_CTS, NULL);
gpio_request(GPIO_FN_COM2_DTR, NULL);
gpio_request(GPIO_FN_COM2_DSR, NULL);
gpio_request(GPIO_FN_COM2_DCD, NULL);
gpio_request(GPIO_FN_COM2_RI, NULL);
gpio_request(GPIO_FN_RAC_RXD, NULL);
gpio_request(GPIO_FN_RAC_RTS, NULL);
gpio_request(GPIO_FN_RAC_CTS, NULL);
gpio_request(GPIO_FN_RAC_DTR, NULL);
gpio_request(GPIO_FN_RAC_DSR, NULL);
gpio_request(GPIO_FN_RAC_DCD, NULL);
gpio_request(GPIO_FN_RAC_TXD, NULL);
gpio_request(GPIO_FN_COM1_TXD, NULL);
gpio_request(GPIO_FN_COM1_RXD, NULL);
gpio_request(GPIO_FN_COM1_RTS, NULL);
gpio_request(GPIO_FN_COM1_CTS, NULL);
writeb(0x10, 0xfe470000); /* SMR0: SerMux mode 0 */
/* IIC (PTM, PTR, PTS) */
gpio_request(GPIO_FN_SDA7, NULL);
gpio_request(GPIO_FN_SCL7, NULL);
gpio_request(GPIO_FN_SDA6, NULL);
gpio_request(GPIO_FN_SCL6, NULL);
gpio_request(GPIO_FN_SDA5, NULL);
gpio_request(GPIO_FN_SCL5, NULL);
gpio_request(GPIO_FN_SDA4, NULL);
gpio_request(GPIO_FN_SCL4, NULL);
gpio_request(GPIO_FN_SDA3, NULL);
gpio_request(GPIO_FN_SCL3, NULL);
gpio_request(GPIO_FN_SDA2, NULL);
gpio_request(GPIO_FN_SCL2, NULL);
gpio_request(GPIO_FN_SDA1, NULL);
gpio_request(GPIO_FN_SCL1, NULL);
gpio_request(GPIO_FN_SDA0, NULL);
gpio_request(GPIO_FN_SCL0, NULL);
/* USB (PTN) */
gpio_request(GPIO_FN_VBUS_EN, NULL);
gpio_request(GPIO_FN_VBUS_OC, NULL);
/* SGPIO1/0 (PTN, PTO) */
gpio_request(GPIO_FN_SGPIO1_CLK, NULL);
gpio_request(GPIO_FN_SGPIO1_LOAD, NULL);
gpio_request(GPIO_FN_SGPIO1_DI, NULL);
gpio_request(GPIO_FN_SGPIO1_DO, NULL);
gpio_request(GPIO_FN_SGPIO0_CLK, NULL);
gpio_request(GPIO_FN_SGPIO0_LOAD, NULL);
gpio_request(GPIO_FN_SGPIO0_DI, NULL);
gpio_request(GPIO_FN_SGPIO0_DO, NULL);
/* WDT (PTN) */
gpio_request(GPIO_FN_SUB_CLKIN, NULL);
/* System (PTT) */
gpio_request(GPIO_FN_STATUS1, NULL);
gpio_request(GPIO_FN_STATUS0, NULL);
/* PWMX (PTT) */
gpio_request(GPIO_FN_PWMX1, NULL);
gpio_request(GPIO_FN_PWMX0, NULL);
/* R-SPI (PTV) */
gpio_request(GPIO_FN_R_SPI_MOSI, NULL);
gpio_request(GPIO_FN_R_SPI_MISO, NULL);
gpio_request(GPIO_FN_R_SPI_RSPCK, NULL);
gpio_request(GPIO_FN_R_SPI_SSL0, NULL);
gpio_request(GPIO_FN_R_SPI_SSL1, NULL);
/* EVC (PTV, PTW) */
gpio_request(GPIO_FN_EVENT7, NULL);
gpio_request(GPIO_FN_EVENT6, NULL);
gpio_request(GPIO_FN_EVENT5, NULL);
gpio_request(GPIO_FN_EVENT4, NULL);
gpio_request(GPIO_FN_EVENT3, NULL);
gpio_request(GPIO_FN_EVENT2, NULL);
gpio_request(GPIO_FN_EVENT1, NULL);
gpio_request(GPIO_FN_EVENT0, NULL);
/* LED for heartbeat */
gpio_request(GPIO_PTU3, NULL);
gpio_direction_output(GPIO_PTU3, 1);
gpio_request(GPIO_PTU2, NULL);
gpio_direction_output(GPIO_PTU2, 1);
gpio_request(GPIO_PTU1, NULL);
gpio_direction_output(GPIO_PTU1, 1);
gpio_request(GPIO_PTU0, NULL);
gpio_direction_output(GPIO_PTU0, 1);
/* control for MDIO of Gigabit Ethernet */
gpio_request(GPIO_PTT4, NULL);
gpio_direction_output(GPIO_PTT4, 1);
/* control for eMMC */
gpio_request(GPIO_PTT7, NULL); /* eMMC_RST# */
gpio_direction_output(GPIO_PTT7, 0);
gpio_request(GPIO_PTT6, NULL); /* eMMC_INDEX# */
gpio_direction_output(GPIO_PTT6, 0);
gpio_request(GPIO_PTT5, NULL); /* eMMC_PRST# */
gpio_direction_output(GPIO_PTT5, 1);
/* register SPI device information */
spi_register_board_info(spi_board_info,
ARRAY_SIZE(spi_board_info));
/* General platform */
return platform_add_devices(sh7757lcr_devices,
ARRAY_SIZE(sh7757lcr_devices));
}
arch_initcall(sh7757lcr_devices_setup);
/* Initialize IRQ setting */
void __init init_sh7757lcr_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
plat_irq_setup_pins(IRQ_MODE_IRQ3210);
}
/* Initialize the board */
static void __init sh7757lcr_setup(char **cmdline_p)
{
printk(KERN_INFO "Renesas R0P7757LC0012RL support.\n");
}
static int sh7757lcr_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of S3 (Low active).
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value |= MODE_PIN0; /* Clock Mode: 1 */
return value;
}
/* The Machine Vector */
static struct sh_machine_vector mv_sh7757lcr __initmv = {
.mv_name = "SH7757LCR",
.mv_setup = sh7757lcr_setup,
.mv_init_irq = init_sh7757lcr_IRQ,
.mv_mode_pins = sh7757lcr_mode_pins,
};
| linux-master | arch/sh/boards/board-sh7757lcr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ALPHAPROJECT AP-SH4A-3A Support.
*
* Copyright (C) 2010 ALPHAPROJECT Co.,Ltd.
* Copyright (C) 2008 Yoshihiro Shimoda
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <linux/sizes.h>
#include <asm/clock.h>
static struct mtd_partition nor_flash_partitions[] = {
{
.name = "loader",
.offset = 0x00000000,
.size = 512 * 1024,
},
{
.name = "bootenv",
.offset = MTDPART_OFS_APPEND,
.size = 512 * 1024,
},
{
.name = "kernel",
.offset = MTDPART_OFS_APPEND,
.size = 4 * 1024 * 1024,
},
{
.name = "data",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data nor_flash_data = {
.width = 4,
.parts = nor_flash_partitions,
.nr_parts = ARRAY_SIZE(nor_flash_partitions),
};
static struct resource nor_flash_resources[] = {
[0] = {
.start = 0x00000000,
.end = 0x01000000 - 1,
.flags = IORESOURCE_MEM,
}
};
static struct platform_device nor_flash_device = {
.name = "physmap-flash",
.dev = {
.platform_data = &nor_flash_data,
},
.num_resources = ARRAY_SIZE(nor_flash_resources),
.resource = nor_flash_resources,
};
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
REGULATOR_SUPPLY("vdd33a", "smsc911x"),
};
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
.start = 0xA4000000,
.end = 0xA4000000 + SZ_256 - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "smsc911x-irq",
.start = evt2irq(0x200),
.end = evt2irq(0x200),
.flags = IORESOURCE_IRQ,
},
};
static struct smsc911x_platform_config smsc911x_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
.flags = SMSC911X_USE_16BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
.dev = {
.platform_data = &smsc911x_config,
},
};
static struct platform_device *apsh4a3a_devices[] __initdata = {
&nor_flash_device,
&smsc911x_device,
};
static int __init apsh4a3a_devices_setup(void)
{
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
return platform_add_devices(apsh4a3a_devices,
ARRAY_SIZE(apsh4a3a_devices));
}
device_initcall(apsh4a3a_devices_setup);
static int apsh4a3a_clk_init(void)
{
struct clk *clk;
int ret;
clk = clk_get(NULL, "extal");
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = clk_set_rate(clk, 33333000);
clk_put(clk);
return ret;
}
/* Initialize the board */
static void __init apsh4a3a_setup(char **cmdline_p)
{
printk(KERN_INFO "Alpha Project AP-SH4A-3A support:\n");
}
static void __init apsh4a3a_init_irq(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ7654);
}
/* Return the board specific boot mode pin configuration */
static int apsh4a3a_mode_pins(void)
{
int value = 0;
/* These are the factory default settings of SW1 and SW2.
* If you change these dip switches then you will need to
* adjust the values below as well.
*/
value &= ~MODE_PIN0; /* Clock Mode 16 */
value &= ~MODE_PIN1;
value &= ~MODE_PIN2;
value &= ~MODE_PIN3;
value |= MODE_PIN4;
value &= ~MODE_PIN5; /* 16-bit Area0 bus width */
value |= MODE_PIN6; /* Area 0 SRAM interface */
value |= MODE_PIN7;
value |= MODE_PIN8; /* Little Endian */
value |= MODE_PIN9; /* Master Mode */
value |= MODE_PIN10; /* Crystal resonator */
value |= MODE_PIN11; /* Display Unit */
value |= MODE_PIN12;
value &= ~MODE_PIN13; /* 29-bit address mode */
value |= MODE_PIN14; /* No PLL step-up */
return value;
}
/*
* The Machine Vector
*/
static struct sh_machine_vector mv_apsh4a3a __initmv = {
.mv_name = "AP-SH4A-3A",
.mv_setup = apsh4a3a_setup,
.mv_clk_init = apsh4a3a_clk_init,
.mv_init_irq = apsh4a3a_init_irq,
.mv_mode_pins = apsh4a3a_mode_pins,
};
| linux-master | arch/sh/boards/board-apsh4a3a.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boards/titan/setup.c - Setup for Titan
*
* Copyright (C) 2006 Jamie Lenehan
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <mach/titan.h>
#include <asm/io.h>
static void __init init_titan_irq(void)
{
/* enable individual interrupt mode for externals */
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
static struct sh_machine_vector mv_titan __initmv = {
.mv_name = "Titan",
.mv_init_irq = init_titan_irq,
};
| linux-master | arch/sh/boards/board-titan.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boards/shmin/setup.c
*
* Copyright (C) 2006 Takashi YOSHII
*
* SHMIN Support.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/machvec.h>
#include <mach/shmin.h>
#include <asm/clock.h>
#include <asm/io.h>
#define PFC_PHCR 0xa400010eUL
#define INTC_ICR1 0xa4000010UL
static void __init init_shmin_irq(void)
{
__raw_writew(0x2a00, PFC_PHCR);	/* IRQ0-3 = IRQ */
__raw_writew(0x0aaa, INTC_ICR1);	/* IRQ0-3 = IRQ mode, low-active */
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
static void __init shmin_setup(char **cmdline_p)
{
__set_io_port_base(SHMIN_IO_BASE);
}
static struct sh_machine_vector mv_shmin __initmv = {
.mv_name = "SHMIN",
.mv_setup = shmin_setup,
.mv_init_irq = init_shmin_irq,
};
| linux-master | arch/sh/boards/board-shmin.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/boards/renesas/sh7763rdp/irq.c
*
* Renesas Solutions SH7763RDP Support.
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mach/sh7763rdp.h>
#define INTC_BASE (0xFFD00000)
#define INTC_INT2PRI7 (INTC_BASE+0x4001C)
#define INTC_INT2MSKCR (INTC_BASE+0x4003C)
#define INTC_INT2MSKCR1 (INTC_BASE+0x400D4)
/*
* Initialize IRQ setting
*/
void __init init_sh7763rdp_IRQ(void)
{
/* GPIO enabled */
__raw_writel(1 << 25, INTC_INT2MSKCR);
/* enable GPIO interrupts */
__raw_writel((__raw_readl(INTC_INT2PRI7) & 0xFF00FFFF) | 0x000F0000,
INTC_INT2PRI7);
/* USBH enabled */
__raw_writel(1 << 17, INTC_INT2MSKCR1);
/* GETHER enabled */
__raw_writel(1 << 16, INTC_INT2MSKCR1);
/* DMAC enabled */
__raw_writel(1 << 8, INTC_INT2MSKCR);
}
| linux-master | arch/sh/boards/mach-sh7763rdp/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/sh/boards/renesas/sh7763rdp/setup.c
*
* Renesas Solutions sh7763rdp board
*
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Nobuhiro Iwamatsu <[email protected]>
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/mtd/physmap.h>
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
#include <mach/sh7763rdp.h>
#include <asm/sh7760fb.h>
/* NOR Flash */
static struct mtd_partition sh7763rdp_nor_flash_partitions[] = {
{
.name = "U-Boot",
.offset = 0,
.size = (2 * 128 * 1024),
.mask_flags = MTD_WRITEABLE, /* Read-only */
}, {
.name = "Linux-Kernel",
.offset = MTDPART_OFS_APPEND,
.size = (20 * 128 * 1024),
}, {
.name = "Root Filesystem",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data sh7763rdp_nor_flash_data = {
.width = 2,
.parts = sh7763rdp_nor_flash_partitions,
.nr_parts = ARRAY_SIZE(sh7763rdp_nor_flash_partitions),
};
static struct resource sh7763rdp_nor_flash_resources[] = {
[0] = {
.name = "NOR Flash",
.start = 0,
.end = (64 * 1024 * 1024) - 1, /* inclusive end of the 64 MiB window */
.flags = IORESOURCE_MEM,
},
};
static struct platform_device sh7763rdp_nor_flash_device = {
.name = "physmap-flash",
.resource = sh7763rdp_nor_flash_resources,
.num_resources = ARRAY_SIZE(sh7763rdp_nor_flash_resources),
.dev = {
.platform_data = &sh7763rdp_nor_flash_data,
},
};
/*
* SH-Ether
*
* The SH7763 on-chip Ethernet (SH-Ether) raises multiple interrupt
* events, which are funnelled into a single IRQ
* (0x920, 0x940, 0x960 -> 0x920).
*/
static struct resource sh_eth_resources[] = {
{
.start = 0xFEE00800, /* use eth1 */
.end = 0xFEE00F7C - 1,
.flags = IORESOURCE_MEM,
}, {
.start = 0xFEE01800, /* TSU */
.end = 0xFEE01FFF,
.flags = IORESOURCE_MEM,
}, {
.start = evt2irq(0x920), /* irq number */
.flags = IORESOURCE_IRQ,
},
};
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device sh7763rdp_eth_device = {
.name = "sh7763-gether",
.resource = sh_eth_resources,
.num_resources = ARRAY_SIZE(sh_eth_resources),
.dev = {
.platform_data = &sh7763_eth_pdata,
},
};
/* SH7763 LCDC */
static struct resource sh7763rdp_fb_resources[] = {
{
.start = 0xFFE80000,
.end = 0xFFE80442 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct fb_videomode sh7763fb_videomode = {
.refresh = 60,
.name = "VGA Monitor",
.xres = 640,
.yres = 480,
.pixclock = 10000,
.left_margin = 80,
.right_margin = 24,
.upper_margin = 30,
.lower_margin = 1,
.hsync_len = 96,
.vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
.flag = FB_MODE_IS_UNKNOWN,
};
static struct sh7760fb_platdata sh7763fb_def_pdata = {
.def_mode = &sh7763fb_videomode,
.ldmtr = (LDMTR_TFT_COLOR_16|LDMTR_MCNT),
.lddfr = LDDFR_16BPP_RGB565,
.ldpmmr = 0x0000,
.ldpspr = 0xFFFF,
.ldaclnr = 0x0001,
.ldickr = 0x1102,
.rotate = 0,
.novsync = 0,
.blank = NULL,
};
static struct platform_device sh7763rdp_fb_device = {
.name = "sh7760-lcdc",
.resource = sh7763rdp_fb_resources,
.num_resources = ARRAY_SIZE(sh7763rdp_fb_resources),
.dev = {
.platform_data = &sh7763fb_def_pdata,
},
};
static struct platform_device *sh7763rdp_devices[] __initdata = {
&sh7763rdp_nor_flash_device,
&sh7763rdp_eth_device,
&sh7763rdp_fb_device,
};
static int __init sh7763rdp_devices_setup(void)
{
return platform_add_devices(sh7763rdp_devices,
ARRAY_SIZE(sh7763rdp_devices));
}
device_initcall(sh7763rdp_devices_setup);
static void __init sh7763rdp_setup(char **cmdline_p)
{
/* Board version check */
if (__raw_readw(CPLD_BOARD_ID_ERV_REG) == 0xECB1)
printk(KERN_INFO "RTE Standard Configuration\n");
else
printk(KERN_INFO "RTA Standard Configuration\n");
/* USB pin select bits (clear bits 5-2) */
__raw_writew((__raw_readw(PORT_PSEL2) & 0xFFC3), PORT_PSEL2);
/* USBH: set port I controls to the "other" function (clear bits 9-4) */
__raw_writew(__raw_readw(PORT_PICR) & 0xFC0F, PORT_PICR);
/* Select USB Host controller */
__raw_writew(0x00, USB_USBHSC);
/* For LCD */
/* set PTJ7-1, bits 15-2 of PJCR to 0 */
__raw_writew(__raw_readw(PORT_PJCR) & 0x0003, PORT_PJCR);
/* set PTI5, bits 11-10 of PICR to 0 */
__raw_writew(__raw_readw(PORT_PICR) & 0xF3FF, PORT_PICR);
__raw_writew(0, PORT_PKCR);
__raw_writew(0, PORT_PLCR);
/* set bits 14-8 and 5-4 of PSEL2 to 0 */
__raw_writew((__raw_readw(PORT_PSEL2) & 0x00C0), PORT_PSEL2);
/* set bits 14-12, 6-4 and 2-0 of PSEL3 to 0 */
__raw_writew((__raw_readw(PORT_PSEL3) & 0x0700), PORT_PSEL3);
/* For HAC */
/* bit3-0 0100:HAC & SSI1 enable */
__raw_writew((__raw_readw(PORT_PSEL1) & 0xFFF0) | 0x0004, PORT_PSEL1);
/* bit14 1:SSI_HAC_CLK enable */
__raw_writew(__raw_readw(PORT_PSEL4) | 0x4000, PORT_PSEL4);
/* SH-Ether */
__raw_writew((__raw_readw(PORT_PSEL1) & ~0xff00) | 0x2400, PORT_PSEL1);
__raw_writew(0x0, PORT_PFCR);
__raw_writew(0x0, PORT_PFCR);
__raw_writew(0x0, PORT_PFCR);
/* MMC */
/* select SCIF and MMC functions */
__raw_writew(0x0001, PORT_PSEL0);
/* MMC clock operates */
__raw_writel(__raw_readl(MSTPCR1) & ~0x8, MSTPCR1);
__raw_writew(__raw_readw(PORT_PACR) & ~0x3000, PORT_PACR);
__raw_writew(__raw_readw(PORT_PCCR) & ~0xCFC3, PORT_PCCR);
}
static struct sh_machine_vector mv_sh7763rdp __initmv = {
.mv_name = "sh7763rdp",
.mv_setup = sh7763rdp_setup,
.mv_init_irq = init_sh7763rdp_IRQ,
};
| linux-master | arch/sh/boards/mach-sh7763rdp/setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boards/dreamcast/irq.c
*
* Holly IRQ support for the Sega Dreamcast.
*
* Copyright (c) 2001, 2002 M. R. Brown <[email protected]>
*
* This file is part of the LinuxDC project (www.linuxdc.org)
*/
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/err.h>
#include <mach/sysasic.h>
/*
* Dreamcast System ASIC Hardware Events -
*
* The Dreamcast's System ASIC (a.k.a. Holly) is responsible for receiving
* hardware events from system peripherals and triggering an SH7750 IRQ.
* Hardware events can trigger IRQs 13, 11, or 9 depending on which bits are
* set in the Event Mask Registers (EMRs). When a hardware event is
* triggered, its corresponding bit in the Event Status Registers (ESRs)
* is set, and that bit should be rewritten to the ESR to acknowledge that
* event.
*
* There are three 32-bit ESRs located at 0xa05f6900 - 0xa05f6908. Event
* types can be found in arch/sh/include/mach-dreamcast/mach/sysasic.h.
* There are three groups of EMRs that parallel the ESRs. Each EMR group
* corresponds to an IRQ, so 0xa05f6910 - 0xa05f6918 triggers IRQ 13,
* 0xa05f6920 - 0xa05f6928 triggers IRQ 11, and 0xa05f6930 - 0xa05f6938
* triggers IRQ 9.
*
* In the kernel, these events are mapped to virtual IRQs so that drivers can
* respond to them as they would a normal interrupt. In order to keep this
* mapping simple, the events are mapped as:
*
* 6900/6910 - Events 0-31, IRQ 13
* 6904/6924 - Events 32-63, IRQ 11
* 6908/6938 - Events 64-95, IRQ 9
*
*/
#define ESR_BASE 0x005f6900 /* Base event status register */
#define EMR_BASE 0x005f6910 /* Base event mask register */
/*
* Helps us determine the EMR group that this event belongs to: 0 = 0x6910,
* 1 = 0x6920, 2 = 0x6930; also determine the event offset.
*/
#define LEVEL(event) (((event) - HW_EVENT_IRQ_BASE) / 32)
/* Return the hardware event's bit position within the EMR/ESR */
#define EVENT_BIT(event) (((event) - HW_EVENT_IRQ_BASE) & 31)
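/*
 * Worked example: the virtual IRQ HW_EVENT_IRQ_BASE + 40 gives
 * LEVEL() == 1 and EVENT_BIT() == 8, i.e. bit 8 of ESR 0x5f6904 and
 * EMR 0x5f6924. The (LEVEL(irq) << 4) + (LEVEL(irq) << 2) expression
 * used below is a 20-byte stride, which selects the matching EMR of
 * each group: 0x5f6910, 0x5f6924 and 0x5f6938.
 */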
/*
* For each of these *_irq routines, the IRQ passed in is the virtual IRQ
* (logically mapped to the corresponding bit for the hardware event).
*/
/* Disable the hardware event by masking its bit in its EMR */
static inline void disable_systemasic_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
mask = inl(emr);
mask &= ~(1 << EVENT_BIT(irq));
outl(mask, emr);
}
/* Enable the hardware event by setting its bit in its EMR */
static inline void enable_systemasic_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
__u32 emr = EMR_BASE + (LEVEL(irq) << 4) + (LEVEL(irq) << 2);
__u32 mask;
mask = inl(emr);
mask |= (1 << EVENT_BIT(irq));
outl(mask, emr);
}
/* Acknowledge a hardware event by writing its bit back to its ESR */
static void mask_ack_systemasic_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
__u32 esr = ESR_BASE + (LEVEL(irq) << 2);
disable_systemasic_irq(data);
outl((1 << EVENT_BIT(irq)), esr);
}
struct irq_chip systemasic_int = {
.name = "System ASIC",
.irq_mask = disable_systemasic_irq,
.irq_mask_ack = mask_ack_systemasic_irq,
.irq_unmask = enable_systemasic_irq,
};
/*
* Map the hardware event indicated by the processor IRQ to a virtual IRQ.
*/
int systemasic_irq_demux(int irq)
{
__u32 emr, esr, status, level;
__u32 j, bit;
switch (irq) {
case 13 + 16:
level = 0;
break;
case 11 + 16:
level = 1;
break;
case 9 + 16:
level = 2;
break;
default:
return irq;
}
emr = EMR_BASE + (level << 4) + (level << 2);
esr = ESR_BASE + (level << 2);
/* Mask the ESR to filter any spurious, unwanted interrupts */
status = inl(esr);
status &= inl(emr);
/* Now scan and find the first set bit as the event to map */
for (bit = 1, j = 0; j < 32; bit <<= 1, j++) {
if (status & bit) {
irq = HW_EVENT_IRQ_BASE + j + (level << 5);
return irq;
}
}
/* Not reached */
return irq;
}
void systemasic_irq_init(void)
{
int irq_base, i;
irq_base = irq_alloc_descs(HW_EVENT_IRQ_BASE, HW_EVENT_IRQ_BASE,
HW_EVENT_IRQ_MAX - HW_EVENT_IRQ_BASE, -1);
if (IS_ERR_VALUE(irq_base)) {
pr_err("%s: failed hooking irqs\n", __func__);
return;
}
for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++)
irq_set_chip_and_handler(i, &systemasic_int, handle_level_irq);
}
| linux-master | arch/sh/boards/mach-dreamcast/irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/boards/dreamcast/rtc.c
*
* Dreamcast AICA RTC routines.
*
* Copyright (c) 2001, 2002 M. R. Brown <[email protected]>
* Copyright (c) 2002 Paul Mundt <[email protected]>
*/
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/io.h>
#include <linux/platform_device.h>
/* The AICA RTC has an Epoch of 1/1/1950, so we must subtract 20 years (in
seconds) to get the standard Unix Epoch when getting the time, and add
20 years when setting the time. */
#define TWENTY_YEARS ((20 * 365LU + 5) * 86400)
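/* As a sanity check on the constant: the 20 years from 1950 to 1970
   contain five leap days (1952, 1956, 1960, 1964 and 1968), so
   TWENTY_YEARS works out to (20 * 365 + 5) * 86400 = 631152000 seconds. */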
/* The AICA RTC is represented by a 32-bit seconds counter stored in 2 16-bit
registers.*/
#define AICA_RTC_SECS_H 0xa0710000
#define AICA_RTC_SECS_L 0xa0710004
/**
* aica_rtc_gettimeofday - Get the time from the AICA RTC
* @dev: the RTC device (ignored)
* @tm: pointer to resulting RTC time structure
*
* Grabs the current RTC seconds counter and adjusts it to the Unix Epoch.
*/
static int aica_rtc_gettimeofday(struct device *dev, struct rtc_time *tm)
{
unsigned long val1, val2;
time64_t t;
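/*
 * The counter is split across two 16-bit registers, so read it
 * twice and retry until both snapshots agree; this guards against
 * a carry from the low half into the high half between accesses.
 */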
do {
val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
val2 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
} while (val1 != val2);
/* normalize to 1970..2106 time range */
t = (u32)(val1 - TWENTY_YEARS);
rtc_time64_to_tm(t, tm);
return 0;
}
/**
* aica_rtc_settimeofday - Set the AICA RTC to the current time
* @dev: the RTC device (ignored)
* @tm: pointer to new RTC time structure
*
* Adjusts the given @tm to the AICA Epoch and sets the RTC seconds counter.
*/
static int aica_rtc_settimeofday(struct device *dev, struct rtc_time *tm)
{
unsigned long val1, val2;
time64_t secs = rtc_tm_to_time64(tm);
u32 adj = secs + TWENTY_YEARS;
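/*
 * Write both halves, then read the counter back twice; retry the
 * whole sequence until the read-back is stable so a torn update
 * is never left in the registers.
 */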
do {
__raw_writel((adj & 0xffff0000) >> 16, AICA_RTC_SECS_H);
__raw_writel((adj & 0xffff), AICA_RTC_SECS_L);
val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
val2 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
} while (val1 != val2);
return 0;
}
static const struct rtc_class_ops rtc_generic_ops = {
.read_time = aica_rtc_gettimeofday,
.set_time = aica_rtc_settimeofday,
};
static int __init aica_time_init(void)
{
struct platform_device *pdev;
pdev = platform_device_register_data(NULL, "rtc-generic", -1,
&rtc_generic_ops,
sizeof(rtc_generic_ops));
return PTR_ERR_OR_ZERO(pdev);
}
arch_initcall(aica_time_init);
| linux-master | arch/sh/boards/mach-dreamcast/rtc.c |