python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
/* * Kernel and userspace stack tracing. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2013 Tensilica Inc. * Copyright (C) 2015 Cadence Design Systems Inc. */ #include <linux/export.h> #include <linux/sched.h> #include <linux/stacktrace.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include <linux/uaccess.h> #if IS_ENABLED(CONFIG_PERF_EVENTS) /* Address of common_exception_return, used to check the * transition from kernel to user space. */ extern int common_exception_return; void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth, int (*ufn)(struct stackframe *frame, void *data), void *data) { unsigned long windowstart = regs->windowstart; unsigned long windowbase = regs->windowbase; unsigned long a0 = regs->areg[0]; unsigned long a1 = regs->areg[1]; unsigned long pc = regs->pc; struct stackframe frame; int index; if (!depth--) return; frame.pc = pc; frame.sp = a1; if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) return; if (IS_ENABLED(CONFIG_USER_ABI_CALL0_ONLY) || (IS_ENABLED(CONFIG_USER_ABI_CALL0_PROBE) && !(regs->ps & PS_WOE_MASK))) return; /* Two steps: * * 1. Look through the register window for the * previous PCs in the call trace. * * 2. Look on the stack. */ /* Step 1. */ /* Rotate WINDOWSTART to move the bit corresponding to * the current window to the bit #0. */ windowstart = (windowstart << WSBITS | windowstart) >> windowbase; /* Look for bits that are set, they correspond to * valid windows. */ for (index = WSBITS - 1; (index > 0) && depth; depth--, index--) if (windowstart & (1 << index)) { /* Get the PC from a0 and a1. */ pc = MAKE_PC_FROM_RA(a0, pc); /* Read a0 and a1 from the * corresponding position in AREGs. */ a0 = regs->areg[index * 4]; a1 = regs->areg[index * 4 + 1]; frame.pc = pc; frame.sp = a1; if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) return; } /* Step 2. 
*/ /* We are done with the register window, we need to * look through the stack. */ if (!depth) return; /* Start from the a1 register. */ /* a1 = regs->areg[1]; */ while (a0 != 0 && depth--) { pc = MAKE_PC_FROM_RA(a0, pc); /* Check if the region is OK to access. */ if (!access_ok(&SPILL_SLOT(a1, 0), 8)) return; /* Copy a1, a0 from user space stack frame. */ if (__get_user(a0, &SPILL_SLOT(a1, 0)) || __get_user(a1, &SPILL_SLOT(a1, 1))) return; frame.pc = pc; frame.sp = a1; if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data)) return; } } EXPORT_SYMBOL(xtensa_backtrace_user); void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth, int (*kfn)(struct stackframe *frame, void *data), int (*ufn)(struct stackframe *frame, void *data), void *data) { unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ? regs->depc : regs->pc; unsigned long sp_start, sp_end; unsigned long a0 = regs->areg[0]; unsigned long a1 = regs->areg[1]; sp_start = a1 & ~(THREAD_SIZE - 1); sp_end = sp_start + THREAD_SIZE; /* Spill the register window to the stack first. */ spill_registers(); /* Read the stack frames one by one and create the PC * from the a0 and a1 registers saved there. 
*/ while (a1 > sp_start && a1 < sp_end && depth--) { struct stackframe frame; frame.pc = pc; frame.sp = a1; if (kernel_text_address(pc) && kfn(&frame, data)) return; if (pc == (unsigned long)&common_exception_return) { regs = (struct pt_regs *)a1; if (user_mode(regs)) { if (ufn == NULL) return; xtensa_backtrace_user(regs, depth, ufn, data); return; } a0 = regs->areg[0]; a1 = regs->areg[1]; continue; } sp_start = a1; pc = MAKE_PC_FROM_RA(a0, pc); a0 = SPILL_SLOT(a1, 0); a1 = SPILL_SLOT(a1, 1); } } EXPORT_SYMBOL(xtensa_backtrace_kernel); #endif void walk_stackframe(unsigned long *sp, int (*fn)(struct stackframe *frame, void *data), void *data) { unsigned long a0, a1; unsigned long sp_end; a1 = (unsigned long)sp; sp_end = ALIGN(a1, THREAD_SIZE); spill_registers(); while (a1 < sp_end) { struct stackframe frame; sp = (unsigned long *)a1; a0 = SPILL_SLOT(a1, 0); a1 = SPILL_SLOT(a1, 1); if (a1 <= (unsigned long)sp) break; frame.pc = MAKE_PC_FROM_RA(a0, a1); frame.sp = a1; if (fn(&frame, data)) return; } } #ifdef CONFIG_STACKTRACE struct stack_trace_data { struct stack_trace *trace; unsigned skip; }; static int stack_trace_cb(struct stackframe *frame, void *data) { struct stack_trace_data *trace_data = data; struct stack_trace *trace = trace_data->trace; if (trace_data->skip) { --trace_data->skip; return 0; } if (!kernel_text_address(frame->pc)) return 0; trace->entries[trace->nr_entries++] = frame->pc; return trace->nr_entries >= trace->max_entries; } void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) { struct stack_trace_data trace_data = { .trace = trace, .skip = trace->skip, }; walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { save_stack_trace_tsk(current, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); #endif struct return_addr_data { unsigned long addr; unsigned skip; }; static int return_address_cb(struct stackframe *frame, void 
*data) { struct return_addr_data *r = data; if (r->skip) { --r->skip; return 0; } if (!kernel_text_address(frame->pc)) return 0; r->addr = frame->pc; return 1; } /* * level == 0 is for the return address from the caller of this function, * not from this function itself. */ unsigned long return_address(unsigned level) { struct return_addr_data r = { .skip = level, }; walk_stackframe(stack_pointer(NULL), return_address_cb, &r); return r.addr; } EXPORT_SYMBOL(return_address);
linux-master
arch/xtensa/kernel/stacktrace.c
/* * arch/xtensa/kernel/signal.c * * Default platform functions. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005, 2006 Tensilica Inc. * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * Chris Zankel <[email protected]> * Joe Taylor <[email protected]> */ #include <linux/signal.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/resume_user_mode.h> #include <linux/sched/task_stack.h> #include <asm/ucontext.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include <asm/coprocessor.h> #include <asm/unistd.h> extern struct task_struct *coproc_owners[]; struct rt_sigframe { struct siginfo info; struct ucontext uc; struct { xtregs_opt_t opt; xtregs_user_t user; #if XTENSA_HAVE_COPROCESSORS xtregs_coprocessor_t cp; #endif } xtregs; unsigned char retcode[6]; unsigned int window[4]; }; #if defined(USER_SUPPORT_WINDOWED) /* * Flush register windows stored in pt_regs to stack. * Returns 1 for errors. */ static int flush_window_regs_user(struct pt_regs *regs) { const unsigned long ws = regs->windowstart; const unsigned long wb = regs->windowbase; unsigned long sp = 0; unsigned long wm; int err = 1; int base; /* Return if no other frames. */ if (regs->wmask == 1) return 0; /* Rotate windowmask and skip empty frames. */ wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb)); base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4); /* For call8 or call12 frames, we need the previous stack pointer. */ if ((regs->wmask & 2) == 0) if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12))) goto errout; /* Spill frames to stack. 
*/ while (base < XCHAL_NUM_AREGS / 4) { int m = (wm >> base); int inc = 0; /* Save registers a4..a7 (call8) or a4...a11 (call12) */ if (m & 2) { /* call4 */ inc = 1; } else if (m & 4) { /* call8 */ if (copy_to_user(&SPILL_SLOT_CALL8(sp, 4), &regs->areg[(base + 1) * 4], 16)) goto errout; inc = 2; } else if (m & 8) { /* call12 */ if (copy_to_user(&SPILL_SLOT_CALL12(sp, 4), &regs->areg[(base + 1) * 4], 32)) goto errout; inc = 3; } /* Save current frame a0..a3 under next SP */ sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS]; if (copy_to_user(&SPILL_SLOT(sp, 0), &regs->areg[base * 4], 16)) goto errout; /* Get current stack pointer for next loop iteration. */ sp = regs->areg[base * 4 + 1]; base += inc; } regs->wmask = 1; regs->windowstart = 1 << wb; return 0; errout: return err; } #else static int flush_window_regs_user(struct pt_regs *regs) { return 0; } #endif /* * Note: We don't copy double exception 'regs', we have to finish double exc. * first before we return to signal handler! This dbl.exc.handler might cause * another double exception, but I think we are fine as the situation is the * same as if we had returned to the signal handerl and got an interrupt * immediately... 
*/ static int setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs) { struct sigcontext __user *sc = &frame->uc.uc_mcontext; struct thread_info *ti = current_thread_info(); int err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) COPY(pc); COPY(ps); COPY(lbeg); COPY(lend); COPY(lcount); COPY(sar); #undef COPY err |= flush_window_regs_user(regs); err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4); err |= __put_user(0, &sc->sc_xtregs); if (err) return err; #if XTENSA_HAVE_COPROCESSORS coprocessor_flush_release_all(ti); err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp, sizeof (frame->xtregs.cp)); #endif err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt, sizeof (xtregs_opt_t)); err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user, sizeof (xtregs_user_t)); err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs); return err; } static int restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame) { struct sigcontext __user *sc = &frame->uc.uc_mcontext; struct thread_info *ti = current_thread_info(); unsigned int err = 0; unsigned long ps; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(pc); COPY(lbeg); COPY(lend); COPY(lcount); COPY(sar); #undef COPY /* All registers were flushed to stack. Start with a pristine frame. */ regs->wmask = 1; regs->windowbase = 0; regs->windowstart = 1; regs->syscall = NO_SYSCALL; /* disable syscall checks */ /* For PS, restore only PS.CALLINC. * Assume that all other bits are either the same as for the signal * handler, or the user mode value doesn't matter (e.g. PS.OWB). 
*/ err |= __get_user(ps, &sc->sc_ps); regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK); /* Additional corruption checks */ if ((regs->lcount > 0) && ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) ) err = 1; err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4); if (err) return err; /* The signal handler may have used coprocessors in which * case they are still enabled. We disable them to force a * reloading of the original task's CP state by the lazy * context-switching mechanisms of CP exception handling. * Also, we essentially discard any coprocessor state that the * signal handler created. */ #if XTENSA_HAVE_COPROCESSORS coprocessor_release_all(ti); err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp, sizeof (frame->xtregs.cp)); #endif err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user, sizeof (xtregs_user_t)); err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt, sizeof (xtregs_opt_t)); return err; } /* * Do a signal return; undo the signal stack. */ asmlinkage long xtensa_rt_sigreturn(void) { struct pt_regs *regs = current_pt_regs(); struct rt_sigframe __user *frame; sigset_t set; int ret; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; if (regs->depc > 64) panic("rt_sigreturn in double exception!\n"); frame = (struct rt_sigframe __user *) regs->areg[1]; if (!access_ok(frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, frame)) goto badframe; ret = regs->areg[2]; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return ret; badframe: force_sig(SIGSEGV); return 0; } /* * Set up a signal frame. */ static int gen_return_code(unsigned char *codemem) { int err = 0; /* * The 12-bit immediate is really split up within the 24-bit MOVI * instruction. 
As long as the above system call numbers fit within * 8-bits, the following code works fine. See the Xtensa ISA for * details. */ #if __NR_rt_sigreturn > 255 # error Generating the MOVI instruction below breaks! #endif #ifdef __XTENSA_EB__ /* Big Endian version */ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ err |= __put_user(0x22, &codemem[0]); err |= __put_user(0x0a, &codemem[1]); err |= __put_user(__NR_rt_sigreturn, &codemem[2]); /* Generate instruction: SYSCALL */ err |= __put_user(0x00, &codemem[3]); err |= __put_user(0x05, &codemem[4]); err |= __put_user(0x00, &codemem[5]); #elif defined __XTENSA_EL__ /* Little Endian version */ /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ err |= __put_user(0x22, &codemem[0]); err |= __put_user(0xa0, &codemem[1]); err |= __put_user(__NR_rt_sigreturn, &codemem[2]); /* Generate instruction: SYSCALL */ err |= __put_user(0x00, &codemem[3]); err |= __put_user(0x50, &codemem[4]); err |= __put_user(0x00, &codemem[5]); #else # error Must use compiler for Xtensa processors. 
#endif /* Flush generated code out of the data cache */ if (err == 0) { __invalidate_icache_range((unsigned long)codemem, 6UL); __flush_invalidate_dcache_range((unsigned long)codemem, 6UL); } return err; } static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe *frame; int err = 0, sig = ksig->sig; unsigned long sp, ra, tp, ps; unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; unsigned long handler_fdpic_GOT = 0; unsigned int base; bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && (current->personality & FDPIC_FUNCPTRS); if (fdpic) { unsigned long __user *fdpic_func_desc = (unsigned long __user *)handler; if (__get_user(handler, &fdpic_func_desc[0]) || __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) return -EFAULT; } sp = regs->areg[1]; if ((ksig->ka.sa.sa_flags & SA_ONSTACK) != 0 && sas_ss_flags(sp) == 0) { sp = current->sas_ss_sp + current->sas_ss_size; } frame = (void *)((sp - sizeof(*frame)) & -16ul); if (regs->depc > 64) panic ("Double exception sys_sigreturn\n"); if (!access_ok(frame, sizeof(*frame))) { return -EFAULT; } if (ksig->ka.sa.sa_flags & SA_SIGINFO) { err |= copy_siginfo_to_user(&frame->info, &ksig->info); } /* Create the user context. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->areg[1]); err |= setup_sigcontext(frame, regs); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (ksig->ka.sa.sa_flags & SA_RESTORER) { if (fdpic) { unsigned long __user *fdpic_func_desc = (unsigned long __user *)ksig->ka.sa.sa_restorer; err |= __get_user(ra, fdpic_func_desc); } else { ra = (unsigned long)ksig->ka.sa.sa_restorer; } } else { /* Create sys_rt_sigreturn syscall in stack frame */ err |= gen_return_code(frame->retcode); ra = (unsigned long) frame->retcode; } if (err) return -EFAULT; /* * Create signal handler execution context. * Return context not modified until this point. 
*/ /* Set up registers for signal handler; preserve the threadptr */ tp = regs->threadptr; ps = regs->ps; start_thread(regs, handler, (unsigned long)frame); /* Set up a stack frame for a call4 if userspace uses windowed ABI */ if (ps & PS_WOE_MASK) { base = 4; regs->areg[base] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000; ps = (ps & ~(PS_CALLINC_MASK | PS_OWB_MASK)) | (1 << PS_CALLINC_SHIFT); } else { base = 0; regs->areg[base] = (unsigned long) ra; } regs->areg[base + 2] = (unsigned long) sig; regs->areg[base + 3] = (unsigned long) &frame->info; regs->areg[base + 4] = (unsigned long) &frame->uc; regs->threadptr = tp; regs->ps = ps; if (fdpic) regs->areg[base + 11] = handler_fdpic_GOT; pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n", current->comm, current->pid, sig, frame, regs->pc); return 0; } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. */ static void do_signal(struct pt_regs *regs) { struct ksignal ksig; task_pt_regs(current)->icountlevel = 0; if (get_signal(&ksig)) { int ret; /* Are we from a system call? */ if (regs->syscall != NO_SYSCALL) { /* If so, check system call restarting.. */ switch (regs->areg[2]) { case -ERESTARTNOHAND: case -ERESTART_RESTARTBLOCK: regs->areg[2] = -EINTR; break; case -ERESTARTSYS: if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { regs->areg[2] = -EINTR; break; } fallthrough; case -ERESTARTNOINTR: regs->areg[2] = regs->syscall; regs->pc -= 3; break; default: /* nothing to do */ if (regs->areg[2] != 0) break; } } /* Whee! Actually deliver the signal. 
*/ /* Set up the stack frame */ ret = setup_frame(&ksig, sigmask_to_save(), regs); signal_setup_done(ret, &ksig, 0); if (test_thread_flag(TIF_SINGLESTEP)) task_pt_regs(current)->icountlevel = 1; return; } /* Did we come from a system call? */ if (regs->syscall != NO_SYSCALL) { /* Restart the system call - no handlers present */ switch (regs->areg[2]) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->areg[2] = regs->syscall; regs->pc -= 3; break; case -ERESTART_RESTARTBLOCK: regs->areg[2] = __NR_restart_syscall; regs->pc -= 3; break; } } /* If there's no signal to deliver, we just restore the saved mask. */ restore_saved_sigmask(); if (test_thread_flag(TIF_SINGLESTEP)) task_pt_regs(current)->icountlevel = 1; return; } void do_notify_resume(struct pt_regs *regs) { if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_NOTIFY_SIGNAL)) do_signal(regs); if (test_thread_flag(TIF_NOTIFY_RESUME)) resume_user_mode_work(regs); }
linux-master
arch/xtensa/kernel/signal.c
/* * Xtensa SMP support functions. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 - 2013 Tensilica Inc. * * Chris Zankel <[email protected]> * Joe Taylor <[email protected]> * Pete Delaney <[email protected] */ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/sched/mm.h> #include <linux/sched/hotplug.h> #include <linux/sched/task_stack.h> #include <linux/reboot.h> #include <linux/seq_file.h> #include <linux/smp.h> #include <linux/thread_info.h> #include <asm/cacheflush.h> #include <asm/coprocessor.h> #include <asm/kdebug.h> #include <asm/mmu_context.h> #include <asm/mxregs.h> #include <asm/platform.h> #include <asm/tlbflush.h> #include <asm/traps.h> #ifdef CONFIG_SMP # if XCHAL_HAVE_S32C1I == 0 # error "The S32C1I option is required for SMP." # endif #endif static void system_invalidate_dcache_range(unsigned long start, unsigned long size); static void system_flush_invalidate_dcache_range(unsigned long start, unsigned long size); /* IPI (Inter Process Interrupt) */ #define IPI_IRQ 0 static irqreturn_t ipi_interrupt(int irq, void *dev_id); void ipi_init(void) { unsigned irq = irq_create_mapping(NULL, IPI_IRQ); if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL)) pr_err("Failed to request irq %u (ipi)\n", irq); } static inline unsigned int get_core_count(void) { /* Bits 18..21 of SYSCFGID contain the core count minus 1. 
*/ unsigned int syscfgid = get_er(SYSCFGID); return ((syscfgid >> 18) & 0xf) + 1; } static inline int get_core_id(void) { /* Bits 0...18 of SYSCFGID contain the core id */ unsigned int core_id = get_er(SYSCFGID); return core_id & 0x3fff; } void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned i; for_each_possible_cpu(i) set_cpu_present(i, true); } void __init smp_init_cpus(void) { unsigned i; unsigned int ncpus = get_core_count(); unsigned int core_id = get_core_id(); pr_info("%s: Core Count = %d\n", __func__, ncpus); pr_info("%s: Core Id = %d\n", __func__, core_id); if (ncpus > NR_CPUS) { ncpus = NR_CPUS; pr_info("%s: limiting core count by %d\n", __func__, ncpus); } for (i = 0; i < ncpus; ++i) set_cpu_possible(i, true); } void __init smp_prepare_boot_cpu(void) { unsigned int cpu = smp_processor_id(); BUG_ON(cpu != 0); cpu_asid_cache(cpu) = ASID_USER_FIRST; } void __init smp_cpus_done(unsigned int max_cpus) { } static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */ static DECLARE_COMPLETION(cpu_running); void secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); init_mmu(); #ifdef CONFIG_DEBUG_MISC if (boot_secondary_processors == 0) { pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n", __func__, boot_secondary_processors, cpu); for (;;) __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL)); } pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n", __func__, boot_secondary_processors, cpu); #endif /* Init EXCSAVE1 */ secondary_trap_init(); /* All kernel threads share the same mm context. 
*/ mmget(mm); mmgrab(mm); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); enter_lazy_tlb(mm, current); trace_hardirqs_off(); calibrate_delay(); notify_cpu_starting(cpu); secondary_init_irq(); local_timer_setup(cpu); set_cpu_online(cpu, true); local_irq_enable(); complete(&cpu_running); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } static void mx_cpu_start(void *p) { unsigned cpu = (unsigned)p; unsigned long run_stall_mask = get_er(MPSCORE); set_er(run_stall_mask & ~(1u << cpu), MPSCORE); pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", __func__, cpu, run_stall_mask, get_er(MPSCORE)); } static void mx_cpu_stop(void *p) { unsigned cpu = (unsigned)p; unsigned long run_stall_mask = get_er(MPSCORE); set_er(run_stall_mask | (1u << cpu), MPSCORE); pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n", __func__, cpu, run_stall_mask, get_er(MPSCORE)); } #ifdef CONFIG_HOTPLUG_CPU unsigned long cpu_start_id __cacheline_aligned; #endif unsigned long cpu_start_ccount; static int boot_secondary(unsigned int cpu, struct task_struct *ts) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); unsigned long ccount; int i; #ifdef CONFIG_HOTPLUG_CPU WRITE_ONCE(cpu_start_id, cpu); /* Pairs with the third memw in the cpu_restart */ mb(); system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, sizeof(cpu_start_id)); #endif smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); for (i = 0; i < 2; ++i) { do ccount = get_ccount(); while (!ccount); WRITE_ONCE(cpu_start_ccount, ccount); do { /* * Pairs with the first two memws in the * .Lboot_secondary. 
*/ mb(); ccount = READ_ONCE(cpu_start_ccount); } while (ccount && time_before(jiffies, timeout)); if (ccount) { smp_call_function_single(0, mx_cpu_stop, (void *)cpu, 1); WRITE_ONCE(cpu_start_ccount, 0); return -EIO; } } return 0; } int __cpu_up(unsigned int cpu, struct task_struct *idle) { int ret = 0; if (cpu_asid_cache(cpu) == 0) cpu_asid_cache(cpu) = ASID_USER_FIRST; start_info.stack = (unsigned long)task_pt_regs(idle); wmb(); pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", __func__, cpu, idle, start_info.stack); init_completion(&cpu_running); ret = boot_secondary(cpu, idle); if (ret == 0) { wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000)); if (!cpu_online(cpu)) ret = -EIO; } if (ret) pr_err("CPU %u failed to boot\n", cpu); return ret; } #ifdef CONFIG_HOTPLUG_CPU /* * __cpu_disable runs on the processor to be shutdown. */ int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); /* * Take this CPU offline. Once we clear this, we can't return, * and we must not schedule until we're ready to give up the cpu. */ set_cpu_online(cpu, false); #if XTENSA_HAVE_COPROCESSORS /* * Flush coprocessor contexts that are active on the current CPU. */ local_coprocessors_flush_release_all(); #endif /* * OK - migrate IRQs away from this CPU */ migrate_irqs(); /* * Flush user cache and TLB mappings, and then remove this CPU * from the vm mask set of all processes. */ local_flush_cache_all(); local_flush_tlb_all(); invalidate_page_directory(); clear_tasks_mm_cpumask(cpu); return 0; } static void platform_cpu_kill(unsigned int cpu) { smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true); } /* * called on the thread which is asking for a CPU to be shutdown - * waits until shutdown has completed, or it is timed out. 
*/ void __cpu_die(unsigned int cpu) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); while (time_before(jiffies, timeout)) { system_invalidate_dcache_range((unsigned long)&cpu_start_id, sizeof(cpu_start_id)); /* Pairs with the second memw in the cpu_restart */ mb(); if (READ_ONCE(cpu_start_id) == -cpu) { platform_cpu_kill(cpu); return; } } pr_err("CPU%u: unable to kill\n", cpu); } void __noreturn arch_cpu_idle_dead(void) { cpu_die(); } /* * Called from the idle thread for the CPU which has been shutdown. * * Note that we disable IRQs here, but do not re-enable them * before returning to the caller. This is also the behaviour * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. */ void __ref cpu_die(void) { idle_task_exit(); local_irq_disable(); __asm__ __volatile__( " movi a2, cpu_restart\n" " jx a2\n"); BUG(); } #endif /* CONFIG_HOTPLUG_CPU */ enum ipi_msg_type { IPI_RESCHEDULE = 0, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_MAX }; static const struct { const char *short_text; const char *long_text; } ipi_text[] = { { .short_text = "RES", .long_text = "Rescheduling interrupts" }, { .short_text = "CAL", .long_text = "Function call interrupts" }, { .short_text = "DIE", .long_text = "CPU shutdown interrupts" }, }; struct ipi_data { unsigned long ipi_count[IPI_MAX]; }; static DEFINE_PER_CPU(struct ipi_data, ipi_data); static void send_ipi_message(const struct cpumask *callmask, enum ipi_msg_type msg_id) { int index; unsigned long mask = 0; for_each_cpu(index, callmask) mask |= 1 << index; set_er(mask, MIPISET(msg_id)); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_ipi_message(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } void arch_smp_send_reschedule(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } void smp_send_stop(void) { struct cpumask targets; cpumask_copy(&targets, cpu_online_mask); 
cpumask_clear_cpu(smp_processor_id(), &targets); send_ipi_message(&targets, IPI_CPU_STOP); } static void ipi_cpu_stop(unsigned int cpu) { set_cpu_online(cpu, false); machine_halt(); } irqreturn_t ipi_interrupt(int irq, void *dev_id) { unsigned int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); for (;;) { unsigned int msg; msg = get_er(MIPICAUSE(cpu)); set_er(msg, MIPICAUSE(cpu)); if (!msg) break; if (msg & (1 << IPI_CALL_FUNC)) { ++ipi->ipi_count[IPI_CALL_FUNC]; generic_smp_call_function_interrupt(); } if (msg & (1 << IPI_RESCHEDULE)) { ++ipi->ipi_count[IPI_RESCHEDULE]; scheduler_ipi(); } if (msg & (1 << IPI_CPU_STOP)) { ++ipi->ipi_count[IPI_CPU_STOP]; ipi_cpu_stop(cpu); } } return IRQ_HANDLED; } void show_ipi_list(struct seq_file *p, int prec) { unsigned int cpu; unsigned i; for (i = 0; i < IPI_MAX; ++i) { seq_printf(p, "%*s:", prec, ipi_text[i].short_text); for_each_online_cpu(cpu) seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count[i]); seq_printf(p, " %s\n", ipi_text[i].long_text); } } int setup_profiling_timer(unsigned int multiplier) { pr_debug("setup_profiling_timer %d\n", multiplier); return 0; } /* TLB flush functions */ struct flush_data { struct vm_area_struct *vma; unsigned long addr1; unsigned long addr2; }; static void ipi_flush_tlb_all(void *arg) { local_flush_tlb_all(); } void flush_tlb_all(void) { on_each_cpu(ipi_flush_tlb_all, NULL, 1); } static void ipi_flush_tlb_mm(void *arg) { local_flush_tlb_mm(arg); } void flush_tlb_mm(struct mm_struct *mm) { on_each_cpu(ipi_flush_tlb_mm, mm, 1); } static void ipi_flush_tlb_page(void *arg) { struct flush_data *fd = arg; local_flush_tlb_page(fd->vma, fd->addr1); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { struct flush_data fd = { .vma = vma, .addr1 = addr, }; on_each_cpu(ipi_flush_tlb_page, &fd, 1); } static void ipi_flush_tlb_range(void *arg) { struct flush_data *fd = arg; local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); } void 
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct flush_data fd = { .vma = vma, .addr1 = start, .addr2 = end, }; on_each_cpu(ipi_flush_tlb_range, &fd, 1); } static void ipi_flush_tlb_kernel_range(void *arg) { struct flush_data *fd = arg; local_flush_tlb_kernel_range(fd->addr1, fd->addr2); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) { struct flush_data fd = { .addr1 = start, .addr2 = end, }; on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1); } /* Cache flush functions */ static void ipi_flush_cache_all(void *arg) { local_flush_cache_all(); } void flush_cache_all(void) { on_each_cpu(ipi_flush_cache_all, NULL, 1); } static void ipi_flush_cache_page(void *arg) { struct flush_data *fd = arg; local_flush_cache_page(fd->vma, fd->addr1, fd->addr2); } void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) { struct flush_data fd = { .vma = vma, .addr1 = address, .addr2 = pfn, }; on_each_cpu(ipi_flush_cache_page, &fd, 1); } static void ipi_flush_cache_range(void *arg) { struct flush_data *fd = arg; local_flush_cache_range(fd->vma, fd->addr1, fd->addr2); } void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct flush_data fd = { .vma = vma, .addr1 = start, .addr2 = end, }; on_each_cpu(ipi_flush_cache_range, &fd, 1); } static void ipi_flush_icache_range(void *arg) { struct flush_data *fd = arg; local_flush_icache_range(fd->addr1, fd->addr2); } void flush_icache_range(unsigned long start, unsigned long end) { struct flush_data fd = { .addr1 = start, .addr2 = end, }; on_each_cpu(ipi_flush_icache_range, &fd, 1); } EXPORT_SYMBOL(flush_icache_range); /* ------------------------------------------------------------------------- */ static void ipi_invalidate_dcache_range(void *arg) { struct flush_data *fd = arg; __invalidate_dcache_range(fd->addr1, fd->addr2); } static void system_invalidate_dcache_range(unsigned long start, unsigned long 
size) { struct flush_data fd = { .addr1 = start, .addr2 = size, }; on_each_cpu(ipi_invalidate_dcache_range, &fd, 1); } static void ipi_flush_invalidate_dcache_range(void *arg) { struct flush_data *fd = arg; __flush_invalidate_dcache_range(fd->addr1, fd->addr2); } static void system_flush_invalidate_dcache_range(unsigned long start, unsigned long size) { struct flush_data fd = { .addr1 = start, .addr2 = size, }; on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1); }
linux-master
arch/xtensa/kernel/smp.c
/*
 * Xtensa hardware breakpoints/watchpoints handling functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <linux/hw_breakpoint.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <asm/core.h>

/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/* Watchpoint currently in use for each DBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);

/*
 * Report how many hardware slots exist for the given breakpoint type.
 * Slot counts come from the Xtensa core configuration (XCHAL_*).
 */
int hw_breakpoint_slots(int type)
{
	switch (type) {
	case TYPE_INST:
		return XCHAL_NUM_IBREAK;
	case TYPE_DATA:
		return XCHAL_NUM_DBREAK;
	default:
		pr_warn("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Return true iff the whole [address, address + len) range of the
 * breakpoint lies in kernel space (at or above TASK_SIZE).
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = hw->len;

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->type = XTENSA_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->type = XTENSA_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->type = XTENSA_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len: hardware supports power-of-2 watch ranges from 1 to 64 bytes */
	hw->len = attr->bp_len;
	if (hw->len < 1 || hw->len > 64 || !is_power_of_2(hw->len))
		return -EINVAL;

	/* Address must be naturally aligned to the watched length */
	hw->address = attr->bp_addr;
	if (hw->address & (hw->len - 1))
		return -EINVAL;

	return 0;
}

/* No extra debug-exception filtering is needed on xtensa. */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

/*
 * Write value v to special register sr, where sr is one of the
 * IBREAKA/DBREAKA/DBREAKC registers selected at run time.
 */
static void xtensa_wsr(unsigned long v, u8 sr)
{
	/* We don't have indexed wsr and creating instruction dynamically
	 * doesn't seem worth it given how small XCHAL_NUM_IBREAK and
	 * XCHAL_NUM_DBREAK are. Thus the switch. In case build breaks here
	 * the switch below needs to be extended.
	 */
	BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
	BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

	switch (sr) {
#if XCHAL_NUM_IBREAK > 0
	case SREG_IBREAKA + 0:
		xtensa_set_sr(v, SREG_IBREAKA + 0);
		break;
#endif
#if XCHAL_NUM_IBREAK > 1
	case SREG_IBREAKA + 1:
		xtensa_set_sr(v, SREG_IBREAKA + 1);
		break;
#endif
#if XCHAL_NUM_DBREAK > 0
	case SREG_DBREAKA + 0:
		xtensa_set_sr(v, SREG_DBREAKA + 0);
		break;
	case SREG_DBREAKC + 0:
		xtensa_set_sr(v, SREG_DBREAKC + 0);
		break;
#endif
#if XCHAL_NUM_DBREAK > 1
	case SREG_DBREAKA + 1:
		xtensa_set_sr(v, SREG_DBREAKA + 1);
		break;
	case SREG_DBREAKC + 1:
		xtensa_set_sr(v, SREG_DBREAKC + 1);
		break;
#endif
	}
}

/*
 * Claim the first empty slot in the per-CPU slot array for bp.
 * Returns the slot index, or -EBUSY when all slots are taken.
 */
static int alloc_slot(struct perf_event **slot, size_t n,
		      struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (!slot[i]) {
			slot[i] = bp;
			return i;
		}
	}
	return -EBUSY;
}

/*
 * Program instruction breakpoint registers: write the address into
 * IBREAKA[reg] and set the matching enable bit in IBREAKENABLE.
 */
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned long ibreakenable;

	xtensa_wsr(info->address, SREG_IBREAKA + reg);
	ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
	xtensa_set_sr(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}

/*
 * Program data breakpoint registers: the address goes into DBREAKA[reg],
 * while DBREAKC[reg] gets the byte-mask derived from the (power-of-2) length
 * plus the load/store enable bits.
 */
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	/* -len of a power-of-2 len yields the aligned address mask */
	unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

	if (info->type & XTENSA_BREAKPOINT_LOAD)
		dbreakc |= DBREAKC_LOAD_MASK;
	if (info->type & XTENSA_BREAKPOINT_STORE)
		dbreakc |= DBREAKC_STOR_MASK;

	xtensa_wsr(info->address, SREG_DBREAKA + reg);
	xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}

/*
 * Install bp on the current CPU: allocate a free IBREAK or DBREAK slot
 * and program the corresponding hardware registers.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	int i;

	if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i < 0)
			return i;
		set_ibreak_regs(i, bp);
	} else {
		/* Watchpoint */
		i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i < 0)
			return i;
		set_dbreak_regs(i, bp);
	}
	return 0;
}

/*
 * Release the slot occupied by bp in the per-CPU slot array.
 * Returns the freed slot index, or -EBUSY when bp was not installed.
 */
static int free_slot(struct perf_event **slot, size_t n,
		     struct perf_event *bp)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (slot[i] == bp) {
			slot[i] = NULL;
			return i;
		}
	}
	return -EBUSY;
}

/*
 * Remove bp from the current CPU: free its slot and disable the hardware
 * breakpoint (clear the IBREAKENABLE bit, or zero DBREAKC for watchpoints).
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
		unsigned long ibreakenable;

		/* Breakpoint */
		i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
		if (i >= 0) {
			ibreakenable = xtensa_get_sr(SREG_IBREAKENABLE);
			xtensa_set_sr(ibreakenable & ~(1 << i),
				      SREG_IBREAKENABLE);
		}
	} else {
		/* Watchpoint */
		i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
		if (i >= 0)
			xtensa_wsr(0, SREG_DBREAKC + i);
	}
}

/* Nothing to read back for breakpoint events on this architecture. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Unregister and forget all ptrace-installed breakpoints/watchpoints
 * of task tsk (called when the task exits).
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
		if (t->ptrace_bp[i]) {
			unregister_hw_breakpoint(t->ptrace_bp[i]);
			t->ptrace_bp[i] = NULL;
		}
	}
	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		if (t->ptrace_wp[i]) {
			unregister_hw_breakpoint(t->ptrace_wp[i]);
			t->ptrace_wp[i] = NULL;
		}
	}
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
	memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
}

/*
 * Re-enable the data breakpoints that were temporarily disabled by
 * check_hw_breakpoint() on a kernel-mode hit, then clear TIF_DB_DISABLED.
 */
void restore_dbreak(void)
{
	int i;

	for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
		struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];

		if (bp)
			set_dbreak_regs(i, bp);
	}
	clear_thread_flag(TIF_DB_DISABLED);
}

/*
 * Debug-exception handler hook: decode DEBUGCAUSE and dispatch perf
 * events for instruction breakpoints and data watchpoints.
 * Returns 0 when the exception was a break/watch hit, -ENOENT otherwise.
 */
int check_hw_breakpoint(struct pt_regs *regs)
{
	if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
		int i;
		struct perf_event **bp = this_cpu_ptr(bp_on_reg);

		for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
			if (bp[i] && !bp[i]->attr.disabled &&
			    regs->pc == bp[i]->attr.bp_addr)
				perf_bp_event(bp[i], regs);
		}
		return 0;
	} else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
		struct perf_event **bp = this_cpu_ptr(wp_on_reg);
		/* DEBUGCAUSE encodes which DBREAK register matched */
		int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
			DEBUGCAUSE_DBNUM_SHIFT;

		if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
			if (user_mode(regs)) {
				perf_bp_event(bp[dbnum], regs);
			} else {
				/* Disable the watchpoint so the kernel can
				 * make progress; restore_dbreak() re-arms it.
				 */
				set_thread_flag(TIF_DB_DISABLED);
				xtensa_wsr(0, SREG_DBREAKC + dbnum);
			}
		} else {
			WARN_ONCE(1,
				  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
				  dbnum);
		}
		return 0;
	}
	return -ENOENT;
}
linux-master
arch/xtensa/kernel/hw_breakpoint.c
// SPDX-License-Identifier: GPL-2.0-only /* * CRC vpmsum tester * Copyright 2017 Daniel Axtens, IBM Corporation. */ #include <linux/crc-t10dif.h> #include <linux/crc32.h> #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/random.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/cpufeature.h> #include <asm/switch_to.h> static unsigned long iterations = 10000; #define MAX_CRC_LENGTH 65535 static int __init crc_test_init(void) { u16 crc16 = 0, verify16 = 0; __le32 verify32le = 0; unsigned char *data; u32 verify32 = 0; unsigned long i; __le32 crc32; int ret; struct crypto_shash *crct10dif_tfm; struct crypto_shash *crc32c_tfm; if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return -ENODEV; data = kmalloc(MAX_CRC_LENGTH, GFP_KERNEL); if (!data) return -ENOMEM; crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); if (IS_ERR(crct10dif_tfm)) { pr_err("Error allocating crc-t10dif\n"); goto free_buf; } crc32c_tfm = crypto_alloc_shash("crc32c", 0, 0); if (IS_ERR(crc32c_tfm)) { pr_err("Error allocating crc32c\n"); goto free_16; } do { SHASH_DESC_ON_STACK(crct10dif_shash, crct10dif_tfm); SHASH_DESC_ON_STACK(crc32c_shash, crc32c_tfm); crct10dif_shash->tfm = crct10dif_tfm; ret = crypto_shash_init(crct10dif_shash); if (ret) { pr_err("Error initing crc-t10dif\n"); goto free_32; } crc32c_shash->tfm = crc32c_tfm; ret = crypto_shash_init(crc32c_shash); if (ret) { pr_err("Error initing crc32c\n"); goto free_32; } pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations); for (i=0; i<iterations; i++) { size_t offset = get_random_u32_below(16); size_t len = get_random_u32_below(MAX_CRC_LENGTH); if (len <= offset) continue; get_random_bytes(data, len); len -= offset; crypto_shash_update(crct10dif_shash, data+offset, len); crypto_shash_final(crct10dif_shash, (u8 *)(&crc16)); verify16 = crc_t10dif_generic(verify16, data+offset, len); if (crc16 != verify16) { pr_err("FAILURE in CRC16: got 0x%04x expected 0x%04x (len 
%lu)\n", crc16, verify16, len); break; } crypto_shash_update(crc32c_shash, data+offset, len); crypto_shash_final(crc32c_shash, (u8 *)(&crc32)); verify32 = le32_to_cpu(verify32le); verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len)); if (crc32 != verify32le) { pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n", crc32, verify32, len); break; } cond_resched(); } pr_info("crc-vpmsum_test done, completed %lu iterations\n", i); } while (0); free_32: crypto_free_shash(crc32c_tfm); free_16: crypto_free_shash(crct10dif_tfm); free_buf: kfree(data); return 0; } static void __exit crc_test_exit(void) {} module_init(crc_test_init); module_exit(crc_test_exit); module_param(iterations, long, 0400); MODULE_AUTHOR("Daniel Axtens <[email protected]>"); MODULE_DESCRIPTION("Vector polynomial multiply-sum CRC tester"); MODULE_LICENSE("GPL");
linux-master
arch/powerpc/crypto/crc-vpmsum_test.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for SHA-1 implementation for SPE instructions (PPC)
 *
 * Based on generic implementation.
 *
 * Copyright (c) 2015 Markus Stockhausen <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>

/*
 * MAX_BYTES defines the number of bytes that are allowed to be processed
 * between preempt_disable() and preempt_enable(). SHA1 takes ~1000
 * operations per 64 bytes. e500 cores can issue two arithmetic instructions
 * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
 * Thus 2KB of input data will need an estimated maximum of 18,000 cycles.
 * Headroom for cache misses included. Even with the low end model clocked
 * at 667 MHz this equals to a critical time window of less than 27us.
 *
 */
#define MAX_BYTES 2048

/* Assembler core: processes "blocks" 64-byte blocks from src into state. */
extern void ppc_spe_sha1_transform(u32 *state, const u8 *src, u32 blocks);

static void spe_begin(void)
{
	/* We just start SPE operations and will save SPE registers later. */
	preempt_disable();
	enable_kernel_spe();
}

static void spe_end(void)
{
	disable_kernel_spe();
	/* reenable preemption */
	preempt_enable();
}

/* Zeroize the hash state word-by-word (faster than a bytewise memset). */
static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
{
	int count = sizeof(struct sha1_state) >> 2;
	u32 *ptr = (u32 *)sctx;

	/* make sure we can clear the fast way */
	BUILD_BUG_ON(sizeof(struct sha1_state) % 4);
	do { *ptr++ = 0; } while (--count);
}

/*
 * shash .update: buffer partial blocks; run the SPE transform on full
 * 64-byte blocks, limiting each non-preemptible stretch to MAX_BYTES.
 */
static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	const unsigned int avail = 64 - offset;
	unsigned int bytes;
	const u8 *src = data;

	/* not enough for a full block yet: just stash the input */
	if (avail > len) {
		sctx->count += len;
		memcpy((char *)sctx->buffer + offset, src, len);
		return 0;
	}

	sctx->count += len;

	/* complete and process the previously buffered partial block */
	if (offset) {
		memcpy((char *)sctx->buffer + offset, src, avail);

		spe_begin();
		ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
		spe_end();

		len -= avail;
		src += avail;
	}

	/* bulk blocks, at most MAX_BYTES per preempt-off window */
	while (len > 63) {
		bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
		bytes = bytes & ~0x3f;

		spe_begin();
		ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
		spe_end();

		src += bytes;
		len -= bytes;
	}

	/* keep the trailing partial block for the next call */
	memcpy((char *)sctx->buffer, src, len);
	return 0;
}

/*
 * shash .final: apply SHA-1 padding (0x80, zeros, 64-bit bit count),
 * run the last transform(s), emit the big-endian digest and wipe state.
 */
static int ppc_spe_sha1_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	char *p = (char *)sctx->buffer + offset;
	int padlen;
	/* bit count goes in the last 8 bytes of the final block */
	__be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
	__be32 *dst = (__be32 *)out;

	padlen = 55 - offset;
	*p++ = 0x80;

	spe_begin();

	/* no room for the length in this block: pad it out, process it,
	 * and continue padding in a fresh block */
	if (padlen < 0) {
		memset(p, 0x00, padlen + sizeof (u64));
		ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
		p = (char *)sctx->buffer;
		padlen = 56;
	}

	memset(p, 0, padlen);
	*pbits = cpu_to_be64(sctx->count << 3);
	ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);

	spe_end();

	dst[0] = cpu_to_be32(sctx->state[0]);
	dst[1] = cpu_to_be32(sctx->state[1]);
	dst[2] = cpu_to_be32(sctx->state[2]);
	dst[3] = cpu_to_be32(sctx->state[3]);
	dst[4] = cpu_to_be32(sctx->state[4]);

	ppc_sha1_clear_context(sctx);
	return 0;
}

/* shash .export: the raw sha1_state is the exported representation. */
static int ppc_spe_sha1_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

/* shash .import: restore a previously exported sha1_state. */
static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_base_init,
	.update		=	ppc_spe_sha1_update,
	.final		=	ppc_spe_sha1_final,
	.export		=	ppc_spe_sha1_export,
	.import		=	ppc_spe_sha1_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-ppc-spe",
		.cra_priority	=	300,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static int __init ppc_spe_sha1_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit ppc_spe_sha1_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(ppc_spe_sha1_mod_init);
module_exit(ppc_spe_sha1_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, SPE optimized");

MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-ppc-spe");
linux-master
arch/powerpc/crypto/sha1-spe-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for SHA-256 implementation for SPE instructions (PPC)
 *
 * Based on generic implementation. The assembler module takes care
 * about the SPE registers so it can run from interrupt context.
 *
 * Copyright (c) 2015 Markus Stockhausen <[email protected]>
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>

/*
 * MAX_BYTES defines the number of bytes that are allowed to be processed
 * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
 * operations per 64 bytes. e500 cores can issue two arithmetic instructions
 * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
 * Thus 1KB of input data will need an estimated maximum of 18,000 cycles.
 * Headroom for cache misses included. Even with the low end model clocked
 * at 667 MHz this equals to a critical time window of less than 27us.
 *
 */
#define MAX_BYTES 1024

/* Assembler core: processes "blocks" 64-byte blocks from src into state. */
extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);

static void spe_begin(void)
{
	/* We just start SPE operations and will save SPE registers later. */
	preempt_disable();
	enable_kernel_spe();
}

static void spe_end(void)
{
	disable_kernel_spe();
	/* reenable preemption */
	preempt_enable();
}

/* Zeroize the hash state word-by-word (faster than a bytewise memset). */
static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
{
	int count = sizeof(struct sha256_state) >> 2;
	u32 *ptr = (u32 *)sctx;

	/* make sure we can clear the fast way */
	BUILD_BUG_ON(sizeof(struct sha256_state) % 4);
	do { *ptr++ = 0; } while (--count);
}

/*
 * shash .update (shared by sha256 and sha224): buffer partial blocks and
 * run the SPE transform on full 64-byte blocks, limiting each
 * non-preemptible stretch to MAX_BYTES.
 */
static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
			unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	const unsigned int avail = 64 - offset;
	unsigned int bytes;
	const u8 *src = data;

	/* not enough for a full block yet: just stash the input */
	if (avail > len) {
		sctx->count += len;
		memcpy((char *)sctx->buf + offset, src, len);
		return 0;
	}

	sctx->count += len;

	/* complete and process the previously buffered partial block */
	if (offset) {
		memcpy((char *)sctx->buf + offset, src, avail);

		spe_begin();
		ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
		spe_end();

		len -= avail;
		src += avail;
	}

	while (len > 63) {
		/* cut input data into smaller blocks */
		bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
		bytes = bytes & ~0x3f;

		spe_begin();
		ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
		spe_end();

		src += bytes;
		len -= bytes;
	}

	/* keep the trailing partial block for the next call */
	memcpy((char *)sctx->buf, src, len);
	return 0;
}

/*
 * shash .final for sha256: apply SHA-2 padding (0x80, zeros, 64-bit bit
 * count), run the last transform(s), emit the big-endian digest and wipe
 * the context.
 */
static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	const unsigned int offset = sctx->count & 0x3f;
	char *p = (char *)sctx->buf + offset;
	int padlen;
	/* bit count goes in the last 8 bytes of the final block */
	__be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
	__be32 *dst = (__be32 *)out;

	padlen = 55 - offset;
	*p++ = 0x80;

	spe_begin();

	/* no room for the length in this block: pad it out, process it,
	 * and continue padding in a fresh block */
	if (padlen < 0) {
		memset(p, 0x00, padlen + sizeof (u64));
		ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
		p = (char *)sctx->buf;
		padlen = 56;
	}

	memset(p, 0, padlen);
	*pbits = cpu_to_be64(sctx->count << 3);
	ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);

	spe_end();

	dst[0] = cpu_to_be32(sctx->state[0]);
	dst[1] = cpu_to_be32(sctx->state[1]);
	dst[2] = cpu_to_be32(sctx->state[2]);
	dst[3] = cpu_to_be32(sctx->state[3]);
	dst[4] = cpu_to_be32(sctx->state[4]);
	dst[5] = cpu_to_be32(sctx->state[5]);
	dst[6] = cpu_to_be32(sctx->state[6]);
	dst[7] = cpu_to_be32(sctx->state[7]);

	ppc_sha256_clear_context(sctx);
	return 0;
}

/*
 * shash .final for sha224: run the full SHA-256 finalization into a
 * scratch buffer, then copy out only the first 7 words of the digest.
 */
static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
{
	__be32 D[SHA256_DIGEST_SIZE >> 2];
	__be32 *dst = (__be32 *)out;

	ppc_spe_sha256_final(desc, (u8 *)D);

	/* avoid bytewise memcpy */
	dst[0] = D[0];
	dst[1] = D[1];
	dst[2] = D[2];
	dst[3] = D[3];
	dst[4] = D[4];
	dst[5] = D[5];
	dst[6] = D[6];

	/* clear sensitive data */
	memzero_explicit(D, SHA256_DIGEST_SIZE);
	return 0;
}

/* shash .export: the raw sha256_state is the exported representation. */
static int ppc_spe_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

/* shash .import: restore a previously exported sha256_state. */
static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

static struct shash_alg algs[2] = { {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	sha256_base_init,
	.update		=	ppc_spe_sha256_update,
	.final		=	ppc_spe_sha256_final,
	.export		=	ppc_spe_sha256_export,
	.import		=	ppc_spe_sha256_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha256",
		.cra_driver_name=	"sha256-ppc-spe",
		.cra_priority	=	300,
		.cra_blocksize	=	SHA256_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
}, {
	.digestsize	=	SHA224_DIGEST_SIZE,
	.init		=	sha224_base_init,
	.update		=	ppc_spe_sha256_update,
	.final		=	ppc_spe_sha224_final,
	.export		=	ppc_spe_sha256_export,
	.import		=	ppc_spe_sha256_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha224",
		.cra_driver_name=	"sha224-ppc-spe",
		.cra_priority	=	300,
		.cra_blocksize	=	SHA224_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
} };

static int __init ppc_spe_sha256_mod_init(void)
{
	return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}

static void __exit ppc_spe_sha256_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(ppc_spe_sha256_mod_init);
module_exit(ppc_spe_sha256_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized");

MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-ppc-spe");
MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha256-ppc-spe");
linux-master
arch/powerpc/crypto/sha256-spe-glue.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Poly1305 authenticator algorithm, RFC7539.
 *
 * Copyright 2023- IBM Corp. All rights reserved.
 */

#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jump_label.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <asm/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>

/* P10 VSX-accelerated core, processes multiples of 4 blocks */
asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
/* scalar 64-bit core, one-block granularity; highbit selects padding bit */
asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
/* produce the 16-byte tag from accumulator h and key part s */
asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);

static void vsx_begin(void)
{
	preempt_disable();
	enable_kernel_vsx();
}

static void vsx_end(void)
{
	disable_kernel_vsx();
	preempt_enable();
}

/* shash .init: reset accumulator, buffer and key-absorption state. */
static int crypto_poly1305_p10_init(struct shash_desc *desc)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	poly1305_core_init(&dctx->h);
	dctx->buflen = 0;
	dctx->rset = 0;
	dctx->sset = false;

	return 0;
}

/*
 * Absorb the one-time key from the head of the input stream: the first
 * 16 bytes become r (clamped by the asm core), the next 16 become s.
 * Returns how many input bytes were consumed as key material.
 */
static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
					       const u8 *inp, unsigned int len)
{
	unsigned int acc = 0;

	if (unlikely(!dctx->sset)) {
		if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
			struct poly1305_core_key *key = &dctx->core_r;

			key->key.r64[0] = get_unaligned_le64(&inp[0]);
			key->key.r64[1] = get_unaligned_le64(&inp[8]);
			inp += POLY1305_BLOCK_SIZE;
			len -= POLY1305_BLOCK_SIZE;
			acc += POLY1305_BLOCK_SIZE;
			dctx->rset = 1;
		}
		if (len >= POLY1305_BLOCK_SIZE) {
			dctx->s[0] = get_unaligned_le32(&inp[0]);
			dctx->s[1] = get_unaligned_le32(&inp[4]);
			dctx->s[2] = get_unaligned_le32(&inp[8]);
			dctx->s[3] = get_unaligned_le32(&inp[12]);
			acc += POLY1305_BLOCK_SIZE;
			dctx->sset = true;
		}
	}
	return acc;
}

/*
 * shash .update: complete any buffered partial block first, then feed
 * full blocks to the VSX 4-block core (when usable) and the scalar core,
 * buffering any trailing partial block.
 */
static int crypto_poly1305_p10_update(struct shash_desc *desc,
				      const u8 *src, unsigned int srclen)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int bytes, used;

	/* top up and process a previously buffered partial block */
	if (unlikely(dctx->buflen)) {
		bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
		memcpy(dctx->buf + dctx->buflen, src, bytes);
		src += bytes;
		srclen -= bytes;
		dctx->buflen += bytes;

		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
			/* the block is data only if it wasn't consumed as key */
			if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
							       POLY1305_BLOCK_SIZE))) {
				vsx_begin();
				poly1305_64s(&dctx->h, dctx->buf,
					     POLY1305_BLOCK_SIZE, 1);
				vsx_end();
			}
			dctx->buflen = 0;
		}
	}

	if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
		bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
		used = crypto_poly1305_setdctxkey(dctx, src, bytes);
		if (likely(used)) {
			srclen -= used;
			src += used;
		}
		/* VSX path handles the largest multiple of 4 blocks */
		if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
			vsx_begin();
			poly1305_p10le_4blocks(&dctx->h, src, srclen);
			vsx_end();
			src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
			srclen %= POLY1305_BLOCK_SIZE * 4;
		}
		/* remaining whole blocks go through the scalar core */
		while (srclen >= POLY1305_BLOCK_SIZE) {
			vsx_begin();
			poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
			vsx_end();
			srclen -= POLY1305_BLOCK_SIZE;
			src += POLY1305_BLOCK_SIZE;
		}
	}

	/* stash the trailing partial block for the next call */
	if (unlikely(srclen)) {
		dctx->buflen = srclen;
		memcpy(dctx->buf, src, srclen);
	}

	return 0;
}

/*
 * shash .final: pad and process any buffered partial block (with the
 * padding bit inside the data, hence highbit = 0), then emit the tag.
 * Fails with -ENOKEY if the 32-byte one-time key was never absorbed.
 */
static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
{
	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);

	if (unlikely(!dctx->sset))
		return -ENOKEY;

	if ((dctx->buflen)) {
		dctx->buf[dctx->buflen++] = 1;
		memset(dctx->buf + dctx->buflen, 0,
		       POLY1305_BLOCK_SIZE - dctx->buflen);
		vsx_begin();
		poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
		vsx_end();
		dctx->buflen = 0;
	}

	poly1305_emit_64(&dctx->h, &dctx->s, dst);
	return 0;
}

static struct shash_alg poly1305_alg = {
	.digestsize	= POLY1305_DIGEST_SIZE,
	.init		= crypto_poly1305_p10_init,
	.update		= crypto_poly1305_p10_update,
	.final		= crypto_poly1305_p10_final,
	.descsize	= sizeof(struct poly1305_desc_ctx),
	.base		= {
		.cra_name		= "poly1305",
		.cra_driver_name	= "poly1305-p10",
		.cra_priority		= 300,
		.cra_blocksize		= POLY1305_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	},
};

static int __init poly1305_p10_init(void)
{
	return crypto_register_shash(&poly1305_alg);
}

static void __exit poly1305_p10_exit(void)
{
	crypto_unregister_shash(&poly1305_alg);
}

module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
module_exit(poly1305_p10_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Danny Tsen <[email protected]>");
MODULE_DESCRIPTION("Optimized Poly1305 for P10");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-p10");
linux-master
arch/powerpc/crypto/poly1305-p10-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue code for AES implementation for SPE instructions (PPC) * * Based on generic implementation. The assembler module takes care * about the SPE registers so it can run from interrupt context. * * Copyright (c) 2015 Markus Stockhausen <[email protected]> */ #include <crypto/aes.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/crypto.h> #include <asm/byteorder.h> #include <asm/switch_to.h> #include <crypto/algapi.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <crypto/gf128mul.h> #include <crypto/scatterwalk.h> /* * MAX_BYTES defines the number of bytes that are allowed to be processed * between preempt_disable() and preempt_enable(). e500 cores can issue two * instructions per clock cycle using one 32/64 bit unit (SU1) and one 32 * bit unit (SU2). One of these can be a memory access that is executed via * a single load and store unit (LSU). XTS-AES-256 takes ~780 operations per * 16 byte block or 25 cycles per byte. Thus 768 bytes of input data * will need an estimated maximum of 20,000 cycles. Headroom for cache misses * included. Even with the low end model clocked at 667 MHz this equals to a * critical time window of less than 30us. The value has been chosen to * process a 512 byte disk block in one or a large 1400 bytes IPsec network * packet in two runs. 
* */ #define MAX_BYTES 768 struct ppc_aes_ctx { u32 key_enc[AES_MAX_KEYLENGTH_U32]; u32 key_dec[AES_MAX_KEYLENGTH_U32]; u32 rounds; }; struct ppc_xts_ctx { u32 key_enc[AES_MAX_KEYLENGTH_U32]; u32 key_dec[AES_MAX_KEYLENGTH_U32]; u32 key_twk[AES_MAX_KEYLENGTH_U32]; u32 rounds; }; extern void ppc_encrypt_aes(u8 *out, const u8 *in, u32 *key_enc, u32 rounds); extern void ppc_decrypt_aes(u8 *out, const u8 *in, u32 *key_dec, u32 rounds); extern void ppc_encrypt_ecb(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes); extern void ppc_decrypt_ecb(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes); extern void ppc_encrypt_cbc(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes, u8 *iv); extern void ppc_decrypt_cbc(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes, u8 *iv); extern void ppc_crypt_ctr (u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes, u8 *iv); extern void ppc_encrypt_xts(u8 *out, const u8 *in, u32 *key_enc, u32 rounds, u32 bytes, u8 *iv, u32 *key_twk); extern void ppc_decrypt_xts(u8 *out, const u8 *in, u32 *key_dec, u32 rounds, u32 bytes, u8 *iv, u32 *key_twk); extern void ppc_expand_key_128(u32 *key_enc, const u8 *key); extern void ppc_expand_key_192(u32 *key_enc, const u8 *key); extern void ppc_expand_key_256(u32 *key_enc, const u8 *key); extern void ppc_generate_decrypt_key(u32 *key_dec,u32 *key_enc, unsigned int key_len); static void spe_begin(void) { /* disable preemption and save users SPE registers if required */ preempt_disable(); enable_kernel_spe(); } static void spe_end(void) { disable_kernel_spe(); /* reenable preemption */ preempt_enable(); } static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); switch (key_len) { case AES_KEYSIZE_128: ctx->rounds = 4; ppc_expand_key_128(ctx->key_enc, in_key); break; case AES_KEYSIZE_192: ctx->rounds = 5; ppc_expand_key_192(ctx->key_enc, in_key); break; case AES_KEYSIZE_256: ctx->rounds = 
6; ppc_expand_key_256(ctx->key_enc, in_key); break; default: return -EINVAL; } ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); return 0; } static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len); } static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int err; err = xts_verify_key(tfm, in_key, key_len); if (err) return err; key_len >>= 1; switch (key_len) { case AES_KEYSIZE_128: ctx->rounds = 4; ppc_expand_key_128(ctx->key_enc, in_key); ppc_expand_key_128(ctx->key_twk, in_key + AES_KEYSIZE_128); break; case AES_KEYSIZE_192: ctx->rounds = 5; ppc_expand_key_192(ctx->key_enc, in_key); ppc_expand_key_192(ctx->key_twk, in_key + AES_KEYSIZE_192); break; case AES_KEYSIZE_256: ctx->rounds = 6; ppc_expand_key_256(ctx->key_enc, in_key); ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256); break; default: return -EINVAL; } ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); return 0; } static void ppc_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); spe_begin(); ppc_encrypt_aes(out, in, ctx->key_enc, ctx->rounds); spe_end(); } static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); spe_begin(); ppc_decrypt_aes(out, in, ctx->key_dec, ctx->rounds); spe_end(); } static int ppc_ecb_crypt(struct skcipher_request *req, bool enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) != 0) { nbytes = min_t(unsigned int, nbytes, MAX_BYTES); nbytes = round_down(nbytes, AES_BLOCK_SIZE); spe_begin(); if (enc) 
ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, ctx->rounds, nbytes); else ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_dec, ctx->rounds, nbytes); spe_end(); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static int ppc_ecb_encrypt(struct skcipher_request *req) { return ppc_ecb_crypt(req, true); } static int ppc_ecb_decrypt(struct skcipher_request *req) { return ppc_ecb_crypt(req, false); } static int ppc_cbc_crypt(struct skcipher_request *req, bool enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) != 0) { nbytes = min_t(unsigned int, nbytes, MAX_BYTES); nbytes = round_down(nbytes, AES_BLOCK_SIZE); spe_begin(); if (enc) ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, ctx->rounds, nbytes, walk.iv); else ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_dec, ctx->rounds, nbytes, walk.iv); spe_end(); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static int ppc_cbc_encrypt(struct skcipher_request *req) { return ppc_cbc_crypt(req, true); } static int ppc_cbc_decrypt(struct skcipher_request *req) { return ppc_cbc_crypt(req, false); } static int ppc_ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) != 0) { nbytes = min_t(unsigned int, nbytes, MAX_BYTES); if (nbytes < walk.total) nbytes = round_down(nbytes, AES_BLOCK_SIZE); spe_begin(); ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, ctx->rounds, nbytes, walk.iv); spe_end(); err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static 
int ppc_xts_crypt(struct skcipher_request *req, bool enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; u32 *twk; err = skcipher_walk_virt(&walk, req, false); twk = ctx->key_twk; while ((nbytes = walk.nbytes) != 0) { nbytes = min_t(unsigned int, nbytes, MAX_BYTES); nbytes = round_down(nbytes, AES_BLOCK_SIZE); spe_begin(); if (enc) ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk); else ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr, ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk); spe_end(); twk = NULL; err = skcipher_walk_done(&walk, walk.nbytes - nbytes); } return err; } static int ppc_xts_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int tail = req->cryptlen % AES_BLOCK_SIZE; int offset = req->cryptlen - tail - AES_BLOCK_SIZE; struct skcipher_request subreq; u8 b[2][AES_BLOCK_SIZE]; int err; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; if (tail) { subreq = *req; skcipher_request_set_crypt(&subreq, req->src, req->dst, req->cryptlen - tail, req->iv); req = &subreq; } err = ppc_xts_crypt(req, true); if (err || !tail) return err; scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0); memcpy(b[1], b[0], tail); scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0); spe_begin(); ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE, req->iv, NULL); spe_end(); scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1); return 0; } static int ppc_xts_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int tail = req->cryptlen % AES_BLOCK_SIZE; int offset = req->cryptlen - tail - AES_BLOCK_SIZE; struct 
skcipher_request subreq; u8 b[3][AES_BLOCK_SIZE]; le128 twk; int err; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; if (tail) { subreq = *req; skcipher_request_set_crypt(&subreq, req->src, req->dst, offset, req->iv); req = &subreq; } err = ppc_xts_crypt(req, false); if (err || !tail) return err; scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0); spe_begin(); if (!offset) ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds, AES_BLOCK_SIZE); gf128mul_x_ble(&twk, (le128 *)req->iv); ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE, (u8 *)&twk, NULL); memcpy(b[0], b[2], tail); memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail); ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE, req->iv, NULL); spe_end(); scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1); return 0; } /* * Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen * because the e500 platform can handle unaligned reads/writes very efficiently. * This improves IPsec thoughput by another few percent. 
Additionally we assume * that AES context is always aligned to at least 8 bytes because it is created * with kmalloc() in the crypto infrastructure */ static struct crypto_alg aes_cipher_alg = { .cra_name = "aes", .cra_driver_name = "aes-ppc-spe", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ppc_aes_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = ppc_aes_setkey, .cia_encrypt = ppc_aes_encrypt, .cia_decrypt = ppc_aes_decrypt } } }; static struct skcipher_alg aes_skcipher_algs[] = { { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-ppc-spe", .base.cra_priority = 300, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ppc_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = ppc_aes_setkey_skcipher, .encrypt = ppc_ecb_encrypt, .decrypt = ppc_ecb_decrypt, }, { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-ppc-spe", .base.cra_priority = 300, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ppc_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ppc_aes_setkey_skcipher, .encrypt = ppc_cbc_encrypt, .decrypt = ppc_cbc_decrypt, }, { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-ppc-spe", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct ppc_aes_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = ppc_aes_setkey_skcipher, .encrypt = ppc_ctr_crypt, .decrypt = ppc_ctr_crypt, .chunksize = AES_BLOCK_SIZE, }, { .base.cra_name = "xts(aes)", .base.cra_driver_name = "xts-ppc-spe", .base.cra_priority = 300, .base.cra_blocksize = AES_BLOCK_SIZE, 
.base.cra_ctxsize = sizeof(struct ppc_xts_ctx), .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE * 2, .max_keysize = AES_MAX_KEY_SIZE * 2, .ivsize = AES_BLOCK_SIZE, .setkey = ppc_xts_setkey, .encrypt = ppc_xts_encrypt, .decrypt = ppc_xts_decrypt, } }; static int __init ppc_aes_mod_init(void) { int err; err = crypto_register_alg(&aes_cipher_alg); if (err) return err; err = crypto_register_skciphers(aes_skcipher_algs, ARRAY_SIZE(aes_skcipher_algs)); if (err) crypto_unregister_alg(&aes_cipher_alg); return err; } static void __exit ppc_aes_mod_fini(void) { crypto_unregister_alg(&aes_cipher_alg); crypto_unregister_skciphers(aes_skcipher_algs, ARRAY_SIZE(aes_skcipher_algs)); } module_init(ppc_aes_mod_init); module_exit(ppc_aes_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS, SPE optimized"); MODULE_ALIAS_CRYPTO("aes"); MODULE_ALIAS_CRYPTO("ecb(aes)"); MODULE_ALIAS_CRYPTO("cbc(aes)"); MODULE_ALIAS_CRYPTO("ctr(aes)"); MODULE_ALIAS_CRYPTO("xts(aes)"); MODULE_ALIAS_CRYPTO("aes-ppc-spe");
linux-master
arch/powerpc/crypto/aes-spe-glue.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/crc32.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/cpufeature.h> #include <asm/simd.h> #include <asm/switch_to.h> #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 #define VMX_ALIGN 16 #define VMX_ALIGN_MASK (VMX_ALIGN-1) #define VECTOR_BREAKPOINT 512 u32 __crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len); static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) { unsigned int prealign; unsigned int tail; if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable()) return __crc32c_le(crc, p, len); if ((unsigned long)p & VMX_ALIGN_MASK) { prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK); crc = __crc32c_le(crc, p, prealign); len -= prealign; p += prealign; } if (len & ~VMX_ALIGN_MASK) { preempt_disable(); pagefault_disable(); enable_kernel_altivec(); crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); disable_kernel_altivec(); pagefault_enable(); preempt_enable(); } tail = len & VMX_ALIGN_MASK; if (tail) { p += len & ~VMX_ALIGN_MASK; crc = __crc32c_le(crc, p, tail); } return crc; } static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } /* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. 
*/ static int crc32c_vpmsum_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) return -EINVAL; *mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32c_vpmsum_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } static int crc32c_vpmsum_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); *crcp = crc32c_vpmsum(*crcp, data, len); return 0; } static int __crc32c_vpmsum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { *(__le32 *)out = ~cpu_to_le32(crc32c_vpmsum(*crcp, data, len)); return 0; } static int crc32c_vpmsum_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_vpmsum_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_vpmsum_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); *(__le32 *)out = ~cpu_to_le32p(crcp); return 0; } static int crc32c_vpmsum_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_vpmsum_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static struct shash_alg alg = { .setkey = crc32c_vpmsum_setkey, .init = crc32c_vpmsum_init, .update = crc32c_vpmsum_update, .final = crc32c_vpmsum_final, .finup = crc32c_vpmsum_finup, .digest = crc32c_vpmsum_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "crc32c-vpmsum", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_module = THIS_MODULE, .cra_init = crc32c_vpmsum_cra_init, } }; static int __init crc32c_vpmsum_mod_init(void) { if (!cpu_has_feature(CPU_FTR_ARCH_207S)) return -ENODEV; return crypto_register_shash(&alg); } static void __exit crc32c_vpmsum_mod_fini(void) { crypto_unregister_shash(&alg); } 
/*
 * Gate module auto-loading on the VEC_CRYPTO CPU feature; the init
 * routine additionally re-checks CPU_FTR_ARCH_207S before registering
 * the shash, so older CPUs get -ENODEV even if loaded manually.
 */
module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crc32c_vpmsum_mod_init);
module_exit(crc32c_vpmsum_mod_fini);

MODULE_AUTHOR("Anton Blanchard <[email protected]>");
MODULE_DESCRIPTION("CRC32C using vector polynomial multiply-sum instructions");
MODULE_LICENSE("GPL");
/* Allow lookup both by generic algorithm name and by driver name. */
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-vpmsum");
linux-master
arch/powerpc/crypto/crc32c-vpmsum_glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * powerpc implementation of the SHA1 Secure Hash Algorithm. * * Derived from cryptoapi implementation, adapted for in-place * scatterlist interface. * * Derived from "crypto/sha1.c" * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <[email protected]> * Copyright (c) Jean-Francois Dive <[email protected]> */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> #include <asm/byteorder.h> void powerpc_sha_transform(u32 *state, const u8 *src); static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial, done; const u8 *src; partial = sctx->count & 0x3f; sctx->count += len; done = 0; src = data; if ((partial + len) > 63) { if (partial) { done = -partial; memcpy(sctx->buffer + partial, data, done + 64); src = sctx->buffer; } do { powerpc_sha_transform(sctx->state, src); done += 64; src = data + done; } while (done + 63 < len); partial = 0; } memcpy(sctx->buffer + partial, src, len - done); return 0; } /* Add padding and return the message digest. */ static int powerpc_sha1_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); __be32 *dst = (__be32 *)out; u32 i, index, padlen; __be64 bits; static const u8 padding[64] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 */ index = sctx->count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); powerpc_sha1_update(desc, padding, padlen); /* Append length */ powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof *sctx); return 0; } static int powerpc_sha1_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int powerpc_sha1_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = powerpc_sha1_update, .final = powerpc_sha1_final, .export = powerpc_sha1_export, .import = powerpc_sha1_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-powerpc", .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init sha1_powerpc_mod_init(void) { return crypto_register_shash(&alg); } static void __exit sha1_powerpc_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_powerpc_mod_init); module_exit(sha1_powerpc_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); MODULE_ALIAS_CRYPTO("sha1"); MODULE_ALIAS_CRYPTO("sha1-powerpc");
linux-master
arch/powerpc/crypto/sha1.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Calculate a CRC T10-DIF with vpmsum acceleration
 *
 * Copyright 2017, Daniel Axtens, IBM Corporation.
 * [based on crc32c-vpmsum_glue.c]
 */
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
#include <asm/simd.h>
#include <asm/switch_to.h>

/* The vector routine requires 16-byte aligned, 16-byte multiple input. */
#define VMX_ALIGN		16
#define VMX_ALIGN_MASK		(VMX_ALIGN-1)

/* Below this length the generic (scalar) CRC is used unconditionally. */
#define VECTOR_BREAKPOINT	64

u32 __crct10dif_vpmsum(u32 crc, unsigned char const *p, size_t len);

/*
 * Compute the CRC T10-DIF of [p, p+len), seeded with crci.
 *
 * Short buffers, or contexts where SIMD is unusable, fall back to
 * crc_t10dif_generic(). Otherwise: an unaligned head and sub-16-byte
 * tail are handled by the generic code, and the aligned middle is fed
 * to the AltiVec asm routine.
 */
static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
{
	unsigned int prealign;
	unsigned int tail;
	u32 crc = crci;

	if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
		return crc_t10dif_generic(crc, p, len);

	/* Scalar-process up to the first 16-byte boundary. */
	if ((unsigned long)p & VMX_ALIGN_MASK) {
		prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
		crc = crc_t10dif_generic(crc, p, prealign);
		len -= prealign;
		p += prealign;
	}

	/* Whole 16-byte blocks go through the vector routine. */
	if (len & ~VMX_ALIGN_MASK) {
		/*
		 * The asm works on a 32-bit CRC; it expects the 16-bit
		 * value pre-shifted into the upper half, and returns it
		 * there too.
		 */
		crc <<= 16;
		preempt_disable();
		pagefault_disable();
		enable_kernel_altivec();
		crc = __crct10dif_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
		disable_kernel_altivec();
		pagefault_enable();
		preempt_enable();
		crc >>= 16;
	}

	/* Remaining < 16 bytes are finished with the generic code. */
	tail = len & VMX_ALIGN_MASK;
	if (tail) {
		p += len & ~VMX_ALIGN_MASK;
		crc = crc_t10dif_generic(crc, p, tail);
	}

	return crc & 0xffff;
}

/* shash .init: T10-DIF starts from a zero seed. */
static int crct10dif_vpmsum_init(struct shash_desc *desc)
{
	u16 *crc = shash_desc_ctx(desc);

	*crc = 0;
	return 0;
}

/* shash .update: fold more data into the per-request CRC state. */
static int crct10dif_vpmsum_update(struct shash_desc *desc, const u8 *data,
			    unsigned int length)
{
	u16 *crc = shash_desc_ctx(desc);

	*crc = crct10dif_vpmsum(*crc, data, length);
	return 0;
}

/* shash .final: emit the 16-bit CRC in native byte order. */
static int crct10dif_vpmsum_final(struct shash_desc *desc, u8 *out)
{
	u16 *crcp = shash_desc_ctx(desc);

	*(u16 *)out = *crcp;
	return 0;
}

static struct shash_alg alg = {
	.init		= crct10dif_vpmsum_init,
	.update		= crct10dif_vpmsum_update,
	.final		= crct10dif_vpmsum_final,
	.descsize	= CRC_T10DIF_DIGEST_SIZE,
	.digestsize	= CRC_T10DIF_DIGEST_SIZE,
	.base		= {
		.cra_name		= "crct10dif",
		.cra_driver_name	= "crct10dif-vpmsum",
		.cra_priority		= 200,
		.cra_blocksize		= CRC_T10DIF_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

static int __init crct10dif_vpmsum_mod_init(void)
{
	/* The vpmsum instructions require a POWER8 (ISA 2.07S) or later. */
	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	return crypto_register_shash(&alg);
}

static void __exit crct10dif_vpmsum_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, crct10dif_vpmsum_mod_init);
module_exit(crct10dif_vpmsum_mod_fini);

MODULE_AUTHOR("Daniel Axtens <[email protected]>");
MODULE_DESCRIPTION("CRCT10DIF using vector polynomial multiply-sum instructions");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-vpmsum");
linux-master
arch/powerpc/crypto/crct10dif-vpmsum_glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers, * including ChaCha20 (RFC7539) * * Copyright 2023- IBM Corp. All rights reserved. */ #include <crypto/algapi.h> #include <crypto/internal/chacha.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/cpufeature.h> #include <linux/sizes.h> #include <asm/simd.h> #include <asm/switch_to.h> asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src, unsigned int len, int nrounds); static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10); static void vsx_begin(void) { preempt_disable(); enable_kernel_vsx(); } static void vsx_end(void) { disable_kernel_vsx(); preempt_enable(); } static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { unsigned int l = bytes & ~0x0FF; if (l > 0) { chacha_p10le_8x(state, dst, src, l, nrounds); bytes -= l; src += l; dst += l; state[12] += l / CHACHA_BLOCK_SIZE; } if (bytes > 0) chacha_crypt_generic(state, dst, src, bytes, nrounds); } void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) { hchacha_block_generic(state, stream, nrounds); } EXPORT_SYMBOL(hchacha_block_arch); void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) { chacha_init_generic(state, key, iv); } EXPORT_SYMBOL(chacha_init_arch); void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) return chacha_crypt_generic(state, dst, src, bytes, nrounds); do { unsigned int todo = min_t(unsigned int, bytes, SZ_4K); vsx_begin(); chacha_p10_do_8x(state, dst, src, todo, nrounds); vsx_end(); bytes -= todo; src += todo; dst += todo; } while (bytes); } EXPORT_SYMBOL(chacha_crypt_arch); static int chacha_p10_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 
*iv) { struct skcipher_walk walk; u32 state[16]; int err; err = skcipher_walk_virt(&walk, req, false); if (err) return err; chacha_init_generic(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; if (nbytes < walk.total) nbytes = rounddown(nbytes, walk.stride); if (!crypto_simd_usable()) { chacha_crypt_generic(state, walk.dst.virt.addr, walk.src.virt.addr, nbytes, ctx->nrounds); } else { vsx_begin(); chacha_p10_do_8x(state, walk.dst.virt.addr, walk.src.virt.addr, nbytes, ctx->nrounds); vsx_end(); } err = skcipher_walk_done(&walk, walk.nbytes - nbytes); if (err) break; } return err; } static int chacha_p10(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); return chacha_p10_stream_xor(req, ctx, req->iv); } static int xchacha_p10(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); struct chacha_ctx subctx; u32 state[16]; u8 real_iv[16]; chacha_init_generic(state, ctx->key, req->iv); hchacha_block_arch(state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; memcpy(&real_iv[0], req->iv + 24, 8); memcpy(&real_iv[8], req->iv + 16, 8); return chacha_p10_stream_xor(req, &subctx, real_iv); } static struct skcipher_alg algs[] = { { .base.cra_name = "chacha20", .base.cra_driver_name = "chacha20-p10", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = CHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = chacha_p10, .decrypt = chacha_p10, }, { .base.cra_name = "xchacha20", .base.cra_driver_name = "xchacha20-p10", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, 
.max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha20_setkey, .encrypt = xchacha_p10, .decrypt = xchacha_p10, }, { .base.cra_name = "xchacha12", .base.cra_driver_name = "xchacha12-p10", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct chacha_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = XCHACHA_IV_SIZE, .chunksize = CHACHA_BLOCK_SIZE, .setkey = chacha12_setkey, .encrypt = xchacha_p10, .decrypt = xchacha_p10, } }; static int __init chacha_p10_init(void) { static_branch_enable(&have_p10); return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); } static void __exit chacha_p10_exit(void) { crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); } module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init); module_exit(chacha_p10_exit); MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)"); MODULE_AUTHOR("Danny Tsen <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("chacha20"); MODULE_ALIAS_CRYPTO("chacha20-p10"); MODULE_ALIAS_CRYPTO("xchacha20"); MODULE_ALIAS_CRYPTO("xchacha20-p10"); MODULE_ALIAS_CRYPTO("xchacha12"); MODULE_ALIAS_CRYPTO("xchacha12-p10");
linux-master
arch/powerpc/crypto/chacha-p10-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue code for MD5 implementation for PPC assembler * * Based on generic implementation. * * Copyright (c) 2015 Markus Stockhausen <[email protected]> */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/md5.h> #include <asm/byteorder.h> extern void ppc_md5_transform(u32 *state, const u8 *src, u32 blocks); static inline void ppc_md5_clear_context(struct md5_state *sctx) { int count = sizeof(struct md5_state) >> 2; u32 *ptr = (u32 *)sctx; /* make sure we can clear the fast way */ BUILD_BUG_ON(sizeof(struct md5_state) % 4); do { *ptr++ = 0; } while (--count); } static int ppc_md5_init(struct shash_desc *desc) { struct md5_state *sctx = shash_desc_ctx(desc); sctx->hash[0] = MD5_H0; sctx->hash[1] = MD5_H1; sctx->hash[2] = MD5_H2; sctx->hash[3] = MD5_H3; sctx->byte_count = 0; return 0; } static int ppc_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *sctx = shash_desc_ctx(desc); const unsigned int offset = sctx->byte_count & 0x3f; unsigned int avail = 64 - offset; const u8 *src = data; sctx->byte_count += len; if (avail > len) { memcpy((char *)sctx->block + offset, src, len); return 0; } if (offset) { memcpy((char *)sctx->block + offset, src, avail); ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1); len -= avail; src += avail; } if (len > 63) { ppc_md5_transform(sctx->hash, src, len >> 6); src += len & ~0x3f; len &= 0x3f; } memcpy((char *)sctx->block, src, len); return 0; } static int ppc_md5_final(struct shash_desc *desc, u8 *out) { struct md5_state *sctx = shash_desc_ctx(desc); const unsigned int offset = sctx->byte_count & 0x3f; const u8 *src = (const u8 *)sctx->block; u8 *p = (u8 *)src + offset; int padlen = 55 - offset; __le64 *pbits = (__le64 *)((char *)sctx->block + 56); __le32 *dst = (__le32 *)out; *p++ = 0x80; if (padlen < 0) { memset(p, 0x00, padlen + sizeof (u64)); 
ppc_md5_transform(sctx->hash, src, 1); p = (char *)sctx->block; padlen = 56; } memset(p, 0, padlen); *pbits = cpu_to_le64(sctx->byte_count << 3); ppc_md5_transform(sctx->hash, src, 1); dst[0] = cpu_to_le32(sctx->hash[0]); dst[1] = cpu_to_le32(sctx->hash[1]); dst[2] = cpu_to_le32(sctx->hash[2]); dst[3] = cpu_to_le32(sctx->hash[3]); ppc_md5_clear_context(sctx); return 0; } static int ppc_md5_export(struct shash_desc *desc, void *out) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int ppc_md5_import(struct shash_desc *desc, const void *in) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = ppc_md5_init, .update = ppc_md5_update, .final = ppc_md5_final, .export = ppc_md5_export, .import = ppc_md5_import, .descsize = sizeof(struct md5_state), .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name= "md5-ppc", .cra_priority = 200, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init ppc_md5_mod_init(void) { return crypto_register_shash(&alg); } static void __exit ppc_md5_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(ppc_md5_mod_init); module_exit(ppc_md5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, PPC assembler"); MODULE_ALIAS_CRYPTO("md5"); MODULE_ALIAS_CRYPTO("md5-ppc");
linux-master
arch/powerpc/crypto/md5-glue.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue code for accelerated AES-GCM stitched implementation for ppc64le. * * Copyright 2022- IBM Inc. All rights reserved */ #include <asm/unaligned.h> #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/b128ops.h> #include <crypto/gf128mul.h> #include <crypto/internal/simd.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/types.h> #define PPC_ALIGN 16 #define GCM_IV_SIZE 12 MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation"); MODULE_AUTHOR("Danny Tsen <[email protected]"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS_CRYPTO("aes"); asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits, void *key); asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key); asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]); asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable, unsigned char *aad, unsigned int alen); struct aes_key { u8 key[AES_MAX_KEYLENGTH]; u64 rounds; }; struct gcm_ctx { u8 iv[16]; u8 ivtag[16]; u8 aad_hash[16]; u64 aadLen; u64 Plen; /* offset 56 - used in aes_p10_gcm_{en/de}crypt */ }; struct Hash_ctx { u8 H[16]; /* subkey */ u8 Htable[256]; /* Xi, Hash table(offset 32) */ }; struct p10_aes_gcm_ctx { struct aes_key enc_key; }; static void vsx_begin(void) { preempt_disable(); enable_kernel_vsx(); } static void vsx_end(void) { disable_kernel_vsx(); preempt_enable(); } static void set_subkey(unsigned char *hash) { *(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]); *(u64 *)&hash[8] = 
be64_to_cpup((__be64 *)&hash[8]); } /* * Compute aad if any. * - Hash aad and copy to Xi. */ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash, unsigned char *aad, int alen) { int i; u8 nXi[16] = {0, }; gctx->aadLen = alen; i = alen & ~0xf; if (i) { gcm_ghash_p10(nXi, hash->Htable+32, aad, i); aad += i; alen -= i; } if (alen) { for (i = 0; i < alen; i++) nXi[i] ^= aad[i]; memset(gctx->aad_hash, 0, 16); gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16); } else { memcpy(gctx->aad_hash, nXi, 16); } memcpy(hash->Htable, gctx->aad_hash, 16); } static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey, struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen) { __be32 counter = cpu_to_be32(1); aes_p10_encrypt(hash->H, hash->H, rdkey); set_subkey(hash->H); gcm_init_htable(hash->Htable+32, hash->H); *((__be32 *)(iv+12)) = counter; gctx->Plen = 0; /* * Encrypt counter vector as iv tag and increment counter. */ aes_p10_encrypt(iv, gctx->ivtag, rdkey); counter = cpu_to_be32(2); *((__be32 *)(iv+12)) = counter; memcpy(gctx->iv, iv, 16); gctx->aadLen = assoclen; memset(gctx->aad_hash, 0, 16); if (assoclen) set_aad(gctx, hash, assoc, assoclen); } static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len) { int i; unsigned char len_ac[16 + PPC_ALIGN]; unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN); __be64 clen = cpu_to_be64(len << 3); __be64 alen = cpu_to_be64(gctx->aadLen << 3); if (len == 0 && gctx->aadLen == 0) { memcpy(hash->Htable, gctx->ivtag, 16); return; } /* * Len is in bits. 
*/ *((__be64 *)(aclen)) = alen; *((__be64 *)(aclen+8)) = clen; /* * hash (AAD len and len) */ gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16); for (i = 0; i < 16; i++) hash->Htable[i] ^= gctx->ivtag[i]; } static int set_authsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 4: case 8: case 12: case 13: case 14: case 15: case 16: break; default: return -EINVAL; } return 0; } static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm); int ret; vsx_begin(); ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key); vsx_end(); return ret ? -EINVAL : 0; } static int p10_aes_gcm_crypt(struct aead_request *req, int enc) { struct crypto_tfm *tfm = req->base.tfm; struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm); u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN]; struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN); u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN]; struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN); struct scatter_walk assoc_sg_walk; struct skcipher_walk walk; u8 *assocmem = NULL; u8 *assoc; unsigned int assoclen = req->assoclen; unsigned int cryptlen = req->cryptlen; unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN]; unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN); int ret; unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm)); u8 otag[16]; int total_processed = 0; memset(databuf, 0, sizeof(databuf)); memset(hashbuf, 0, sizeof(hashbuf)); memset(ivbuf, 0, sizeof(ivbuf)); memcpy(iv, req->iv, GCM_IV_SIZE); /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length) { scatterwalk_start(&assoc_sg_walk, req->src); assoc = scatterwalk_map(&assoc_sg_walk); } else { gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; /* assoc can be any length, so must be on heap */ assocmem = kmalloc(assoclen, flags); if (unlikely(!assocmem)) return -ENOMEM; assoc = assocmem; scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } vsx_begin(); gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen); vsx_end(); if (!assocmem) scatterwalk_unmap(assoc); else kfree(assocmem); if (enc) ret = skcipher_walk_aead_encrypt(&walk, req, false); else ret = skcipher_walk_aead_decrypt(&walk, req, false); if (ret) return ret; while (walk.nbytes > 0 && ret == 0) { vsx_begin(); if (enc) aes_p10_gcm_encrypt(walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes, &ctx->enc_key, gctx->iv, hash->Htable); else aes_p10_gcm_decrypt(walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes, &ctx->enc_key, gctx->iv, hash->Htable); vsx_end(); total_processed += walk.nbytes; ret = skcipher_walk_done(&walk, 0); } if (ret) return ret; /* Finalize hash */ vsx_begin(); finish_tag(gctx, hash, total_processed); vsx_end(); /* copy Xi to end of dst */ if (enc) scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen, auth_tag_len, 1); else { scatterwalk_map_and_copy(otag, req->src, req->assoclen + cryptlen - auth_tag_len, auth_tag_len, 0); if (crypto_memneq(otag, hash->Htable, auth_tag_len)) { memzero_explicit(hash->Htable, 16); return -EBADMSG; } } return 0; } static int p10_aes_gcm_encrypt(struct aead_request *req) { return p10_aes_gcm_crypt(req, 1); } static int p10_aes_gcm_decrypt(struct aead_request *req) { return p10_aes_gcm_crypt(req, 0); } static struct aead_alg gcm_aes_alg = { .ivsize = GCM_IV_SIZE, .maxauthsize = 16, .setauthsize = set_authsize, .setkey = p10_aes_gcm_setkey, .encrypt = p10_aes_gcm_encrypt, .decrypt = p10_aes_gcm_decrypt, .base.cra_name = "gcm(aes)", .base.cra_driver_name = "aes_gcm_p10", .base.cra_priority = 2100, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct p10_aes_gcm_ctx), .base.cra_module = THIS_MODULE, }; static int __init 
p10_init(void) { return crypto_register_aead(&gcm_aes_alg); } static void __exit p10_exit(void) { crypto_unregister_aead(&gcm_aes_alg); } module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init); module_exit(p10_exit);
linux-master
arch/powerpc/crypto/aes-gcm-p10-glue.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int stfiwx(u32 *frS, void *ea) { #ifdef DEBUG printk("%s: %p %p\n", __func__, frS, ea); #endif if (copy_to_user(ea, &frS[1], sizeof(frS[1]))) return -EFAULT; return 0; }
linux-master
arch/powerpc/math-emu/stfiwx.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mtfsb0(int crbD) { if ((crbD != 1) && (crbD != 2)) __FPU_FPSCR &= ~(1 << (31 - crbD)); #ifdef DEBUG printk("%s: %d %08lx\n", __func__, crbD, __FPU_FPSCR); #endif return 0; }
linux-master
arch/powerpc/math-emu/mtfsb0.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int lfs(void *frD, void *ea) { FP_DECL_D(R); FP_DECL_S(A); FP_DECL_EX; float f; #ifdef DEBUG printk("%s: D %p, ea %p\n", __func__, frD, ea); #endif if (copy_from_user(&f, ea, sizeof(float))) return -EFAULT; FP_UNPACK_S(A, f); #ifdef DEBUG printk("A: %ld %lu %ld (%ld) [%08lx]\n", A_s, A_f, A_e, A_c, *(unsigned long *)&f); #endif FP_CONV(D, S, 2, 1, R, A); #ifdef DEBUG printk("R: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif if (R_c == FP_CLS_NAN) { R_e = _FP_EXPMAX_D; _FP_PACK_RAW_2_P(D, frD, R); } else { __FP_PACK_D(frD, R); } return 0; }
linux-master
arch/powerpc/math-emu/lfs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fcmpu(u32 *ccr, int crfD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_EX; int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) }; long cmp; #ifdef DEBUG printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif FP_CMP_D(cmp, A, B, 2); cmp = code[(cmp + 1) & 3]; __FPU_FPSCR &= ~(0x1f000); __FPU_FPSCR |= (cmp << 12); *ccr &= ~(15 << ((7 - crfD) << 2)); *ccr |= (cmp << ((7 - crfD) << 2)); #ifdef DEBUG printk("CR: %08x\n", *ccr); #endif return 0; }
linux-master
arch/powerpc/math-emu/fcmpu.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fdivs(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_VXZDZ); #ifdef DEBUG printk("%s: FPSCR_VXZDZ raised\n", __func__); #endif } if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { FP_SET_EXCEPTION(EFLAG_VXIDI); #ifdef DEBUG printk("%s: FPSCR_VXIDI raised\n", __func__); #endif } if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_DIVZERO); if (__FPU_TRAP_P(EFLAG_DIVZERO)) return FP_CUR_EXCEPTIONS; } FP_DIV_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fdivs.c
// SPDX-License-Identifier: GPL-2.0
/* This has so very few changes over libgcc2's __udivmoddi4 it isn't funny.  */

#include <math-emu/soft-fp.h>

#undef count_leading_zeros
#define count_leading_zeros  __FP_CLZ

/*
 * _fp_udivmodti4 - double-word unsigned division with remainder:
 * divide the two-word numerator n1:n0 by the two-word denominator
 * d1:d0, storing quotient in q[1]:q[0] and remainder in r[1]:r[0].
 *
 * The algorithm normalizes the denominator (shifts so its top bit is
 * set) and then uses the single-word primitives udiv_qrnnd /
 * umul_ppmm / sub_ddmmss, exactly as in libgcc's __udivmoddi4.
 */
void
_fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
	       _FP_W_TYPE n1, _FP_W_TYPE n0,
	       _FP_W_TYPE d1, _FP_W_TYPE d0)
{
	_FP_W_TYPE q0, q1, r0, r1;
	_FP_I_TYPE b, bm;

	if (d1 == 0) {
#if !UDIV_NEEDS_NORMALIZATION
		if (d0 > n1) {
			/* 0q = nn / 0D */
			udiv_qrnnd(q0, n0, n1, n0, d0);
			q1 = 0;
			/* Remainder in n0.  */
		} else {
			/* qq = NN / 0d */
			if (d0 == 0)
				d0 = 1 / d0;	/* Divide intentionally by zero.  */
			udiv_qrnnd(q1, n1, 0, n1, d0);
			udiv_qrnnd(q0, n0, n1, n0, d0);
			/* Remainder in n0.  */
		}
		r0 = n0;
		r1 = 0;
#else /* UDIV_NEEDS_NORMALIZATION */
		if (d0 > n1) {
			/* 0q = nn / 0D */
			count_leading_zeros(bm, d0);
			if (bm != 0) {
				/* Normalize, i.e. make the most significant bit of the
				   denominator set.  */
				d0 = d0 << bm;
				n1 = (n1 << bm) | (n0 >> (_FP_W_TYPE_SIZE - bm));
				n0 = n0 << bm;
			}
			udiv_qrnnd(q0, n0, n1, n0, d0);
			q1 = 0;
			/* Remainder in n0 >> bm.  */
		} else {
			/* qq = NN / 0d */
			if (d0 == 0)
				d0 = 1 / d0;	/* Divide intentionally by zero.  */
			count_leading_zeros(bm, d0);
			if (bm == 0) {
				/* From (n1 >= d0) /\ (the most significant bit of d0 is set),
				   conclude (the most significant bit of n1 is set) /\ (the
				   leading quotient digit q1 = 1).

				   This special case is necessary, not an optimization.
				   (Shifts counts of SI_TYPE_SIZE are undefined.)  */
				n1 -= d0;
				q1 = 1;
			} else {
				_FP_W_TYPE n2;

				/* Normalize.  */
				b = _FP_W_TYPE_SIZE - bm;
				d0 = d0 << bm;
				n2 = n1 >> b;
				n1 = (n1 << bm) | (n0 >> b);
				n0 = n0 << bm;
				udiv_qrnnd(q1, n1, n2, n1, d0);
			}

			/* n1 != d0...  */
			udiv_qrnnd(q0, n0, n1, n0, d0);
			/* Remainder in n0 >> bm.  */
		}
		r0 = n0 >> bm;
		r1 = 0;
#endif /* UDIV_NEEDS_NORMALIZATION */
	} else {
		if (d1 > n1) {
			/* 00 = nn / DD */
			q0 = 0;
			q1 = 0;
			/* Remainder in n1n0.  */
			r0 = n0;
			r1 = n1;
		} else {
			/* 0q = NN / dd */
			count_leading_zeros(bm, d1);
			if (bm == 0) {
				/* From (n1 >= d1) /\ (the most significant bit of d1 is set),
				   conclude (the most significant bit of n1 is set) /\ (the
				   quotient digit q0 = 0 or 1).

				   This special case is necessary, not an optimization.  */

				/* The condition on the next line takes advantage of that
				   n1 >= d1 (true due to program flow).  */
				if (n1 > d1 || n0 >= d0) {
					q0 = 1;
					sub_ddmmss(n1, n0, n1, n0, d1, d0);
				} else
					q0 = 0;
				q1 = 0;
				r0 = n0;
				r1 = n1;
			} else {
				_FP_W_TYPE m1, m0, n2;

				/* Normalize.  */
				b = _FP_W_TYPE_SIZE - bm;
				d1 = (d1 << bm) | (d0 >> b);
				d0 = d0 << bm;
				n2 = n1 >> b;
				n1 = (n1 << bm) | (n0 >> b);
				n0 = n0 << bm;
				udiv_qrnnd(q0, n1, n2, n1, d1);
				umul_ppmm(m1, m0, q0, d0);
				/* Estimated quotient digit may be one too large;
				   correct by comparing the partial product. */
				if (m1 > n1 || (m1 == n1 && m0 > n0)) {
					q0--;
					sub_ddmmss(m1, m0, m1, m0, d1, d0);
				}
				q1 = 0;

				/* Remainder in (n1n0 - m1m0) >> bm.  */
				sub_ddmmss(n1, n0, n1, n0, m1, m0);
				r0 = (n1 << b) | (n0 >> bm);
				r1 = n1 >> bm;
			}
		}
	}

	q[0] = q0;
	q[1] = q1;
	r[0] = r0, r[1] = r1;
}
linux-master
arch/powerpc/math-emu/udivmodti4.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fnabs(u32 *frD, u32 *frB) { frD[0] = frB[0] | 0x80000000; frD[1] = frB[1]; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fnabs.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1999 Eddie C. Dost ([email protected]) */ #include <linux/types.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <asm/reg.h> #include <asm/switch_to.h> #include <asm/sfp-machine.h> #include <math-emu/double.h> #define FLOATFUNC(x) extern int x(void *, void *, void *, void *) /* The instructions list which may be not implemented by a hardware FPU */ FLOATFUNC(fre); FLOATFUNC(frsqrtes); FLOATFUNC(fsqrt); FLOATFUNC(fsqrts); FLOATFUNC(mtfsf); FLOATFUNC(mtfsfi); #ifdef CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED #undef FLOATFUNC #define FLOATFUNC(x) static inline int x(void *op1, void *op2, void *op3, \ void *op4) { return 0; } #endif FLOATFUNC(fadd); FLOATFUNC(fadds); FLOATFUNC(fdiv); FLOATFUNC(fdivs); FLOATFUNC(fmul); FLOATFUNC(fmuls); FLOATFUNC(fsub); FLOATFUNC(fsubs); FLOATFUNC(fmadd); FLOATFUNC(fmadds); FLOATFUNC(fmsub); FLOATFUNC(fmsubs); FLOATFUNC(fnmadd); FLOATFUNC(fnmadds); FLOATFUNC(fnmsub); FLOATFUNC(fnmsubs); FLOATFUNC(fctiw); FLOATFUNC(fctiwz); FLOATFUNC(frsp); FLOATFUNC(fcmpo); FLOATFUNC(fcmpu); FLOATFUNC(mcrfs); FLOATFUNC(mffs); FLOATFUNC(mtfsb0); FLOATFUNC(mtfsb1); FLOATFUNC(lfd); FLOATFUNC(lfs); FLOATFUNC(stfd); FLOATFUNC(stfs); FLOATFUNC(stfiwx); FLOATFUNC(fabs); FLOATFUNC(fmr); FLOATFUNC(fnabs); FLOATFUNC(fneg); /* Optional */ FLOATFUNC(fres); FLOATFUNC(frsqrte); FLOATFUNC(fsel); #define OP31 0x1f /* 31 */ #define LFS 0x30 /* 48 */ #define LFSU 0x31 /* 49 */ #define LFD 0x32 /* 50 */ #define LFDU 0x33 /* 51 */ #define STFS 0x34 /* 52 */ #define STFSU 0x35 /* 53 */ #define STFD 0x36 /* 54 */ #define STFDU 0x37 /* 55 */ #define OP59 0x3b /* 59 */ #define OP63 0x3f /* 63 */ /* Opcode 31: */ /* X-Form: */ #define LFSX 0x217 /* 535 */ #define LFSUX 0x237 /* 567 */ #define LFDX 0x257 /* 599 */ #define LFDUX 0x277 /* 631 */ #define STFSX 0x297 /* 663 */ #define STFSUX 0x2b7 /* 695 */ #define STFDX 0x2d7 /* 727 */ #define STFDUX 0x2f7 /* 759 */ #define STFIWX 0x3d7 /* 983 */ /* Opcode 59: */ /* 
A-Form: */ #define FDIVS 0x012 /* 18 */ #define FSUBS 0x014 /* 20 */ #define FADDS 0x015 /* 21 */ #define FSQRTS 0x016 /* 22 */ #define FRES 0x018 /* 24 */ #define FMULS 0x019 /* 25 */ #define FRSQRTES 0x01a /* 26 */ #define FMSUBS 0x01c /* 28 */ #define FMADDS 0x01d /* 29 */ #define FNMSUBS 0x01e /* 30 */ #define FNMADDS 0x01f /* 31 */ /* Opcode 63: */ /* A-Form: */ #define FDIV 0x012 /* 18 */ #define FSUB 0x014 /* 20 */ #define FADD 0x015 /* 21 */ #define FSQRT 0x016 /* 22 */ #define FSEL 0x017 /* 23 */ #define FRE 0x018 /* 24 */ #define FMUL 0x019 /* 25 */ #define FRSQRTE 0x01a /* 26 */ #define FMSUB 0x01c /* 28 */ #define FMADD 0x01d /* 29 */ #define FNMSUB 0x01e /* 30 */ #define FNMADD 0x01f /* 31 */ /* X-Form: */ #define FCMPU 0x000 /* 0 */ #define FRSP 0x00c /* 12 */ #define FCTIW 0x00e /* 14 */ #define FCTIWZ 0x00f /* 15 */ #define FCMPO 0x020 /* 32 */ #define MTFSB1 0x026 /* 38 */ #define FNEG 0x028 /* 40 */ #define MCRFS 0x040 /* 64 */ #define MTFSB0 0x046 /* 70 */ #define FMR 0x048 /* 72 */ #define MTFSFI 0x086 /* 134 */ #define FNABS 0x088 /* 136 */ #define FABS 0x108 /* 264 */ #define MFFS 0x247 /* 583 */ #define MTFSF 0x2c7 /* 711 */ #define AB 2 #define AC 3 #define ABC 4 #define D 5 #define DU 6 #define X 7 #define XA 8 #define XB 9 #define XCR 11 #define XCRB 12 #define XCRI 13 #define XCRL 16 #define XE 14 #define XEU 15 #define XFLB 10 static int record_exception(struct pt_regs *regs, int eflag) { u32 fpscr; fpscr = __FPU_FPSCR; if (eflag) { fpscr |= FPSCR_FX; if (eflag & EFLAG_OVERFLOW) fpscr |= FPSCR_OX; if (eflag & EFLAG_UNDERFLOW) fpscr |= FPSCR_UX; if (eflag & EFLAG_DIVZERO) fpscr |= FPSCR_ZX; if (eflag & EFLAG_INEXACT) fpscr |= FPSCR_XX; if (eflag & EFLAG_INVALID) fpscr |= FPSCR_VX; if (eflag & EFLAG_VXSNAN) fpscr |= FPSCR_VXSNAN; if (eflag & EFLAG_VXISI) fpscr |= FPSCR_VXISI; if (eflag & EFLAG_VXIDI) fpscr |= FPSCR_VXIDI; if (eflag & EFLAG_VXZDZ) fpscr |= FPSCR_VXZDZ; if (eflag & EFLAG_VXIMZ) fpscr |= FPSCR_VXIMZ; if (eflag & EFLAG_VXVC) 
fpscr |= FPSCR_VXVC; if (eflag & EFLAG_VXSOFT) fpscr |= FPSCR_VXSOFT; if (eflag & EFLAG_VXSQRT) fpscr |= FPSCR_VXSQRT; if (eflag & EFLAG_VXCVI) fpscr |= FPSCR_VXCVI; } // fpscr &= ~(FPSCR_VX); if (fpscr & (FPSCR_VXSNAN | FPSCR_VXISI | FPSCR_VXIDI | FPSCR_VXZDZ | FPSCR_VXIMZ | FPSCR_VXVC | FPSCR_VXSOFT | FPSCR_VXSQRT | FPSCR_VXCVI)) fpscr |= FPSCR_VX; fpscr &= ~(FPSCR_FEX); if (((fpscr & FPSCR_VX) && (fpscr & FPSCR_VE)) || ((fpscr & FPSCR_OX) && (fpscr & FPSCR_OE)) || ((fpscr & FPSCR_UX) && (fpscr & FPSCR_UE)) || ((fpscr & FPSCR_ZX) && (fpscr & FPSCR_ZE)) || ((fpscr & FPSCR_XX) && (fpscr & FPSCR_XE))) fpscr |= FPSCR_FEX; __FPU_FPSCR = fpscr; return (fpscr & FPSCR_FEX) ? 1 : 0; } int do_mathemu(struct pt_regs *regs) { void *op0 = NULL, *op1 = NULL, *op2 = NULL, *op3 = NULL; unsigned long pc = regs->nip; signed short sdisp; u32 insn = 0; int idx = 0; int (*func)(void *, void *, void *, void *); int type = 0; int eflag, trap; if (get_user(insn, (u32 __user *)pc)) return -EFAULT; switch (insn >> 26) { case LFS: func = lfs; type = D; break; case LFSU: func = lfs; type = DU; break; case LFD: func = lfd; type = D; break; case LFDU: func = lfd; type = DU; break; case STFS: func = stfs; type = D; break; case STFSU: func = stfs; type = DU; break; case STFD: func = stfd; type = D; break; case STFDU: func = stfd; type = DU; break; case OP31: switch ((insn >> 1) & 0x3ff) { case LFSX: func = lfs; type = XE; break; case LFSUX: func = lfs; type = XEU; break; case LFDX: func = lfd; type = XE; break; case LFDUX: func = lfd; type = XEU; break; case STFSX: func = stfs; type = XE; break; case STFSUX: func = stfs; type = XEU; break; case STFDX: func = stfd; type = XE; break; case STFDUX: func = stfd; type = XEU; break; case STFIWX: func = stfiwx; type = XE; break; default: goto illegal; } break; case OP59: switch ((insn >> 1) & 0x1f) { case FDIVS: func = fdivs; type = AB; break; case FSUBS: func = fsubs; type = AB; break; case FADDS: func = fadds; type = AB; break; case FSQRTS: func = 
fsqrts; type = XB; break; case FRES: func = fres; type = XB; break; case FMULS: func = fmuls; type = AC; break; case FRSQRTES: func = frsqrtes;type = XB; break; case FMSUBS: func = fmsubs; type = ABC; break; case FMADDS: func = fmadds; type = ABC; break; case FNMSUBS: func = fnmsubs; type = ABC; break; case FNMADDS: func = fnmadds; type = ABC; break; default: goto illegal; } break; case OP63: if (insn & 0x20) { switch ((insn >> 1) & 0x1f) { case FDIV: func = fdiv; type = AB; break; case FSUB: func = fsub; type = AB; break; case FADD: func = fadd; type = AB; break; case FSQRT: func = fsqrt; type = XB; break; case FRE: func = fre; type = XB; break; case FSEL: func = fsel; type = ABC; break; case FMUL: func = fmul; type = AC; break; case FRSQRTE: func = frsqrte; type = XB; break; case FMSUB: func = fmsub; type = ABC; break; case FMADD: func = fmadd; type = ABC; break; case FNMSUB: func = fnmsub; type = ABC; break; case FNMADD: func = fnmadd; type = ABC; break; default: goto illegal; } break; } switch ((insn >> 1) & 0x3ff) { case FCMPU: func = fcmpu; type = XCR; break; case FRSP: func = frsp; type = XB; break; case FCTIW: func = fctiw; type = XB; break; case FCTIWZ: func = fctiwz; type = XB; break; case FCMPO: func = fcmpo; type = XCR; break; case MTFSB1: func = mtfsb1; type = XCRB; break; case FNEG: func = fneg; type = XB; break; case MCRFS: func = mcrfs; type = XCRL; break; case MTFSB0: func = mtfsb0; type = XCRB; break; case FMR: func = fmr; type = XB; break; case MTFSFI: func = mtfsfi; type = XCRI; break; case FNABS: func = fnabs; type = XB; break; case FABS: func = fabs; type = XB; break; case MFFS: func = mffs; type = X; break; case MTFSF: func = mtfsf; type = XFLB; break; default: goto illegal; } break; default: goto illegal; } switch (type) { case AB: op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); break; case AC: op0 = (void 
*)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); op2 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f); break; case ABC: op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); op3 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f); break; case D: idx = (insn >> 16) & 0x1f; sdisp = (insn & 0xffff); op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp); break; case DU: idx = (insn >> 16) & 0x1f; if (!idx) goto illegal; sdisp = (insn & 0xffff); op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)(regs->gpr[idx] + sdisp); break; case X: op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); break; case XA: op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); break; case XB: op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); break; case XE: idx = (insn >> 16) & 0x1f; op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)((idx ? 
regs->gpr[idx] : 0) + regs->gpr[(insn >> 11) & 0x1f]); break; case XEU: idx = (insn >> 16) & 0x1f; if (!idx) goto illegal; op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]); break; case XCR: op0 = (void *)&regs->ccr; op1 = (void *)(long)((insn >> 23) & 0x7); op2 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); op3 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); break; case XCRL: op0 = (void *)&regs->ccr; op1 = (void *)(long)((insn >> 23) & 0x7); op2 = (void *)(long)((insn >> 18) & 0x7); break; case XCRB: op0 = (void *)(long)((insn >> 21) & 0x1f); break; case XCRI: op0 = (void *)(long)((insn >> 23) & 0x7); op1 = (void *)(long)((insn >> 12) & 0xf); break; case XFLB: op0 = (void *)(long)((insn >> 17) & 0xff); op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); break; default: goto illegal; } /* * If we support a HW FPU, we need to ensure the FP state * is flushed into the thread_struct before attempting * emulation */ flush_fp_to_thread(current); eflag = func(op0, op1, op2, op3); if (insn & 1) { regs->ccr &= ~(0x0f000000); regs->ccr |= (__FPU_FPSCR >> 4) & 0x0f000000; } trap = record_exception(regs, eflag); if (trap) return 1; switch (type) { case DU: case XEU: regs->gpr[idx] = (unsigned long)op1; break; default: break; } regs_add_return_ip(regs, 4); return 0; illegal: return -ENOSYS; }
linux-master
arch/powerpc/math-emu/math.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mffs(u32 *frD) { frD[1] = __FPU_FPSCR; #ifdef DEBUG printk("%s: frD %p: %08x.%08x\n", __func__, frD, frD[0], frD[1]); #endif return 0; }
linux-master
arch/powerpc/math-emu/mffs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fnmsub(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fnmsub.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fnmsubs(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fnmsubs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fnmadd(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fnmadd.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fmsubs(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmsubs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fadds(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif FP_ADD_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fadds.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fsqrt(void *frD, void *frB) { FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frB); #endif FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (B_s && B_c != FP_CLS_ZERO) FP_SET_EXCEPTION(EFLAG_VXSQRT); if (B_c == FP_CLS_NAN) FP_SET_EXCEPTION(EFLAG_VXSNAN); FP_SQRT_D(R, B); #ifdef DEBUG printk("R: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fsqrt.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fadd(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif FP_ADD_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fadd.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fsub(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (B_c != FP_CLS_NAN) B_s ^= 1; if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fsub.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int stfs(void *frS, void *ea) { FP_DECL_D(A); FP_DECL_S(R); FP_DECL_EX; float f; #ifdef DEBUG printk("%s: S %p, ea %p\n", __func__, frS, ea); #endif FP_UNPACK_DP(A, frS); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); #endif FP_CONV(S, D, 1, 2, R, A); #ifdef DEBUG printk("R: %ld %lu %ld (%ld)\n", R_s, R_f, R_e, R_c); #endif _FP_PACK_CANONICAL(S, 1, R); if (!FP_CUR_EXCEPTIONS || !__FPU_TRAP_P(FP_CUR_EXCEPTIONS)) { _FP_PACK_RAW_1_P(S, &f, R); if (copy_to_user(ea, &f, sizeof(float))) return -EFAULT; } return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/stfs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fabs(u32 *frD, u32 *frB) { frD[0] = frB[0] & 0x7fffffff; frD[1] = frB[1]; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fabs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mtfsfi(unsigned int crfD, unsigned int IMM) { u32 mask = 0xf; if (!crfD) mask = 9; __FPU_FPSCR &= ~(mask << ((7 - crfD) << 2)); __FPU_FPSCR |= (IMM & 0xf) << ((7 - crfD) << 2); #ifdef DEBUG printk("%s: %d %x: %08lx\n", __func__, crfD, IMM, __FPU_FPSCR); #endif return 0; }
linux-master
arch/powerpc/math-emu/mtfsfi.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fre(void *frD, void *frB) { #ifdef DEBUG printk("%s: %p %p\n", __func__, frD, frB); #endif return -ENOSYS; }
linux-master
arch/powerpc/math-emu/fre.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fmr(u32 *frD, u32 *frB) { frD[0] = frB[0]; frD[1] = frB[1]; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fmr.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fctiw(u32 *frD, void *frB) { FP_DECL_D(B); FP_DECL_EX; unsigned int r; FP_UNPACK_DP(B, frB); FP_TO_INT_D(r, B, 32, 1); frD[1] = r; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fctiw.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fcmpo(u32 *ccr, int crfD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_EX; int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) }; long cmp; #ifdef DEBUG printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN) FP_SET_EXCEPTION(EFLAG_VXVC); FP_CMP_D(cmp, A, B, 2); cmp = code[(cmp + 1) & 3]; __FPU_FPSCR &= ~(0x1f000); __FPU_FPSCR |= (cmp << 12); *ccr &= ~(15 << ((7 - crfD) << 2)); *ccr |= (cmp << ((7 - crfD) << 2)); #ifdef DEBUG printk("CR: %08x\n", *ccr); #endif return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fcmpo.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fmul(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", A_s, A_f1, A_f0, A_e, A_c, A_f1, A_f0, A_e + 1023); printk("B: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", B_s, B_f1, B_f0, B_e, B_c, B_f1, B_f0, B_e + 1023); #endif if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", R_s, R_f1, R_f0, R_e, R_c, R_f1, R_f0, R_e + 1023); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmul.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fmuls(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", A_s, A_f1, A_f0, A_e, A_c, A_f1, A_f0, A_e + 1023); printk("B: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", B_s, B_f1, B_f0, B_e, B_c, B_f1, B_f0, B_e + 1023); #endif if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && B_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld) [%08lx.%08lx %lx]\n", R_s, R_f1, R_f0, R_e, R_c, R_f1, R_f0, R_e + 1023); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmuls.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fctiwz(u32 *frD, void *frB) { FP_DECL_D(B); FP_DECL_EX; u32 fpscr; unsigned int r; fpscr = __FPU_FPSCR; __FPU_FPSCR &= ~(3); __FPU_FPSCR |= FP_RND_ZERO; FP_UNPACK_DP(B, frB); FP_TO_INT_D(r, B, 32, 1); frD[1] = r; __FPU_FPSCR = fpscr; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fctiwz.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int stfd(void *frS, void *ea) { #if 0 #ifdef DEBUG printk("%s: S %p, ea %p: ", __func__, frS, ea); dump_double(frS); printk("\n"); #endif #endif if (copy_to_user(ea, frS, sizeof(double))) return -EFAULT; return 0; }
linux-master
arch/powerpc/math-emu/stfd.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/math-emu/math_efp.c * * Copyright (C) 2006-2008, 2010 Freescale Semiconductor, Inc. * * Author: Ebony Zhu, <[email protected]> * Yu Liu, <[email protected]> * * Derived from arch/alpha/math-emu/math.c * arch/powerpc/math-emu/math.c * * Description: * This file is the exception handler to make E500 SPE instructions * fully comply with IEEE-754 floating point standard. */ #include <linux/types.h> #include <linux/prctl.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/reg.h> #define FP_EX_BOOKE_E500_SPE #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #define EFAPU 0x4 #define VCT 0x4 #define SPFP 0x6 #define DPFP 0x7 #define EFSADD 0x2c0 #define EFSSUB 0x2c1 #define EFSABS 0x2c4 #define EFSNABS 0x2c5 #define EFSNEG 0x2c6 #define EFSMUL 0x2c8 #define EFSDIV 0x2c9 #define EFSCMPGT 0x2cc #define EFSCMPLT 0x2cd #define EFSCMPEQ 0x2ce #define EFSCFD 0x2cf #define EFSCFSI 0x2d1 #define EFSCTUI 0x2d4 #define EFSCTSI 0x2d5 #define EFSCTUF 0x2d6 #define EFSCTSF 0x2d7 #define EFSCTUIZ 0x2d8 #define EFSCTSIZ 0x2da #define EVFSADD 0x280 #define EVFSSUB 0x281 #define EVFSABS 0x284 #define EVFSNABS 0x285 #define EVFSNEG 0x286 #define EVFSMUL 0x288 #define EVFSDIV 0x289 #define EVFSCMPGT 0x28c #define EVFSCMPLT 0x28d #define EVFSCMPEQ 0x28e #define EVFSCTUI 0x294 #define EVFSCTSI 0x295 #define EVFSCTUF 0x296 #define EVFSCTSF 0x297 #define EVFSCTUIZ 0x298 #define EVFSCTSIZ 0x29a #define EFDADD 0x2e0 #define EFDSUB 0x2e1 #define EFDABS 0x2e4 #define EFDNABS 0x2e5 #define EFDNEG 0x2e6 #define EFDMUL 0x2e8 #define EFDDIV 0x2e9 #define EFDCTUIDZ 0x2ea #define EFDCTSIDZ 0x2eb #define EFDCMPGT 0x2ec #define EFDCMPLT 0x2ed #define EFDCMPEQ 0x2ee #define EFDCFS 0x2ef #define EFDCTUI 0x2f4 #define EFDCTSI 0x2f5 #define EFDCTUF 0x2f6 #define EFDCTSF 0x2f7 #define EFDCTUIZ 0x2f8 #define EFDCTSIZ 0x2fa #define AB 2 #define XA 3 #define XB 4 
#define XCR 5 #define NOTYPE 0 #define SIGN_BIT_S (1UL << 31) #define SIGN_BIT_D (1ULL << 63) #define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \ FP_EX_UNDERFLOW | FP_EX_OVERFLOW) static int have_e500_cpu_a005_erratum; union dw_union { u64 dp[1]; u32 wp[2]; }; static unsigned long insn_type(unsigned long speinsn) { unsigned long ret = NOTYPE; switch (speinsn & 0x7ff) { case EFSABS: ret = XA; break; case EFSADD: ret = AB; break; case EFSCFD: ret = XB; break; case EFSCMPEQ: ret = XCR; break; case EFSCMPGT: ret = XCR; break; case EFSCMPLT: ret = XCR; break; case EFSCTSF: ret = XB; break; case EFSCTSI: ret = XB; break; case EFSCTSIZ: ret = XB; break; case EFSCTUF: ret = XB; break; case EFSCTUI: ret = XB; break; case EFSCTUIZ: ret = XB; break; case EFSDIV: ret = AB; break; case EFSMUL: ret = AB; break; case EFSNABS: ret = XA; break; case EFSNEG: ret = XA; break; case EFSSUB: ret = AB; break; case EFSCFSI: ret = XB; break; case EVFSABS: ret = XA; break; case EVFSADD: ret = AB; break; case EVFSCMPEQ: ret = XCR; break; case EVFSCMPGT: ret = XCR; break; case EVFSCMPLT: ret = XCR; break; case EVFSCTSF: ret = XB; break; case EVFSCTSI: ret = XB; break; case EVFSCTSIZ: ret = XB; break; case EVFSCTUF: ret = XB; break; case EVFSCTUI: ret = XB; break; case EVFSCTUIZ: ret = XB; break; case EVFSDIV: ret = AB; break; case EVFSMUL: ret = AB; break; case EVFSNABS: ret = XA; break; case EVFSNEG: ret = XA; break; case EVFSSUB: ret = AB; break; case EFDABS: ret = XA; break; case EFDADD: ret = AB; break; case EFDCFS: ret = XB; break; case EFDCMPEQ: ret = XCR; break; case EFDCMPGT: ret = XCR; break; case EFDCMPLT: ret = XCR; break; case EFDCTSF: ret = XB; break; case EFDCTSI: ret = XB; break; case EFDCTSIDZ: ret = XB; break; case EFDCTSIZ: ret = XB; break; case EFDCTUF: ret = XB; break; case EFDCTUI: ret = XB; break; case EFDCTUIDZ: ret = XB; break; case EFDCTUIZ: ret = XB; break; case EFDDIV: ret = AB; break; case EFDMUL: ret = AB; break; case EFDNABS: ret = XA; break; 
case EFDNEG: ret = XA; break; case EFDSUB: ret = AB; break; } return ret; } int do_spe_mathemu(struct pt_regs *regs) { FP_DECL_EX; int IR, cmp; unsigned long type, func, fc, fa, fb, src, speinsn; union dw_union vc, va, vb; if (get_user(speinsn, (unsigned int __user *) regs->nip)) return -EFAULT; if ((speinsn >> 26) != EFAPU) return -EINVAL; /* not an spe instruction */ type = insn_type(speinsn); if (type == NOTYPE) goto illegal; func = speinsn & 0x7ff; fc = (speinsn >> 21) & 0x1f; fa = (speinsn >> 16) & 0x1f; fb = (speinsn >> 11) & 0x1f; src = (speinsn >> 5) & 0x7; vc.wp[0] = current->thread.evr[fc]; vc.wp[1] = regs->gpr[fc]; va.wp[0] = current->thread.evr[fa]; va.wp[1] = regs->gpr[fa]; vb.wp[0] = current->thread.evr[fb]; vb.wp[1] = regs->gpr[fb]; __FPU_FPSCR = mfspr(SPRN_SPEFSCR); pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR); pr_debug("vc: %08x %08x\n", vc.wp[0], vc.wp[1]); pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]); pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); switch (src) { case SPFP: { FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); switch (type) { case AB: case XCR: FP_UNPACK_SP(SA, va.wp + 1); fallthrough; case XB: FP_UNPACK_SP(SB, vb.wp + 1); break; case XA: FP_UNPACK_SP(SA, va.wp + 1); break; } pr_debug("SA: %d %08x %d (%d)\n", SA_s, SA_f, SA_e, SA_c); pr_debug("SB: %d %08x %d (%d)\n", SB_s, SB_f, SB_e, SB_c); switch (func) { case EFSABS: vc.wp[1] = va.wp[1] & ~SIGN_BIT_S; goto update_regs; case EFSNABS: vc.wp[1] = va.wp[1] | SIGN_BIT_S; goto update_regs; case EFSNEG: vc.wp[1] = va.wp[1] ^ SIGN_BIT_S; goto update_regs; case EFSADD: FP_ADD_S(SR, SA, SB); goto pack_s; case EFSSUB: FP_SUB_S(SR, SA, SB); goto pack_s; case EFSMUL: FP_MUL_S(SR, SA, SB); goto pack_s; case EFSDIV: FP_DIV_S(SR, SA, SB); goto pack_s; case EFSCMPEQ: cmp = 0; goto cmp_s; case EFSCMPGT: cmp = 1; goto cmp_s; case EFSCMPLT: cmp = -1; goto cmp_s; case EFSCTSF: case EFSCTUF: if (SB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { SB_e += 
(func == EFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, (func == EFSCTSF) ? 1 : 0); } goto update_regs; case EFSCFD: { FP_DECL_D(DB); FP_CLEAR_EXCEPTIONS; FP_UNPACK_DP(DB, vb.dp); pr_debug("DB: %d %08x %08x %d (%d)\n", DB_s, DB_f1, DB_f0, DB_e, DB_c); FP_CONV(S, D, 1, 2, SR, DB); goto pack_s; } case EFSCTSI: case EFSCTUI: if (SB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; case EFSCTSIZ: case EFSCTUIZ: if (SB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; default: goto illegal; } break; pack_s: pr_debug("SR: %d %08x %d (%d)\n", SR_s, SR_f, SR_e, SR_c); FP_PACK_SP(vc.wp + 1, SR); goto update_regs; cmp_s: FP_CMP_S(IR, SA, SB, 3); if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB))) FP_SET_EXCEPTION(FP_EX_INVALID); if (IR == cmp) { IR = 0x4; } else { IR = 0; } goto update_ccr; } case DPFP: { FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); switch (type) { case AB: case XCR: FP_UNPACK_DP(DA, va.dp); fallthrough; case XB: FP_UNPACK_DP(DB, vb.dp); break; case XA: FP_UNPACK_DP(DA, va.dp); break; } pr_debug("DA: %d %08x %08x %d (%d)\n", DA_s, DA_f1, DA_f0, DA_e, DA_c); pr_debug("DB: %d %08x %08x %d (%d)\n", DB_s, DB_f1, DB_f0, DB_e, DB_c); switch (func) { case EFDABS: vc.dp[0] = va.dp[0] & ~SIGN_BIT_D; goto update_regs; case EFDNABS: vc.dp[0] = va.dp[0] | SIGN_BIT_D; goto update_regs; case EFDNEG: vc.dp[0] = va.dp[0] ^ SIGN_BIT_D; goto update_regs; case EFDADD: FP_ADD_D(DR, DA, DB); goto pack_d; case EFDSUB: FP_SUB_D(DR, DA, DB); goto pack_d; case EFDMUL: FP_MUL_D(DR, DA, DB); goto pack_d; case EFDDIV: FP_DIV_D(DR, DA, DB); goto pack_d; case EFDCMPEQ: cmp = 0; goto cmp_d; case EFDCMPGT: cmp = 1; goto cmp_d; case EFDCMPLT: cmp = -1; goto cmp_d; case EFDCTSF: case EFDCTUF: if (DB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } 
else { DB_e += (func == EFDCTSF ? 31 : 32); FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, (func == EFDCTSF) ? 1 : 0); } goto update_regs; case EFDCFS: { FP_DECL_S(SB); FP_CLEAR_EXCEPTIONS; FP_UNPACK_SP(SB, vb.wp + 1); pr_debug("SB: %d %08x %d (%d)\n", SB_s, SB_f, SB_e, SB_c); FP_CONV(D, S, 2, 1, DR, SB); goto pack_d; } case EFDCTUIDZ: case EFDCTSIDZ: if (DB_c == FP_CLS_NAN) { vc.dp[0] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0) ? 1 : 0); } goto update_regs; case EFDCTUI: case EFDCTSI: if (DB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; case EFDCTUIZ: case EFDCTSIZ: if (DB_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; default: goto illegal; } break; pack_d: pr_debug("DR: %d %08x %08x %d (%d)\n", DR_s, DR_f1, DR_f0, DR_e, DR_c); FP_PACK_DP(vc.dp, DR); goto update_regs; cmp_d: FP_CMP_D(IR, DA, DB, 3); if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) FP_SET_EXCEPTION(FP_EX_INVALID); if (IR == cmp) { IR = 0x4; } else { IR = 0; } goto update_ccr; } case VCT: { FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0); FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1); int IR0, IR1; switch (type) { case AB: case XCR: FP_UNPACK_SP(SA0, va.wp); FP_UNPACK_SP(SA1, va.wp + 1); fallthrough; case XB: FP_UNPACK_SP(SB0, vb.wp); FP_UNPACK_SP(SB1, vb.wp + 1); break; case XA: FP_UNPACK_SP(SA0, va.wp); FP_UNPACK_SP(SA1, va.wp + 1); break; } pr_debug("SA0: %d %08x %d (%d)\n", SA0_s, SA0_f, SA0_e, SA0_c); pr_debug("SA1: %d %08x %d (%d)\n", SA1_s, SA1_f, SA1_e, SA1_c); pr_debug("SB0: %d %08x %d (%d)\n", SB0_s, SB0_f, SB0_e, SB0_c); pr_debug("SB1: %d %08x %d (%d)\n", SB1_s, SB1_f, SB1_e, SB1_c); switch (func) { case EVFSABS: vc.wp[0] = va.wp[0] & ~SIGN_BIT_S; vc.wp[1] = va.wp[1] & ~SIGN_BIT_S; goto update_regs; case EVFSNABS: 
vc.wp[0] = va.wp[0] | SIGN_BIT_S; vc.wp[1] = va.wp[1] | SIGN_BIT_S; goto update_regs; case EVFSNEG: vc.wp[0] = va.wp[0] ^ SIGN_BIT_S; vc.wp[1] = va.wp[1] ^ SIGN_BIT_S; goto update_regs; case EVFSADD: FP_ADD_S(SR0, SA0, SB0); FP_ADD_S(SR1, SA1, SB1); goto pack_vs; case EVFSSUB: FP_SUB_S(SR0, SA0, SB0); FP_SUB_S(SR1, SA1, SB1); goto pack_vs; case EVFSMUL: FP_MUL_S(SR0, SA0, SB0); FP_MUL_S(SR1, SA1, SB1); goto pack_vs; case EVFSDIV: FP_DIV_S(SR0, SA0, SB0); FP_DIV_S(SR1, SA1, SB1); goto pack_vs; case EVFSCMPEQ: cmp = 0; goto cmp_vs; case EVFSCMPGT: cmp = 1; goto cmp_vs; case EVFSCMPLT: cmp = -1; goto cmp_vs; case EVFSCTUF: case EVFSCTSF: if (SB0_c == FP_CLS_NAN) { vc.wp[0] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { SB0_e += (func == EVFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, (func == EVFSCTSF) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { SB1_e += (func == EVFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, (func == EVFSCTSF) ? 1 : 0); } goto update_regs; case EVFSCTUI: case EVFSCTSI: if (SB0_c == FP_CLS_NAN) { vc.wp[0] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; case EVFSCTUIZ: case EVFSCTSIZ: if (SB0_c == FP_CLS_NAN) { vc.wp[0] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0) ? 
1 : 0); } goto update_regs; default: goto illegal; } break; pack_vs: pr_debug("SR0: %d %08x %d (%d)\n", SR0_s, SR0_f, SR0_e, SR0_c); pr_debug("SR1: %d %08x %d (%d)\n", SR1_s, SR1_f, SR1_e, SR1_c); FP_PACK_SP(vc.wp, SR0); FP_PACK_SP(vc.wp + 1, SR1); goto update_regs; cmp_vs: { int ch, cl; FP_CMP_S(IR0, SA0, SB0, 3); FP_CMP_S(IR1, SA1, SB1, 3); if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0))) FP_SET_EXCEPTION(FP_EX_INVALID); if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1))) FP_SET_EXCEPTION(FP_EX_INVALID); ch = (IR0 == cmp) ? 1 : 0; cl = (IR1 == cmp) ? 1 : 0; IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) | ((ch & cl) << 0); goto update_ccr; } } default: return -EINVAL; } update_ccr: regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2)); regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2)); update_regs: /* * If the "invalid" exception sticky bit was set by the * processor for non-finite input, but was not set before the * instruction being emulated, clear it. Likewise for the * "underflow" bit, which may have been set by the processor * for exact underflow, not just inexact underflow when the * flag should be set for IEEE 754 semantics. Other sticky * exceptions will only be set by the processor when they are * correct according to IEEE 754 semantics, and we must not * clear sticky bits that were already set before the emulated * instruction as they represent the user-visible sticky * exception status. "inexact" traps to kernel are not * required for IEEE semantics and are not enabled by default, * so the "inexact" sticky bit may have been set by a previous * instruction without the kernel being aware of it. 
*/ __FPU_FPSCR &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last; __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK); mtspr(SPRN_SPEFSCR, __FPU_FPSCR); current->thread.spefscr_last = __FPU_FPSCR; current->thread.evr[fc] = vc.wp[0]; regs->gpr[fc] = vc.wp[1]; pr_debug("ccr = %08lx\n", regs->ccr); pr_debug("cur exceptions = %08x spefscr = %08lx\n", FP_CUR_EXCEPTIONS, __FPU_FPSCR); pr_debug("vc: %08x %08x\n", vc.wp[0], vc.wp[1]); pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]); pr_debug("vb: %08x %08x\n", vb.wp[0], vb.wp[1]); if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { if ((FP_CUR_EXCEPTIONS & FP_EX_DIVZERO) && (current->thread.fpexc_mode & PR_FP_EXC_DIV)) return 1; if ((FP_CUR_EXCEPTIONS & FP_EX_OVERFLOW) && (current->thread.fpexc_mode & PR_FP_EXC_OVF)) return 1; if ((FP_CUR_EXCEPTIONS & FP_EX_UNDERFLOW) && (current->thread.fpexc_mode & PR_FP_EXC_UND)) return 1; if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT) && (current->thread.fpexc_mode & PR_FP_EXC_RES)) return 1; if ((FP_CUR_EXCEPTIONS & FP_EX_INVALID) && (current->thread.fpexc_mode & PR_FP_EXC_INV)) return 1; } return 0; illegal: if (have_e500_cpu_a005_erratum) { /* according to e500 cpu a005 erratum, reissue efp inst */ regs_add_return_ip(regs, -4); pr_debug("re-issue efp inst: %08lx\n", speinsn); return 0; } printk(KERN_ERR "\nOoops! 
IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn); return -ENOSYS; } int speround_handler(struct pt_regs *regs) { union dw_union fgpr; int s_lo, s_hi; int lo_inexact, hi_inexact; int fp_result; unsigned long speinsn, type, fb, fc, fptype, func; if (get_user(speinsn, (unsigned int __user *) regs->nip)) return -EFAULT; if ((speinsn >> 26) != 4) return -EINVAL; /* not an spe instruction */ func = speinsn & 0x7ff; type = insn_type(func); if (type == XCR) return -ENOSYS; __FPU_FPSCR = mfspr(SPRN_SPEFSCR); pr_debug("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR); fptype = (speinsn >> 5) & 0x7; /* No need to round if the result is exact */ lo_inexact = __FPU_FPSCR & (SPEFSCR_FG | SPEFSCR_FX); hi_inexact = __FPU_FPSCR & (SPEFSCR_FGH | SPEFSCR_FXH); if (!(lo_inexact || (hi_inexact && fptype == VCT))) return 0; fc = (speinsn >> 21) & 0x1f; s_lo = regs->gpr[fc] & SIGN_BIT_S; s_hi = current->thread.evr[fc] & SIGN_BIT_S; fgpr.wp[0] = current->thread.evr[fc]; fgpr.wp[1] = regs->gpr[fc]; fb = (speinsn >> 11) & 0x1f; switch (func) { case EFSCTUIZ: case EFSCTSIZ: case EVFSCTUIZ: case EVFSCTSIZ: case EFDCTUIDZ: case EFDCTSIDZ: case EFDCTUIZ: case EFDCTSIZ: /* * These instructions always round to zero, * independent of the rounding mode. */ return 0; case EFSCTUI: case EFSCTUF: case EVFSCTUI: case EVFSCTUF: case EFDCTUI: case EFDCTUF: fp_result = 0; s_lo = 0; s_hi = 0; break; case EFSCTSI: case EFSCTSF: fp_result = 0; /* Recover the sign of a zero result if possible. */ if (fgpr.wp[1] == 0) s_lo = regs->gpr[fb] & SIGN_BIT_S; break; case EVFSCTSI: case EVFSCTSF: fp_result = 0; /* Recover the sign of a zero result if possible. */ if (fgpr.wp[1] == 0) s_lo = regs->gpr[fb] & SIGN_BIT_S; if (fgpr.wp[0] == 0) s_hi = current->thread.evr[fb] & SIGN_BIT_S; break; case EFDCTSI: case EFDCTSF: fp_result = 0; s_hi = s_lo; /* Recover the sign of a zero result if possible. 
*/ if (fgpr.wp[1] == 0) s_hi = current->thread.evr[fb] & SIGN_BIT_S; break; default: fp_result = 1; break; } pr_debug("round fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); switch (fptype) { /* Since SPE instructions on E500 core can handle round to nearest * and round toward zero with IEEE-754 complied, we just need * to handle round toward +Inf and round toward -Inf by software. */ case SPFP: if ((FP_ROUNDMODE) == FP_RND_PINF) { if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */ } else { /* round to -Inf */ if (s_lo) { if (fp_result) fgpr.wp[1]++; /* Z < 0, choose Z2 */ else fgpr.wp[1]--; /* Z < 0, choose Z2 */ } } break; case DPFP: if (FP_ROUNDMODE == FP_RND_PINF) { if (!s_hi) { if (fp_result) fgpr.dp[0]++; /* Z > 0, choose Z1 */ else fgpr.wp[1]++; /* Z > 0, choose Z1 */ } } else { /* round to -Inf */ if (s_hi) { if (fp_result) fgpr.dp[0]++; /* Z < 0, choose Z2 */ else fgpr.wp[1]--; /* Z < 0, choose Z2 */ } } break; case VCT: if (FP_ROUNDMODE == FP_RND_PINF) { if (lo_inexact && !s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */ if (hi_inexact && !s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */ } else { /* round to -Inf */ if (lo_inexact && s_lo) { if (fp_result) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */ else fgpr.wp[1]--; /* Z_low < 0, choose Z2 */ } if (hi_inexact && s_hi) { if (fp_result) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */ else fgpr.wp[0]--; /* Z_high < 0, choose Z2 */ } } break; default: return -EINVAL; } current->thread.evr[fc] = fgpr.wp[0]; regs->gpr[fc] = fgpr.wp[1]; pr_debug(" to fgpr: %08x %08x\n", fgpr.wp[0], fgpr.wp[1]); if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 
1 : 0; return 0; } static int __init spe_mathemu_init(void) { u32 pvr, maj, min; pvr = mfspr(SPRN_PVR); if ((PVR_VER(pvr) == PVR_VER_E500V1) || (PVR_VER(pvr) == PVR_VER_E500V2)) { maj = PVR_MAJ(pvr); min = PVR_MIN(pvr); /* * E500 revision below 1.1, 2.3, 3.1, 4.1, 5.1 * need cpu a005 errata workaround */ switch (maj) { case 1: if (min < 1) have_e500_cpu_a005_erratum = 1; break; case 2: if (min < 3) have_e500_cpu_a005_erratum = 1; break; case 3: case 4: case 5: if (min < 1) have_e500_cpu_a005_erratum = 1; break; default: break; } } return 0; } module_init(spe_mathemu_init);
linux-master
arch/powerpc/math-emu/math_efp.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fnmadds(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); if (R_c != FP_CLS_NAN) R_s ^= 1; #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fnmadds.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int frsqrte(void *frD, void *frB) { #ifdef DEBUG printk("%s: %p %p\n", __func__, frD, frB); #endif return 0; }
linux-master
arch/powerpc/math-emu/frsqrte.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fres(void *frD, void *frB) { #ifdef DEBUG printk("%s: %p %p\n", __func__, frD, frB); #endif return -ENOSYS; }
linux-master
arch/powerpc/math-emu/fres.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mcrfs(u32 *ccr, u32 crfD, u32 crfS) { u32 value, clear; #ifdef DEBUG printk("%s: %p (%08x) %d %d\n", __func__, ccr, *ccr, crfD, crfS); #endif clear = 15 << ((7 - crfS) << 2); if (!crfS) clear = 0x90000000; value = (__FPU_FPSCR >> ((7 - crfS) << 2)) & 15; __FPU_FPSCR &= ~(clear); *ccr &= ~(15 << ((7 - crfD) << 2)); *ccr |= (value << ((7 - crfD) << 2)); #ifdef DEBUG printk("CR: %08x\n", __func__, *ccr); #endif return 0; }
linux-master
arch/powerpc/math-emu/mcrfs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int frsqrtes(void *frD, void *frB) { #ifdef DEBUG printk("%s: %p %p\n", __func__, frD, frB); #endif return 0; }
linux-master
arch/powerpc/math-emu/frsqrtes.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fmadds(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmadds.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fsubs(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (B_c != FP_CLS_NAN) B_s ^= 1; if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fsubs.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int frsp(void *frD, void *frB) { FP_DECL_D(B); FP_DECL_EX; #ifdef DEBUG printk("%s: D %p, B %p\n", __func__, frD, frB); #endif FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif __FP_PACK_DS(frD, B); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/frsp.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mtfsf(unsigned int FM, u32 *frB) { u32 mask; u32 fpscr; if (likely(FM == 1)) mask = 0x0f; else if (likely(FM == 0xff)) mask = ~0; else { mask = ((FM & 1) | ((FM << 3) & 0x10) | ((FM << 6) & 0x100) | ((FM << 9) & 0x1000) | ((FM << 12) & 0x10000) | ((FM << 15) & 0x100000) | ((FM << 18) & 0x1000000) | ((FM << 21) & 0x10000000)) * 15; } fpscr = ((__FPU_FPSCR & ~mask) | (frB[1] & mask)) & ~(FPSCR_VX | FPSCR_FEX | 0x800); if (fpscr & (FPSCR_VXSNAN | FPSCR_VXISI | FPSCR_VXIDI | FPSCR_VXZDZ | FPSCR_VXIMZ | FPSCR_VXVC | FPSCR_VXSOFT | FPSCR_VXSQRT | FPSCR_VXCVI)) fpscr |= FPSCR_VX; /* The bit order of exception enables and exception status * is the same. Simply shift and mask to check for enabled * exceptions. */ if (fpscr & (fpscr >> 22) & 0xf8) fpscr |= FPSCR_FEX; __FPU_FPSCR = fpscr; #ifdef DEBUG printk("%s: %02x %p: %08lx\n", __func__, FM, frB, __FPU_FPSCR); #endif return 0; }
linux-master
arch/powerpc/math-emu/mtfsf.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> int fneg(u32 *frD, u32 *frB) { frD[0] = frB[0] ^ 0x80000000; frD[1] = frB[1]; #ifdef DEBUG printk("%s: D %p, B %p: ", __func__, frD, frB); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/fneg.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> int mtfsb1(int crbD) { if ((crbD != 1) && (crbD != 2)) __FPU_FPSCR |= (1 << (31 - crbD)); #ifdef DEBUG printk("%s: %d %08lx\n", __func__, crbD, __FPU_FPSCR); #endif return 0; }
linux-master
arch/powerpc/math-emu/mtfsb1.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fsel(u32 *frD, void *frA, u32 *frB, u32 *frC) { FP_DECL_D(A); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %08x %08x\n", frB[0], frB[1]); printk("C: %08x %08x\n", frC[0], frC[1]); #endif if (A_c == FP_CLS_NAN || (A_c != FP_CLS_ZERO && A_s)) { frD[0] = frB[0]; frD[1] = frB[1]; } else { frD[0] = frC[0]; frD[1] = frC[1]; } #ifdef DEBUG printk("D: %08x.%08x\n", frD[0], frD[1]); #endif return 0; }
linux-master
arch/powerpc/math-emu/fsel.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> #include <math-emu/single.h> int fsqrts(void *frD, void *frB) { FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frB); #endif FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (B_s && B_c != FP_CLS_ZERO) FP_SET_EXCEPTION(EFLAG_VXSQRT); if (B_c == FP_CLS_NAN) FP_SET_EXCEPTION(EFLAG_VXSNAN); FP_SQRT_D(R, B); #ifdef DEBUG printk("R: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_DS(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fsqrts.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fmadd(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmadd.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/double.h> int lfd(void *frD, void *ea) { if (copy_from_user(frD, ea, sizeof(double))) return -EFAULT; #ifdef DEBUG printk("%s: D %p, ea %p: ", __func__, frD, ea); dump_double(frD); printk("\n"); #endif return 0; }
linux-master
arch/powerpc/math-emu/lfd.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fmsub(void *frD, void *frA, void *frB, void *frC) { FP_DECL_D(R); FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(C); FP_DECL_D(T); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); FP_UNPACK_DP(C, frC); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); printk("C: %ld %lu %lu %ld (%ld)\n", C_s, C_f1, C_f0, C_e, C_c); #endif if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) || (A_c == FP_CLS_ZERO && C_c == FP_CLS_INF)) FP_SET_EXCEPTION(EFLAG_VXIMZ); FP_MUL_D(T, A, C); if (B_c != FP_CLS_NAN) B_s ^= 1; if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF) FP_SET_EXCEPTION(EFLAG_VXISI); FP_ADD_D(R, T, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fmsub.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/sfp-machine.h> #include <math-emu/soft-fp.h> #include <math-emu/double.h> int fdiv(void *frD, void *frA, void *frB) { FP_DECL_D(A); FP_DECL_D(B); FP_DECL_D(R); FP_DECL_EX; #ifdef DEBUG printk("%s: %p %p %p\n", __func__, frD, frA, frB); #endif FP_UNPACK_DP(A, frA); FP_UNPACK_DP(B, frB); #ifdef DEBUG printk("A: %ld %lu %lu %ld (%ld)\n", A_s, A_f1, A_f0, A_e, A_c); printk("B: %ld %lu %lu %ld (%ld)\n", B_s, B_f1, B_f0, B_e, B_c); #endif if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_VXZDZ); #ifdef DEBUG printk("%s: FPSCR_VXZDZ raised\n", __func__); #endif } if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) { FP_SET_EXCEPTION(EFLAG_VXIDI); #ifdef DEBUG printk("%s: FPSCR_VXIDI raised\n", __func__); #endif } if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) { FP_SET_EXCEPTION(EFLAG_DIVZERO); if (__FPU_TRAP_P(EFLAG_DIVZERO)) return FP_CUR_EXCEPTIONS; } FP_DIV_D(R, A, B); #ifdef DEBUG printk("D: %ld %lu %lu %ld (%ld)\n", R_s, R_f1, R_f0, R_e, R_c); #endif __FP_PACK_D(frD, R); return FP_CUR_EXCEPTIONS; }
linux-master
arch/powerpc/math-emu/fdiv.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ULI M1575 setup code - specific to Freescale boards
 *
 * Copyright 2007 Freescale Semiconductor Inc.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/of_irq.h>

#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

#include <sysdev/fsl_pci.h>

/* PIRQ routing register values (registers 0x48..0x4b in the M1575) */
#define ULI_PIRQA	0x08
#define ULI_PIRQB	0x09
#define ULI_PIRQC	0x0a
#define ULI_PIRQD	0x0b
#define ULI_PIRQE	0x0c
#define ULI_PIRQF	0x0d
#define ULI_PIRQG	0x0e

/* Encodings the M1575 uses to select an 8259 IRQ line (not the IRQ number) */
#define ULI_8259_NONE	0x00
#define ULI_8259_IRQ1	0x08
#define ULI_8259_IRQ3	0x02
#define ULI_8259_IRQ4	0x04
#define ULI_8259_IRQ5	0x05
#define ULI_8259_IRQ6	0x07
#define ULI_8259_IRQ7	0x06
#define ULI_8259_IRQ9	0x01
#define ULI_8259_IRQ10	0x03
#define ULI_8259_IRQ11	0x09
#define ULI_8259_IRQ12	0x0b
#define ULI_8259_IRQ14	0x0d
#define ULI_8259_IRQ15	0x0f

/* Board-chosen PIRQx -> 8259 IRQ routing, two entries packed per register */
static u8 uli_pirq_to_irq[8] = {
	ULI_8259_IRQ9,		/* PIRQA */
	ULI_8259_IRQ10,		/* PIRQB */
	ULI_8259_IRQ11,		/* PIRQC */
	ULI_8259_IRQ12,		/* PIRQD */
	ULI_8259_IRQ5,		/* PIRQE */
	ULI_8259_IRQ6,		/* PIRQF */
	ULI_8259_IRQ7,		/* PIRQG */
	ULI_8259_NONE,		/* PIRQH */
};

/* These quirks only apply to the Freescale boards that carry a ULI M1575 */
static inline bool is_quirk_valid(void)
{
	return (machine_is(mpc86xx_hpcn) ||
		machine_is(mpc8544_ds) ||
		machine_is(p2020_ds) ||
		machine_is(mpc8572_ds));
}

/* Bridge: unlock config space, flip the 5249 to a P2P bridge class */
static void early_uli5249(struct pci_dev *dev)
{
	unsigned char temp;

	if (!is_quirk_valid())
		return;

	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO |
		 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* read/write lock */
	pci_read_config_byte(dev, 0x7c, &temp);
	pci_write_config_byte(dev, 0x7c, 0x80);

	/* set as P2P bridge */
	pci_write_config_byte(dev, PCI_CLASS_PROG, 0x01);
	dev->class |= 0x1;

	/* restore lock */
	pci_write_config_byte(dev, 0x7c, temp);
}

/* Route the M1575's internal devices and PIRQ lines onto legacy 8259 IRQs */
static void quirk_uli1575(struct pci_dev *dev)
{
	int i;

	if (!is_quirk_valid())
		return;

	/*
	 * ULI1575 interrupts route setup
	 */

	/* ULI1575 IRQ mapping conf register maps PIRQx to IRQn */
	for (i = 0; i < 4; i++) {
		/* two 4-bit routing nibbles per config byte */
		u8 val = uli_pirq_to_irq[i*2] | (uli_pirq_to_irq[i*2+1] << 4);
		pci_write_config_byte(dev, 0x48 + i, val);
	}

	/* USB 1.1 OHCI controller 1: dev 28, func 0 - IRQ12 */
	pci_write_config_byte(dev, 0x86, ULI_PIRQD);

	/* USB 1.1 OHCI controller 2: dev 28, func 1 - IRQ9 */
	pci_write_config_byte(dev, 0x87, ULI_PIRQA);

	/* USB 1.1 OHCI controller 3: dev 28, func 2 - IRQ10 */
	pci_write_config_byte(dev, 0x88, ULI_PIRQB);

	/* Lan controller: dev 27, func 0 - IRQ6 */
	pci_write_config_byte(dev, 0x89, ULI_PIRQF);

	/* AC97 Audio controller: dev 29, func 0 - IRQ6 */
	pci_write_config_byte(dev, 0x8a, ULI_PIRQF);

	/* Modem controller: dev 29, func 1 - IRQ6 */
	pci_write_config_byte(dev, 0x8b, ULI_PIRQF);

	/* HD Audio controller: dev 29, func 2 - IRQ6 */
	pci_write_config_byte(dev, 0x8c, ULI_PIRQF);

	/* SATA controller: dev 31, func 1 - IRQ5 */
	pci_write_config_byte(dev, 0x8d, ULI_PIRQE);

	/* SMB interrupt: dev 30, func 1 - IRQ7 */
	pci_write_config_byte(dev, 0x8e, ULI_PIRQG);

	/* PMU ACPI SCI interrupt: dev 30, func 2 - IRQ7 */
	pci_write_config_byte(dev, 0x8f, ULI_PIRQG);

	/* USB 2.0 controller: dev 28, func 3 */
	pci_write_config_byte(dev, 0x74, ULI_8259_IRQ11);

	/* Primary PATA IDE IRQ: 14
	 * Secondary PATA IDE IRQ: 15
	 */
	pci_write_config_byte(dev, 0x44, 0x30 | ULI_8259_IRQ14);
	pci_write_config_byte(dev, 0x75, ULI_8259_IRQ15);
}

/* Final fixup: program 8259 trigger modes and bring up the M1575 RTC */
static void quirk_final_uli1575(struct pci_dev *dev)
{
	/* Set i8259 interrupt trigger
	 * IRQ 3:  Level
	 * IRQ 4:  Level
	 * IRQ 5:  Level
	 * IRQ 6:  Level
	 * IRQ 7:  Level
	 * IRQ 9:  Level
	 * IRQ 10: Level
	 * IRQ 11: Level
	 * IRQ 12: Level
	 * IRQ 14: Edge
	 * IRQ 15: Edge
	 */
	if (!is_quirk_valid())
		return;

	outb(0xfa, 0x4d0);
	outb(0x1e, 0x4d1);

	/* setup RTC */
	CMOS_WRITE(RTC_SET, RTC_CONTROL);
	CMOS_WRITE(RTC_24H, RTC_CONTROL);

	/* ensure month, date, and week alarm fields are ignored */
	CMOS_WRITE(0, RTC_VALID);
	outb_p(0x7c, 0x72);
	outb_p(RTC_ALARM_DONT_CARE, 0x73);
	outb_p(0x7d, 0x72);
	outb_p(RTC_ALARM_DONT_CARE, 0x73);
}

/* SATA: advertise AHCI class and turn off emulated PATA mode */
static void quirk_uli5288(struct pci_dev *dev)
{
	unsigned char c;
	unsigned int d;

	if (!is_quirk_valid())
		return;

	/* read/write lock */
	pci_read_config_byte(dev, 0x83, &c);
	pci_write_config_byte(dev, 0x83, c|0x80);

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &d);
	d = (d & 0xff) | (PCI_CLASS_STORAGE_SATA_AHCI << 8);
	pci_write_config_dword(dev, PCI_CLASS_REVISION, d);

	/* restore lock */
	pci_write_config_byte(dev, 0x83, c);

	/* disable emulated PATA mode enabled */
	pci_read_config_byte(dev, 0x84, &c);
	pci_write_config_byte(dev, 0x84, c & ~0x01);
}

/* PATA: enable native-mode IRQ 14/15 routing */
static void quirk_uli5229(struct pci_dev *dev)
{
	unsigned short temp;

	if (!is_quirk_valid())
		return;

	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE |
		PCI_COMMAND_MASTER | PCI_COMMAND_IO);

	/* Enable Native IRQ 14/15 */
	pci_read_config_word(dev, 0x4a, &temp);
	pci_write_config_word(dev, 0x4a, temp | 0x1000);
}

/* We have to do a dummy read on the P2P for the RTC to work, WTF */
static void quirk_final_uli5249(struct pci_dev *dev)
{
	int i;
	u8 *dummy;
	struct pci_bus *bus = dev->bus;
	struct resource *res;
	resource_size_t end = 0;

	/* find the end of the bridge's non-prefetchable memory window */
	for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) {
		unsigned long flags = pci_resource_flags(dev, i);
		if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM)
			end = pci_resource_end(dev, i);
	}

	/* map 4 bytes inside that window and read once to kick the RTC */
	pci_bus_for_each_resource(bus, res, i) {
		if (res && res->flags & IORESOURCE_MEM) {
			if (res->end == end)
				dummy = ioremap(res->start, 0x4);
			else
				dummy = ioremap(res->end - 3, 0x4);
			if (dummy) {
				in_8(dummy);
				iounmap(dummy);
			}
			break;
		}
	}
}

DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AL, 0x5249, early_uli5249);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, quirk_uli1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, quirk_uli5288);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5249, quirk_final_uli5249);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x1575, quirk_final_uli1575);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);

/* MPC8610 HPCD variant: use the sideband interrupt instead of INTx */
static void hpcd_quirk_uli1575(struct pci_dev *dev)
{
	u32 temp32;

	if (!machine_is(mpc86xx_hpcd))
		return;

	/* Disable INTx */
	pci_read_config_dword(dev, 0x48, &temp32);
	pci_write_config_dword(dev, 0x48, (temp32 | 1<<26));

	/* Enable sideband interrupt */
	pci_read_config_dword(dev, 0x90, &temp32);
	pci_write_config_dword(dev, 0x90, (temp32 | 1<<22));
}

/* HPCD: unlock config space and set the 5288's class to SATA (0x0601) */
static void hpcd_quirk_uli5288(struct pci_dev *dev)
{
	unsigned char c;

	if (!machine_is(mpc86xx_hpcd))
		return;

	pci_read_config_byte(dev, 0x83, &c);
	c |= 0x80;
	pci_write_config_byte(dev, 0x83, c);

	pci_write_config_byte(dev, PCI_CLASS_PROG, 0x01);
	pci_write_config_byte(dev, PCI_CLASS_DEVICE, 0x06);

	pci_read_config_byte(dev, 0x83, &c);
	c &= 0x7f;
	pci_write_config_byte(dev, 0x83, c);
}

/*
 * Since 8259PIC was disabled on the board, the IDE device can not
 * use the legacy IRQ, we need to let the IDE device work under
 * native mode and use the interrupt line like other PCI devices.
 * IRQ14 is a sideband interrupt from IDE device to CPU and we use this
 * as the interrupt for IDE device.
 */
static void hpcd_quirk_uli5229(struct pci_dev *dev)
{
	unsigned char c;

	if (!machine_is(mpc86xx_hpcd))
		return;

	pci_read_config_byte(dev, 0x4b, &c);
	c |= 0x10;
	pci_write_config_byte(dev, 0x4b, c);
}

/*
 * SATA interrupt pin bug fix
 * There's a chip bug for 5288, The interrupt pin should be 2,
 * not the read only value 1, So it use INTB#, not INTA# which
 * actually used by the IDE device 5229.
 * As of this bug, during the PCI initialization, 5288 read the
 * irq of IDE device from the device tree, this function fix this
 * bug by re-assigning a correct irq to 5288.
 *
 */
static void hpcd_final_uli5288(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct device_node *hosenode = hose ? hose->dn : NULL;
	struct of_phandle_args oirq;
	u32 laddr[3];

	if (!machine_is(mpc86xx_hpcd))
		return;

	if (!hosenode)
		return;

	/* interrupt-map lookup keyed on INTB# (pin 2) for dev 31 */
	oirq.np = hosenode;
	oirq.args[0] = 2;
	oirq.args_count = 1;
	laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8);
	laddr[1] = laddr[2] = 0;
	of_irq_parse_raw(laddr, &oirq);
	dev->irq = irq_create_of_mapping(&oirq);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5288, hpcd_quirk_uli5288);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, hpcd_quirk_uli5229);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5288, hpcd_final_uli5288);

/* Hide the dual-function devices (modem / HD audio) behind the bridge
 * from PCI enumeration; their IRQs are routed together (see quirk_uli1575). */
static int uli_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn)
{
	if (hose->dn == fsl_pci_primary && bus == (hose->first_busno + 2)) {
		/* exclude Modem controller */
		if ((PCI_SLOT(devfn) == 29) && (PCI_FUNC(devfn) == 1))
			return PCIBIOS_DEVICE_NOT_FOUND;

		/* exclude HD Audio controller */
		if ((PCI_SLOT(devfn) == 29) && (PCI_FUNC(devfn) == 2))
			return PCIBIOS_DEVICE_NOT_FOUND;
	}

	return PCIBIOS_SUCCESSFUL;
}

/* Install the device-exclusion hook when a ULI1575 sits under the
 * primary Freescale PCI host (walks up from the uli1575 DT node). */
void __init uli_init(void)
{
	struct device_node *node;
	struct device_node *pci_with_uli;

	/* See if we have a ULI under the primary */

	node = of_find_node_by_name(NULL, "uli1575");
	while ((pci_with_uli = of_get_parent(node))) {
		of_node_put(node);
		node = pci_with_uli;

		if (pci_with_uli == fsl_pci_primary) {
			ppc_md.pci_exclude_device = uli_exclude_device;
			break;
		}
	}
}
linux-master
arch/powerpc/platforms/fsl_uli1575.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/of_irq.h>

#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>

#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Dispatch a GPIO interrupt to the platform-function core */
static irqreturn_t macio_gpio_irq(int irq, void *data)
{
	pmf_do_irq(data);

	return IRQ_HANDLED;
}

/* Hook the GPIO node's interrupt up to macio_gpio_irq() */
static int macio_do_gpio_irq_enable(struct pmf_function *func)
{
	unsigned int irq = irq_of_parse_and_map(func->node, 0);
	if (!irq)
		return -EINVAL;
	return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
}

/* Undo macio_do_gpio_irq_enable() */
static int macio_do_gpio_irq_disable(struct pmf_function *func)
{
	unsigned int irq = irq_of_parse_and_map(func->node, 0);
	if (!irq)
		return -EINVAL;
	free_irq(irq, func);
	return 0;
}

/* Read-modify-write a GPIO register; args->u[0].v selects polarity */
static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
{
	u8 __iomem *addr = (u8 __iomem *)func->driver_data;
	unsigned long flags;
	u8 tmp;

	/* Check polarity */
	if (args && args->count && !args->u[0].v)
		value = ~value;

	/* Toggle the GPIO */
	raw_spin_lock_irqsave(&feature_lock, flags);
	tmp = readb(addr);
	tmp = (tmp & ~mask) | (value & mask);
	DBG("Do write 0x%02x to GPIO %pOF (%p)\n",
	    tmp, func->node, addr);
	writeb(tmp, addr);
	raw_spin_unlock_irqrestore(&feature_lock, flags);

	return 0;
}

/* Read a GPIO register, mask/shift/xor it, and store into the reply slot */
static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
{
	u8 __iomem *addr = (u8 __iomem *)func->driver_data;
	u32 value;

	/* Check if we have room for reply */
	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
		return -EINVAL;

	value = readb(addr);
	*args->u[0].p = ((value & mask) >> rshift) ^ xor;

	return 0;
}

/* Sleep for @duration microseconds, rounded up to whole milliseconds */
static int macio_do_delay(PMF_STD_ARGS, u32 duration)
{
	/* assume we can sleep ! */
	msleep((duration + 999) / 1000);
	return 0;
}

static struct pmf_handlers macio_gpio_handlers = {
	.irq_enable	= macio_do_gpio_irq_enable,
	.irq_disable	= macio_do_gpio_irq_disable,
	.write_gpio	= macio_do_gpio_write,
	.read_gpio	= macio_do_gpio_read,
	.delay		= macio_do_delay,
};

/* Register GPIO platform functions for one mac-io chip and run the
 * on-init ones */
static void __init macio_gpio_init_one(struct macio_chip *macio)
{
	struct device_node *gparent, *gp;

	/*
	 * Find the "gpio" parent node
	 */

	for_each_child_of_node(macio->of_node, gparent)
		if (of_node_name_eq(gparent, "gpio"))
			break;
	if (gparent == NULL)
		return;

	DBG("Installing GPIO functions for macio %pOF\n",
	    macio->of_node);

	/*
	 * Ok, got one, we dont need anything special to track them down, so
	 * we just create them all
	 */
	for_each_child_of_node(gparent, gp) {
		const u32 *reg = of_get_property(gp, "reg", NULL);
		unsigned long offset;
		if (reg == NULL)
			continue;
		offset = *reg;
		/* Deal with old style device-tree. We can safely hard code the
		 * offset for now too even if it's a bit gross ...
		 */
		if (offset < 0x50)
			offset += 0x50;
		offset += (unsigned long)macio->base;
		pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
	}

	DBG("Calling initial GPIO functions for macio %pOF\n",
	    macio->of_node);

	/* And now we run all the init ones */
	for_each_child_of_node(gparent, gp)
		pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);

	of_node_put(gparent);

	/* Note: We do not at this point implement the "at sleep" or "at wake"
	 * functions. I yet to find any for GPIOs anyway
	 */
}

/* The macio_do_*_reg* helpers below all take driver_data = the macio chip;
 * the MACIO_IN/OUT macros implicitly use the local "macio" variable. */

static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value,
				u32 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&feature_lock, flags);
	MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
	raw_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}

static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
{
	struct macio_chip *macio = func->driver_data;

	/* Check if we have room for reply */
	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
		return -EINVAL;

	*args->u[0].p = MACIO_IN32(offset);
	return 0;
}

static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value,
			       u8 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&feature_lock, flags);
	MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
	raw_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}

static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
{
	struct macio_chip *macio = func->driver_data;

	/* Check if we have room for reply */
	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
		return -EINVAL;

	*((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
	return 0;
}

/* Read 32-bit register, then mask/shift-right/xor into the reply slot */
static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
				    u32 shift, u32 xor)
{
	struct macio_chip *macio = func->driver_data;

	/* Check if we have room for reply */
	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
		return -EINVAL;

	*args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
	return 0;
}

/* 8-bit variant of macio_do_read_reg32_msrx() */
static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
				   u32 shift, u32 xor)
{
	struct macio_chip *macio = func->driver_data;

	/* Check if we have room for reply */
	if (args == NULL || args->count == 0 || args->u[0].p == NULL)
		return -EINVAL;

	*((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
	return 0;
}

/* Shift the caller-supplied value left and merge it under @mask (32-bit) */
static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
				    u32 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;
	u32 tmp, val;

	/* Check args */
	if (args == NULL || args->count == 0)
		return -EINVAL;

	raw_spin_lock_irqsave(&feature_lock, flags);
	tmp = MACIO_IN32(offset);
	val = args->u[0].v << shift;
	tmp = (tmp & ~mask) | (val & mask);
	MACIO_OUT32(offset, tmp);
	raw_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}

/* 8-bit variant of macio_do_write_reg32_slm() */
static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
				   u32 mask)
{
	struct macio_chip *macio = func->driver_data;
	unsigned long flags;
	u32 tmp, val;

	/* Check args */
	if (args == NULL || args->count == 0)
		return -EINVAL;

	raw_spin_lock_irqsave(&feature_lock, flags);
	tmp = MACIO_IN8(offset);
	val = args->u[0].v << shift;
	tmp = (tmp & ~mask) | (val & mask);
	MACIO_OUT8(offset, tmp);
	raw_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}

static struct pmf_handlers macio_mmio_handlers = {
	.write_reg32		= macio_do_write_reg32,
	.read_reg32		= macio_do_read_reg32,
	.write_reg8		= macio_do_write_reg8,
	.read_reg8		= macio_do_read_reg8,
	.read_reg32_msrx	= macio_do_read_reg32_msrx,
	.read_reg8_msrx		= macio_do_read_reg8_msrx,
	.write_reg32_slm	= macio_do_write_reg32_slm,
	.write_reg8_slm		= macio_do_write_reg8_slm,
	.delay			= macio_do_delay,
};

/* Register the MMIO platform-function handlers for one mac-io chip */
static void __init macio_mmio_init_one(struct macio_chip *macio)
{
	DBG("Installing MMIO functions for macio %pOF\n",
	    macio->of_node);

	pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
}

/* UniNorth "hw-clock" child node, if one was found at install time */
static struct device_node *unin_hwclock;

static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value,
			       u32 mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&feature_lock, flags);
	/* This is fairly bogus in darwin, but it should work for our needs
	 * implemeted that way:
	 */
	UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
	raw_spin_unlock_irqrestore(&feature_lock, flags);
	return 0;
}

static struct pmf_handlers unin_mmio_handlers = {
	.write_reg32		= unin_do_write_reg32,
	.delay			= macio_do_delay,
};

/* Register platform-function handlers for the UniNorth bridge and its
 * hw-clock child, and run their on-init functions */
static void __init uninorth_install_pfunc(void)
{
	struct device_node *np;

	DBG("Installing functions for UniN %pOF\n",
	    uninorth_node);

	/*
	 * Install handlers for the bridge itself
	 */
	pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
	pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);


	/*
	 * Install handlers for the hwclock child if any
	 */
	for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
		if (of_node_name_eq(np, "hw-clock")) {
			unin_hwclock = np;
			break;
		}
	if (unin_hwclock) {
		DBG("Installing functions for UniN clock %pOF\n",
		    unin_hwclock);
		pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
		pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
				 NULL);
	}
}

/* We export this as the SMP code might init us early */
int __init pmac_pfunc_base_install(void)
{
	static int pfbase_inited;	/* guard against double install */
	int i;

	if (pfbase_inited)
		return 0;
	pfbase_inited = 1;

	if (!machine_is(powermac))
		return 0;

	DBG("Installing base platform functions...\n");

	/*
	 * Locate mac-io chips and install handlers
	 */
	for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
		if (macio_chips[i].of_node) {
			macio_mmio_init_one(&macio_chips[i]);
			macio_gpio_init_one(&macio_chips[i]);
		}
	}

	/*
	 * Install handlers for northbridge and direct mapped hwclock
	 * if any. We do not implement the config space access callback
	 * which is only ever used for functions that we do not call in
	 * the current driver (enabling/disabling cells in U2, mostly used
	 * to restore the PCI settings, we do that differently)
	 */
	if (uninorth_node && uninorth_base)
		uninorth_install_pfunc();

	DBG("All base functions installed\n");

	return 0;
}
machine_arch_initcall(powermac, pmac_pfunc_base_install);

#ifdef CONFIG_PM

/* Those can be called by pmac_feature. Ultimately, I should use a sysdev
 * or a device, but for now, that's good enough until I sort out some
 * ordering issues. Also, we do not bother with GPIOs, as so far I yet have
 * to see a case where a GPIO function has the on-suspend or on-resume bit
 */
void pmac_pfunc_base_suspend(void)
{
	int i;

	for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
		if (macio_chips[i].of_node)
			pmf_do_functions(macio_chips[i].of_node, NULL, 0,
					 PMF_FLAGS_ON_SLEEP, NULL);
	}
	if (uninorth_node)
		pmf_do_functions(uninorth_node, NULL, 0,
				 PMF_FLAGS_ON_SLEEP, NULL);
	if (unin_hwclock)
		pmf_do_functions(unin_hwclock, NULL, 0,
				 PMF_FLAGS_ON_SLEEP, NULL);
}

/* Resume runs the on-wake functions in the reverse order of suspend */
void pmac_pfunc_base_resume(void)
{
	int i;

	if (unin_hwclock)
		pmf_do_functions(unin_hwclock, NULL, 0,
				 PMF_FLAGS_ON_WAKE, NULL);
	if (uninorth_node)
		pmf_do_functions(uninorth_node, NULL, 0,
				 PMF_FLAGS_ON_WAKE, NULL);
	for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
		if (macio_chips[i].of_node)
			pmf_do_functions(macio_chips[i].of_node, NULL, 0,
					 PMF_FLAGS_ON_WAKE, NULL);
	}
}
#endif /* CONFIG_PM */
linux-master
arch/powerpc/platforms/powermac/pfunc_base.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for the interrupt controllers found on Power Macintosh,
 * currently Apple's "Grand Central" interrupt controller in all
 * it's incarnations. OpenPIC support used on newer machines is
 * in a separate file
 *
 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
 * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                    IBM, Corp.
 */
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/minmax.h>
#include <linux/pmu.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>

#include "pmac.h"

#ifdef CONFIG_PPC32
/* Per-bank register layout of the old-style (GC/OHare/Heathrow) PIC;
 * each bank handles 32 interrupt sources. */
struct pmac_irq_hw {
	unsigned int	event;
	unsigned int	enable;
	unsigned int	ack;
	unsigned int	level;
};

/* Workaround flags for 32bit powermac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;

/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];

static int max_irqs;
static int max_real_irqs;

static DEFINE_RAW_SPINLOCK(pmac_pic_lock);

/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;

/* Record irq_nr as "lost" and force a decrementer tick so it gets
 * replayed; irqs on the slave PIC are replayed via the cascade irq.
 * Caller holds pmac_pic_lock. */
static void __pmac_retrigger(unsigned int irq_nr)
{
	if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
		__set_bit(irq_nr, ppc_lost_interrupts);
		irq_nr = pmac_irq_cascade;
		mb();
	}
	if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
		atomic_inc(&ppc_n_lost_interrupts);
		set_dec(1);
	}
}

/* Mask the source and ack it in the controller in one operation */
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	do {
		/* make sure ack gets to controller before we enable
		   interrupts */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

/* Ack the source in the controller without touching the mask */
static void pmac_ack_irq(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (__test_and_clear_bit(src, ppc_lost_interrupts))
		atomic_dec(&ppc_n_lost_interrupts);
	out_le32(&pmac_irq_hw[i]->ack, bit);
	(void)in_le32(&pmac_irq_hw[i]->ack);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

/* Push ppc_cached_irq_mask out to the hardware for irq_nr's bank and
 * spin until the controller reflects it. Caller holds pmac_pic_lock. */
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
	unsigned long bit = 1UL << (irq_nr & 0x1f);
	int i = irq_nr >> 5;

	if ((unsigned)irq_nr >= max_irqs)
		return;

	/* enable unmasked interrupts */
	out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);

	do {
		/* make sure mask gets to controller before we
		   return to user */
		mb();
	} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
		!= (ppc_cached_irq_mask[i] & bit));

	/*
	 * Unfortunately, setting the bit in the enable register
	 * when the device interrupt is already on *doesn't* set
	 * the bit in the flag register or request another interrupt.
	 */
	if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
		__pmac_retrigger(irq_nr);
}

/* When an irq gets requested for the first client, if it's an
 * edge interrupt, we clear any previous one on the controller
 */
static unsigned int pmac_startup_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);
	unsigned long bit = 1UL << (src & 0x1f);
	int i = src >> 5;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	if (!irqd_is_level_type(d))
		out_le32(&pmac_irq_hw[i]->ack, bit);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);

	return 0;
}

static void pmac_mask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__clear_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 1);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static void pmac_unmask_irq(struct irq_data *d)
{
	unsigned long flags;
	unsigned int src = irqd_to_hwirq(d);

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__set_bit(src, ppc_cached_irq_mask);
	__pmac_set_irq_mask(src, 0);
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}

static int pmac_retrigger(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	__pmac_retrigger(irqd_to_hwirq(d));
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return 1;
}

static struct irq_chip pmac_pic = {
	.name		= "PMAC-PIC",
	.irq_startup	= pmac_startup_irq,
	.irq_mask	= pmac_mask_irq,
	.irq_ack	= pmac_ack_irq,
	.irq_mask_ack	= pmac_mask_and_ack_irq,
	.irq_unmask	= pmac_unmask_irq,
	.irq_retrigger	= pmac_retrigger,
};

/* Cascade handler: scan the slave PIC banks for pending (or lost)
 * interrupts and dispatch them; drops the lock around the handler. */
static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
	unsigned long flags;
	int irq, bits;
	int rc = IRQ_NONE;

	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		bits |= in_le32(&pmac_irq_hw[i]->level);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
		generic_handle_irq(irq);
		raw_spin_lock_irqsave(&pmac_pic_lock, flags);
		rc = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	return rc;
}

/* ppc_md.get_irq: pick the highest pending source on the primary PIC */
static unsigned int pmac_pic_get_irq(void)
{
	int irq;
	unsigned long bits = 0;
	unsigned long flags;

#ifdef CONFIG_PPC_PMAC32_PSURGE
	/* IPI's are a hack on the powersurge -- Cort */
	if (smp_processor_id() != 0) {
		return  psurge_secondary_virq;
	}
#endif /* CONFIG_PPC_PMAC32_PSURGE */
	raw_spin_lock_irqsave(&pmac_pic_lock, flags);
	for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
		int i = irq >> 5;
		bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
		bits |= in_le32(&pmac_irq_hw[i]->level);
		bits &= ppc_cached_irq_mask[i];
		if (bits == 0)
			continue;
		irq += __ilog2(bits);
		break;
	}
	raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
	if (unlikely(irq < 0))
		return 0;
	return irq_linear_revmap(pmac_pic_host, irq);
}

static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
			       enum irq_domain_bus_token bus_token)
{
	/* We match all, we don't always have a node anyway */
	return 1;
}

static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
			     irq_hw_number_t hw)
{
	if (hw >= max_irqs)
		return -EINVAL;

	/* Mark level interrupts, set delayed disable for edge ones and set
	 * handlers
	 */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops pmac_pic_host_ops = {
	.match = pmac_pic_host_match,
	.map = pmac_pic_host_map,
	.xlate = irq_domain_xlate_onecell,
};

/* Probe and set up the old-style (pre-OpenPIC) controllers: GC, OHare
 * or Heathrow, with an optional cascaded slave chip. */
static void __init pmac_pic_probe_oldstyle(void)
{
	int i;
	struct device_node *master = NULL;
	struct device_node *slave = NULL;
	u8 __iomem *addr;
	struct resource r;

	/* Set our get_irq function */
	ppc_md.get_irq = pmac_pic_get_irq;

	/*
	 * Find the interrupt controller type & node
	 */
	if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
		max_irqs = max_real_irqs = 32;
	} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
		max_irqs = max_real_irqs = 32;
		/* We might have a second cascaded ohare */
		slave = of_find_node_by_name(NULL, "pci106b,7");
		if (slave)
			max_irqs = 64;
	} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
		max_irqs = max_real_irqs = 64;

		/* We might have a second cascaded heathrow */

		/* Compensate for of_node_put() in of_find_node_by_name() */
		of_node_get(master);
		slave = of_find_node_by_name(master, "mac-io");

		/* Check ordering of master & slave */
		if (of_device_is_compatible(master, "gatwick")) {
			BUG_ON(slave == NULL);
			swap(master, slave);
		}

		/* We found a slave */
		if (slave)
			max_irqs = 128;
	}
	BUG_ON(master == NULL);

	/*
	 * Allocate an irq host
	 */
	pmac_pic_host = irq_domain_add_linear(master, max_irqs,
					      &pmac_pic_host_ops, NULL);
	BUG_ON(pmac_pic_host == NULL);
	irq_set_default_host(pmac_pic_host);

	/* Get addresses of first controller if we have a node for it */
	BUG_ON(of_address_to_resource(master, 0, &r));

	/* Map interrupts of primary controller */
	addr = (u8 __iomem *) ioremap(r.start, 0x40);
	i = 0;
	pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
		(addr + 0x20);
	if (max_real_irqs > 32)
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x10);
	of_node_put(master);

	printk(KERN_INFO "irq: Found primary Apple PIC %pOF for %d irqs\n",
	       master, max_real_irqs);

	/* Map interrupts of cascaded controller */
	if (slave && !of_address_to_resource(slave, 0, &r)) {
		addr = (u8 __iomem *)ioremap(r.start, 0x40);
		pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
			(addr + 0x20);
		if (max_irqs > 64)
			pmac_irq_hw[i++] =
				(volatile struct pmac_irq_hw __iomem *)
				(addr + 0x10);
		pmac_irq_cascade = irq_of_parse_and_map(slave, 0);

		printk(KERN_INFO "irq: Found slave Apple PIC %pOF for %d irqs"
		       " cascade: %d\n", slave,
		       max_irqs - max_real_irqs, pmac_irq_cascade);
	}
	of_node_put(slave);

	/* Disable all interrupts in all controllers */
	for (i = 0; i * 32 < max_irqs; ++i)
		out_le32(&pmac_irq_hw[i]->enable, 0);

	/* Hookup cascade irq */
	if (slave && pmac_irq_cascade) {
		if (request_irq(pmac_irq_cascade, gatwick_action,
				IRQF_NO_THREAD, "cascade", NULL))
			pr_err("Failed to register cascade interrupt\n");
	}

	printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
	i = irq_create_mapping(NULL, 20);
	if (request_irq(i, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL))
		pr_err("Failed to register NMI-XMON interrupt\n");
#endif
}

int of_irq_parse_oldworld(const struct device_node *device, int index,
			struct of_phandle_args *out_irq)
{
	const u32 *ints = NULL;
	int intlen;

	/*
	 * Old machines just have a list of interrupt numbers
	 * and no interrupt-controller nodes. We also have dodgy
	 * cases where the APPL,interrupts property is completely
	 * missing behind pci-pci bridges and we have to get it
	 * from the parent (the bridge itself, as apple just wired
	 * everything together on these)
	 */
	while (device) {
		ints = of_get_property(device, "AAPL,interrupts", &intlen);
		if (ints != NULL)
			break;
		device = device->parent;
		if (!of_node_is_type(device, "pci"))
			break;
	}
	if (ints == NULL)
		return -EINVAL;
	intlen /= sizeof(u32);

	if (index >= intlen)
		return -EINVAL;

	out_irq->np = NULL;
	out_irq->args[0] = ints[index];
	out_irq->args_count = 1;

	return 0;
}
#endif /* CONFIG_PPC32 */

/* Wire the programmer-switch button to xmon as a high-priority NMI */
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
	struct device_node* pswitch;
	int nmi_irq;

	pswitch = of_find_node_by_name(NULL, "programmer-switch");
	if (pswitch) {
		nmi_irq = irq_of_parse_and_map(pswitch, 0);
		if (nmi_irq) {
			mpic_irq_set_priority(nmi_irq, 9);
			if (request_irq(nmi_irq, xmon_irq, IRQF_NO_THREAD,
					"NMI - XMON", NULL))
				pr_err("Failed to register NMI-XMON interrupt\n");
		}
		of_node_put(pswitch);
	}
#endif	/* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}

/* Allocate and init one MPIC; @master selects primary vs. secondary */
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
						int master)
{
	const char *name = master ? " MPIC 1   " : " MPIC 2   ";
	struct mpic *mpic;
	unsigned int flags = master ? 0 : MPIC_SECONDARY;

	pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);

	if (of_property_read_bool(np, "big-endian"))
		flags |= MPIC_BIG_ENDIAN;

	/* Primary Big Endian means HT interrupts. This is quite dodgy
	 * but works until I find a better way
	 */
	if (master && (flags & MPIC_BIG_ENDIAN))
		flags |= MPIC_U3_HT_IRQS;

	mpic = mpic_alloc(np, 0, flags, 0, 0, name);
	if (mpic == NULL)
		return NULL;

	mpic_init(mpic);

	return mpic;
}

/* Probe for (up to two cascaded) OpenPIC controllers; the node without
 * an "interrupts" property is the master. Returns 0 on success. */
static int __init pmac_pic_probe_mpic(void)
{
	struct mpic *mpic1, *mpic2;
	struct device_node *np, *master = NULL, *slave = NULL;

	/* We can have up to 2 MPICs cascaded */
	for_each_node_by_type(np, "open-pic") {
		if (master == NULL && !of_property_present(np, "interrupts"))
			master = of_node_get(np);
		else if (slave == NULL)
			slave = of_node_get(np);
		if (master && slave) {
			of_node_put(np);
			break;
		}
	}

	/* Check for bogus setups */
	if (master == NULL && slave != NULL) {
		master = slave;
		slave = NULL;
	}

	/* Not found, default to good old pmac pic */
	if (master == NULL)
		return -ENODEV;

	/* Set master handler */
	ppc_md.get_irq = mpic_get_irq;

	/* Setup master */
	mpic1 = pmac_setup_one_mpic(master, 1);
	BUG_ON(mpic1 == NULL);

	/* Install NMI if any */
	pmac_pic_setup_mpic_nmi(mpic1);

	of_node_put(master);

	/* Set up a cascaded controller, if present */
	if (slave) {
		mpic2 = pmac_setup_one_mpic(slave, 0);
		if (mpic2 == NULL)
			printk(KERN_ERR "Failed to setup slave MPIC\n");
		of_node_put(slave);
	}

	return 0;
}


void __init pmac_pic_init(void)
{
	/* We configure the OF parsing based on our oldworld vs. newworld
	 * platform type and whether we were booted by BootX.
	 */
#ifdef CONFIG_PPC32
	if (!pmac_newworld)
		of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
	if (of_property_read_bool(of_chosen, "linux,bootx"))
		of_irq_workarounds |= OF_IMAP_NO_PHANDLE;

	/* If we don't have phandles on a newworld, then try to locate a
	 * default interrupt controller (happens when booting with BootX).
	 * We do a first match here, hopefully, that only ever happens on
	 * machines with one controller.
	 */
	if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
		struct device_node *np;

		for_each_node_with_property(np, "interrupt-controller") {
			/* Skip /chosen/interrupt-controller */
			if (of_node_name_eq(np, "chosen"))
				continue;
			/* It seems like at least one person wants
			 * to use BootX on a machine with an AppleKiwi
			 * controller which happens to pretend to be an
			 * interrupt controller too. */
			if (of_node_name_eq(np, "AppleKiwi"))
				continue;
			/* I think we found one ! */
			of_irq_dflt_pic = np;
			break;
		}
	}
#endif /* CONFIG_PPC32 */

	/* We first try to detect Apple's new Core99 chipset, since mac-io
	 * is quite different on those machines and contains an IBM MPIC2.
	 */
	if (pmac_pic_probe_mpic() == 0)
		return;

#ifdef CONFIG_PPC32
	pmac_pic_probe_oldstyle();
#endif
}

#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
 * These procedures are used in implementing sleep on the powerbooks.
 * sleep_save_intrs() saves the states of all interrupt enables
 * and disables all interrupts except for the nominated one.
 * sleep_restore_intrs() restores the states of all interrupt enables.
 */
unsigned long sleep_save_mask[2];

/* This used to be passed by the PMU driver but that link got
 * broken with the new driver model. We use this tweak for now...
 * We really want to do things differently though...
 */
static int pmacpic_find_viaint(void)
{
	int viaint = -1;

#ifdef CONFIG_ADB_PMU
	struct device_node *np;

	if (pmu_get_model() != PMU_OHARE_BASED)
		goto not_found;
	np = of_find_node_by_name(NULL, "via-pmu");
	if (np == NULL)
		goto not_found;
	viaint = irq_of_parse_and_map(np, 0);
	of_node_put(np);

not_found:
#endif /* CONFIG_ADB_PMU */
	return viaint;
}

/* Syscore suspend: save the irq masks and keep only the PMU VIA irq
 * enabled so the sleep handshake can still complete. */
static int pmacpic_suspend(void)
{
	int viaint = pmacpic_find_viaint();

	sleep_save_mask[0] = ppc_cached_irq_mask[0];
	sleep_save_mask[1] = ppc_cached_irq_mask[1];
	ppc_cached_irq_mask[0] = 0;
	ppc_cached_irq_mask[1] = 0;
	if (viaint > 0)
		set_bit(viaint, ppc_cached_irq_mask);
	out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
	(void)in_le32(&pmac_irq_hw[0]->event);
	/* make sure mask gets to controller before we return to caller */
	mb();
	(void)in_le32(&pmac_irq_hw[0]->enable);

	return 0;
}

/* Syscore resume: disable everything, then re-unmask the saved irqs */
static void pmacpic_resume(void)
{
	int i;

	out_le32(&pmac_irq_hw[0]->enable, 0);
	if (max_real_irqs > 32)
		out_le32(&pmac_irq_hw[1]->enable, 0);
	mb();
	for (i = 0; i < max_real_irqs; ++i)
		if (test_bit(i, sleep_save_mask))
			pmac_unmask_irq(irq_get_irq_data(i));
}

static struct syscore_ops pmacpic_syscore_ops = {
	.suspend	= pmacpic_suspend,
	.resume		= pmacpic_resume,
};

static int __init init_pmacpic_syscore(void)
{
	if (pmac_irq_hw[0])
		register_syscore_ops(&pmacpic_syscore_ops);
	return 0;
}

machine_subsys_initcall(powermac, init_pmacpic_syscore);

#endif /* CONFIG_PM && CONFIG_PPC32 */
linux-master
arch/powerpc/platforms/powermac/pic.c
// SPDX-License-Identifier: GPL-2.0-only /* * Miscellaneous procedures for dealing with the PowerMac hardware. * Contains support for the backlight. * * Copyright (C) 2000 Benjamin Herrenschmidt * Copyright (C) 2006 Michael Hanselmann <[email protected]> * */ #include <linux/kernel.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/atomic.h> #include <linux/export.h> #include <asm/backlight.h> #define OLD_BACKLIGHT_MAX 15 static void pmac_backlight_key_worker(struct work_struct *work); static void pmac_backlight_set_legacy_worker(struct work_struct *work); static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker); static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker); /* Although these variables are used in interrupt context, it makes no sense to * protect them. No user is able to produce enough key events per second and * notice the errors that might happen. */ static int pmac_backlight_key_queued; static int pmac_backlight_set_legacy_queued; /* The via-pmu code allows the backlight to be grabbed, in which case the * in-kernel control of the brightness needs to be disabled. This should * only be used by really old PowerBooks. */ static atomic_t kernel_backlight_disabled = ATOMIC_INIT(0); /* Protect the pmac_backlight variable below. You should hold this lock when using the pmac_backlight pointer to prevent its potential removal. */ DEFINE_MUTEX(pmac_backlight_mutex); /* Main backlight storage * * Backlight drivers in this variable are required to have the "ops" * attribute set and to have an update_status function. * * We can only store one backlight here, but since Apple laptops have only one * internal display, it doesn't matter. Other backlight drivers can be used * independently. 
* */ struct backlight_device *pmac_backlight; int pmac_has_backlight_type(const char *type) { struct device_node* bk_node = of_find_node_by_name(NULL, "backlight"); if (bk_node) { const char *prop = of_get_property(bk_node, "backlight-control", NULL); if (prop && strncmp(prop, type, strlen(type)) == 0) { of_node_put(bk_node); return 1; } of_node_put(bk_node); } return 0; } int pmac_backlight_curve_lookup(struct fb_info *info, int value) { int level = (FB_BACKLIGHT_LEVELS - 1); if (info && info->bl_dev) { int i, max = 0; /* Look for biggest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) max = max((int)info->bl_curve[i], max); /* Look for nearest value */ for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) { int diff = abs(info->bl_curve[i] - value); if (diff < max) { max = diff; level = i; } } } return level; } static void pmac_backlight_key_worker(struct work_struct *work) { if (atomic_read(&kernel_backlight_disabled)) return; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; int brightness; props = &pmac_backlight->props; brightness = props->brightness + ((pmac_backlight_key_queued?-1:1) * (props->max_brightness / 15)); if (brightness < 0) brightness = 0; else if (brightness > props->max_brightness) brightness = props->max_brightness; props->brightness = brightness; backlight_update_status(pmac_backlight); } mutex_unlock(&pmac_backlight_mutex); } /* This function is called in interrupt context */ void pmac_backlight_key(int direction) { if (atomic_read(&kernel_backlight_disabled)) return; /* we can receive multiple interrupts here, but the scheduled work * will run only once, with the last value */ pmac_backlight_key_queued = direction; schedule_work(&pmac_backlight_key_work); } static int __pmac_backlight_set_legacy_brightness(int brightness) { int error = -ENXIO; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; props->brightness = brightness * 
(props->max_brightness + 1) / (OLD_BACKLIGHT_MAX + 1); if (props->brightness > props->max_brightness) props->brightness = props->max_brightness; else if (props->brightness < 0) props->brightness = 0; backlight_update_status(pmac_backlight); error = 0; } mutex_unlock(&pmac_backlight_mutex); return error; } static void pmac_backlight_set_legacy_worker(struct work_struct *work) { if (atomic_read(&kernel_backlight_disabled)) return; __pmac_backlight_set_legacy_brightness(pmac_backlight_set_legacy_queued); } /* This function is called in interrupt context */ void pmac_backlight_set_legacy_brightness_pmu(int brightness) { if (atomic_read(&kernel_backlight_disabled)) return; pmac_backlight_set_legacy_queued = brightness; schedule_work(&pmac_backlight_set_legacy_work); } int pmac_backlight_set_legacy_brightness(int brightness) { return __pmac_backlight_set_legacy_brightness(brightness); } int pmac_backlight_get_legacy_brightness(void) { int result = -ENXIO; mutex_lock(&pmac_backlight_mutex); if (pmac_backlight) { struct backlight_properties *props; props = &pmac_backlight->props; result = props->brightness * (OLD_BACKLIGHT_MAX + 1) / (props->max_brightness + 1); } mutex_unlock(&pmac_backlight_mutex); return result; } void pmac_backlight_disable(void) { atomic_inc(&kernel_backlight_disabled); } void pmac_backlight_enable(void) { atomic_dec(&kernel_backlight_disabled); } EXPORT_SYMBOL_GPL(pmac_backlight); EXPORT_SYMBOL_GPL(pmac_backlight_mutex); EXPORT_SYMBOL_GPL(pmac_has_backlight_type);
linux-master
arch/powerpc/platforms/powermac/backlight.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Support for PCI bridges found on Power Macintoshes. * * Copyright (C) 2003-2005 Benjamin Herrenschmuidt ([email protected]) * Copyright (C) 1997 Paul Mackerras ([email protected]) */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_pci.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/grackle.h> #include <asm/ppc-pci.h> #include "pmac.h" #undef DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif /* XXX Could be per-controller, but I don't think we risk anything by * assuming we won't have both UniNorth and Bandit */ static int has_uninorth; #ifdef CONFIG_PPC64 static struct pci_controller *u3_agp; #else static int has_second_ohare; #endif /* CONFIG_PPC64 */ extern int pcibios_assign_bus_offset; struct device_node *k2_skiplist[2]; /* * Magic constants for enabling cache coherency in the bandit/PSX bridge. 
 */
/* Bandit bridge probing constants: the bridge itself appears as device
 * BANDIT_DEVNUM on its own bus; BANDIT_MAGIC is the config offset of the
 * register holding the BANDIT_COHERENT cache-coherency enable bit. */
#define BANDIT_DEVID_2	8
#define BANDIT_REVID	3
#define BANDIT_DEVNUM	11
#define BANDIT_MAGIC	0x50
#define BANDIT_COHERENT	0x40

/*
 * Walk one sibling level of the OF tree, recursing into PCI/CardBus
 * bridges, and return the highest "bus-range" end seen (at least
 * @higher).  Helper for fixup_bus_range() below.
 */
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
	for (; node; node = node->sibling) {
		const int * bus_range;
		const unsigned int *class_code;
		int len;

		/* For PCI<->PCI bridges or CardBus bridges, we go down */
		class_code = of_get_property(node, "class-code", NULL);
		if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
			(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
			continue;
		bus_range = of_get_property(node, "bus-range", &len);
		if (bus_range != NULL && len > 2 * sizeof(int)) {
			if (bus_range[1] > higher)
				higher = bus_range[1];
		}
		higher = fixup_one_level_bus_range(node->child, higher);
	}
	return higher;
}

/* This routine fixes the "bus-range" property of all bridges in the
 * system since they tend to have their "last" member wrong on macs
 *
 * Note that the bus numbers manipulated here are OF bus numbers, they
 * are not Linux bus numbers.
 */
static void __init fixup_bus_range(struct device_node *bridge)
{
	int *bus_range, len;
	struct property *prop;

	/* Lookup the "bus-range" property for the hose */
	prop = of_find_property(bridge, "bus-range", &len);
	if (prop == NULL || prop->length < 2 * sizeof(int))
		return;

	/* widen the recorded last-bus to cover everything found below;
	 * note this writes into the OF property in place */
	bus_range = prop->value;
	bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}

/*
 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
 *
 * The "Bandit" version is present in all early PCI PowerMacs,
 * and up to the first ones using Grackle. Some machines may
 * have 2 bandit controllers (2 PCI busses).
 *
 * "Chaos" is used in some "Bandit"-type machines as a bridge
 * for the separate display bus. It is accessed the same
 * way as bandit, but cannot be probed for devices. It therefore
 * has its own config access functions.
 *
 * The "UniNorth" version is present in all Core99 machines
 * (iBook, G4, new IMacs, and all the recent Apple machines).
* It contains 3 controllers in one ASIC. * * The U3 is the bridge used on G5 machines. It contains an * AGP bus which is dealt with the old UniNorth access routines * and a HyperTransport bus which uses its own set of access * functions. */ #define MACRISC_CFA0(devfn, off) \ ((1 << (unsigned int)PCI_SLOT(dev_fn)) \ | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \ | (((unsigned int)(off)) & 0xFCUL)) #define MACRISC_CFA1(bus, devfn, off) \ ((((unsigned int)(bus)) << 16) \ |(((unsigned int)(devfn)) << 8) \ |(((unsigned int)(off)) & 0xFCUL) \ |1UL) static void __iomem *macrisc_cfg_map_bus(struct pci_bus *bus, unsigned int dev_fn, int offset) { unsigned int caddr; struct pci_controller *hose; hose = pci_bus_to_host(bus); if (hose == NULL) return NULL; if (bus->number == hose->first_busno) { if (dev_fn < (11 << 3)) return NULL; caddr = MACRISC_CFA0(dev_fn, offset); } else caddr = MACRISC_CFA1(bus->number, dev_fn, offset); /* Uninorth will return garbage if we don't read back the value ! */ do { out_le32(hose->cfg_addr, caddr); } while (in_le32(hose->cfg_addr) != caddr); offset &= has_uninorth ? 
0x07 : 0x03; return hose->cfg_data + offset; } static struct pci_ops macrisc_pci_ops = { .map_bus = macrisc_cfg_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; #ifdef CONFIG_PPC32 /* * Verify that a specific (bus, dev_fn) exists on chaos */ static void __iomem *chaos_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { struct device_node *np; const u32 *vendor, *device; if (offset >= 0x100) return NULL; np = of_pci_find_child_device(bus->dev.of_node, devfn); if (np == NULL) return NULL; vendor = of_get_property(np, "vendor-id", NULL); device = of_get_property(np, "device-id", NULL); if (vendor == NULL || device == NULL) return NULL; if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10) && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24)) return NULL; return macrisc_cfg_map_bus(bus, devfn, offset); } static struct pci_ops chaos_pci_ops = { .map_bus = chaos_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static void __init setup_chaos(struct pci_controller *hose, struct resource *addr) { /* assume a `chaos' bridge */ hose->ops = &chaos_pci_ops; hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); } #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 /* * These versions of U3 HyperTransport config space access ops do not * implement self-view of the HT host yet */ /* * This function deals with some "special cases" devices. 
* * 0 -> No special case * 1 -> Skip the device but act as if the access was successful * (return 0xff's on reads, eventually, cache config space * accesses in a later version) * -1 -> Hide the device (unsuccessful access) */ static int u3_ht_skip_device(struct pci_controller *hose, struct pci_bus *bus, unsigned int devfn) { struct device_node *busdn, *dn; int i; /* We only allow config cycles to devices that are in OF device-tree * as we are apparently having some weird things going on with some * revs of K2 on recent G5s, except for the host bridge itself, which * is missing from the tree but we know we can probe. */ if (bus->self) busdn = pci_device_to_OF_node(bus->self); else if (devfn == 0) return 0; else busdn = hose->dn; for (dn = busdn->child; dn; dn = dn->sibling) if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn) break; if (dn == NULL) return -1; /* * When a device in K2 is powered down, we die on config * cycle accesses. Fix that here. */ for (i=0; i<2; i++) if (k2_skiplist[i] == dn) return 1; return 0; } #define U3_HT_CFA0(devfn, off) \ ((((unsigned int)devfn) << 8) | offset) #define U3_HT_CFA1(bus, devfn, off) \ (U3_HT_CFA0(devfn, off) \ + (((unsigned int)bus) << 16) \ + 0x01000000UL) static void __iomem *u3_ht_cfg_access(struct pci_controller *hose, u8 bus, u8 devfn, u8 offset, int *swap) { *swap = 1; if (bus == hose->first_busno) { if (devfn != 0) return hose->cfg_data + U3_HT_CFA0(devfn, offset); *swap = 0; return ((void __iomem *)hose->cfg_addr) + (offset << 2); } else return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset); } static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 *val) { struct pci_controller *hose; void __iomem *addr; int swap; hose = pci_bus_to_host(bus); if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; if (offset >= 0x100) return PCIBIOS_BAD_REGISTER_NUMBER; addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; switch 
(u3_ht_skip_device(hose, bus, devfn)) { case 0: break; case 1: switch (len) { case 1: *val = 0xff; break; case 2: *val = 0xffff; break; default: *val = 0xfffffffful; break; } return PCIBIOS_SUCCESSFUL; default: return PCIBIOS_DEVICE_NOT_FOUND; } /* * Note: the caller has already checked that offset is * suitably aligned and that len is 1, 2 or 4. */ switch (len) { case 1: *val = in_8(addr); break; case 2: *val = swap ? in_le16(addr) : in_be16(addr); break; default: *val = swap ? in_le32(addr) : in_be32(addr); break; } return PCIBIOS_SUCCESSFUL; } static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose; void __iomem *addr; int swap; hose = pci_bus_to_host(bus); if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; if (offset >= 0x100) return PCIBIOS_BAD_REGISTER_NUMBER; addr = u3_ht_cfg_access(hose, bus->number, devfn, offset, &swap); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; switch (u3_ht_skip_device(hose, bus, devfn)) { case 0: break; case 1: return PCIBIOS_SUCCESSFUL; default: return PCIBIOS_DEVICE_NOT_FOUND; } /* * Note: the caller has already checked that offset is * suitably aligned and that len is 1, 2 or 4. */ switch (len) { case 1: out_8(addr, val); break; case 2: swap ? out_le16(addr, val) : out_be16(addr, val); break; default: swap ? 
out_le32(addr, val) : out_be32(addr, val); break; } return PCIBIOS_SUCCESSFUL; } static struct pci_ops u3_ht_pci_ops = { .read = u3_ht_read_config, .write = u3_ht_write_config, }; #define U4_PCIE_CFA0(devfn, off) \ ((1 << ((unsigned int)PCI_SLOT(dev_fn))) \ | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \ | ((((unsigned int)(off)) >> 8) << 28) \ | (((unsigned int)(off)) & 0xfcU)) #define U4_PCIE_CFA1(bus, devfn, off) \ ((((unsigned int)(bus)) << 16) \ |(((unsigned int)(devfn)) << 8) \ | ((((unsigned int)(off)) >> 8) << 28) \ |(((unsigned int)(off)) & 0xfcU) \ |1UL) static void __iomem *u4_pcie_cfg_map_bus(struct pci_bus *bus, unsigned int dev_fn, int offset) { struct pci_controller *hose; unsigned int caddr; if (offset >= 0x1000) return NULL; hose = pci_bus_to_host(bus); if (!hose) return NULL; if (bus->number == hose->first_busno) { caddr = U4_PCIE_CFA0(dev_fn, offset); } else caddr = U4_PCIE_CFA1(bus->number, dev_fn, offset); /* Uninorth will return garbage if we don't read back the value ! */ do { out_le32(hose->cfg_addr, caddr); } while (in_le32(hose->cfg_addr) != caddr); offset &= 0x03; return hose->cfg_data + offset; } static struct pci_ops u4_pcie_pci_ops = { .map_bus = u4_pcie_cfg_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; static void pmac_pci_fixup_u4_of_node(struct pci_dev *dev) { /* Apple's device-tree "hides" the root complex virtual P2P bridge * on U4. However, Linux sees it, causing the PCI <-> OF matching * code to fail to properly match devices below it. This works around * it by setting the node of the bridge to point to the PHB node, * which is not entirely correct but fixes the matching code and * doesn't break anything else. It's also the simplest possible fix. 
*/ if (dev->dev.of_node == NULL) dev->dev.of_node = pcibios_get_phb_of_node(dev->bus); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node); #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 /* * For a bandit bridge, turn on cache coherency if necessary. * N.B. we could clean this up using the hose ops directly. */ static void __init init_bandit(struct pci_controller *bp) { unsigned int vendev, magic; int rev; /* read the word at offset 0 in config space for device 11 */ out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID); udelay(2); vendev = in_le32(bp->cfg_data); if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) + PCI_VENDOR_ID_APPLE) { /* read the revision id */ out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID); udelay(2); rev = in_8(bp->cfg_data); if (rev != BANDIT_REVID) printk(KERN_WARNING "Unknown revision %d for bandit\n", rev); } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) { printk(KERN_WARNING "bandit isn't? (%x)\n", vendev); return; } /* read the word at offset 0x50 */ out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC); udelay(2); magic = in_le32(bp->cfg_data); if ((magic & BANDIT_COHERENT) != 0) return; magic |= BANDIT_COHERENT; udelay(2); out_le32(bp->cfg_data, magic); printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n"); } /* * Tweak the PCI-PCI bridge chip on the blue & white G3s. */ static void __init init_p2pbridge(void) { struct device_node *p2pbridge; struct pci_controller* hose; u8 bus, devfn; u16 val; /* XXX it would be better here to identify the specific PCI-PCI bridge chip we have. */ p2pbridge = of_find_node_by_name(NULL, "pci-bridge"); if (p2pbridge == NULL || !of_node_name_eq(p2pbridge->parent, "pci")) goto done; if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) { DBG("Can't find PCI infos for PCI<->PCI bridge\n"); goto done; } /* Warning: At this point, we have not yet renumbered all busses. 
* So we must use OF walking to find out hose */ hose = pci_find_hose_for_OF_device(p2pbridge); if (!hose) { DBG("Can't find hose for PCI<->PCI bridge\n"); goto done; } if (early_read_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, &val) < 0) { printk(KERN_ERR "init_p2pbridge: couldn't read bridge" " control\n"); goto done; } val &= ~PCI_BRIDGE_CTL_MASTER_ABORT; early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val); done: of_node_put(p2pbridge); } static void __init init_second_ohare(void) { struct device_node *np = of_find_node_by_name(NULL, "pci106b,7"); unsigned char bus, devfn; unsigned short cmd; if (np == NULL) return; /* This must run before we initialize the PICs since the second * ohare hosts a PIC that will be accessed there. */ if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { struct pci_controller* hose = pci_find_hose_for_OF_device(np); if (!hose) { printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); of_node_put(np); return; } early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; cmd &= ~PCI_COMMAND_IO; early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); } has_second_ohare = 1; of_node_put(np); } /* * Some Apple desktop machines have a NEC PD720100A USB2 controller * on the motherboard. Open Firmware, on these, will disable the * EHCI part of it so it behaves like a pair of OHCI's. 
This fixup * code re-enables it ;) */ static void __init fixup_nec_usb2(void) { struct device_node *nec; for_each_node_by_name(nec, "usb") { struct pci_controller *hose; u32 data; const u32 *prop; u8 bus, devfn; prop = of_get_property(nec, "vendor-id", NULL); if (prop == NULL) continue; if (0x1033 != *prop) continue; prop = of_get_property(nec, "device-id", NULL); if (prop == NULL) continue; if (0x0035 != *prop) continue; prop = of_get_property(nec, "reg", NULL); if (prop == NULL) continue; devfn = (prop[0] >> 8) & 0xff; bus = (prop[0] >> 16) & 0xff; if (PCI_FUNC(devfn) != 0) continue; hose = pci_find_hose_for_OF_device(nec); if (!hose) continue; early_read_config_dword(hose, bus, devfn, 0xe4, &data); if (data & 1UL) { printk("Found NEC PD720100A USB2 chip with disabled" " EHCI, fixing up...\n"); data &= ~1UL; early_write_config_dword(hose, bus, devfn, 0xe4, data); } } } static void __init setup_bandit(struct pci_controller *hose, struct resource *addr) { hose->ops = &macrisc_pci_ops; hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); init_bandit(hose); } static int __init setup_uninorth(struct pci_controller *hose, struct resource *addr) { pci_add_flags(PCI_REASSIGN_ALL_BUS); has_uninorth = 1; hose->ops = &macrisc_pci_ops; hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); /* We "know" that the bridge at f2000000 has the PCI slots. */ return addr->start == 0xf2000000; } #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 static void __init setup_u3_agp(struct pci_controller* hose) { /* On G5, we move AGP up to high bus number so we don't need * to reassign bus numbers for HT. If we ever have P2P bridges * on AGP, we'll have to move pci_assign_all_busses to the * pci_controller structure so we enable it for AGP and not for * HT childs. 
* We hard code the address because of the different size of * the reg address cell, we shall fix that by killing struct * reg_property and using some accessor functions instead */ hose->first_busno = 0xf0; hose->last_busno = 0xff; has_uninorth = 1; hose->ops = &macrisc_pci_ops; hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); u3_agp = hose; } static void __init setup_u4_pcie(struct pci_controller* hose) { /* We currently only implement the "non-atomic" config space, to * be optimised later. */ hose->ops = &u4_pcie_pci_ops; hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); /* The bus contains a bridge from root -> device, we need to * make it visible on bus 0 so that we pick the right type * of config cycles. If we didn't, we would have to force all * config cycles to be type 1. So we override the "bus-range" * property here */ hose->first_busno = 0x00; hose->last_busno = 0xff; } static void __init parse_region_decode(struct pci_controller *hose, u32 decode) { unsigned long base, end, next = -1; int i, cur = -1; /* Iterate through all bits. 
We ignore the last bit as this region is * reserved for the ROM among other niceties */ for (i = 0; i < 31; i++) { if ((decode & (0x80000000 >> i)) == 0) continue; if (i < 16) { base = 0xf0000000 | (((u32)i) << 24); end = base + 0x00ffffff; } else { base = ((u32)i-16) << 28; end = base + 0x0fffffff; } if (base != next) { if (++cur >= 3) { printk(KERN_WARNING "PCI: Too many ranges !\n"); break; } hose->mem_resources[cur].flags = IORESOURCE_MEM; hose->mem_resources[cur].name = hose->dn->full_name; hose->mem_resources[cur].start = base; hose->mem_resources[cur].end = end; hose->mem_offset[cur] = 0; DBG(" %d: 0x%08lx-0x%08lx\n", cur, base, end); } else { DBG(" : -0x%08lx\n", end); hose->mem_resources[cur].end = end; } next = end + 1; } } static void __init setup_u3_ht(struct pci_controller* hose) { struct device_node *np = hose->dn; struct resource cfg_res, self_res; u32 decode; hose->ops = &u3_ht_pci_ops; /* Get base addresses from OF tree */ if (of_address_to_resource(np, 0, &cfg_res) || of_address_to_resource(np, 1, &self_res)) { printk(KERN_ERR "PCI: Failed to get U3/U4 HT resources !\n"); return; } /* Map external cfg space access into cfg_data and self registers * into cfg_addr */ hose->cfg_data = ioremap(cfg_res.start, 0x02000000); hose->cfg_addr = ioremap(self_res.start, resource_size(&self_res)); /* * /ht node doesn't expose a "ranges" property, we read the register * that controls the decoding logic and use that for memory regions. * The IO region is hard coded since it is fixed in HW as well. */ hose->io_base_phys = 0xf4000000; hose->pci_io_size = 0x00400000; hose->io_resource.name = np->full_name; hose->io_resource.start = 0; hose->io_resource.end = 0x003fffff; hose->io_resource.flags = IORESOURCE_IO; hose->first_busno = 0; hose->last_busno = 0xef; /* Note: fix offset when cfg_addr becomes a void * */ decode = in_be32(hose->cfg_addr + 0x80); DBG("PCI: Apple HT bridge decode register: 0x%08x\n", decode); /* NOTE: The decode register setup is a bit weird... 
region 0xf8000000 for example is marked as enabled in there while it's
 * actually the memory controller registers.
 * That means that we are incorrectly attributing it to HT.
 *
 * In a similar vein, region 0xf4000000 is actually the HT IO space but
 * also marked as enabled in here and 0xf9000000 is used by some other
 * internal bits of the northbridge.
 *
 * Unfortunately, we can't just mask out those bits as we would end
 * up with more regions than we can cope with (linux can only cope with
 * 3 memory regions for a PHB at this stage).
 *
 * So for now, we just do a little hack. We happen to -know- that
 * Apple firmware doesn't assign things below 0xfa000000 for that
 * bridge anyway so we mask out all bits we don't want.
 */
	decode &= 0x003fffff;

	/* Now parse the resulting bits and build resources */
	parse_region_decode(hose, decode);
}
#endif /* CONFIG_PPC64 */

/*
 * We assume that if we have a G3 powermac, we have one bridge called
 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
 */
/* Probe one host-bridge device-tree node: allocate a hose, work out its
 * bus range, and dispatch to the bridge-specific setup routine. */
static int __init pmac_add_bridge(struct device_node *dev)
{
	int len;
	struct pci_controller *hose;
	struct resource rsrc;
	char *disp_name;
	const int *bus_range;
	int primary = 1;

	DBG("Adding PCI host bridge %pOF\n", dev);

	/* Fetch host bridge registers address */
	of_address_to_resource(dev, 0, &rsrc);

	/* Get bus range if any */
	bus_range = of_get_property(dev, "bus-range", &len);
	if (bus_range == NULL || len < 2 * sizeof(int)) {
		printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
		       " bus 0\n", dev);
	}

	hose = pcibios_alloc_controller(dev);
	if (!hose)
		return -ENOMEM;
	hose->first_busno = bus_range ? bus_range[0] : 0;
	hose->last_busno = bus_range ?
bus_range[1] : 0xff; hose->controller_ops = pmac_pci_controller_ops; disp_name = NULL; /* 64 bits only bridges */ #ifdef CONFIG_PPC64 if (of_device_is_compatible(dev, "u3-agp")) { setup_u3_agp(hose); disp_name = "U3-AGP"; primary = 0; } else if (of_device_is_compatible(dev, "u3-ht")) { setup_u3_ht(hose); disp_name = "U3-HT"; primary = 1; } else if (of_device_is_compatible(dev, "u4-pcie")) { setup_u4_pcie(hose); disp_name = "U4-PCIE"; primary = 0; } printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:" " %d->%d\n", disp_name, hose->first_busno, hose->last_busno); #endif /* CONFIG_PPC64 */ /* 32 bits only bridges */ #ifdef CONFIG_PPC32 if (of_device_is_compatible(dev, "uni-north")) { primary = setup_uninorth(hose, &rsrc); disp_name = "UniNorth"; } else if (of_node_name_eq(dev, "pci")) { /* XXX assume this is a mpc106 (grackle) */ setup_grackle(hose); disp_name = "Grackle (MPC106)"; } else if (of_node_name_eq(dev, "bandit")) { setup_bandit(hose, &rsrc); disp_name = "Bandit"; } else if (of_node_name_eq(dev, "chaos")) { setup_chaos(hose, &rsrc); disp_name = "Chaos"; primary = 0; } printk(KERN_INFO "Found %s PCI host bridge at 0x%016llx. " "Firmware bus number: %d->%d\n", disp_name, (unsigned long long)rsrc.start, hose->first_busno, hose->last_busno); #endif /* CONFIG_PPC32 */ DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", hose, hose->cfg_addr, hose->cfg_data); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ pci_process_bridge_OF_ranges(hose, dev, primary); /* Fixup "bus-range" OF property */ fixup_bus_range(dev); /* create pci_dn's for DT nodes under this PHB */ if (IS_ENABLED(CONFIG_PPC64)) pci_devs_phb_init_dynamic(hose); return 0; } void pmac_pci_irq_fixup(struct pci_dev *dev) { #ifdef CONFIG_PPC32 /* Fixup interrupt for the modem/ethernet combo controller. * on machines with a second ohare chip. 
* The number in the device tree (27) is bogus (correct for * the ethernet-only board but not the combo ethernet/modem * board). The real interrupt is 28 on the second controller * -> 28+32 = 60. */ if (has_second_ohare && dev->vendor == PCI_VENDOR_ID_DEC && dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { dev->irq = irq_create_mapping(NULL, 60); irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); } #endif /* CONFIG_PPC32 */ } #ifdef CONFIG_PPC64 static int pmac_pci_root_bridge_prepare(struct pci_host_bridge *bridge) { struct pci_controller *hose = pci_bus_to_host(bridge->bus); struct device_node *np, *child; if (hose != u3_agp) return 0; /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We * assume there is no P2P bridge on the AGP bus, which should be a * safe assumptions for now. We should do something better in the * future though */ np = hose->dn; PCI_DN(np)->busno = 0xf0; for_each_child_of_node(np, child) PCI_DN(child)->busno = 0xf0; return 0; } #endif /* CONFIG_PPC64 */ void __init pmac_pci_init(void) { struct device_node *np, *root; struct device_node *ht __maybe_unused = NULL; pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN); root = of_find_node_by_path("/"); if (root == NULL) { printk(KERN_CRIT "pmac_pci_init: can't find root " "of device tree\n"); return; } for_each_child_of_node(root, np) { if (of_node_name_eq(np, "bandit") || of_node_name_eq(np, "chaos") || of_node_name_eq(np, "pci")) { if (pmac_add_bridge(np) == 0) of_node_get(np); } if (of_node_name_eq(np, "ht")) { of_node_get(np); ht = np; } } of_node_put(root); #ifdef CONFIG_PPC64 /* Probe HT last as it relies on the agp resources to be already * setup */ if (ht && pmac_add_bridge(ht) != 0) of_node_put(ht); ppc_md.pcibios_root_bridge_prepare = pmac_pci_root_bridge_prepare; /* pmac_check_ht_link(); */ #else /* CONFIG_PPC64 */ init_p2pbridge(); init_second_ohare(); fixup_nec_usb2(); /* We are still having some issues with the Xserve G4, enabling * some offset between bus number and domains for now when 
we * assign all busses should help for now */ if (pci_has_flag(PCI_REASSIGN_ALL_BUS)) pcibios_assign_bus_offset = 0x10; #endif } #ifdef CONFIG_PPC32 static bool pmac_pci_enable_device_hook(struct pci_dev *dev) { struct device_node* node; int updatecfg = 0; int uninorth_child; node = pci_device_to_OF_node(dev); /* We don't want to enable USB controllers absent from the OF tree * (iBook second controller) */ if (dev->vendor == PCI_VENDOR_ID_APPLE && dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node) { printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n", pci_name(dev)); return false; } if (!node) return true; uninorth_child = node->parent && of_device_is_compatible(node->parent, "uni-north"); /* Firewire & GMAC were disabled after PCI probe, the driver is * claiming them, we must re-enable them now. */ if (uninorth_child && of_node_name_eq(node, "firewire") && (of_device_is_compatible(node, "pci106b,18") || of_device_is_compatible(node, "pci106b,30") || of_device_is_compatible(node, "pci11c1,5811"))) { pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1); pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1); updatecfg = 1; } if (uninorth_child && of_node_name_eq(node, "ethernet") && of_device_is_compatible(node, "gmac")) { pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1); updatecfg = 1; } /* * Fixup various header fields on 32 bits. We don't do that on * 64 bits as some of these have strange values behind the HT * bridge and we must not, for example, enable MWI or set the * cache line size on them. 
*/ if (updatecfg) { u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16); pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2); } return true; } static void pmac_pci_fixup_ohci(struct pci_dev *dev) { struct device_node *node = pci_device_to_OF_node(dev); /* We don't want to assign resources to USB controllers * absent from the OF tree (iBook second controller) */ if (dev->class == PCI_CLASS_SERIAL_USB_OHCI && !node) dev->resource[0].flags = 0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_ANY_ID, pmac_pci_fixup_ohci); /* We power down some devices after they have been probed. They'll * be powered back on later on */ void __init pmac_pcibios_after_init(void) { struct device_node* nd; for_each_node_by_name(nd, "firewire") { if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") || of_device_is_compatible(nd, "pci106b,30") || of_device_is_compatible(nd, "pci11c1,5811")) && of_device_is_compatible(nd->parent, "uni-north")) { pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0); pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0); } } for_each_node_by_name(nd, "ethernet") { if (nd->parent && of_device_is_compatible(nd, "gmac") && of_device_is_compatible(nd->parent, "uni-north")) pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0); } } static void pmac_pci_fixup_cardbus(struct pci_dev *dev) { if (!machine_is(powermac)) return; /* * Fix the interrupt routing on the various cardbus bridges * used on powerbooks */ if (dev->vendor != PCI_VENDOR_ID_TI) return; if (dev->device == PCI_DEVICE_ID_TI_1130 || dev->device == PCI_DEVICE_ID_TI_1131) { u8 val; /* Enable PCI interrupt */ if (pci_read_config_byte(dev, 0x91, &val) == 0) pci_write_config_byte(dev, 0x91, val | 0x30); /* Disable ISA interrupt mode */ if (pci_read_config_byte(dev, 0x92, &val) == 0) pci_write_config_byte(dev, 0x92, 
val & ~0x06); } if (dev->device == PCI_DEVICE_ID_TI_1210 || dev->device == PCI_DEVICE_ID_TI_1211 || dev->device == PCI_DEVICE_ID_TI_1410 || dev->device == PCI_DEVICE_ID_TI_1510) { u8 val; /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA signal out the MFUNC0 pin */ if (pci_read_config_byte(dev, 0x8c, &val) == 0) pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2); /* Disable ISA interrupt mode */ if (pci_read_config_byte(dev, 0x92, &val) == 0) pci_write_config_byte(dev, 0x92, val & ~0x06); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus); static void pmac_pci_fixup_pciata(struct pci_dev *dev) { u8 progif = 0; /* * On PowerMacs, we try to switch any PCI ATA controller to * fully native mode */ if (!machine_is(powermac)) return; /* Some controllers don't have the class IDE */ if (dev->vendor == PCI_VENDOR_ID_PROMISE) switch(dev->device) { case PCI_DEVICE_ID_PROMISE_20246: case PCI_DEVICE_ID_PROMISE_20262: case PCI_DEVICE_ID_PROMISE_20263: case PCI_DEVICE_ID_PROMISE_20265: case PCI_DEVICE_ID_PROMISE_20267: case PCI_DEVICE_ID_PROMISE_20268: case PCI_DEVICE_ID_PROMISE_20269: case PCI_DEVICE_ID_PROMISE_20270: case PCI_DEVICE_ID_PROMISE_20271: case PCI_DEVICE_ID_PROMISE_20275: case PCI_DEVICE_ID_PROMISE_20276: case PCI_DEVICE_ID_PROMISE_20277: goto good; } /* Others, check PCI class */ if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) return; good: pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); if ((progif & 5) != 5) { printk(KERN_INFO "PCI: %s Forcing PCI IDE into native mode\n", pci_name(dev)); (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || (progif & 5) != 5) printk(KERN_ERR "Rewrite of PROGIF failed !\n"); else { /* Clear IO BARs, they will be reassigned */ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0); pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, 0); pci_write_config_dword(dev, PCI_BASE_ADDRESS_2, 0); pci_write_config_dword(dev, PCI_BASE_ADDRESS_3, 0); 
} } } DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata); #endif /* CONFIG_PPC32 */ /* * Disable second function on K2-SATA, it's broken * and disable IO BARs on first one */ static void fixup_k2_sata(struct pci_dev* dev) { int i; u16 cmd; if (PCI_FUNC(dev->devfn) > 0) { pci_read_config_word(dev, PCI_COMMAND, &cmd); cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY); pci_write_config_word(dev, PCI_COMMAND, cmd); for (i = 0; i < 6; i++) { dev->resource[i].start = dev->resource[i].end = 0; dev->resource[i].flags = 0; pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); } } else { pci_read_config_word(dev, PCI_COMMAND, &cmd); cmd &= ~PCI_COMMAND_IO; pci_write_config_word(dev, PCI_COMMAND, cmd); for (i = 0; i < 5; i++) { dev->resource[i].start = dev->resource[i].end = 0; dev->resource[i].flags = 0; pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata); /* * On U4 (aka CPC945) the PCIe root complex "P2P" bridge resource ranges aren't * configured by the firmware. The bridge itself seems to ignore them but it * causes problems with Linux which then re-assigns devices below the bridge, * thus changing addresses of those devices from what was in the device-tree, * which sucks when those are video cards using offb * * We could just mark it transparent but I prefer fixing up the resources to * properly show what's going on here, as I have some doubts about having them * badly configured potentially being an issue for DMA. * * We leave PIO alone, it seems to be fine * * Oh and there's another funny bug. The OF properties advertize the region * 0xf1000000..0xf1ffffff as being forwarded as memory space. But that's * actually not true, this region is the memory mapped config space. So we * also need to filter it out or we'll map things in the wrong place. 
 */
/* Rewrite the U4 PCIe P2P bridge's memory window to match the host
 * bridge's largest usable MMIO resource (see rationale in the comment
 * above). Registered below as a header fixup for the Apple U4 PCIe ID.
 */
static void fixup_u4_pcie(struct pci_dev* dev)
{
	struct pci_controller *host = pci_bus_to_host(dev->bus);
	struct resource *region = NULL;
	u32 reg;
	int i;

	/* Only do that on PowerMac */
	if (!machine_is(powermac))
		return;

	/* Find the largest MMIO region */
	for (i = 0; i < 3; i++) {
		struct resource *r = &host->mem_resources[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		/* Skip the 0xf0xxxxxx..f2xxxxxx regions, we know they
		 * are reserved by HW for other things
		 */
		if (r->start >= 0xf0000000 && r->start < 0xf3000000)
			continue;
		if (!region || resource_size(r) > resource_size(region))
			region = r;
	}
	/* Nothing found, bail */
	if (!region)
		return;

	/* Print things out */
	printk(KERN_INFO "PCI: Fixup U4 PCIe bridge range: %pR\n", region);

	/* Fixup bridge config space. We know it's a Mac, resource aren't
	 * offset so let's just blast them as-is. We also know that they
	 * fit in 32 bits
	 */
	/* Pack base and limit into the single dword at PCI_MEMORY_BASE:
	 * base goes in bits 15:4 (start >> 16, masked), limit in bits
	 * 31:20 (end masked to its top 12 bits).
	 */
	reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
	pci_write_config_dword(dev, PCI_MEMORY_BASE, reg);
	/* Clear the prefetchable window entirely (upper halves and base) */
	pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0);
	pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
	pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE,
			 fixup_u4_pcie);

#ifdef CONFIG_PPC64
/* Decide how a given PCI bus should be probed: root buses for U3 AGP,
 * U4 PCIe and U3 HT lack device-tree nodes for the bridge itself, so
 * they need normal config-space probing; everything else is taken from
 * the device tree.
 */
static int pmac_pci_probe_mode(struct pci_bus *bus)
{
	struct device_node *node = pci_bus_to_OF_node(bus);

	/* We need to use normal PCI probing for the AGP bus,
	 * since the device for the AGP bridge isn't in the tree.
	 * Same for the PCIe host on U4 and the HT host bridge.
	 */
	if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
				  of_device_is_compatible(node, "u4-pcie") ||
				  of_device_is_compatible(node, "u3-ht")))
		return PCI_PROBE_NORMAL;
	return PCI_PROBE_DEVTREE;
}
#endif /* CONFIG_PPC64 */

/* Controller ops installed for all PowerMac PHBs; members are
 * conditional because 32-bit and 64-bit Macs need different hooks.
 */
struct pci_controller_ops pmac_pci_controller_ops = {
#ifdef CONFIG_PPC64
	.probe_mode		= pmac_pci_probe_mode,
#endif
#ifdef CONFIG_PPC32
	.enable_device_hook	= pmac_pci_enable_device_hook,
#endif
};
linux-master
arch/powerpc/platforms/powermac/pci.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/xmon.h>
#include <asm/bootx.h>
#include <asm/errno.h>
#include <asm/pmac_feature.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/btext.h>
#include <asm/time.h>
#include <asm/udbg.h>

/*
 * This implementation is "special", it can "patch" the current
 * udbg implementation and work on top of it. It must thus be
 * initialized last
 */

/* Saved previous udbg hooks so we can chain to them. */
static void (*udbg_adb_old_putc)(char c);
static int (*udbg_adb_old_getc)(void);
static int (*udbg_adb_old_getc_poll)(void);

/* Which ADB controller (if any) supplies keyboard input. */
static enum {
	input_adb_none,
	input_adb_pmu,
	input_adb_cuda,
} input_type = input_adb_none;

/* Shared with the ADB interrupt path: while xmon_wants_key is set, the
 * ADB driver deposits raw keycodes in xmon_adb_keycode.
 */
int xmon_wants_key, xmon_adb_keycode;

/* Poll whichever low-level ADB transport is in use so it makes progress
 * while we busy-wait for input.
 */
static inline void udbg_adb_poll(void)
{
#ifdef CONFIG_ADB_PMU
	if (input_type == input_adb_pmu)
		pmu_poll_adb();
#endif /* CONFIG_ADB_PMU */
#ifdef CONFIG_ADB_CUDA
	if (input_type == input_adb_cuda)
		cuda_poll();
#endif /* CONFIG_ADB_CUDA */
}

#ifdef CONFIG_BOOTX_TEXT

static int udbg_adb_use_btext;
static int xmon_adb_shiftstate;

/* ADB scancode -> ASCII tables (unshifted and shifted). Index is the
 * raw keycode; 0 entries are keys with no character mapping.
 */
static unsigned char xmon_keytab[128] =
	"asdfhgzxcv\000bqwer"				/* 0x00 - 0x0f */
	"yt123465=97-80]o"				/* 0x10 - 0x1f */
	"u[ip\rlj'k;\\,/nm."				/* 0x20 - 0x2f */
	"\t `\177\0\033\0\0\0\0\0\0\0\0\0\0"		/* 0x30 - 0x3f */
	"\0.\0*\0+\0\0\0\0\0/\r\0-\0"			/* 0x40 - 0x4f */
	"\0\0000123456789\0\0\0";			/* 0x50 - 0x5f */

static unsigned char xmon_shift_keytab[128] =
	"ASDFHGZXCV\000BQWER"				/* 0x00 - 0x0f */
	"YT!@#$^%+(&_*)}O"				/* 0x10 - 0x1f */
	"U{IP\rLJ\"K:|<?NM>"				/* 0x20 - 0x2f */
	"\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0"		/* 0x30 - 0x3f */
	"\0.\0*\0+\0\0\0\0\0/\r\0-\0"			/* 0x40 - 0x4f */
	"\0\0000123456789\0\0\0";			/* 0x50 - 0x5f */

/* Blocking character read over raw ADB, used when btext is the console.
 * Busy-waits, blinking a block cursor via btext, until either a chained
 * getc_poll delivers a character or the ADB layer posts a keycode; raw
 * keycodes are translated through the tables above, tracking shift state
 * and ignoring key-release (high bit set) events.
 */
static int udbg_adb_local_getc(void)
{
	int k, t, on;

	xmon_wants_key = 1;
	for (;;) {
		xmon_adb_keycode = -1;
		t = 0;
		on = 0;
		k = -1;
		do {
			if (--t < 0) {
				/* Toggle a blinking block cursor (0xdb) */
				on = 1 - on;
				btext_drawchar(on? 0xdb: 0x20);
				btext_drawchar('\b');
				t = 200000;
			}
			udbg_adb_poll();
			if (udbg_adb_old_getc_poll)
				k = udbg_adb_old_getc_poll();
		} while (k == -1 && xmon_adb_keycode == -1);
		if (on)
			btext_drawstring(" \b");
		if (k != -1)
			return k;
		k = xmon_adb_keycode;

		/* test for shift keys */
		if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
			xmon_adb_shiftstate = (k & 0x80) == 0;
			continue;
		}
		if (k >= 0x80)
			continue;	/* ignore up transitions */
		k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
		if (k != 0)
			break;
	}
	xmon_wants_key = 0;
	return k;
}
#endif /* CONFIG_BOOTX_TEXT */

/* udbg getc hook: prefer the local ADB/btext reader when available,
 * otherwise fall back to whatever getc we replaced.
 */
static int udbg_adb_getc(void)
{
#ifdef CONFIG_BOOTX_TEXT
	if (udbg_adb_use_btext && input_type != input_adb_none)
		return udbg_adb_local_getc();
#endif
	if (udbg_adb_old_getc)
		return udbg_adb_old_getc();
	return -1;
}

/* getc_poll() is not really used, unless you have the xmon-over modem
 * hack that doesn't quite concern us here, thus we just poll the low level
 * ADB driver to prevent it from timing out and call back the original poll
 * routine.
 */
static int udbg_adb_getc_poll(void)
{
	udbg_adb_poll();

	if (udbg_adb_old_getc_poll)
		return udbg_adb_old_getc_poll();
	return -1;
}

/* udbg putc hook: mirror output to btext (if enabled) and chain to the
 * previous putc so the original console keeps working.
 */
static void udbg_adb_putc(char c)
{
#ifdef CONFIG_BOOTX_TEXT
	if (udbg_adb_use_btext)
		btext_drawchar(c);
#endif
	if (udbg_adb_old_putc)
		return udbg_adb_old_putc(c);
}

/* Very early setup: if a btext display exists, route udbg output to it. */
void __init udbg_adb_init_early(void)
{
#ifdef CONFIG_BOOTX_TEXT
	if (btext_find_display(1) == 0) {
		udbg_adb_use_btext = 1;
		udbg_putc = udbg_adb_putc;
	}
#endif
}

/* Install the ADB/btext udbg layer on top of the current udbg hooks.
 * Returns -ENODEV (leaving output hooks installed) when no ADB keyboard
 * or no usable ADB controller is found; 0 on full success.
 */
int __init udbg_adb_init(int force_btext)
{
	struct device_node *np;

	/* Capture existing callbacks */
	udbg_adb_old_putc = udbg_putc;
	udbg_adb_old_getc = udbg_getc;
	udbg_adb_old_getc_poll = udbg_getc_poll;

	/* Check if our early init was already called */
	if (udbg_adb_old_putc == udbg_adb_putc)
		udbg_adb_old_putc = NULL;
#ifdef CONFIG_BOOTX_TEXT
	if (udbg_adb_old_putc == btext_drawchar)
		udbg_adb_old_putc = NULL;
#endif

	/* Set ours as output */
	udbg_putc = udbg_adb_putc;
	udbg_getc = udbg_adb_getc;
	udbg_getc_poll = udbg_adb_getc_poll;

#ifdef CONFIG_BOOTX_TEXT
	/* Check if we should use btext output */
	if (btext_find_display(force_btext) == 0)
		udbg_adb_use_btext = 1;
#endif

	/* See if there is a keyboard in the device tree with a parent
	 * of type "adb". If not, we return a failure, but we keep the
	 * btext output set for now
	 */
	for_each_node_by_name(np, "keyboard") {
		struct device_node *parent = of_get_parent(np);
		int found = of_node_is_type(parent, "adb");
		of_node_put(parent);
		if (found)
			break;
	}
	if (np == NULL)
		return -ENODEV;
	of_node_put(np);

#ifdef CONFIG_ADB_PMU
	if (find_via_pmu())
		input_type = input_adb_pmu;
#endif
#ifdef CONFIG_ADB_CUDA
	if (find_via_cuda())
		input_type = input_adb_cuda;
#endif

	/* Same as above: nothing found, keep btext set for output */
	if (input_type == input_adb_none)
		return -ENODEV;

	return 0;
}
linux-master
arch/powerpc/platforms/powermac/udbg_adb.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * udbg for zilog scc ports as found on Apple PowerMacs
 *
 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
 */
#include <linux/types.h>
#include <linux/of.h>
#include <asm/udbg.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/pmac_feature.h>

extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);

/* Status bits read from the SCC control register. */
#define	SCC_TXRDY	4
#define SCC_RXRDY	1

/* MMIO pointers to the SCC control and data registers. */
static volatile u8 __iomem *sccc;
static volatile u8 __iomem *sccd;

/* Busy-wait until the transmitter is ready, then send one byte;
 * '\n' is expanded to CRLF.
 */
static void udbg_scc_putc(char c)
{
	if (sccc) {
		while ((in_8(sccc) & SCC_TXRDY) == 0)
			;
		out_8(sccd, c);
		if (c == '\n')
			udbg_scc_putc('\r');
	}
}

/* Non-blocking read: returns a byte if one is pending, else -1. */
static int udbg_scc_getc_poll(void)
{
	if (sccc) {
		if ((in_8(sccc) & SCC_RXRDY) != 0)
			return in_8(sccd);
		else
			return -1;
	}
	return -1;
}

/* Blocking read: busy-waits for a received byte; -1 if not initialized. */
static int udbg_scc_getc(void)
{
	if (sccc) {
		while ((in_8(sccc) & SCC_RXRDY) == 0)
			;
		return in_8(sccd);
	}
	return -1;
}

/* SCC init sequence as (register, value) pairs, written in order.
 * Entries [1] and [3] (the baud rate divisor) are patched at runtime.
 */
static unsigned char scc_inittab[] = {
    13, 0,		/* set baud rate divisor */
    12, 0,
    14, 1,		/* baud rate gen enable, src=rtxc */
    11, 0x50,		/* clocks = br gen */
    5,  0xea,		/* tx 8 bits, assert DTR & RTS */
    4,  0x46,		/* x16 clock, 1 stop */
    3,  0xc1,		/* rx enable, 8 bits */
};

/* Locate the ESCC in the device tree, pick the OF stdout channel (or
 * channel A when force_scc is set), map its registers, program the chip
 * and install the udbg hooks. All OF node references taken along the
 * way are dropped at "bail".
 */
void __init udbg_scc_init(int force_scc)
{
	const u32 *reg;
	unsigned long addr;
	struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
	struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
	const char *path;
	int i;

	escc = of_find_node_by_name(NULL, "escc");
	if (escc == NULL)
		goto bail;
	macio = of_get_parent(escc);
	if (macio == NULL)
		goto bail;
	path = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (path != NULL)
		stdout = of_find_node_by_path(path);
	for_each_child_of_node(escc, ch) {
		if (ch == stdout) {
			of_node_put(ch_def);
			ch_def = of_node_get(ch);
		}
		if (of_node_name_eq(ch, "ch-a")) {
			of_node_put(ch_a);
			ch_a = of_node_get(ch);
		}
	}
	if (ch_def == NULL && !force_scc)
		goto bail;

	ch = ch_def ? ch_def : ch_a;

	/* Get address within mac-io ASIC */
	reg = of_get_property(escc, "reg", NULL);
	if (reg == NULL)
		goto bail;
	addr = reg[0];

	/* Get address of mac-io PCI itself */
	reg = of_get_property(macio, "assigned-addresses", NULL);
	if (reg == NULL)
		goto bail;
	addr += reg[2];

	/* Lock the serial port */
	pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
			  PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);

	/* Channel A registers sit 0x20 above channel B's */
	if (ch == ch_a)
		addr += 0x20;
	sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
	sccc += addr & ~PAGE_MASK;
	sccd = sccc + 0x10;

	mb();

	/* Drain/settle the chip before resetting */
	for (i = 20000; i != 0; --i)
		in_8(sccc);
	out_8(sccc, 0x09);		/* reset A or B side */
	out_8(sccc, 0xc0);

	/* If SCC was the OF output port, read the BRG value, else
	 * Setup for 38400 or 57600 8N1 depending on the machine */
	if (ch_def != NULL) {
		out_8(sccc, 13);
		scc_inittab[1] = in_8(sccc);
		out_8(sccc, 12);
		scc_inittab[3] = in_8(sccc);
	} else if (of_machine_is_compatible("RackMac1,1")
		   || of_machine_is_compatible("RackMac1,2")
		   || of_machine_is_compatible("MacRISC4")) {
		/* Xserves and G5s default to 57600 */
		scc_inittab[1] = 0;
		scc_inittab[3] = 0;
	} else {
		/* Others default to 38400 */
		scc_inittab[1] = 0;
		scc_inittab[3] = 1;
	}

	for (i = 0; i < sizeof(scc_inittab); ++i)
		out_8(sccc, scc_inittab[i]);

	udbg_putc = udbg_scc_putc;
	udbg_getc = udbg_scc_getc;
	udbg_getc_poll = udbg_scc_getc_poll;

	udbg_puts("Hello World !\n");

 bail:
	of_node_put(macio);
	of_node_put(escc);
	of_node_put(stdout);
	of_node_put(ch_def);
	of_node_put(ch_a);
}

#ifdef CONFIG_PPC64
/* Real-mode putc using non-translated MMIO accessors; same TXRDY wait
 * and CRLF expansion as udbg_scc_putc.
 */
static void udbg_real_scc_putc(char c)
{
	while ((real_readb(sccc) & SCC_TXRDY) == 0)
		;
	real_writeb(c, sccd);
	if (c == '\n')
		udbg_real_scc_putc('\r');
}

/* Install real-mode output on the fixed legacy SCC addresses;
 * input is not supported in this mode.
 */
void __init udbg_init_pmac_realmode(void)
{
	sccc = (volatile u8 __iomem *)0x80013020ul;
	sccd = (volatile u8 __iomem *)0x80013030ul;

	udbg_putc = udbg_real_scc_putc;
	udbg_getc = NULL;
	udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC64 */
linux-master
arch/powerpc/platforms/powermac/udbg_scc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Powermac setup and early boot code plus other random bits.
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras ([email protected])
 *
 * Derived from "arch/alpha/kernel/setup.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Maintained by Benjamin Herrenschmidt ([email protected])
 */

/*
 * bootup setup stuff..
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/bitops.h>
#include <linux/suspend.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/smu.h>
#include <asm/pmc.h>
#include <asm/udbg.h>

#include "pmac.h"

#undef SHOW_GATWICK_IRQS

/* Set by pmac_show_cpuinfo when an L2 cache node is found; consumed by
 * ohare_init when enabling the cache.
 */
static int has_l2cache;

/* Non-zero on NewWorld machines (those with an OF interrupt tree). */
int pmac_newworld;

static int current_root_goodness = -1;

/* sda1 - slightly silly choice */
#define DEFAULT_ROOT_DEVICE MKDEV(SCSI_DISK0_MAJOR, 1)

sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);

/* /proc/cpuinfo backend: prints model, motherboard compatibles, the
 * parsed motherboard id/flags and L2 cache details from the device tree.
 */
static void pmac_show_cpuinfo(struct seq_file *m)
{
	struct device_node *np;
	const char *pp;
	int plen;
	int mbmodel;
	unsigned int mbflags;
	char* mbname;

	mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_MODEL, 0);
	mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_FLAGS, 0);
	if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
			      (long) &mbname) != 0)
		mbname = "Unknown";

	/* find motherboard type */
	seq_printf(m, "machine\t\t: ");
	np = of_find_node_by_path("/");
	if (np != NULL) {
		pp = of_get_property(np, "model", NULL);
		if (pp != NULL)
			seq_printf(m, "%s\n", pp);
		else
			seq_printf(m, "PowerMac\n");
		pp = of_get_property(np, "compatible", &plen);
		if (pp != NULL) {
			seq_printf(m, "motherboard\t:");
			/* "compatible" is a list of NUL-separated strings */
			while (plen > 0) {
				int l = strlen(pp) + 1;
				seq_printf(m, " %s", pp);
				plen -= l;
				pp += l;
			}
			seq_printf(m, "\n");
		}
		of_node_put(np);
	} else
		seq_printf(m, "PowerMac\n");

	/* print parsed model */
	seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
	seq_printf(m, "pmac flags\t: %08x\n", mbflags);

	/* find l2 cache info */
	np = of_find_node_by_name(NULL, "l2-cache");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cache");
	if (np != NULL) {
		const unsigned int *ic =
			of_get_property(np, "i-cache-size", NULL);
		const unsigned int *dc =
			of_get_property(np, "d-cache-size", NULL);
		seq_printf(m, "L2 cache\t:");
		has_l2cache = 1;
		if (of_property_read_bool(np, "cache-unified") && dc) {
			seq_printf(m, " %dK unified", *dc / 1024);
		} else {
			if (ic)
				seq_printf(m, " %dK instruction", *ic / 1024);
			if (dc)
				seq_printf(m, "%s %dK data",
					   (ic? " +": ""), *dc / 1024);
		}
		pp = of_get_property(np, "ram-type", NULL);
		if (pp)
			seq_printf(m, " %s", pp);
		seq_printf(m, "\n");
		of_node_put(np);
	}

	/* Indicate newworld/oldworld */
	seq_printf(m, "pmac-generation\t: %s\n",
		   pmac_newworld ? "NewWorld" : "OldWorld");
}

#ifndef CONFIG_ADB_CUDA
/* Stub used when the CUDA driver is compiled out: only warns if the
 * hardware is actually CUDA-based.
 */
int __init find_via_cuda(void)
{
	struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");

	if (!dn)
		return 0;
	of_node_put(dn);
	printk("WARNING ! Your machine is CUDA-based but your kernel\n");
	printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
	return 0;
}
#endif

#ifndef CONFIG_ADB_PMU
/* Same stub idea as find_via_cuda, for PMU-based machines. */
int __init find_via_pmu(void)
{
	struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");

	if (!dn)
		return 0;
	of_node_put(dn);
	printk("WARNING ! Your machine is PMU-based but your kernel\n");
	printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
	return 0;
}
#endif

#ifndef CONFIG_PMAC_SMU
int __init smu_init(void)
{
	/* should check and warn if SMU is present */
	return 0;
}
#endif

#ifdef CONFIG_PPC32
static volatile u32 *sysctrl_regs;

/* Map the system-control register block and, on ohare machines with a
 * recent-enough PSX memory controller, turn on the L2 cache.
 */
static void __init ohare_init(void)
{
	struct device_node *dn;

	/* this area has the CPU identification register
	   and some registers used by smp boards */
	sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);

	/*
	 * Turn on the L2 cache.
	 * We assume that we have a PSX memory controller iff
	 * we have an ohare I/O controller.
	 */
	dn = of_find_node_by_name(NULL, "ohare");
	if (dn) {
		of_node_put(dn);
		if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
			if (sysctrl_regs[4] & 0x10)
				sysctrl_regs[4] |= 0x04000020;
			else
				sysctrl_regs[4] |= 0x04000000;
			if(has_l2cache)
				printk(KERN_INFO "Level 2 cache enabled\n");
		}
	}
}

/* Apply a firmware-supplied L2CR override from the first CPU node that
 * carries an "l2cr-value" property.
 */
static void __init l2cr_init(void)
{
	/* Checks "l2cr-value" property in the registry */
	if (cpu_has_feature(CPU_FTR_L2CR)) {
		struct device_node *np;

		for_each_of_cpu_node(np) {
			const unsigned int *l2cr =
				of_get_property(np, "l2cr-value", NULL);
			if (l2cr) {
				_set_L2CR(0);
				_set_L2CR(*l2cr);
				pr_info("L2CR overridden (0x%x), backside cache is %s\n",
					*l2cr, ((*l2cr) & 0x80000000) ?
					"enabled" : "disabled");
			}
			of_node_put(np);
			break;
		}
	}
}
#endif

/* Arch setup: estimate loops_per_jiffy from the CPU clock, detect
 * NewWorld vs OldWorld, probe the system controller (CUDA/PMU/SMU),
 * pick a default root device and honour the "adb_sync" boot option.
 */
static void __init pmac_setup_arch(void)
{
	struct device_node *cpu, *ic;
	const int *fp;
	unsigned long pvr;

	pvr = PVR_VER(mfspr(SPRN_PVR));

	/* Set loops_per_jiffy to a half-way reasonable value,
	   for use until calibrate_delay gets called. */
	loops_per_jiffy = 50000000 / HZ;

	for_each_of_cpu_node(cpu) {
		fp = of_get_property(cpu, "clock-frequency", NULL);
		if (fp != NULL) {
			if (pvr >= 0x30 && pvr < 0x80)
				/* PPC970 etc. */
				loops_per_jiffy = *fp / (3 * HZ);
			else if (pvr == 4 || pvr >= 8)
				/* 604, G3, G4 etc. */
				loops_per_jiffy = *fp / HZ;
			else
				/* 603, etc. */
				loops_per_jiffy = *fp / (2 * HZ);
			of_node_put(cpu);
			break;
		}
	}

	/* See if newworld or oldworld */
	ic = of_find_node_with_property(NULL, "interrupt-controller");
	if (ic) {
		pmac_newworld = 1;
		of_node_put(ic);
	}

#ifdef CONFIG_PPC32
	ohare_init();
	l2cr_init();
#endif /* CONFIG_PPC32 */

	find_via_cuda();
	find_via_pmu();
	smu_init();

#if IS_ENABLED(CONFIG_NVRAM)
	pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
		ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif

#ifdef CONFIG_ADB
	if (strstr(boot_command_line, "adb_sync")) {
		extern int __adb_probe_sync;
		__adb_probe_sync = 1;
	}
#endif /* CONFIG_ADB */
}

static int initializing = 1;

static int pmac_late_init(void)
{
	initializing = 0;
	return 0;
}
machine_late_initcall(powermac, pmac_late_init);

void note_bootable_part(dev_t dev, int part, int goodness);
/*
 * This is __ref because we check for "initializing" before
 * touching any of the __init sensitive things and "initializing"
 * will be false after __init time. This can't be __init because it
 * can be called whenever a disk is first accessed.
 */
/* Record a "bootable" partition as root device candidate, unless the
 * user already gave root= on the command line or a better candidate
 * was seen before.
 */
void __ref note_bootable_part(dev_t dev, int part, int goodness)
{
	char *p;

	if (!initializing)
		return;
	if ((goodness <= current_root_goodness) &&
	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
		return;
	p = strstr(boot_command_line, "root=");
	if (p != NULL && (p == boot_command_line || p[-1] == ' '))
		return;
	ROOT_DEV = dev + part;
	current_root_goodness = goodness;
}

#ifdef CONFIG_ADB_CUDA
/* Ask CUDA to reset the system, then spin polling until it does. */
static void __noreturn cuda_restart(void)
{
	struct adb_request req;

	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
	for (;;)
		cuda_poll();
}

/* Ask CUDA to power the machine down, then spin polling. */
static void __noreturn cuda_shutdown(void)
{
	struct adb_request req;

	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
	for (;;)
		cuda_poll();
}
#else
#define cuda_restart()
#define cuda_shutdown()
#endif

#ifndef CONFIG_ADB_PMU
#define pmu_restart()
#define pmu_shutdown()
#endif

#ifndef CONFIG_PMAC_SMU
#define smu_restart()
#define smu_shutdown()
#endif

/* Dispatch restart to whichever system controller is present. */
static void __noreturn pmac_restart(char *cmd)
{
	switch (sys_ctrler) {
	case SYS_CTRLER_CUDA:
		cuda_restart();
		break;
	case SYS_CTRLER_PMU:
		pmu_restart();
		break;
	case SYS_CTRLER_SMU:
		smu_restart();
		break;
	default: ;
	}
	while (1) ;
}

/* Dispatch power-off to whichever system controller is present. */
static void __noreturn pmac_power_off(void)
{
	switch (sys_ctrler) {
	case SYS_CTRLER_CUDA:
		cuda_shutdown();
		break;
	case SYS_CTRLER_PMU:
		pmu_shutdown();
		break;
	case SYS_CTRLER_SMU:
		smu_shutdown();
		break;
	default: ;
	}
	while (1) ;
}

static void __noreturn
pmac_halt(void)
{
	pmac_power_off();
}

/*
 * Early initialization.
 */
static void __init pmac_init(void)
{
	/* Enable early btext debug if requested */
	if (strstr(boot_command_line, "btextdbg")) {
		udbg_adb_init_early();
		register_early_udbg_console();
	}

	/* Probe motherboard chipset */
	pmac_feature_init();

	/* Initialize debug stuff */
	udbg_scc_init(!!strstr(boot_command_line, "sccdbg"));
	udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));

#ifdef CONFIG_PPC64
	iommu_init_early_dart(&pmac_pci_controller_ops);
#endif

	/* SMP Init has to be done early as we need to patch up
	 * cpu_possible_mask before interrupt stacks are allocated
	 * or kaboom...
	 */
#ifdef CONFIG_SMP
	pmac_setup_smp();
#endif
}

/* Create platform devices for framebuffers, SMU and the thermal FCU;
 * some device trees lack an "fcu" type node, hence the hardcoded path
 * fallback.
 */
static int __init pmac_declare_of_platform_devices(void)
{
	struct device_node *np;

	np = of_find_node_by_name(NULL, "valkyrie");
	if (np) {
		of_platform_device_create(np, "valkyrie", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_name(NULL, "platinum");
	if (np) {
		of_platform_device_create(np, "platinum", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_type(NULL, "smu");
	if (np) {
		of_platform_device_create(np, "smu", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_type(NULL, "fcu");
	if (np == NULL) {
		/* Some machines have strangely broken device-tree */
		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
	}
	if (np) {
		of_platform_device_create(np, "temperature", NULL);
		of_node_put(np);
	}

	return 0;
}
machine_device_initcall(powermac, pmac_declare_of_platform_devices);

#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
/*
 * This is called very early, as part of console_init() (typically just after
 * time_init()). This function is respondible for trying to find a good
 * default console on serial ports. It tries to match the open firmware
 * default output with one of the available serial console drivers.
 */
static int __init check_pmac_serial_console(void)
{
	struct device_node *prom_stdout = NULL;
	int offset = 0;
	const char *name;
#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
	char *devname = "ttyS";
#else
	char *devname = "ttyPZ";
#endif

	pr_debug(" -> check_pmac_serial_console()\n");

	/* The user has requested a console so this is already set up. */
	if (strstr(boot_command_line, "console=")) {
		pr_debug(" console was specified !\n");
		return -EBUSY;
	}

	if (!of_chosen) {
		pr_debug(" of_chosen is NULL !\n");
		return -ENODEV;
	}

	/* We are getting a weird phandle from OF ... */
	/* ... So use the full path instead */
	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name == NULL) {
		pr_debug(" no linux,stdout-path !\n");
		return -ENODEV;
	}
	prom_stdout = of_find_node_by_path(name);
	if (!prom_stdout) {
		pr_debug(" can't find stdout package %s !\n", name);
		return -ENODEV;
	}
	pr_debug("stdout is %pOF\n", prom_stdout);

	/* ch-a maps to port 0, ch-b to port 1 */
	if (of_node_name_eq(prom_stdout, "ch-a"))
		offset = 0;
	else if (of_node_name_eq(prom_stdout, "ch-b"))
		offset = 1;
	else
		goto not_found;
	of_node_put(prom_stdout);

	pr_debug("Found serial console at %s%d\n", devname, offset);

	return add_preferred_console(devname, offset, NULL);

 not_found:
	pr_debug("No preferred console found !\n");
	of_node_put(prom_stdout);
	return -ENODEV;
}
console_initcall(check_pmac_serial_console);
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init pmac_probe(void)
{
	if (!of_machine_is_compatible("Power Macintosh") &&
	    !of_machine_is_compatible("MacRISC"))
		return 0;

#ifdef CONFIG_PPC32
	/* isa_io_base gets set in pmac_pci_init */
	DMA_MODE_READ = 1;
	DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */

	pm_power_off = pmac_power_off;

	pmac_init();

	return 1;
}

define_machine(powermac) {
	.name			= "PowerMac",
	.probe			= pmac_probe,
	.setup_arch		= pmac_setup_arch,
	.discover_phbs		= pmac_pci_init,
	.show_cpuinfo		= pmac_show_cpuinfo,
	.init_IRQ		= pmac_pic_init,
	.get_irq		= NULL,	/* changed later */
	.pci_irq_fixup		= pmac_pci_irq_fixup,
	.restart		= pmac_restart,
	.halt			= pmac_halt,
	.time_init		= pmac_time_init,
	.get_boot_time		= pmac_get_boot_time,
	.set_rtc_time		= pmac_set_rtc_time,
	.get_rtc_time		= pmac_get_rtc_time,
	.calibrate_decr		= pmac_calibrate_decr,
	.feature_call		= pmac_do_feature_call,
	.progress		= udbg_progress,
#ifdef CONFIG_PPC64
	.power_save		= power4_idle,
	.enable_pmcs		= power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
	.pcibios_after_init	= pmac_pcibios_after_init,
	.phys_mem_access_prot	= pci_phys_mem_access_prot,
#endif
};
linux-master
arch/powerpc/platforms/powermac/setup.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for periodic interrupts (100 per second) and for getting * the current time from the RTC on Power Macintoshes. * * We use the decrementer register for our periodic interrupts. * * Paul Mackerras August 1996. * Copyright (C) 1996 Paul Mackerras. * Copyright (C) 2003-2005 Benjamin Herrenschmidt. * */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/time.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <linux/interrupt.h> #include <linux/hardirq.h> #include <linux/rtc.h> #include <linux/of_address.h> #include <asm/early_ioremap.h> #include <asm/sections.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/nvram.h> #include <asm/smu.h> #include "pmac.h" #undef DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif /* * Calibrate the decrementer frequency with the VIA timer 1. 
 */
#define VIA_TIMER_FREQ_6	4700000	/* time 1 frequency * 6 */

/* VIA registers */
#define RS		0x200		/* skip between registers */
#define T1CL		(4*RS)		/* Timer 1 ctr/latch (low 8 bits) */
#define T1CH		(5*RS)		/* Timer 1 counter (high 8 bits) */
#define T1LL		(6*RS)		/* Timer 1 latch (low 8 bits) */
#define T1LH		(7*RS)		/* Timer 1 latch (high 8 bits) */
#define ACR		(11*RS)		/* Auxiliary control register */
#define IFR		(13*RS)		/* Interrupt flag register */

/* Bits in ACR */
#define T1MODE		0xc0		/* Timer 1 mode */
#define T1MODE_CONT	0x40		/*  continuous interrupts */

/* Bits in IFR and IER */
#define T1_INT		0x40		/* Timer 1 interrupt */

/*
 * Early time init: return the GMT offset (in seconds) stored in XPRAM,
 * or 0 when XPRAM is not available (no CONFIG_NVRAM, or 64-bit build).
 */
long __init pmac_time_init(void)
{
	s32 delta = 0;
#if defined(CONFIG_NVRAM) && defined(CONFIG_PPC32)
	int dst;

	/* 24-bit big-endian signed delta at XPRAM machine-location 0x9..0xb */
	delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
	delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
	delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
	/* Manual sign-extension of the 24-bit value to 32 bits */
	if (delta & 0x00800000UL)
		delta |= 0xFF000000UL;
	dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
	printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
		dst ? "on" : "off");
#endif
	return delta;
}

#ifdef CONFIG_PMAC_SMU
/* Read the RTC through the SMU; returns 0 (epoch) on failure. */
static time64_t smu_get_time(void)
{
	struct rtc_time tm;

	if (smu_get_rtc_time(&tm, 1))
		return 0;
	return rtc_tm_to_time64(&tm);
}
#endif

/* Can't be __init, it's called when suspending and resuming */
time64_t pmac_get_boot_time(void)
{
	/* Get the time from the RTC, used only at boot time */
	switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
	case SYS_CTRLER_CUDA:
		return cuda_get_time();
#endif
#ifdef CONFIG_ADB_PMU
	case SYS_CTRLER_PMU:
		return pmu_get_time();
#endif
#ifdef CONFIG_PMAC_SMU
	case SYS_CTRLER_SMU:
		return smu_get_time();
#endif
	default:
		return 0;
	}
}

/*
 * Read the RTC into *tm, dispatching on which system controller
 * (CUDA / PMU / SMU) this machine uses. Leaves *tm untouched when no
 * known controller is present.
 */
void pmac_get_rtc_time(struct rtc_time *tm)
{
	/* Get the time from the RTC, used only at boot time */
	switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
	case SYS_CTRLER_CUDA:
		rtc_time64_to_tm(cuda_get_time(), tm);
		break;
#endif
#ifdef CONFIG_ADB_PMU
	case SYS_CTRLER_PMU:
		rtc_time64_to_tm(pmu_get_time(), tm);
		break;
#endif
#ifdef CONFIG_PMAC_SMU
	case SYS_CTRLER_SMU:
		smu_get_rtc_time(tm, 1);
		break;
#endif
	default:
		;
	}
}

/*
 * Program the RTC from *tm via the system controller.
 * Returns the controller's status, or -ENODEV with no known controller.
 */
int pmac_set_rtc_time(struct rtc_time *tm)
{
	switch (sys_ctrler) {
#ifdef CONFIG_ADB_CUDA
	case SYS_CTRLER_CUDA:
		return cuda_set_rtc_time(tm);
#endif
#ifdef CONFIG_ADB_PMU
	case SYS_CTRLER_PMU:
		return pmu_set_rtc_time(tm);
#endif
#ifdef CONFIG_PMAC_SMU
	case SYS_CTRLER_SMU:
		return smu_set_rtc_time(tm, 1);
#endif
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_PPC32
/*
 * Calibrate the decrementer register using VIA timer 1.
 * This is used both on powermacs and CHRP machines.
 */
static int __init via_calibrate_decr(void)
{
	struct device_node *vias;
	volatile unsigned char __iomem *via;
	/* One VIA-timer period of ~10ms: timer runs at VIA_TIMER_FREQ_6/6 Hz */
	int count = VIA_TIMER_FREQ_6 / 100;
	unsigned int dstart, dend;
	struct resource rsrc;

	vias = of_find_node_by_name(NULL, "via-cuda");
	if (vias == NULL)
		vias = of_find_node_by_name(NULL, "via-pmu");
	if (vias == NULL)
		vias = of_find_node_by_name(NULL, "via");
	if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
		of_node_put(vias);
		return 0;
	}
	of_node_put(vias);
	via = early_ioremap(rsrc.start, resource_size(&rsrc));
	if (via == NULL) {
		printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
		return 0;
	}

	/* set timer 1 for continuous interrupts */
	out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
	/* set the counter to a small value */
	out_8(&via[T1CH], 2);
	/* set the latch to `count' */
	out_8(&via[T1LL], count);
	out_8(&via[T1LH], count >> 8);
	/* wait until it hits 0 */
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	/* Snapshot the decrementer across exactly one VIA timer period */
	dstart = get_dec();
	/* clear the interrupt & wait until it hits 0 again */
	in_8(&via[T1CL]);
	while ((in_8(&via[IFR]) & T1_INT) == 0)
		;
	dend = get_dec();

	/* decrementer ticks per period * 100 periods/s, scaled by the /6 */
	ppc_tb_freq = (dstart - dend) * 100 / 6;

	early_iounmap((void *)via, resource_size(&rsrc));

	return 1;
}
#endif

/*
 * Query the OF and get the decr frequency.
 */
void __init pmac_calibrate_decr(void)
{
	generic_calibrate_decr();

#ifdef CONFIG_PPC32
	/* We assume MacRISC2 machines have correct device-tree
	 * calibration. That's better since the VIA itself seems
	 * to be slightly off. --BenH
	 */
	if (!of_machine_is_compatible("MacRISC2") &&
	    !of_machine_is_compatible("MacRISC3") &&
	    !of_machine_is_compatible("MacRISC4"))
		if (via_calibrate_decr())
			return;

	/* Special case: QuickSilver G4s seem to have a badly calibrated
	 * timebase-frequency in OF, VIA is much better on these. We should
	 * probably implement calibration based on the KL timer on these
	 * machines anyway... -BenH
	 */
	if (of_machine_is_compatible("PowerMac3,5"))
		if (via_calibrate_decr())
			return;
#endif
}
linux-master
arch/powerpc/platforms/powermac/time.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2002 Benjamin Herrenschmidt ([email protected]) * * Todo: - add support for the OF persistent properties */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/nvram.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/memblock.h> #include <linux/completion.h> #include <linux/spinlock.h> #include <linux/of_address.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/nvram.h> #include "pmac.h" #define DEBUG #ifdef DEBUG #define DBG(x...) printk(x) #else #define DBG(x...) #endif #define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */ #define CORE99_SIGNATURE 0x5a #define CORE99_ADLER_START 0x14 /* On Core99, nvram is either a sharp, a micron or an AMD flash */ #define SM_FLASH_STATUS_DONE 0x80 #define SM_FLASH_STATUS_ERR 0x38 #define SM_FLASH_CMD_ERASE_CONFIRM 0xd0 #define SM_FLASH_CMD_ERASE_SETUP 0x20 #define SM_FLASH_CMD_RESET 0xff #define SM_FLASH_CMD_WRITE_SETUP 0x40 #define SM_FLASH_CMD_CLEAR_STATUS 0x50 #define SM_FLASH_CMD_READ_STATUS 0x70 /* CHRP NVRAM header */ struct chrp_header { u8 signature; u8 cksum; u16 len; char name[12]; u8 data[]; }; struct core99_header { struct chrp_header hdr; u32 adler; u32 generation; u32 reserved[2]; }; /* * Read and write the non-volatile RAM on PowerMacs and CHRP machines. 
 */
static int nvram_naddrs;		/* how many OF address resources, -1 = PMU */
static volatile unsigned char __iomem *nvram_data;
static int is_core_99;			/* Core99 flash-based NVRAM? */
static int core99_bank;			/* currently active flash bank (0 or 1) */
static int nvram_partitions[3];		/* offsets of OF/XPRAM/NR partitions */
// XXX Turn that into a sem
static DEFINE_RAW_SPINLOCK(nv_lock);

/* Flash-chip specific bank helpers, selected in core99_nvram_setup() */
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);

static char *nvram_image;		/* RAM shadow of the active flash bank */


/* Read one byte from the RAM shadow; 0xff when not initialized. */
static unsigned char core99_nvram_read_byte(int addr)
{
	if (nvram_image == NULL)
		return 0xff;
	return nvram_image[addr];
}

/* Write one byte to the RAM shadow (flushed later by core99_nvram_sync). */
static void core99_nvram_write_byte(int addr, unsigned char val)
{
	if (nvram_image == NULL)
		return;
	nvram_image[addr] = val;
}

/* ppc_md.nvram_read hook: bulk read from the RAM shadow, clamped to size. */
static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
{
	int i;

	if (nvram_image == NULL)
		return -ENODEV;
	if (*index > NVRAM_SIZE)
		return 0;

	i = *index;
	if (i + count > NVRAM_SIZE)
		count = NVRAM_SIZE - i;

	memcpy(buf, &nvram_image[i], count);
	*index = i + count;
	return count;
}

/* ppc_md.nvram_write hook: bulk write into the RAM shadow, clamped to size. */
static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
{
	int i;

	if (nvram_image == NULL)
		return -ENODEV;
	if (*index > NVRAM_SIZE)
		return 0;

	i = *index;
	if (i + count > NVRAM_SIZE)
		count = NVRAM_SIZE - i;

	memcpy(&nvram_image[i], buf, count);
	*index = i + count;
	return count;
}

static ssize_t core99_nvram_size(void)
{
	if (nvram_image == NULL)
		return -ENODEV;
	return NVRAM_SIZE;
}

#ifdef CONFIG_PPC32
static volatile unsigned char __iomem *nvram_addr;	/* indirect-access address port */
static int nvram_mult;					/* byte stride for direct access */

static ssize_t ppc32_nvram_size(void)
{
	return NVRAM_SIZE;
}

/* Memory-mapped NVRAM, one byte every nvram_mult bytes. */
static unsigned char direct_nvram_read_byte(int addr)
{
	return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
}

static void direct_nvram_write_byte(int addr, unsigned char val)
{
	out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
}

/*
 * Indirect NVRAM: write the high address bits to a latch register, then
 * access the data window. The spinlock keeps the two-step sequence atomic.
 */
static unsigned char indirect_nvram_read_byte(int addr)
{
	unsigned char val;
	unsigned long flags;

	raw_spin_lock_irqsave(&nv_lock, flags);
	out_8(nvram_addr, addr >> 5);
	val = in_8(&nvram_data[(addr & 0x1f) << 4]);
	raw_spin_unlock_irqrestore(&nv_lock, flags);

	return val;
}

static void indirect_nvram_write_byte(int addr, unsigned char val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&nv_lock, flags);
	out_8(nvram_addr, addr >> 5);
	out_8(&nvram_data[(addr & 0x1f) << 4], val);
	raw_spin_unlock_irqrestore(&nv_lock, flags);
}

#ifdef CONFIG_ADB_PMU

/* PMU request completion callback: wake the waiter if one was armed. */
static void pmu_nvram_complete(struct adb_request *req)
{
	if (req->arg)
		complete((struct completion *)req->arg);
}

/*
 * Read one NVRAM byte through the PMU. Sleeps on a completion when the
 * system is fully up; busy-polls otherwise (early boot / suspend paths).
 * Returns 0xff when the PMU request cannot be issued.
 */
static unsigned char pmu_nvram_read_byte(int addr)
{
	struct adb_request req;
	DECLARE_COMPLETION_ONSTACK(req_complete);

	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
	if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
			(addr >> 8) & 0xff, addr & 0xff))
		return 0xff;
	if (system_state == SYSTEM_RUNNING)
		wait_for_completion(&req_complete);
	while (!req.complete)
		pmu_poll();
	return req.reply[0];
}

/* Write one NVRAM byte through the PMU; same wait strategy as the read. */
static void pmu_nvram_write_byte(int addr, unsigned char val)
{
	struct adb_request req;
	DECLARE_COMPLETION_ONSTACK(req_complete);

	req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
	if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
			(addr >> 8) & 0xff, addr & 0xff, val))
		return;
	if (system_state == SYSTEM_RUNNING)
		wait_for_completion(&req_complete);
	while (!req.complete)
		pmu_poll();
}

#endif /* CONFIG_ADB_PMU */

#endif /* CONFIG_PPC32 */

/*
 * CHRP partition-header checksum: byte-sum of signature + len + name,
 * folded down to 8 bits with end-around carry.
 */
static u8 chrp_checksum(struct chrp_header* hdr)
{
	u8 *ptr;
	u16 sum = hdr->signature;
	for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
		sum += *ptr;
	while (sum > 0xFF)
		sum = (sum & 0xFF) + (sum>>8);
	return sum;
}

/*
 * Adler-32 style checksum over the NVRAM image past CORE99_ADLER_START.
 * NOTE(review): the periodic reduction reduces `high` twice and never
 * `low` — presumably a long-standing typo for "low %= ...; high %= ...".
 * Do NOT "fix" it: both the writer and the checker use this same code,
 * and changing it would invalidate checksums already stored in flash.
 */
static u32 core99_calc_adler(u8 *buffer)
{
	int cnt;
	u32 low, high;

	buffer += CORE99_ADLER_START;
	low = 1;
	high = 0;
	for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
		if ((cnt % 5000) == 0) {
			high  %= 65521UL;
			high %= 65521UL;
		}
		low += buffer[cnt];
		high += low;
	}
	low %= 65521UL;
	high %= 65521UL;

	return (high << 16) | low;
}

/*
 * Validate a Core99 NVRAM bank (signature, header checksum, adler).
 * Returns its generation counter, or 0 when the bank is invalid.
 */
static u32 __init core99_check(u8 *datas)
{
	struct core99_header* hdr99 = (struct core99_header*)datas;

	if (hdr99->hdr.signature != CORE99_SIGNATURE) {
		DBG("Invalid signature\n");
		return 0;
	}
	if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
		DBG("Invalid checksum\n");
		return 0;
	}
	if (hdr99->adler != core99_calc_adler(datas)) {
		DBG("Invalid adler\n");
		return 0;
	}
	return hdr99->generation;
}

/*
 * Erase one bank of Sharp/Micron flash, then verify it reads all-0xff.
 * NOTE(review): `base` is derived from the global core99_bank rather than
 * the `bank` argument (callers always pass core99_bank, so this is
 * equivalent today) — confirm before adding new callers.
 */
static int sm_erase_bank(int bank)
{
	int stat;
	unsigned long timeout;

	u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);

	out_8(base, SM_FLASH_CMD_ERASE_SETUP);
	out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);

	/* Poll the status register until the chip reports done (bounded) */
	timeout = 0;
	do {
		if (++timeout > 1000000) {
			printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
			break;
		}
		out_8(base, SM_FLASH_CMD_READ_STATUS);
		stat = in_8(base);
	} while (!(stat & SM_FLASH_STATUS_DONE));

	out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
	out_8(base, SM_FLASH_CMD_RESET);

	if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
		printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * Program one bank of Sharp/Micron flash byte-by-byte (write-setup command
 * before each data byte, status poll after), then verify by comparison.
 * Uses the global core99_bank for the base address, like sm_erase_bank.
 */
static int sm_write_bank(int bank, u8* datas)
{
	int i, stat = 0;
	unsigned long timeout;

	u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);

	for (i=0; i<NVRAM_SIZE; i++) {
		out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
		udelay(1);
		out_8(base+i, datas[i]);
		timeout = 0;
		do {
			if (++timeout > 1000000) {
				printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
				break;
			}
			out_8(base, SM_FLASH_CMD_READ_STATUS);
			stat = in_8(base);
		} while (!(stat & SM_FLASH_STATUS_DONE));
		if (!(stat & SM_FLASH_STATUS_DONE))
			break;
	}
	out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
	out_8(base, SM_FLASH_CMD_RESET);
	if (memcmp(base, datas, NVRAM_SIZE)) {
		printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * Erase one bank of AMD flash using the standard 0x555/0x2AA unlock plus
 * sector-erase (0x30) command sequence; completion is detected when two
 * consecutive reads return identical data (toggle-bit polling).
 * Base address comes from the global core99_bank, as in sm_erase_bank.
 */
static int amd_erase_bank(int bank)
{
	int stat = 0;
	unsigned long timeout;

	u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: AMD Erasing bank %d...\n", bank);

	/* Unlock 1 */
	out_8(base+0x555, 0xaa);
	udelay(1);
	/* Unlock 2 */
	out_8(base+0x2aa, 0x55);
	udelay(1);

	/* Sector-Erase */
	out_8(base+0x555, 0x80);
	udelay(1);
	out_8(base+0x555, 0xaa);
	udelay(1);
	out_8(base+0x2aa, 0x55);
	udelay(1);
	out_8(base, 0x30);
	udelay(1);

	timeout = 0;
	do {
		if (++timeout > 1000000) {
			printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
			break;
		}
		/* Toggle bit flips while busy, so two reads differ until done */
		stat = in_8(base) ^ in_8(base);
	} while (stat != 0);

	/* Reset */
	out_8(base, 0xf0);
	udelay(1);

	if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
		printk(KERN_ERR "nvram: AMD flash erase failed !\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * Program one bank of AMD flash: per-byte unlock + program (0xA0)
 * sequence with toggle-bit completion polling, then verify by comparison.
 */
static int amd_write_bank(int bank, u8* datas)
{
	int i, stat = 0;
	unsigned long timeout;

	u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;

	DBG("nvram: AMD Writing bank %d...\n", bank);

	for (i=0; i<NVRAM_SIZE; i++) {
		/* Unlock 1 */
		out_8(base+0x555, 0xaa);
		udelay(1);
		/* Unlock 2 */
		out_8(base+0x2aa, 0x55);
		udelay(1);

		/* Write single word */
		out_8(base+0x555, 0xa0);
		udelay(1);
		out_8(base+i, datas[i]);

		timeout = 0;
		do {
			if (++timeout > 1000000) {
				printk(KERN_ERR "nvram: AMD flash write timeout !\n");
				break;
			}
			stat = in_8(base) ^ in_8(base);
		} while (stat != 0);
		if (stat != 0)
			break;
	}

	/* Reset */
	out_8(base, 0xf0);
	udelay(1);

	if (memcmp(base, datas, NVRAM_SIZE)) {
		printk(KERN_ERR "nvram: AMD flash write failed !\n");
		return -ENXIO;
	}
	return 0;
}

/*
 * Locate the OF, XPRAM and NR partitions. NewWorld machines use CHRP
 * partition headers walked 16 bytes at a time; OldWorld machines use
 * fixed, historical offsets.
 */
static void __init lookup_partitions(void)
{
	u8 buffer[17];
	int i, offset;
	struct chrp_header* hdr;

	if (pmac_newworld) {
		nvram_partitions[pmac_nvram_OF] = -1;
		nvram_partitions[pmac_nvram_XPRAM] = -1;
		nvram_partitions[pmac_nvram_NR] = -1;
		hdr = (struct chrp_header *)buffer;

		offset = 0;
		buffer[16] = 0;	/* guarantee hdr->name is NUL-terminated */
		do {
			for (i=0;i<16;i++)
				buffer[i] = ppc_md.nvram_read_val(offset+i);
			if (!strcmp(hdr->name, "common"))
				nvram_partitions[pmac_nvram_OF] = offset + 0x10;
			if (!strcmp(hdr->name, "APL,MacOS75")) {
				nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
				nvram_partitions[pmac_nvram_NR] = offset + 0x110;
			}
			/* hdr->len counts 16-byte units including the header */
			offset += (hdr->len * 0x10);
		} while(offset < NVRAM_SIZE);
	} else {
		nvram_partitions[pmac_nvram_OF] = 0x1800;
		nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
		nvram_partitions[pmac_nvram_NR] = 0x1400;
	}
	DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
	DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
	DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
}

/*
 * Flush the RAM shadow to flash if it differs from the active bank:
 * bump the generation, recompute header checksum and adler, then erase
 * and write the *other* bank so a failed write leaves the old bank valid.
 */
static void core99_nvram_sync(void)
{
	struct core99_header* hdr99;
	unsigned long flags;

	if (!is_core_99 || !nvram_data || !nvram_image)
		return;

	raw_spin_lock_irqsave(&nv_lock, flags);
	if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
		NVRAM_SIZE))
		goto bail;

	DBG("Updating nvram...\n");

	hdr99 = (struct core99_header*)nvram_image;
	hdr99->generation++;
	hdr99->hdr.signature = CORE99_SIGNATURE;
	hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
	hdr99->adler = core99_calc_adler(nvram_image);
	core99_bank = core99_bank ? 0 : 1;
	if (core99_erase_bank)
		if (core99_erase_bank(core99_bank)) {
			printk("nvram: Error erasing bank %d\n", core99_bank);
			goto bail;
		}
	if (core99_write_bank)
		if (core99_write_bank(core99_bank, nvram_image))
			printk("nvram: Error writing bank %d\n", core99_bank);
 bail:
	raw_spin_unlock_irqrestore(&nv_lock, flags);

#ifdef DEBUG
	mdelay(2000);
#endif
}

/*
 * Set up Core99 flash NVRAM: map both banks, pick the one with the
 * higher generation counter, copy it into a RAM shadow, install the
 * ppc_md hooks and select the chip-specific erase/write helpers.
 */
static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
{
	int i;
	u32 gen_bank0, gen_bank1;

	if (nvram_naddrs < 1) {
		printk(KERN_ERR "nvram: no address\n");
		return -EINVAL;
	}
	nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
	if (!nvram_image)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      NVRAM_SIZE);
	nvram_data = ioremap(addr, NVRAM_SIZE*2);
	nvram_naddrs = 1; /* Make sure we get the correct case */

	DBG("nvram: Checking bank 0...\n");

	gen_bank0 = core99_check((u8 *)nvram_data);
	gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
	core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;

	DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
	DBG("nvram: Active bank is: %d\n", core99_bank);

	for (i=0; i<NVRAM_SIZE; i++)
		nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];

	ppc_md.nvram_read_val	= core99_nvram_read_byte;
	ppc_md.nvram_write_val	= core99_nvram_write_byte;
	ppc_md.nvram_read	= core99_nvram_read;
	ppc_md.nvram_write	= core99_nvram_write;
	ppc_md.nvram_size	= core99_nvram_size;
	ppc_md.nvram_sync	= core99_nvram_sync;
	ppc_md.machine_shutdown	= core99_nvram_sync;
	/*
	 * Maybe we could be smarter here though making an exclusive list
	 * of known flash chips is a bit nasty as older OF didn't provide us
	 * with a useful "compatible" entry. A solution would be to really
	 * identify the chip using flash id commands and base ourselves on
	 * a list of known chips IDs
	 */
	if (of_device_is_compatible(dp, "amd-0137")) {
		core99_erase_bank = amd_erase_bank;
		core99_write_bank = amd_write_bank;
	} else {
		core99_erase_bank = sm_erase_bank;
		core99_write_bank = sm_write_bank;
	}
	return 0;
}

/*
 * Probe the "nvram" device-tree node and install the matching access
 * method: Core99 flash, direct/indirect memory-mapped (PPC32), or via
 * the PMU. On success the partition table is scanned as well.
 */
int __init pmac_nvram_init(void)
{
	struct device_node *dp;
	struct resource r1, r2;
	unsigned int s1 = 0, s2 = 0;
	int err = 0;

	nvram_naddrs = 0;

	dp = of_find_node_by_name(NULL, "nvram");
	if (dp == NULL) {
		printk(KERN_ERR "Can't find NVRAM device\n");
		return -ENODEV;
	}

	/* Try to obtain an address */
	if (of_address_to_resource(dp, 0, &r1) == 0) {
		nvram_naddrs = 1;
		s1 = resource_size(&r1);
		if (of_address_to_resource(dp, 1, &r2) == 0) {
			nvram_naddrs = 2;
			s2 = resource_size(&r2);
		}
	}

	is_core_99 = of_device_is_compatible(dp, "nvram,flash");
	if (is_core_99) {
		err = core99_nvram_setup(dp, r1.start);
		goto bail;
	}

#ifdef CONFIG_PPC32
	if (machine_is(chrp) && nvram_naddrs == 1) {
		nvram_data = ioremap(r1.start, s1);
		nvram_mult = 1;
		ppc_md.nvram_read_val	= direct_nvram_read_byte;
		ppc_md.nvram_write_val	= direct_nvram_write_byte;
		ppc_md.nvram_size	= ppc32_nvram_size;
	} else if (nvram_naddrs == 1) {
		nvram_data = ioremap(r1.start, s1);
		/* bytes are spread over the window; derive the stride */
		nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
		ppc_md.nvram_read_val	= direct_nvram_read_byte;
		ppc_md.nvram_write_val	= direct_nvram_write_byte;
		ppc_md.nvram_size	= ppc32_nvram_size;
	} else if (nvram_naddrs == 2) {
		nvram_addr = ioremap(r1.start, s1);
		nvram_data = ioremap(r2.start, s2);
		ppc_md.nvram_read_val	= indirect_nvram_read_byte;
		ppc_md.nvram_write_val	= indirect_nvram_write_byte;
		ppc_md.nvram_size	= ppc32_nvram_size;
	} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
		nvram_naddrs = -1;
		ppc_md.nvram_read_val	= pmu_nvram_read_byte;
		ppc_md.nvram_write_val	= pmu_nvram_write_byte;
		ppc_md.nvram_size	= ppc32_nvram_size;
#endif /* CONFIG_ADB_PMU */
	} else {
		printk(KERN_ERR "Incompatible type of NVRAM\n");
		err = -ENXIO;
	}
#endif /* CONFIG_PPC32 */
bail:
	of_node_put(dp);
	if (err == 0)
		lookup_partitions();
	return err;
}

/* Return the NVRAM offset of a partition (pmac_nvram_OF/XPRAM/NR), or -1. */
int pmac_get_partition(int partition)
{
	return nvram_partitions[partition];
}

/*
 * Read one byte from the XPRAM partition; 0xff when the partition is
 * missing or the address is out of range.
 * NOTE(review): the bound is `> 0x100` rather than `>= 0x100`, so offset
 * 0x100 is accepted — presumably a historical off-by-one kept for
 * compatibility; confirm before tightening.
 */
u8 pmac_xpram_read(int xpaddr)
{
	int offset = pmac_get_partition(pmac_nvram_XPRAM);

	if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
		return 0xff;

	return ppc_md.nvram_read_val(xpaddr + offset);
}

/* Write one byte to the XPRAM partition; silently ignores bad addresses. */
void pmac_xpram_write(int xpaddr, u8 data)
{
	int offset = pmac_get_partition(pmac_nvram_XPRAM);

	if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
		return;

	ppc_md.nvram_write_val(xpaddr + offset, data);
}

EXPORT_SYMBOL(pmac_get_partition);
EXPORT_SYMBOL(pmac_xpram_read);
EXPORT_SYMBOL(pmac_xpram_write);
linux-master
arch/powerpc/platforms/powermac/nvram.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/powermac/low_i2c.c * * Copyright (C) 2003-2005 Ben. Herrenschmidt ([email protected]) * * The linux i2c layer isn't completely suitable for our needs for various * reasons ranging from too late initialisation to semantics not perfectly * matching some requirements of the apple platform functions etc... * * This file thus provides a simple low level unified i2c interface for * powermac that covers the various types of i2c busses used in Apple machines. * For now, keywest, PMU and SMU, though we could add Cuda, or other bit * banging busses found on older chipsets in earlier machines if we ever need * one of them. * * The drivers in this file are synchronous/blocking. In addition, the * keywest one is fairly slow due to the use of msleep instead of interrupts * as the interrupt is currently used by i2c-keywest. In the long run, we * might want to get rid of those high-level interfaces to linux i2c layer * either completely (converting all drivers) or replacing them all with a * single stub driver on top of this one. Once done, the interrupt will be * available for our use. */ #undef DEBUG #undef DEBUG_LOW #include <linux/types.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/export.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/of_irq.h> #include <asm/keylargo.h> #include <asm/uninorth.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/smu.h> #include <asm/pmac_pfunc.h> #include <asm/pmac_low_i2c.h> #ifdef DEBUG #define DBG(x...) do {\ printk(KERN_DEBUG "low_i2c:" x); \ } while(0) #else #define DBG(x...) #endif #ifdef DEBUG_LOW #define DBG_LOW(x...) do {\ printk(KERN_DEBUG "low_i2c:" x); \ } while(0) #else #define DBG_LOW(x...) 
#endif static int pmac_i2c_force_poll = 1; /* * A bus structure. Each bus in the system has such a structure associated. */ struct pmac_i2c_bus { struct list_head link; struct device_node *controller; struct device_node *busnode; int type; int flags; struct i2c_adapter adapter; void *hostdata; int channel; /* some hosts have multiple */ int mode; /* current mode */ struct mutex mutex; int opened; int polled; /* open mode */ struct platform_device *platform_dev; struct lock_class_key lock_key; /* ops */ int (*open)(struct pmac_i2c_bus *bus); void (*close)(struct pmac_i2c_bus *bus); int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, u32 subaddr, u8 *data, int len); }; static LIST_HEAD(pmac_i2c_busses); /* * Keywest implementation */ struct pmac_i2c_host_kw { struct mutex mutex; /* Access mutex for use by * i2c-keywest */ void __iomem *base; /* register base address */ int bsteps; /* register stepping */ int speed; /* speed */ int irq; u8 *data; unsigned len; int state; int rw; int polled; int result; struct completion complete; spinlock_t lock; struct timer_list timeout_timer; }; /* Register indices */ typedef enum { reg_mode = 0, reg_control, reg_status, reg_isr, reg_ier, reg_addr, reg_subaddr, reg_data } reg_t; /* The Tumbler audio equalizer can be really slow sometimes */ #define KW_POLL_TIMEOUT (2*HZ) /* Mode register */ #define KW_I2C_MODE_100KHZ 0x00 #define KW_I2C_MODE_50KHZ 0x01 #define KW_I2C_MODE_25KHZ 0x02 #define KW_I2C_MODE_DUMB 0x00 #define KW_I2C_MODE_STANDARD 0x04 #define KW_I2C_MODE_STANDARDSUB 0x08 #define KW_I2C_MODE_COMBINED 0x0C #define KW_I2C_MODE_MODE_MASK 0x0C #define KW_I2C_MODE_CHAN_MASK 0xF0 /* Control register */ #define KW_I2C_CTL_AAK 0x01 #define KW_I2C_CTL_XADDR 0x02 #define KW_I2C_CTL_STOP 0x04 #define KW_I2C_CTL_START 0x08 /* Status register */ #define KW_I2C_STAT_BUSY 0x01 #define KW_I2C_STAT_LAST_AAK 0x02 #define KW_I2C_STAT_LAST_RW 0x04 #define KW_I2C_STAT_SDA 0x08 #define KW_I2C_STAT_SCL 0x10 /* IER & ISR registers */ 
#define KW_I2C_IRQ_DATA		0x01
#define KW_I2C_IRQ_ADDR		0x02
#define KW_I2C_IRQ_STOP		0x04
#define KW_I2C_IRQ_START	0x08
#define KW_I2C_IRQ_MASK		0x0F

/* State machine states */
enum {
	state_idle,
	state_addr,
	state_read,
	state_write,
	state_stop,
	state_dead
};

/* Debug helper: report an interrupt arriving in an unexpected state.
 * Expects `host` and `isr` to be in scope at the expansion site. */
#define WRONG_STATE(name) do {\
	printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \
	       "(isr: %02x)\n", \
	       name, __kw_state_names[host->state], isr); \
	} while(0)

static const char *__kw_state_names[] = {
	"state_idle",
	"state_addr",
	"state_read",
	"state_write",
	"state_stop",
	"state_dead"
};

/* Read a Keywest register; registers are spaced 1 << bsteps bytes apart. */
static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg)
{
	return readb(host->base + (((unsigned int)reg) << host->bsteps));
}

/* Write a Keywest register; the dummy read of reg_subaddr posts the write. */
static inline void __kw_write_reg(struct pmac_i2c_host_kw *host,
				  reg_t reg, u8 val)
{
	writeb(val, host->base + (((unsigned)reg) << host->bsteps));
	(void)__kw_read_reg(host, reg_subaddr);
}

/* Shorthands; both rely on a local variable named `host` */
#define kw_write_reg(reg, val)	__kw_write_reg(host, reg, val)
#define kw_read_reg(reg)	__kw_read_reg(host, reg)

/* Poll for an interrupt condition in polled mode.
 * Returns the masked ISR bits, or 0 on timeout. */
static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host)
{
	int i, j;
	u8 isr;

	for (i = 0; i < 1000; i++) {
		isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
		if (isr != 0)
			return isr;

		/* This code is used with the timebase frozen, we cannot rely
		 * on udelay nor schedule when in polled mode !
		 * For now, just use a bogus loop....
		 */
		if (host->polled) {
			for (j = 1; j < 100000; j++)
				mb();
		} else
			msleep(1);
	}
	return isr;
}

/* Queue a STOP condition and record the transfer result. */
static void kw_i2c_do_stop(struct pmac_i2c_host_kw *host, int result)
{
	kw_write_reg(reg_control, KW_I2C_CTL_STOP);
	host->state = state_stop;
	host->result = result;
}

/* Main transfer state machine, called from irq, timeout timer or the
 * polled loop with host->lock held (irq/timer paths).  `isr` == 0 means
 * a timeout with no interrupt condition pending. */
static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
{
	u8 ack;

	DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n",
		__kw_state_names[host->state], isr);

	/* Spurious interrupt while no transfer is in flight: just ack it */
	if (host->state == state_idle) {
		printk(KERN_WARNING "low_i2c: Keywest got an out of state"
		       " interrupt, ignoring\n");
		kw_write_reg(reg_isr, isr);
		return;
	}

	/* Timeout path: abort, or finish tearing down if already stopping */
	if (isr == 0) {
		printk(KERN_WARNING "low_i2c: Timeout in i2c transfer"
		       " on keywest !\n");
		if (host->state != state_stop) {
			kw_i2c_do_stop(host, -EIO);
			return;
		}
		ack = kw_read_reg(reg_status);
		if (ack & KW_I2C_STAT_BUSY)
			kw_write_reg(reg_status, 0);
		host->state = state_idle;
		kw_write_reg(reg_ier, 0x00);
		if (!host->polled)
			complete(&host->complete);
		return;
	}

	/* Address phase completed */
	if (isr & KW_I2C_IRQ_ADDR) {
		ack = kw_read_reg(reg_status);
		if (host->state != state_addr) {
			WRONG_STATE("KW_I2C_IRQ_ADDR");
			kw_i2c_do_stop(host, -EIO);
		}
		if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
			/* Slave NAKed the address */
			host->result = -ENXIO;
			host->state = state_stop;
			DBG_LOW("KW: NAK on address\n");
		} else {
			if (host->len == 0)
				kw_i2c_do_stop(host, 0);
			else if (host->rw) {
				host->state = state_read;
				/* AAK: ack all but the final byte */
				if (host->len > 1)
					kw_write_reg(reg_control,
						     KW_I2C_CTL_AAK);
			} else {
				host->state = state_write;
				kw_write_reg(reg_data, *(host->data++));
				host->len--;
			}
		}
		kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
	}

	/* Data byte moved */
	if (isr & KW_I2C_IRQ_DATA) {
		if (host->state == state_read) {
			*(host->data++) = kw_read_reg(reg_data);
			host->len--;
			kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
			if (host->len == 0)
				host->state = state_stop;
			else if (host->len == 1)
				/* drop AAK so the last byte is NAKed */
				kw_write_reg(reg_control, 0);
		} else if (host->state == state_write) {
			ack = kw_read_reg(reg_status);
			if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
				DBG_LOW("KW: nack on data write\n");
				host->result = -EFBIG;
				host->state = state_stop;
			} else if (host->len) {
				kw_write_reg(reg_data, *(host->data++));
				host->len--;
			} else
				kw_i2c_do_stop(host, 0);
		} else {
			WRONG_STATE("KW_I2C_IRQ_DATA");
			if (host->state != state_stop)
				kw_i2c_do_stop(host, -EIO);
		}
		kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
	}

	/* STOP condition seen on the bus: transfer finished */
	if (isr & KW_I2C_IRQ_STOP) {
		kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
		if (host->state != state_stop) {
			WRONG_STATE("KW_I2C_IRQ_STOP");
			host->result = -EIO;
		}
		host->state = state_idle;
		if (!host->polled)
			complete(&host->complete);
	}

	/* Below should only happen in manual mode which we don't use ... */
	if (isr & KW_I2C_IRQ_START)
		kw_write_reg(reg_isr, KW_I2C_IRQ_START);
}

/* Interrupt handler */
static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
{
	struct pmac_i2c_host_kw *host = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	del_timer(&host->timeout_timer);
	kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
	/* Re-arm the watchdog while the transfer is still in flight */
	if (host->state != state_idle) {
		host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
		add_timer(&host->timeout_timer);
	}
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_HANDLED;
}

/* Transfer watchdog: kick the state machine if no irq arrived in time. */
static void kw_i2c_timeout(struct timer_list *t)
{
	struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the timer is pending, that means we raced with the
	 * irq, in which case we just return
	 */
	if (timer_pending(&host->timeout_timer))
		goto skip;

	kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
	if (host->state != state_idle) {
		host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
		add_timer(&host->timeout_timer);
	}
 skip:
	spin_unlock_irqrestore(&host->lock, flags);
}

/* Bus open op: serialize against other users of this host. */
static int kw_i2c_open(struct pmac_i2c_bus *bus)
{
	struct pmac_i2c_host_kw *host = bus->hostdata;
	mutex_lock(&host->mutex);
	return 0;
}

/* Bus close op: release the host mutex taken in kw_i2c_open(). */
static void kw_i2c_close(struct pmac_i2c_bus *bus)
{
	struct pmac_i2c_host_kw *host = bus->hostdata;
	mutex_unlock(&host->mutex);
}

/* Bus xfer op: run one transfer, irq-driven or polled. */
static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
		       u32 subaddr, u8 *data, int len)
{
	struct
pmac_i2c_host_kw *host = bus->hostdata;
	u8 mode_reg = host->speed;
	int use_irq = host->irq && !bus->polled;

	/* Setup mode & subaddress if any */
	switch(bus->mode) {
	case pmac_i2c_mode_dumb:
		/* raw/dumb mode is not implemented by this backend */
		return -EINVAL;
	case pmac_i2c_mode_std:
		mode_reg |= KW_I2C_MODE_STANDARD;
		if (subsize != 0)
			return -EINVAL;
		break;
	case pmac_i2c_mode_stdsub:
		mode_reg |= KW_I2C_MODE_STANDARDSUB;
		if (subsize != 1)
			return -EINVAL;
		break;
	case pmac_i2c_mode_combined:
		mode_reg |= KW_I2C_MODE_COMBINED;
		if (subsize != 1)
			return -EINVAL;
		break;
	}

	/* Setup channel & clear pending irqs */
	kw_write_reg(reg_isr, kw_read_reg(reg_isr));
	kw_write_reg(reg_mode, mode_reg | (bus->channel << 4));
	kw_write_reg(reg_status, 0);

	/* Set up address and r/w bit, strip possible stale bus number from
	 * address top bits */
	kw_write_reg(reg_addr, addrdir & 0xff);

	/* Set up the sub address */
	if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB ||
	    (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
		kw_write_reg(reg_subaddr, subaddr);

	/* Prepare for async operations */
	host->data = data;
	host->len = len;
	host->state = state_addr;
	host->result = 0;
	host->rw = (addrdir & 1);
	host->polled = bus->polled;

	/* Enable interrupt if not using polled mode and interrupt is
	 * available
	 */
	if (use_irq) {
		/* Clear completion */
		reinit_completion(&host->complete);
		/* Ack stale interrupts */
		kw_write_reg(reg_isr, kw_read_reg(reg_isr));
		/* Arm timeout */
		host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
		add_timer(&host->timeout_timer);
		/* Enable emission */
		kw_write_reg(reg_ier, KW_I2C_IRQ_MASK);
	}

	/* Start sending address */
	kw_write_reg(reg_control, KW_I2C_CTL_XADDR);

	/* Wait for completion */
	if (use_irq)
		wait_for_completion(&host->complete);
	else {
		/* Polled mode: drive the state machine ourselves */
		while(host->state != state_idle) {
			unsigned long flags;

			u8 isr = kw_i2c_wait_interrupt(host);
			spin_lock_irqsave(&host->lock, flags);
			kw_i2c_handle_interrupt(host, isr);
			spin_unlock_irqrestore(&host->lock, flags);
		}
	}

	/* Disable emission */
	kw_write_reg(reg_ier, 0);
	return host->result;
}

/* Allocate and initialize one Keywest host controller from its
 * device-tree node.  Returns NULL on any failure. */
static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
{
	struct pmac_i2c_host_kw *host;
	const u32	*psteps, *prate, *addrp;
	u32		steps;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (host == NULL) {
		printk(KERN_ERR "low_i2c: Can't allocate host for %pOF\n",
		       np);
		return NULL;
	}

	/* Apple is kind enough to provide a valid AAPL,address property
	 * on all i2c keywest nodes so far ... we would have to fallback
	 * to macio parsing if that wasn't the case
	 */
	addrp = of_get_property(np, "AAPL,address", NULL);
	if (addrp == NULL) {
		printk(KERN_ERR "low_i2c: Can't find address for %pOF\n", np);
		kfree(host);
		return NULL;
	}
	mutex_init(&host->mutex);
	init_completion(&host->complete);
	spin_lock_init(&host->lock);
	timer_setup(&host->timeout_timer, kw_i2c_timeout, 0);

	/* Register stepping: bsteps = log2 of the address-step property */
	psteps = of_get_property(np, "AAPL,address-step", NULL);
	steps = psteps ? (*psteps) : 0x10;
	for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
		steps >>= 1;
	/* Select interface rate */
	host->speed = KW_I2C_MODE_25KHZ;
	prate = of_get_property(np, "AAPL,i2c-rate", NULL);
	if (prate) switch(*prate) {
	case 100:
		host->speed = KW_I2C_MODE_100KHZ;
		break;
	case 50:
		host->speed = KW_I2C_MODE_50KHZ;
		break;
	case 25:
		host->speed = KW_I2C_MODE_25KHZ;
		break;
	}
	host->irq = irq_of_parse_and_map(np, 0);
	if (!host->irq)
		printk(KERN_WARNING
		       "low_i2c: Failed to map interrupt for %pOF\n",
		       np);

	host->base = ioremap((*addrp), 0x1000);
	if (host->base == NULL) {
		printk(KERN_ERR "low_i2c: Can't map registers for %pOF\n",
		       np);
		kfree(host);
		return NULL;
	}

	/* Make sure IRQ is disabled */
	kw_write_reg(reg_ier, 0);

	/* Request chip interrupt. We set IRQF_NO_SUSPEND because we don't
	 * want that interrupt disabled between the 2 passes of driver
	 * suspend or we'll have issues running the pfuncs
	 */
	if (request_irq(host->irq, kw_i2c_irq, IRQF_NO_SUSPEND,
			"keywest i2c", host))
		host->irq = 0;	/* fall back to polled operation */

	printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %pOF\n",
	       *addrp, host->irq, np);

	return host;
}

/* Register one pmac_i2c_bus backed by a Keywest host.  For a multibus
 * setup, controller == busnode. */
static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
			      struct device_node *controller,
			      struct device_node *busnode,
			      int channel)
{
	struct pmac_i2c_bus *bus;

	bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL);
	if (bus == NULL)
		return;

	bus->controller = of_node_get(controller);
	bus->busnode = of_node_get(busnode);
	bus->type = pmac_i2c_bus_keywest;
	bus->hostdata = host;
	bus->channel = channel;
	bus->mode = pmac_i2c_mode_std;
	bus->open = kw_i2c_open;
	bus->close = kw_i2c_close;
	bus->xfer = kw_i2c_xfer;
	mutex_init(&bus->mutex);
	lockdep_register_key(&bus->lock_key);
	lockdep_set_class(&bus->mutex, &bus->lock_key);
	if (controller == busnode)
		bus->flags = pmac_i2c_multibus;
	list_add(&bus->link, &pmac_i2c_busses);

	printk(KERN_INFO " channel %d bus %s\n", channel,
	       (controller == busnode) ? "<multibus>" : busnode->full_name);
}

/* Find all keywest-i2c controllers in the device tree and register
 * their busses. */
static void __init kw_i2c_probe(void)
{
	struct device_node *np, *child, *parent;

	/* Probe keywest-i2c busses */
	for_each_compatible_node(np, "i2c","keywest-i2c") {
		struct pmac_i2c_host_kw *host;
		int multibus;

		/* Found one, init a host structure */
		host = kw_i2c_host_init(np);
		if (host == NULL)
			continue;

		/* Now check if we have a multibus setup (old style) or if we
		 * have proper bus nodes. Note that the "new" way (proper bus
		 * nodes) might cause us to not create some busses that are
		 * kept hidden in the device-tree.
		 * In the future, we might want to work around that by
		 * creating busses without a node but not for now
		 */
		child = of_get_next_child(np, NULL);
		multibus = !of_node_name_eq(child, "i2c-bus");
		of_node_put(child);

		/* For a multibus setup, we get the bus count based on the
		 * parent type
		 */
		if (multibus) {
			int chans, i;

			parent = of_get_parent(np);
			if (parent == NULL)
				continue;
			/* 'u' => uni-n parent: two channels, else one */
			chans = parent->name[0] == 'u' ? 2 : 1;
			of_node_put(parent);
			for (i = 0; i < chans; i++)
				kw_i2c_add(host, np, np, i);
		} else {
			for_each_child_of_node(np, child) {
				const u32 *reg = of_get_property(child,
						"reg", NULL);
				if (reg == NULL)
					continue;
				kw_i2c_add(host, np, child, *reg);
			}
		}
	}
}

/*
 *
 * PMU implementation
 *
 */

#ifdef CONFIG_ADB_PMU

/*
 * i2c command block to the PMU
 */
struct pmu_i2c_hdr {
	u8	bus;
	u8	mode;
	u8	bus2;
	u8	address;
	u8	sub_addr;
	u8	comb_addr;
	u8	count;
	u8	data[];
};

/* adb_request completion callback: wake the waiting xfer. */
static void pmu_i2c_complete(struct adb_request *req)
{
	complete(req->arg);
}

/* Bus xfer op for PMU-attached busses: build a PMU_I2C_CMD request,
 * retry up to 16 times, then poll the PMU for transfer status. */
static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
			u32 subaddr, u8 *data, int len)
{
	struct adb_request *req = bus->hostdata;
	struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1];
	struct completion comp;
	int read = addrdir & 1;
	int retry;
	int rc = 0;

	/* For now, limit ourselves to 16 bytes transfers */
	if (len > 16)
		return -EINVAL;

	init_completion(&comp);

	for (retry = 0; retry < 16; retry++) {
		memset(req, 0, sizeof(struct adb_request));
		hdr->bus = bus->channel;
		hdr->count = len;

		switch(bus->mode) {
		case pmac_i2c_mode_std:
			if (subsize != 0)
				return -EINVAL;
			hdr->address = addrdir;
			hdr->mode = PMU_I2C_MODE_SIMPLE;
			break;
		case pmac_i2c_mode_stdsub:
		case pmac_i2c_mode_combined:
			if (subsize != 1)
				return -EINVAL;
			hdr->address = addrdir & 0xfe;
			hdr->comb_addr = addrdir;
			hdr->sub_addr = subaddr;
			if (bus->mode == pmac_i2c_mode_stdsub)
				hdr->mode = PMU_I2C_MODE_STDSUB;
			else
				hdr->mode = PMU_I2C_MODE_COMBINED;
			break;
		default:
			return -EINVAL;
		}

		reinit_completion(&comp);
		req->data[0] = PMU_I2C_CMD;
		req->reply[0] = 0xff;
		req->nbytes = sizeof(struct pmu_i2c_hdr) + 1;
		req->done = pmu_i2c_complete;
		req->arg = &comp;
		if (!read && len) {
			memcpy(hdr->data, data, len);
			req->nbytes += len;
		}
		rc = pmu_queue_request(req);
		if (rc)
			return rc;
		wait_for_completion(&comp);
		if (req->reply[0] == PMU_I2C_STATUS_OK)
			break;
		msleep(15);
	}
	if (req->reply[0] != PMU_I2C_STATUS_OK)
		return -EIO;

	/* Command accepted; now poll the PMU until it reports the final
	 * transfer status (and returns the data for reads). */
	for (retry = 0; retry < 16; retry++) {
		memset(req, 0, sizeof(struct adb_request));

		/* I know that looks like a lot, slow as hell, but darwin
		 * does it so let's be on the safe side for now
		 */
		msleep(15);

		hdr->bus = PMU_I2C_BUS_STATUS;

		reinit_completion(&comp);
		req->data[0] = PMU_I2C_CMD;
		req->reply[0] = 0xff;
		req->nbytes = 2;
		req->done = pmu_i2c_complete;
		req->arg = &comp;
		rc = pmu_queue_request(req);
		if (rc)
			return rc;
		wait_for_completion(&comp);

		if (req->reply[0] == PMU_I2C_STATUS_OK && !read)
			return 0;
		if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) {
			int rlen = req->reply_len - 1;

			if (rlen != len) {
				printk(KERN_WARNING "low_i2c: PMU returned %d"
				       " bytes, expected %d !\n", rlen, len);
				return -EIO;
			}
			if (len)
				memcpy(data, &req->reply[1], len);
			return 0;
		}
	}
	return -EIO;
}

/* Register the PMU i2c busses (multibus, channels 1 and 2). */
static void __init pmu_i2c_probe(void)
{
	struct pmac_i2c_bus *bus;
	struct device_node *busnode;
	int channel, sz;

	if (!pmu_present())
		return;

	/* There might or might not be a "pmu-i2c" node, we use that
	 * or via-pmu itself, whatever we find. I haven't seen a machine
	 * with separate bus nodes, so we assume a multibus setup
	 */
	busnode = of_find_node_by_name(NULL, "pmu-i2c");
	if (busnode == NULL)
		busnode = of_find_node_by_name(NULL, "via-pmu");
	if (busnode == NULL)
		return;

	printk(KERN_INFO "PMU i2c %pOF\n", busnode);

	/*
	 * We add bus 1 and 2 only for now, bus 0 is "special"
	 */
	for (channel = 1; channel <= 2; channel++) {
		/* adb_request lives right after the bus structure */
		sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request);
		bus = kzalloc(sz, GFP_KERNEL);
		if (bus == NULL)
			return;

		bus->controller = busnode;
		bus->busnode = busnode;
		bus->type = pmac_i2c_bus_pmu;
		bus->channel = channel;
		bus->mode = pmac_i2c_mode_std;
		bus->hostdata = bus + 1;
		bus->xfer = pmu_i2c_xfer;
		mutex_init(&bus->mutex);
		lockdep_register_key(&bus->lock_key);
		lockdep_set_class(&bus->mutex, &bus->lock_key);
		bus->flags = pmac_i2c_multibus;
		list_add(&bus->link, &pmac_i2c_busses);

		printk(KERN_INFO " channel %d bus <multibus>\n", channel);
	}
}

#endif /* CONFIG_ADB_PMU */

/*
 *
 * SMU implementation
 *
 */

#ifdef CONFIG_PMAC_SMU

/* smu_i2c_cmd completion callback: wake the waiting xfer. */
static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc)
{
	complete(misc);
}

/* Bus xfer op for SMU-attached busses: queue one smu_i2c_cmd and wait. */
static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize,
			u32 subaddr, u8 *data, int len)
{
	struct smu_i2c_cmd *cmd = bus->hostdata;
	struct completion comp;
	int read = addrdir & 1;
	int rc = 0;

	if ((read && len > SMU_I2C_READ_MAX) ||
	    ((!read) && len > SMU_I2C_WRITE_MAX))
		return -EINVAL;

	memset(cmd, 0, sizeof(struct smu_i2c_cmd));
	cmd->info.bus = bus->channel;
	cmd->info.devaddr = addrdir;
	cmd->info.datalen = len;

	switch(bus->mode) {
	case pmac_i2c_mode_std:
		if (subsize != 0)
			return -EINVAL;
		cmd->info.type = SMU_I2C_TRANSFER_SIMPLE;
		break;
	case pmac_i2c_mode_stdsub:
	case pmac_i2c_mode_combined:
		if (subsize > 3 || subsize < 1)
			return -EINVAL;
		cmd->info.sublen = subsize;
		/* that's big-endian only but heh !
		 */
		memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize),
		       subsize);
		if (bus->mode == pmac_i2c_mode_stdsub)
			cmd->info.type = SMU_I2C_TRANSFER_STDSUB;
		else
			cmd->info.type = SMU_I2C_TRANSFER_COMBINED;
		break;
	default:
		return -EINVAL;
	}

	if (!read && len)
		memcpy(cmd->info.data, data, len);

	init_completion(&comp);
	cmd->done = smu_i2c_complete;
	cmd->misc = &comp;
	rc = smu_queue_i2c(cmd);
	if (rc < 0)
		return rc;
	wait_for_completion(&comp);
	rc = cmd->status;

	if (read && len)
		memcpy(data, cmd->info.data, len);
	return rc < 0 ? rc : 0;
}

/* Register SMU i2c busses from the device tree. */
static void __init smu_i2c_probe(void)
{
	struct device_node *controller, *busnode;
	struct pmac_i2c_bus *bus;
	const u32 *reg;
	int sz;

	if (!smu_present())
		return;

	controller = of_find_node_by_name(NULL, "smu-i2c-control");
	if (controller == NULL)
		controller = of_find_node_by_name(NULL, "smu");
	if (controller == NULL)
		return;

	printk(KERN_INFO "SMU i2c %pOF\n", controller);

	/* Look for childs, note that they might not be of the right
	 * type as older device trees mix i2c busses and other things
	 * at the same level
	 */
	for_each_child_of_node(controller, busnode) {
		if (!of_node_is_type(busnode, "i2c") &&
		    !of_node_is_type(busnode, "i2c-bus"))
			continue;
		reg = of_get_property(busnode, "reg", NULL);
		if (reg == NULL)
			continue;

		/* smu_i2c_cmd lives right after the bus structure */
		sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd);
		bus = kzalloc(sz, GFP_KERNEL);
		if (bus == NULL)
			return;

		bus->controller = controller;
		bus->busnode = of_node_get(busnode);
		bus->type = pmac_i2c_bus_smu;
		bus->channel = *reg;
		bus->mode = pmac_i2c_mode_std;
		bus->hostdata = bus + 1;
		bus->xfer = smu_i2c_xfer;
		mutex_init(&bus->mutex);
		lockdep_register_key(&bus->lock_key);
		lockdep_set_class(&bus->mutex, &bus->lock_key);
		bus->flags = 0;
		list_add(&bus->link, &pmac_i2c_busses);

		printk(KERN_INFO " channel %x bus %pOF\n",
		       bus->channel, busnode);
	}
}

#endif /* CONFIG_PMAC_SMU */

/*
 *
 * Core code
 *
 */


/* Walk up from `node` looking for the pmac_i2c_bus it sits on.  For
 * multibus controllers, the device's "reg" top bits select the channel. */
struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node)
{
	struct device_node *p = of_node_get(node);
	struct device_node
*prev = NULL; struct pmac_i2c_bus *bus; while(p) { list_for_each_entry(bus, &pmac_i2c_busses, link) { if (p == bus->busnode) { if (prev && bus->flags & pmac_i2c_multibus) { const u32 *reg; reg = of_get_property(prev, "reg", NULL); if (!reg) continue; if (((*reg) >> 8) != bus->channel) continue; } of_node_put(p); of_node_put(prev); return bus; } } of_node_put(prev); prev = p; p = of_get_parent(p); } return NULL; } EXPORT_SYMBOL_GPL(pmac_i2c_find_bus); u8 pmac_i2c_get_dev_addr(struct device_node *device) { const u32 *reg = of_get_property(device, "reg", NULL); if (reg == NULL) return 0; return (*reg) & 0xff; } EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr); struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus) { return bus->controller; } EXPORT_SYMBOL_GPL(pmac_i2c_get_controller); struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus) { return bus->busnode; } EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node); int pmac_i2c_get_type(struct pmac_i2c_bus *bus) { return bus->type; } EXPORT_SYMBOL_GPL(pmac_i2c_get_type); int pmac_i2c_get_flags(struct pmac_i2c_bus *bus) { return bus->flags; } EXPORT_SYMBOL_GPL(pmac_i2c_get_flags); int pmac_i2c_get_channel(struct pmac_i2c_bus *bus) { return bus->channel; } EXPORT_SYMBOL_GPL(pmac_i2c_get_channel); struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus) { return &bus->adapter; } EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter); struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter) { struct pmac_i2c_bus *bus; list_for_each_entry(bus, &pmac_i2c_busses, link) if (&bus->adapter == adapter) return bus; return NULL; } EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus); int pmac_i2c_match_adapter(struct device_node *dev, struct i2c_adapter *adapter) { struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev); if (bus == NULL) return 0; return (&bus->adapter == adapter); } EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter); int pmac_low_i2c_lock(struct device_node *np) { struct pmac_i2c_bus *bus, *found = NULL; 
list_for_each_entry(bus, &pmac_i2c_busses, link) { if (np == bus->controller) { found = bus; break; } } if (!found) return -ENODEV; return pmac_i2c_open(bus, 0); } EXPORT_SYMBOL_GPL(pmac_low_i2c_lock); int pmac_low_i2c_unlock(struct device_node *np) { struct pmac_i2c_bus *bus, *found = NULL; list_for_each_entry(bus, &pmac_i2c_busses, link) { if (np == bus->controller) { found = bus; break; } } if (!found) return -ENODEV; pmac_i2c_close(bus); return 0; } EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock); int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled) { int rc; mutex_lock(&bus->mutex); bus->polled = polled || pmac_i2c_force_poll; bus->opened = 1; bus->mode = pmac_i2c_mode_std; if (bus->open && (rc = bus->open(bus)) != 0) { bus->opened = 0; mutex_unlock(&bus->mutex); return rc; } return 0; } EXPORT_SYMBOL_GPL(pmac_i2c_open); void pmac_i2c_close(struct pmac_i2c_bus *bus) { WARN_ON(!bus->opened); if (bus->close) bus->close(bus); bus->opened = 0; mutex_unlock(&bus->mutex); } EXPORT_SYMBOL_GPL(pmac_i2c_close); int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode) { WARN_ON(!bus->opened); /* Report me if you see the error below as there might be a new * "combined4" mode that I need to implement for the SMU bus */ if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) { printk(KERN_ERR "low_i2c: Invalid mode %d requested on" " bus %pOF !\n", mode, bus->busnode); return -EINVAL; } bus->mode = mode; return 0; } EXPORT_SYMBOL_GPL(pmac_i2c_setmode); int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, u32 subaddr, u8 *data, int len) { int rc; WARN_ON(!bus->opened); DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x," " %d bytes, bus %pOF\n", bus->channel, addrdir, bus->mode, subsize, subaddr, len, bus->busnode); rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len); #ifdef DEBUG if (rc) DBG("xfer error %d\n", rc); #endif return rc; } EXPORT_SYMBOL_GPL(pmac_i2c_xfer); /* some quirks for platform function decoding */ enum { 
	pmac_i2c_quirk_invmask = 0x00000001u,	/* rmw mask is inverted */
	pmac_i2c_quirk_skip = 0x00000002u,	/* don't instantiate pfuncs */
};

/* Walk all busses and invoke `callback` for every whitelisted child
 * device node, passing its quirk flags. */
static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
					      int quirks))
{
	struct pmac_i2c_bus *bus;
	struct device_node *np;
	static struct whitelist_ent {
		char *name;
		char *compatible;
		int quirks;
	} whitelist[] = {
		/* XXX Study device-tree's & apple drivers are get the quirks
		 * right !
		 */
		/* Workaround: It seems that running the clockspreading
		 * properties on the eMac will cause lockups during boot.
		 * The machine seems to work fine without that. So for now,
		 * let's make sure i2c-hwclock doesn't match about "imic"
		 * clocks and we'll figure out if we really need to do
		 * something special about those later.
		 */
		{ "i2c-hwclock", "imic5002", pmac_i2c_quirk_skip },
		{ "i2c-hwclock", "imic5003", pmac_i2c_quirk_skip },
		{ "i2c-hwclock", NULL, pmac_i2c_quirk_invmask },
		{ "i2c-cpu-voltage", NULL, 0},
		{  "temp-monitor", NULL, 0 },
		{  "supply-monitor", NULL, 0 },
		{ NULL, NULL, 0 },
	};

	/* Only some devices need to have platform functions instantiated
	 * here. For now, we have a table. Others, like 9554 i2c GPIOs used
	 * on Xserve, if we ever do a driver for them, will use their own
	 * platform function instance
	 */
	list_for_each_entry(bus, &pmac_i2c_busses, link) {
		for_each_child_of_node(bus->busnode, np) {
			struct whitelist_ent *p;
			/* If multibus, check if device is on that bus */
			if (bus->flags & pmac_i2c_multibus)
				if (bus != pmac_i2c_find_bus(np))
					continue;
			for (p = whitelist; p->name != NULL; p++) {
				if (!of_node_name_eq(np, p->name))
					continue;
				if (p->compatible &&
				    !of_device_is_compatible(np, p->compatible))
					continue;
				if (p->quirks & pmac_i2c_quirk_skip)
					break;
				callback(np, p->quirks);
				break;
			}
		}
	}
}

#define MAX_I2C_DATA	64

/* Per-invocation state for a platform-function ("pfunc") sequence */
struct pmac_i2c_pf_inst {
	struct pmac_i2c_bus	*bus;
	u8			addr;			/* target device address */
	u8			buffer[MAX_I2C_DATA];	/* last read data */
	u8			scratch[MAX_I2C_DATA];	/* rmw staging area */
	int			bytes;			/* valid bytes in buffer */
	int			quirks;
};

/* pfunc begin: open the device's bus and allocate instance state.
 * Returns NULL on failure (bus not found, open failed, or OOM). */
static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args)
{
	struct pmac_i2c_pf_inst *inst;
	struct pmac_i2c_bus	*bus;

	bus = pmac_i2c_find_bus(func->node);
	if (bus == NULL) {
		printk(KERN_ERR "low_i2c: Can't find bus for %pOF (pfunc)\n",
		       func->node);
		return NULL;
	}
	if (pmac_i2c_open(bus, 0)) {
		printk(KERN_ERR "low_i2c: Can't open i2c bus for %pOF (pfunc)\n",
		       func->node);
		return NULL;
	}

	/* XXX might need GFP_ATOMIC when called during the suspend process,
	 * but then, there are already lots of issues with suspending when
	 * near OOM that need to be resolved, the allocator itself should
	 * probably make GFP_NOIO implicit during suspend
	 */
	inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL);
	if (inst == NULL) {
		pmac_i2c_close(bus);
		return NULL;
	}
	inst->bus = bus;
	inst->addr = pmac_i2c_get_dev_addr(func->node);
	inst->quirks = (int)(long)func->driver_data;
	return inst;
}

/* pfunc end: close the bus and free the instance from do_begin. */
static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	if (inst == NULL)
		return;
	pmac_i2c_close(inst->bus);
	kfree(inst);
}

/* pfunc read: read `len` bytes into the instance buffer. */
static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	inst->bytes = len;
	return
 pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0,
			     inst->buffer, len);
}

/* pfunc write: write `len` bytes from `data` to the device. */
static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
			     (u8 *)data, len);
}

/* This function is used to do the masking & OR'ing for the "rmw" type
 * callbacks. Ze should apply the mask and OR in the values in the
 * buffer before writing back. The problem is that it seems that
 * various darwin drivers implement the mask/or differently, thus
 * we need to check the quirks first
 */
static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst,
				  u32 len, const u8 *mask, const u8 *val)
{
	int i;

	if (inst->quirks & pmac_i2c_quirk_invmask) {
		/* quirky devices: keep masked bits, OR in val unmasked */
		for (i = 0; i < len; i ++)
			inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i];
	} else {
		/* normal rmw: clear masked bits, OR in masked val */
		for (i = 0; i < len; i ++)
			inst->scratch[i] = (inst->buffer[i] & ~mask[i])
				| (val[i] & mask[i]);
	}
}

/* pfunc rmw: modify the previously-read buffer and write it back. */
static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen,
			   u32 totallen, const u8 *maskdata,
			   const u8 *valuedata)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	if (masklen > inst->bytes || valuelen > inst->bytes ||
	    totallen > inst->bytes || valuelen > masklen)
		return -EINVAL;

	pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata);

	return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0,
			     inst->scratch, totallen);
}

/* pfunc read with subaddress */
static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	inst->bytes = len;
	return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr,
			     inst->buffer, len);
}

/* pfunc write with subaddress */
static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len,
				     const u8 *data)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1,
			     subaddr, (u8 *)data, len);
}

/* pfunc set_mode: change the bus transfer mode */
static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode)
{
	struct pmac_i2c_pf_inst *inst = instdata;

	return pmac_i2c_setmode(inst->bus, mode);
}

static int
pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen, u32 valuelen, u32 totallen, const u8 *maskdata, const u8 *valuedata) { struct pmac_i2c_pf_inst *inst = instdata; if (masklen > inst->bytes || valuelen > inst->bytes || totallen > inst->bytes || valuelen > masklen) return -EINVAL; pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata); return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1, subaddr, inst->scratch, totallen); } static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len, const u8 *maskdata, const u8 *valuedata) { struct pmac_i2c_pf_inst *inst = instdata; int i, match; /* Get return value pointer, it's assumed to be a u32 */ if (!args || !args->count || !args->u[0].p) return -EINVAL; /* Check buffer */ if (len > inst->bytes) return -EINVAL; for (i = 0, match = 1; match && i < len; i ++) if ((inst->buffer[i] & maskdata[i]) != valuedata[i]) match = 0; *args->u[0].p = match; return 0; } static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration) { msleep((duration + 999) / 1000); return 0; } static struct pmf_handlers pmac_i2c_pfunc_handlers = { .begin = pmac_i2c_do_begin, .end = pmac_i2c_do_end, .read_i2c = pmac_i2c_do_read, .write_i2c = pmac_i2c_do_write, .rmw_i2c = pmac_i2c_do_rmw, .read_i2c_sub = pmac_i2c_do_read_sub, .write_i2c_sub = pmac_i2c_do_write_sub, .rmw_i2c_sub = pmac_i2c_do_rmw_sub, .set_i2c_mode = pmac_i2c_do_set_mode, .mask_and_compare = pmac_i2c_do_mask_and_comp, .delay = pmac_i2c_do_delay, }; static void __init pmac_i2c_dev_create(struct device_node *np, int quirks) { DBG("dev_create(%pOF)\n", np); pmf_register_driver(np, &pmac_i2c_pfunc_handlers, (void *)(long)quirks); } static void __init pmac_i2c_dev_init(struct device_node *np, int quirks) { DBG("dev_create(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL); } static void pmac_i2c_dev_suspend(struct device_node *np, int quirks) { DBG("dev_suspend(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL); } static void 
pmac_i2c_dev_resume(struct device_node *np, int quirks) { DBG("dev_resume(%pOF)\n", np); pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL); } void pmac_pfunc_i2c_suspend(void) { pmac_i2c_devscan(pmac_i2c_dev_suspend); } void pmac_pfunc_i2c_resume(void) { pmac_i2c_devscan(pmac_i2c_dev_resume); } /* * Initialize us: probe all i2c busses on the machine, instantiate * busses and platform functions as needed. */ /* This is non-static as it might be called early by smp code */ int __init pmac_i2c_init(void) { static int i2c_inited; if (i2c_inited) return 0; i2c_inited = 1; /* Probe keywest-i2c busses */ kw_i2c_probe(); #ifdef CONFIG_ADB_PMU /* Probe PMU i2c busses */ pmu_i2c_probe(); #endif #ifdef CONFIG_PMAC_SMU /* Probe SMU i2c busses */ smu_i2c_probe(); #endif /* Now add platform functions for some known devices */ pmac_i2c_devscan(pmac_i2c_dev_create); return 0; } machine_arch_initcall(powermac, pmac_i2c_init); /* Since pmac_i2c_init can be called too early for the platform device * registration, we need to do it at a later time. In our case, subsys * happens to fit well, though I agree it's a bit of a hack... */ static int __init pmac_i2c_create_platform_devices(void) { struct pmac_i2c_bus *bus; int i = 0; /* In the case where we are initialized from smp_init(), we must * not use the timer (and thus the irq). It's safe from now on * though */ pmac_i2c_force_poll = 0; /* Create platform devices */ list_for_each_entry(bus, &pmac_i2c_busses, link) { bus->platform_dev = platform_device_alloc("i2c-powermac", i++); if (bus->platform_dev == NULL) return -ENOMEM; bus->platform_dev->dev.platform_data = bus; bus->platform_dev->dev.of_node = bus->busnode; platform_device_add(bus->platform_dev); } /* Now call platform "init" functions */ pmac_i2c_devscan(pmac_i2c_dev_init); return 0; } machine_subsys_initcall(powermac, pmac_i2c_create_platform_devices);
linux-master
arch/powerpc/platforms/powermac/low_i2c.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Early boot support code for BootX bootloader * * Copyright (C) 2005 Ben. Herrenschmidt ([email protected]) */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> #include <linux/of_fdt.h> #include <generated/utsrelease.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/page.h> #include <asm/bootx.h> #include <asm/btext.h> #include <asm/io.h> #include <asm/setup.h> #undef DEBUG #define SET_BOOT_BAT #ifdef DEBUG #define DBG(fmt...) do { bootx_printf(fmt); } while(0) #else #define DBG(fmt...) do { } while(0) #endif extern void __start(unsigned long r3, unsigned long r4, unsigned long r5); static unsigned long __initdata bootx_dt_strbase; static unsigned long __initdata bootx_dt_strend; static unsigned long __initdata bootx_node_chosen; static boot_infos_t * __initdata bootx_info; static char __initdata bootx_disp_path[256]; /* Is boot-info compatible ? */ #define BOOT_INFO_IS_COMPATIBLE(bi) \ ((bi)->compatible_version <= BOOT_INFO_VERSION) #define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2) #define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4) #ifdef CONFIG_BOOTX_TEXT static void __init bootx_printf(const char *format, ...) { const char *p, *q, *s; va_list args; unsigned long v; va_start(args, format); for (p = format; *p != 0; p = q) { for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) ; if (q > p) btext_drawtext(p, q - p); if (*q == 0) break; if (*q == '\n') { ++q; btext_flushline(); btext_drawstring("\r\n"); btext_flushline(); continue; } ++q; if (*q == 0) break; switch (*q) { case 's': ++q; s = va_arg(args, const char *); if (s == NULL) s = "<NULL>"; btext_drawstring(s); break; case 'x': ++q; v = va_arg(args, unsigned long); btext_drawhex(v); break; } } va_end(args); } #else /* CONFIG_BOOTX_TEXT */ static void __init bootx_printf(const char *format, ...) 
{} #endif /* CONFIG_BOOTX_TEXT */ static void * __init bootx_early_getprop(unsigned long base, unsigned long node, char *prop) { struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); u32 *ppp = &np->properties; while(*ppp) { struct bootx_dt_prop *pp = (struct bootx_dt_prop *)(base + *ppp); if (strcmp((char *)((unsigned long)pp->name + base), prop) == 0) { return (void *)((unsigned long)pp->value + base); } ppp = &pp->next; } return NULL; } #define dt_push_token(token, mem) \ do { \ *(mem) = ALIGN(*(mem),4); \ *((u32 *)*(mem)) = token; \ *(mem) += 4; \ } while(0) static unsigned long __init bootx_dt_find_string(char *str) { char *s, *os; s = os = (char *)bootx_dt_strbase; s += 4; while (s < (char *)bootx_dt_strend) { if (strcmp(s, str) == 0) return s - os; s += strlen(s) + 1; } return 0; } static void __init bootx_dt_add_prop(char *name, void *data, int size, unsigned long *mem_end) { unsigned long soff = bootx_dt_find_string(name); if (data == NULL) size = 0; if (soff == 0) { bootx_printf("WARNING: Can't find string index for <%s>\n", name); return; } if (size > 0x20000) { bootx_printf("WARNING: ignoring large property "); bootx_printf("%s length 0x%x\n", name, size); return; } dt_push_token(OF_DT_PROP, mem_end); dt_push_token(size, mem_end); dt_push_token(soff, mem_end); /* push property content */ if (size && data) { memcpy((void *)*mem_end, data, size); *mem_end = ALIGN(*mem_end + size, 4); } } static void __init bootx_add_chosen_props(unsigned long base, unsigned long *mem_end) { u32 val; bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end); if (bootx_info->kernelParamsOffset) { char *args = (char *)((unsigned long)bootx_info) + bootx_info->kernelParamsOffset; bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end); } if (bootx_info->ramDisk) { val = ((unsigned long)bootx_info) + bootx_info->ramDisk; bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end); val += bootx_info->ramDiskSize; bootx_dt_add_prop("linux,initrd-end", &val, 4, 
mem_end); } if (strlen(bootx_disp_path)) bootx_dt_add_prop("linux,stdout-path", bootx_disp_path, strlen(bootx_disp_path) + 1, mem_end); } static void __init bootx_add_display_props(unsigned long base, unsigned long *mem_end, int has_real_node) { boot_infos_t *bi = bootx_info; u32 tmp; if (has_real_node) { bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end); bootx_dt_add_prop("linux,opened", NULL, 0, mem_end); } else bootx_dt_add_prop("linux,bootx-noscreen", NULL, 0, mem_end); tmp = bi->dispDeviceDepth; bootx_dt_add_prop("linux,bootx-depth", &tmp, 4, mem_end); tmp = bi->dispDeviceRect[2] - bi->dispDeviceRect[0]; bootx_dt_add_prop("linux,bootx-width", &tmp, 4, mem_end); tmp = bi->dispDeviceRect[3] - bi->dispDeviceRect[1]; bootx_dt_add_prop("linux,bootx-height", &tmp, 4, mem_end); tmp = bi->dispDeviceRowBytes; bootx_dt_add_prop("linux,bootx-linebytes", &tmp, 4, mem_end); tmp = (u32)bi->dispDeviceBase; if (tmp == 0) tmp = (u32)bi->logicalDisplayBase; tmp += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes; tmp += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8); bootx_dt_add_prop("linux,bootx-addr", &tmp, 4, mem_end); } static void __init bootx_dt_add_string(char *s, unsigned long *mem_end) { unsigned int l = strlen(s) + 1; memcpy((void *)*mem_end, s, l); bootx_dt_strend = *mem_end = *mem_end + l; } static void __init bootx_scan_dt_build_strings(unsigned long base, unsigned long node, unsigned long *mem_end) { struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); u32 *cpp, *ppp = &np->properties; unsigned long soff; char *namep; /* Keep refs to known nodes */ namep = np->full_name ? (char *)(base + np->full_name) : NULL; if (namep == NULL) { bootx_printf("Node without a full name !\n"); namep = ""; } DBG("* strings: %s\n", namep); if (!strcmp(namep, "/chosen")) { DBG(" detected /chosen ! 
adding properties names !\n"); bootx_dt_add_string("linux,bootx", mem_end); bootx_dt_add_string("linux,stdout-path", mem_end); bootx_dt_add_string("linux,initrd-start", mem_end); bootx_dt_add_string("linux,initrd-end", mem_end); bootx_dt_add_string("bootargs", mem_end); bootx_node_chosen = node; } if (node == bootx_info->dispDeviceRegEntryOffset) { DBG(" detected display ! adding properties names !\n"); bootx_dt_add_string("linux,boot-display", mem_end); bootx_dt_add_string("linux,opened", mem_end); strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path)); } /* get and store all property names */ while (*ppp) { struct bootx_dt_prop *pp = (struct bootx_dt_prop *)(base + *ppp); namep = pp->name ? (char *)(base + pp->name) : NULL; if (namep == NULL || strcmp(namep, "name") == 0) goto next; /* get/create string entry */ soff = bootx_dt_find_string(namep); if (soff == 0) bootx_dt_add_string(namep, mem_end); next: ppp = &pp->next; } /* do all our children */ cpp = &np->child; while(*cpp) { np = (struct bootx_dt_node *)(base + *cpp); bootx_scan_dt_build_strings(base, *cpp, mem_end); cpp = &np->sibling; } } static void __init bootx_scan_dt_build_struct(unsigned long base, unsigned long node, unsigned long *mem_end) { struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); u32 *cpp, *ppp = &np->properties; char *namep, *p, *ep, *lp; int l; dt_push_token(OF_DT_BEGIN_NODE, mem_end); /* get the node's full name */ namep = np->full_name ? (char *)(base + np->full_name) : NULL; if (namep == NULL) namep = ""; l = strlen(namep); DBG("* struct: %s\n", namep); /* Fixup an Apple bug where they have bogus \0 chars in the * middle of the path in some properties, and extract * the unit name (everything after the last '/'). 
*/ memcpy((void *)*mem_end, namep, l + 1); namep = (char *)*mem_end; for (lp = p = namep, ep = namep + l; p < ep; p++) { if (*p == '/') lp = namep; else if (*p != 0) *lp++ = *p; } *lp = 0; *mem_end = ALIGN((unsigned long)lp + 1, 4); /* get and store all properties */ while (*ppp) { struct bootx_dt_prop *pp = (struct bootx_dt_prop *)(base + *ppp); namep = pp->name ? (char *)(base + pp->name) : NULL; /* Skip "name" */ if (namep == NULL || !strcmp(namep, "name")) goto next; /* Skip "bootargs" in /chosen too as we replace it */ if (node == bootx_node_chosen && !strcmp(namep, "bootargs")) goto next; /* push property head */ bootx_dt_add_prop(namep, pp->value ? (void *)(base + pp->value): NULL, pp->length, mem_end); next: ppp = &pp->next; } if (node == bootx_node_chosen) { bootx_add_chosen_props(base, mem_end); if (bootx_info->dispDeviceRegEntryOffset == 0) bootx_add_display_props(base, mem_end, 0); } else if (node == bootx_info->dispDeviceRegEntryOffset) bootx_add_display_props(base, mem_end, 1); /* do all our children */ cpp = &np->child; while(*cpp) { np = (struct bootx_dt_node *)(base + *cpp); bootx_scan_dt_build_struct(base, *cpp, mem_end); cpp = &np->sibling; } dt_push_token(OF_DT_END_NODE, mem_end); } static unsigned long __init bootx_flatten_dt(unsigned long start) { boot_infos_t *bi = bootx_info; unsigned long mem_start, mem_end; struct boot_param_header *hdr; unsigned long base; u64 *rsvmap; /* Start using memory after the big blob passed by BootX, get * some space for the header */ mem_start = mem_end = ALIGN(((unsigned long)bi) + start, 4); DBG("Boot params header at: %x\n", mem_start); hdr = (struct boot_param_header *)mem_start; mem_end += sizeof(struct boot_param_header); rsvmap = (u64 *)(ALIGN(mem_end, 8)); hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start; mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64); /* Get base of tree */ base = ((unsigned long)bi) + bi->deviceTreeOffset; /* Build string array */ DBG("Building string array at: %x\n", 
mem_end); DBG("Device Tree Base=%x\n", base); bootx_dt_strbase = mem_end; mem_end += 4; bootx_dt_strend = mem_end; bootx_scan_dt_build_strings(base, 4, &mem_end); /* Add some strings */ bootx_dt_add_string("linux,bootx-noscreen", &mem_end); bootx_dt_add_string("linux,bootx-depth", &mem_end); bootx_dt_add_string("linux,bootx-width", &mem_end); bootx_dt_add_string("linux,bootx-height", &mem_end); bootx_dt_add_string("linux,bootx-linebytes", &mem_end); bootx_dt_add_string("linux,bootx-addr", &mem_end); /* Wrap up strings */ hdr->off_dt_strings = bootx_dt_strbase - mem_start; hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase; /* Build structure */ mem_end = ALIGN(mem_end, 16); DBG("Building device tree structure at: %x\n", mem_end); hdr->off_dt_struct = mem_end - mem_start; bootx_scan_dt_build_struct(base, 4, &mem_end); dt_push_token(OF_DT_END, &mem_end); /* Finish header */ hdr->boot_cpuid_phys = 0; hdr->magic = OF_DT_HEADER; hdr->totalsize = mem_end - mem_start; hdr->version = OF_DT_VERSION; /* Version 16 is not backward compatible */ hdr->last_comp_version = 0x10; /* Reserve the whole thing and copy the reserve map in, we * also bump mem_reserve_cnt to cause further reservations to * fail since it's too late. 
*/ mem_end = ALIGN(mem_end, PAGE_SIZE); DBG("End of boot params: %x\n", mem_end); rsvmap[0] = mem_start; rsvmap[1] = mem_end; if (bootx_info->ramDisk) { rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk; rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize; rsvmap[4] = 0; rsvmap[5] = 0; } else { rsvmap[2] = 0; rsvmap[3] = 0; } return (unsigned long)hdr; } #ifdef CONFIG_BOOTX_TEXT static void __init btext_welcome(boot_infos_t *bi) { unsigned long flags; unsigned long pvr; bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n"); bootx_printf("\nlinked at : 0x%x", KERNELBASE); bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase); bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase); bootx_printf(" (log)"); bootx_printf("\nklimit : 0x%x",(unsigned long)_end); bootx_printf("\nboot_info at : 0x%x", bi); __asm__ __volatile__ ("mfmsr %0" : "=r" (flags)); bootx_printf("\nMSR : 0x%x", flags); __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr)); bootx_printf("\nPVR : 0x%x", pvr); pvr >>= 16; if (pvr > 1) { __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags)); bootx_printf("\nHID0 : 0x%x", flags); } if (pvr == 8 || pvr == 12 || pvr == 0x800c) { __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags)); bootx_printf("\nICTC : 0x%x", flags); } #ifdef DEBUG bootx_printf("\n\n"); bootx_printf("bi->deviceTreeOffset : 0x%x\n", bi->deviceTreeOffset); bootx_printf("bi->deviceTreeSize : 0x%x\n", bi->deviceTreeSize); #endif bootx_printf("\n\n"); } #endif /* CONFIG_BOOTX_TEXT */ void __init bootx_init(unsigned long r3, unsigned long r4) { boot_infos_t *bi = (boot_infos_t *) r4; unsigned long hdr; unsigned long space; unsigned long ptr; char *model; unsigned long offset = reloc_offset(); reloc_got2(offset); bootx_info = bi; /* We haven't cleared any bss at this point, make sure * what we need is initialized */ bootx_dt_strbase = bootx_dt_strend = 0; bootx_node_chosen = 0; bootx_disp_path[0] = 0; if (!BOOT_INFO_IS_V2_COMPATIBLE(bi)) bi->logicalDisplayBase = 
bi->dispDeviceBase; /* Fixup depth 16 -> 15 as that's what MacOS calls 16bpp */ if (bi->dispDeviceDepth == 16) bi->dispDeviceDepth = 15; #ifdef CONFIG_BOOTX_TEXT ptr = (unsigned long)bi->logicalDisplayBase; ptr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes; ptr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8); btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0], bi->dispDeviceRect[3] - bi->dispDeviceRect[1], bi->dispDeviceDepth, bi->dispDeviceRowBytes, (unsigned long)bi->logicalDisplayBase); btext_clearscreen(); btext_flushscreen(); #endif /* CONFIG_BOOTX_TEXT */ /* * Test if boot-info is compatible. Done only in config * CONFIG_BOOTX_TEXT since there is nothing much we can do * with an incompatible version, except display a message * and eventually hang the processor... * * I'll try to keep enough of boot-info compatible in the * future to always allow display of this message; */ if (!BOOT_INFO_IS_COMPATIBLE(bi)) { bootx_printf(" !!! WARNING - Incompatible version" " of BootX !!!\n\n\n"); for (;;) ; } if (bi->architecture != BOOT_ARCH_PCI) { bootx_printf(" !!! WARNING - Unsupported machine" " architecture !\n"); for (;;) ; } #ifdef CONFIG_BOOTX_TEXT btext_welcome(bi); #endif /* New BootX enters kernel with MMU off, i/os are not allowed * here. This hack will have been done by the boostrap anyway. */ if (bi->version < 4) { /* * XXX If this is an iMac, turn off the USB controller. */ model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset, 4, "model"); if (model && (strcmp(model, "iMac,1") == 0 || strcmp(model, "PowerMac1,1") == 0)) { bootx_printf("iMac,1 detected, shutting down USB\n"); out_le32((unsigned __iomem *)0x80880008, 1); /* XXX */ } } /* Get a pointer that points above the device tree, args, ramdisk, * etc... 
to use for generating the flattened tree */ if (bi->version < 5) { space = bi->deviceTreeOffset + bi->deviceTreeSize; if (bi->ramDisk >= space) space = bi->ramDisk + bi->ramDiskSize; } else space = bi->totalParamsSize; bootx_printf("Total space used by parameters & ramdisk: 0x%x\n", space); /* New BootX will have flushed all TLBs and enters kernel with * MMU switched OFF, so this should not be useful anymore. */ if (bi->version < 4) { unsigned long x __maybe_unused; bootx_printf("Touching pages...\n"); /* * Touch each page to make sure the PTEs for them * are in the hash table - the aim is to try to avoid * getting DSI exceptions while copying the kernel image. */ for (ptr = ((unsigned long) &_stext) & PAGE_MASK; ptr < (unsigned long)bi + space; ptr += PAGE_SIZE) x = *(volatile unsigned long *)ptr; } /* Ok, now we need to generate a flattened device-tree to pass * to the kernel */ bootx_printf("Preparing boot params...\n"); hdr = bootx_flatten_dt(space); #ifdef CONFIG_BOOTX_TEXT #ifdef SET_BOOT_BAT bootx_printf("Preparing BAT...\n"); btext_prepare_BAT(); #else btext_unmap(); #endif #endif reloc_got2(-offset); __start(hdr, KERNELBASE + offset, 0); }
linux-master
arch/powerpc/platforms/powermac/bootx_init.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 1996-2001 Paul Mackerras ([email protected]) * Ben. Herrenschmidt ([email protected]) * * TODO: * * - Replace mdelay with some schedule loop if possible * - Shorten some obfuscated delays on some routines (like modem * power) * - Refcount some clocks (see darwin) * - Split split split... */ #include <linux/types.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/adb.h> #include <linux/pmu.h> #include <linux/ioport.h> #include <linux/export.h> #include <linux/pci.h> #include <asm/sections.h> #include <asm/errno.h> #include <asm/ohare.h> #include <asm/heathrow.h> #include <asm/keylargo.h> #include <asm/uninorth.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/dbdma.h> #include <asm/pci-bridge.h> #include <asm/pmac_low_i2c.h> #include "pmac.h" #undef DEBUG_FEATURE #ifdef DEBUG_FEATURE #define DBG(fmt...) printk(KERN_DEBUG fmt) #else #define DBG(fmt...) #endif #ifdef CONFIG_PPC_BOOK3S_32 extern int powersave_lowspeed; #endif extern int powersave_nap; extern struct device_node *k2_skiplist[2]; /* * We use a single global lock to protect accesses. 
Each driver has * to take care of its own locking */ DEFINE_RAW_SPINLOCK(feature_lock); #define LOCK(flags) raw_spin_lock_irqsave(&feature_lock, flags); #define UNLOCK(flags) raw_spin_unlock_irqrestore(&feature_lock, flags); /* * Instance of some macio stuffs */ struct macio_chip macio_chips[MAX_MACIO_CHIPS]; struct macio_chip *macio_find(struct device_node *child, int type) { while(child) { int i; for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++) if (child == macio_chips[i].of_node && (!type || macio_chips[i].type == type)) return &macio_chips[i]; child = child->parent; } return NULL; } EXPORT_SYMBOL_GPL(macio_find); static const char *macio_names[] = { "Unknown", "Grand Central", "OHare", "OHareII", "Heathrow", "Gatwick", "Paddington", "Keylargo", "Pangea", "Intrepid", "K2", "Shasta", }; struct device_node *uninorth_node; u32 __iomem *uninorth_base; static u32 uninorth_rev; static int uninorth_maj; static void __iomem *u3_ht_base; /* * For each motherboard family, we have a table of functions pointers * that handle the various features. 
*/ typedef long (*feature_call)(struct device_node *node, long param, long value); struct feature_table_entry { unsigned int selector; feature_call function; }; struct pmac_mb_def { const char* model_string; const char* model_name; int model_id; struct feature_table_entry* features; unsigned long board_flags; }; static struct pmac_mb_def pmac_mb; /* * Here are the chip specific feature functions */ #ifndef CONFIG_PPC64 static int simple_feature_tweak(struct device_node *node, int type, int reg, u32 mask, int value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, type); if (!macio) return -ENODEV; LOCK(flags); if (value) MACIO_BIS(reg, mask); else MACIO_BIC(reg, mask); (void)MACIO_IN32(reg); UNLOCK(flags); return 0; } static long ohare_htw_scc_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long chan_mask; unsigned long fcr; unsigned long flags; int htw, trans; unsigned long rmask; macio = macio_find(node, 0); if (!macio) return -ENODEV; if (of_node_name_eq(node, "ch-a")) chan_mask = MACIO_FLAG_SCCA_ON; else if (of_node_name_eq(node, "ch-b")) chan_mask = MACIO_FLAG_SCCB_ON; else return -ENODEV; htw = (macio->type == macio_heathrow || macio->type == macio_paddington || macio->type == macio_gatwick); /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */ trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE && pmac_mb.model_id != PMAC_TYPE_YIKES); if (value) { #ifdef CONFIG_ADB_PMU if ((param & 0xfff) == PMAC_SCC_IRDA) pmu_enable_irled(1); #endif /* CONFIG_ADB_PMU */ LOCK(flags); fcr = MACIO_IN32(OHARE_FCR); /* Check if scc cell need enabling */ if (!(fcr & OH_SCC_ENABLE)) { fcr |= OH_SCC_ENABLE; if (htw) { /* Side effect: this will also power up the * modem, but it's too messy to figure out on which * ports this controls the transceiver and on which * it controls the modem */ if (trans) fcr &= ~HRW_SCC_TRANS_EN_N; MACIO_OUT32(OHARE_FCR, fcr); fcr |= (rmask = HRW_RESET_SCC); 
MACIO_OUT32(OHARE_FCR, fcr); } else { fcr |= (rmask = OH_SCC_RESET); MACIO_OUT32(OHARE_FCR, fcr); } UNLOCK(flags); (void)MACIO_IN32(OHARE_FCR); mdelay(15); LOCK(flags); fcr &= ~rmask; MACIO_OUT32(OHARE_FCR, fcr); } if (chan_mask & MACIO_FLAG_SCCA_ON) fcr |= OH_SCCA_IO; if (chan_mask & MACIO_FLAG_SCCB_ON) fcr |= OH_SCCB_IO; MACIO_OUT32(OHARE_FCR, fcr); macio->flags |= chan_mask; UNLOCK(flags); if (param & PMAC_SCC_FLAG_XMON) macio->flags |= MACIO_FLAG_SCC_LOCKED; } else { if (macio->flags & MACIO_FLAG_SCC_LOCKED) return -EPERM; LOCK(flags); fcr = MACIO_IN32(OHARE_FCR); if (chan_mask & MACIO_FLAG_SCCA_ON) fcr &= ~OH_SCCA_IO; if (chan_mask & MACIO_FLAG_SCCB_ON) fcr &= ~OH_SCCB_IO; MACIO_OUT32(OHARE_FCR, fcr); if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) { fcr &= ~OH_SCC_ENABLE; if (htw && trans) fcr |= HRW_SCC_TRANS_EN_N; MACIO_OUT32(OHARE_FCR, fcr); } macio->flags &= ~(chan_mask); UNLOCK(flags); mdelay(10); #ifdef CONFIG_ADB_PMU if ((param & 0xfff) == PMAC_SCC_IRDA) pmu_enable_irled(0); #endif /* CONFIG_ADB_PMU */ } return 0; } static long ohare_floppy_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_FLOPPY_ENABLE, value); } static long ohare_mesh_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_MESH_ENABLE, value); } static long ohare_ide_enable(struct device_node *node, long param, long value) { switch(param) { case 0: /* For some reason, setting the bit in set_initial_features() * doesn't stick. I'm still investigating... --BenH. 
*/ if (value) simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IOBUS_ENABLE, 1); return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE0_ENABLE, value); case 1: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_BAY_IDE_ENABLE, value); default: return -ENODEV; } } static long ohare_ide_reset(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE0_RESET_N, !value); case 1: return simple_feature_tweak(node, macio_ohare, OHARE_FCR, OH_IDE1_RESET_N, !value); default: return -ENODEV; } } static long ohare_sleep_state(struct device_node *node, long param, long value) { struct macio_chip* macio = &macio_chips[0]; if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) return -EPERM; if (value == 1) { MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE); } else if (value == 0) { MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE); } return 0; } static long heathrow_modem_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; u8 gpio; unsigned long flags; macio = macio_find(node, macio_unknown); if (!macio) return -ENODEV; gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1; if (!value) { LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio); UNLOCK(flags); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); mdelay(250); } if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE && pmac_mb.model_id != PMAC_TYPE_YIKES) { LOCK(flags); if (value) MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N); else MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(250); } if (value) { LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); LOCK(flags); MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1); (void)MACIO_IN8(HRW_GPIO_MODEM_RESET); UNLOCK(flags); mdelay(250); } return 0; } static long 
heathrow_floppy_enable(struct device_node *node, long param, long value) { return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE, value); } static long heathrow_mesh_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, macio_unknown); if (!macio) return -ENODEV; LOCK(flags); /* Set clear mesh cell enable */ if (value) MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE); else MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE); (void)MACIO_IN32(HEATHROW_FCR); udelay(10); /* Set/Clear termination power */ if (value) MACIO_BIC(HEATHROW_MBCR, 0x04000000); else MACIO_BIS(HEATHROW_MBCR, 0x04000000); (void)MACIO_IN32(HEATHROW_MBCR); udelay(10); UNLOCK(flags); return 0; } static long heathrow_ide_enable(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE0_ENABLE, value); case 1: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value); default: return -ENODEV; } } static long heathrow_ide_reset(struct device_node *node, long param, long value) { switch(param) { case 0: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE0_RESET_N, !value); case 1: return simple_feature_tweak(node, macio_unknown, HEATHROW_FCR, HRW_IDE1_RESET_N, !value); default: return -ENODEV; } } static long heathrow_bmac_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long flags; macio = macio_find(node, 0); if (!macio) return -ENODEV; if (value) { LOCK(flags); MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE); MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(10); LOCK(flags); MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); mdelay(10); } else { LOCK(flags); MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE); UNLOCK(flags); } return 0; } static 
long heathrow_sound_enable(struct device_node *node, long param, long value) { struct macio_chip* macio; unsigned long flags; /* B&W G3 and Yikes don't support that properly (the * sound appear to never come back after being shut down). */ if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE || pmac_mb.model_id == PMAC_TYPE_YIKES) return 0; macio = macio_find(node, 0); if (!macio) return -ENODEV; if (value) { LOCK(flags); MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE); MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); UNLOCK(flags); (void)MACIO_IN32(HEATHROW_FCR); } else { LOCK(flags); MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N); MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE); UNLOCK(flags); } return 0; } static u32 save_fcr[6]; static u32 save_mbcr; static struct dbdma_regs save_dbdma[13]; static struct dbdma_regs save_alt_dbdma[13]; static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save) { int i; /* Save state & config of DBDMA channels */ for (i = 0; i < 13; i++) { volatile struct dbdma_regs __iomem * chan = (void __iomem *) (macio->base + ((0x8000+i*0x100)>>2)); save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi); save[i].cmdptr = in_le32(&chan->cmdptr); save[i].intr_sel = in_le32(&chan->intr_sel); save[i].br_sel = in_le32(&chan->br_sel); save[i].wait_sel = in_le32(&chan->wait_sel); } } static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save) { int i; /* Save state & config of DBDMA channels */ for (i = 0; i < 13; i++) { volatile struct dbdma_regs __iomem * chan = (void __iomem *) (macio->base + ((0x8000+i*0x100)>>2)); out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16); while (in_le32(&chan->status) & ACTIVE) mb(); out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi); out_le32(&chan->cmdptr, save[i].cmdptr); out_le32(&chan->intr_sel, save[i].intr_sel); out_le32(&chan->br_sel, save[i].br_sel); out_le32(&chan->wait_sel, save[i].wait_sel); } } static void heathrow_sleep(struct macio_chip *macio, int secondary) { if (secondary) { 
dbdma_save(macio, save_alt_dbdma); save_fcr[2] = MACIO_IN32(0x38); save_fcr[3] = MACIO_IN32(0x3c); } else { dbdma_save(macio, save_dbdma); save_fcr[0] = MACIO_IN32(0x38); save_fcr[1] = MACIO_IN32(0x3c); save_mbcr = MACIO_IN32(0x34); /* Make sure sound is shut down */ MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N); MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE); /* This seems to be necessary as well or the fan * keeps coming up and battery drains fast */ MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE); MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N); /* Make sure eth is down even if module or sleep * won't work properly */ MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET); } /* Make sure modem is shut down */ MACIO_OUT8(HRW_GPIO_MODEM_RESET, MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1); MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N); MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE); /* Let things settle */ (void)MACIO_IN32(HEATHROW_FCR); } static void heathrow_wakeup(struct macio_chip *macio, int secondary) { if (secondary) { MACIO_OUT32(0x38, save_fcr[2]); (void)MACIO_IN32(0x38); mdelay(1); MACIO_OUT32(0x3c, save_fcr[3]); (void)MACIO_IN32(0x38); mdelay(10); dbdma_restore(macio, save_alt_dbdma); } else { MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE); (void)MACIO_IN32(0x38); mdelay(1); MACIO_OUT32(0x3c, save_fcr[1]); (void)MACIO_IN32(0x38); mdelay(1); MACIO_OUT32(0x34, save_mbcr); (void)MACIO_IN32(0x38); mdelay(10); dbdma_restore(macio, save_dbdma); } } static long heathrow_sleep_state(struct device_node *node, long param, long value) { if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) return -EPERM; if (value == 1) { if (macio_chips[1].type == macio_gatwick) heathrow_sleep(&macio_chips[0], 1); heathrow_sleep(&macio_chips[0], 0); } else if (value == 0) { heathrow_wakeup(&macio_chips[0], 0); if (macio_chips[1].type == macio_gatwick) heathrow_wakeup(&macio_chips[0], 1); } return 0; } static long core99_scc_enable(struct device_node *node, long param, long value) { 
	struct macio_chip* macio;
	unsigned long flags;
	unsigned long chan_mask;
	u32 fcr;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* The node name selects the SCC channel; param's low 12 bits
	 * select the port mode (I2S1, IrDA, ...) */
	if (of_node_name_eq(node, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (of_node_name_eq(node, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	if (value) {
		int need_reset_scc = 0;
		int need_reset_irda = 0;

		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		/* Check if scc cell need enabling */
		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
			fcr |= KL0_SCC_CELL_ENABLE;
			need_reset_scc = 1;
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON) {
			fcr |= KL0_SCCA_ENABLE;
			/* Don't enable line drivers for I2S modem */
			if ((param & 0xfff) == PMAC_SCC_I2S1)
				fcr &= ~KL0_SCC_A_INTF_ENABLE;
			else
				fcr |= KL0_SCC_A_INTF_ENABLE;
		}
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr |= KL0_SCCB_ENABLE;
			/* Perform irda specific inits */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_SCC_B_INTF_ENABLE;
				fcr |= KL0_IRDA_ENABLE;
				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
				fcr |= KL0_IRDA_SOURCE1_SEL;
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
				need_reset_irda = 1;
			} else
				fcr |= KL0_SCC_B_INTF_ENABLE;
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		macio->flags |= chan_mask;

		/* Pulse the SCC/IrDA reset bits if the cell was just
		 * powered up; the lock is dropped around the delays */
		if (need_reset_scc) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
		}
		if (need_reset_irda) {
			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
		}
		UNLOCK(flags);
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		/* Refuse to disable a channel locked for xmon */
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~KL0_SCCA_ENABLE;
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr &= ~KL0_SCCB_ENABLE;
			/* Perform irda specific clears */
			if ((param & 0xfff) ==
			    PMAC_SCC_IRDA) {
				fcr &= ~KL0_IRDA_ENABLE;
				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
			}
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		/* Drop the SCC cell power when neither channel is in use */
		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
			fcr &= ~KL0_SCC_CELL_ENABLE;
			MACIO_OUT32(KEYLARGO_FCR0, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
	}
	return 0;
}

/* PMAC_FTR_MODEM_ENABLE handler for KeyLargo: power the internal modem
 * on/off via the KL_GPIO_MODEM_RESET GPIO and the FCR2 ALT_DATA_OUT
 * bit, with a reset pulse sequence on power-up. */
static long core99_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_keylargo)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Drive the reset GPIO as an output with the data bit clear */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		/* Assert reset before powering down */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}

	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
	}

	if (value) {
		/* Pulse the reset line high/low/high, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			   gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			   gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
	}
	return 0;
}

/* PMAC_FTR_MODEM_ENABLE handler for Pangea/Intrepid: same reset dance
 * as core99_modem_enable(), but modem power is a GPIO rather than an
 * FCR2 bit.  (body continues on the following source line) */
static long pangea_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_pangea &&
		    macio_chips[0].type != macio_intrepid)
			return -ENODEV;
		node =
		    macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Drive the reset GPIO as an output with the data bit clear */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		/* Assert reset before powering down */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}

	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_MODEM_POWER, KEYLARGO_GPIO_OUTPUT_ENABLE);
		UNLOCK(flags);
		/* NOTE(review): reads back FCR2 rather than the GPIO just
		 * written -- looks copied from core99_modem_enable();
		 * confirm against hardware docs. */
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_OUT8(KL_GPIO_MODEM_POWER, KEYLARGO_GPIO_OUTPUT_ENABLE |
			   KEYLARGO_GPIO_OUTOUT_DATA);
		UNLOCK(flags);
	}

	if (value) {
		/* Pulse the reset line high/low/high, 250ms apart */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			   gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET,
			   gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags);
		mdelay(250);
	}
	return 0;
}

/* Gate the UniNorth ATA/100 clock and, when enabling, also enable and
 * bus-master the PCI device behind `node` (rev >= 0x24 only). */
static long core99_ata100_enable(struct device_node *node, long value)
{
	unsigned long flags;
	struct pci_dev *pdev = NULL;
	u8 pbus, pid;
	int rc;

	if (uninorth_rev < 0x24)
		return -ENODEV;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	if (value) {
		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
			pdev = pci_get_domain_bus_and_slot(0, pbus, pid);
		if (pdev == NULL)
			return 0;
		rc = pci_enable_device(pdev);
		if (rc == 0)
			pci_set_master(pdev);
		pci_dev_put(pdev);
		if (rc)
			return rc;
	}
	return 0;
}

/* PMAC_FTR_IDE_ENABLE: param selects the IDE bus.
 * (body continues on the following source line) */
static long core99_ide_enable(struct device_node *node, long param, long value)
{
	/* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
	 * based ata-100
	 */
	switch(param) {
	case 0:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
	case 1:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
	case 2:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
	case 3:
		return core99_ata100_enable(node, value);
	default:
		return -ENODEV;
	}
}

/* PMAC_FTR_IDE_RESET: assert (value != 0) or release the reset line of
 * the IDE bus selected by param.  The *_RESET_N bits are active low,
 * hence the !value. */
static long core99_ide_reset(struct device_node *node, long param, long value)
{
	switch(param) {
	case 0:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
	case 1:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
	case 2:
		return simple_feature_tweak(node, macio_unknown,
			KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
	default:
		return -ENODEV;
	}
}

/* PMAC_FTR_GMAC_ENABLE: gate the UniNorth GMAC (ethernet) clock */
static long core99_gmac_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	(void)UN_IN(UNI_N_CLOCK_CNTL);
	UNLOCK(flags);
	udelay(20);

	return 0;
}

/* PMAC_FTR_GMAC_PHY_RESET: pulse the ethernet PHY reset GPIO */
static long core99_gmac_phy_reset(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
	UNLOCK(flags);
	mdelay(10);
	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
		   KEYLARGO_GPIO_OUTOUT_DATA);
	UNLOCK(flags);
	mdelay(10);

	return 0;
}

/* PMAC_FTR_SOUND_CHIP_ENABLE: only acts on Pismo and Titanium models.
 * (body continues on the following source line) */
static long core99_sound_chip_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Do a better probe code, screamer G4 desktops &
	 * iMacs can do that too, add a recalibrate  in
	 * the driver as well
	 */
	if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
	    pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
		LOCK(flags);
		if (value)
			MACIO_OUT8(KL_GPIO_SOUND_POWER, KEYLARGO_GPIO_OUTPUT_ENABLE |
				   KEYLARGO_GPIO_OUTOUT_DATA);
		else
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				   KEYLARGO_GPIO_OUTPUT_ENABLE);
		(void)MACIO_IN8(KL_GPIO_SOUND_POWER);
		UNLOCK(flags);
	}
	return 0;
}

/* PMAC_FTR_AIRPORT_ENABLE: power the AirPort (wireless) card slot up
 * or down.  The enable sequence mirrors Open Firmware's card-slot and
 * wireless init words (see comment below). */
static long core99_airport_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	int state;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Hint: we allow passing of macio itself for the sake of the
	 * sleep code
	 */
	if (node != macio->of_node &&
	    (!node->parent || node->parent != macio->of_node))
		return -ENODEV;

	/* Nothing to do when already in the requested state */
	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
	if (value == state)
		return 0;
	if (value) {
		/* This code is a reproduction of OF enable-cardslot
		 * and init-wireless methods, slightly hacked until
		 * I got it working.
		 */
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);

		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
		UNLOCK(flags);
		udelay(10);
		MACIO_OUT32(0x1c000, 0);
		mdelay(1);
		MACIO_OUT8(0x1a3e0, 0x41);
		(void)MACIO_IN8(0x1a3e0);
		udelay(10);
		LOCK(flags);
		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		UNLOCK(flags);
		mdelay(100);

		macio->flags |= MACIO_FLAG_AIRPORT_ON;
	} else {
		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
		UNLOCK(flags);

		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
	}
	return 0;
}

#ifdef CONFIG_SMP
/* PMAC_FTR_RESET_CPU for KeyLargo: pulse the soft-reset GPIO of the
 * CPU whose hardware id is `param`.  The GPIO offset comes from the
 * device tree ("soft-reset" property), with a hardcoded fallback. */
static long core99_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;
	const int dflt_reset_lines[] = {	KL_GPIO_RESET_CPU0,
						KL_GPIO_RESET_CPU1,
						KL_GPIO_RESET_CPU2,
						KL_GPIO_RESET_CPU3 };

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo)
		return -ENODEV;

	for_each_of_cpu_node(np) {
		const u32 *rst = of_get_property(np, "soft-reset", NULL);
		if (!rst)
			continue;
		if (param == of_get_cpu_hwid(np, 0)) {
			of_node_put(np);
			reset_io = *rst;
			break;
		}
	}
	/* NOTE(review): param indexes dflt_reset_lines[] without a
	 * range check -- callers are trusted to pass 0..3. */
	if (np == NULL || reset_io == 0)
		reset_io = dflt_reset_lines[param];

	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP */

/* PMAC_FTR_USB_ENABLE: power a USB cell up/down.  The cell number is
 * derived from the node's "AAPL,clock-id" property (0, 2 or 4).
 * (body continues on the following source lines) */
static long core99_usb_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio;
	unsigned long flags;
	const char *prop;
	int number;
	u32 reg;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	prop = of_get_property(node, "AAPL,clock-id", NULL);
	if (!prop)
		return -ENODEV;
	if (strncmp(prop, "usb0u048", 8) == 0)
		number = 0;
	else if (strncmp(prop, "usb1u148", 8) == 0)
		number = 2;
	else if (strncmp(prop, "usb2u248", 8) == 0)
		number = 4;
	else
		return -ENODEV;

	/* Sorry for the brute-force locking, but this is only used during
	 * sleep and the timing seem to be critical
	 */
	LOCK(flags);
	if (value) {
		/* Turn ON */
		if (number == 0) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
		} else if (number == 2) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
		} else if (number == 4) {
			MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR1);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
		}

		/* Clear the wakeup-enable bits for both ports of the
		 * cell (FCR4 for cells 0/2, FCR3 for cell 4) */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg &= ~(KL4_PORT_WAKEUP_ENABLE(number) |
				 KL4_PORT_RESUME_WAKE_EN(number) |
				 KL4_PORT_CONNECT_WAKE_EN(number) |
				 KL4_PORT_DISCONNECT_WAKE_EN(number));
			reg &= ~(KL4_PORT_WAKEUP_ENABLE(number+1) |
				 KL4_PORT_RESUME_WAKE_EN(number+1) |
				 KL4_PORT_CONNECT_WAKE_EN(number+1) |
				 KL4_PORT_DISCONNECT_WAKE_EN(number+1));
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(10);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(0) |
				 KL3_IT_PORT_RESUME_WAKE_EN(0) |
				 KL3_IT_PORT_CONNECT_WAKE_EN(0) |
				 KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
			reg &= ~(KL3_IT_PORT_WAKEUP_ENABLE(1) |
				 KL3_IT_PORT_RESUME_WAKE_EN(1) |
				 KL3_IT_PORT_CONNECT_WAKE_EN(1) |
				 KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(10);
		}
		if (macio->type == macio_intrepid) {
			/* wait for clock stopped bits to clear */
			u32 test0 = 0, test1 = 0;
			u32 status0, status1;
			int timeout = 1000;

			UNLOCK(flags);
			switch (number) {
			case 0:
				test0 = UNI_N_CLOCK_STOPPED_USB0;
				test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
				break;
			case 2:
				test0 = UNI_N_CLOCK_STOPPED_USB1;
				test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
				break;
			case 4:
				test0 = UNI_N_CLOCK_STOPPED_USB2;
				test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
				break;
			}
			do {
				if (--timeout <= 0) {
					printk(KERN_ERR "core99_usb_enable: "
					       "Timeout waiting for clocks\n");
					break;
				}
				mdelay(1);
				status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
				status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
			} while ((status0 &
				  test0) | (status1 & test1));
			LOCK(flags);
		}
	} else {
		/* Turn OFF */
		/* Re-arm every wakeup source for the cell's two ports */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg |= KL4_PORT_WAKEUP_ENABLE(number) |
			       KL4_PORT_RESUME_WAKE_EN(number) |
			       KL4_PORT_CONNECT_WAKE_EN(number) |
			       KL4_PORT_DISCONNECT_WAKE_EN(number);
			reg |= KL4_PORT_WAKEUP_ENABLE(number+1) |
			       KL4_PORT_RESUME_WAKE_EN(number+1) |
			       KL4_PORT_CONNECT_WAKE_EN(number+1) |
			       KL4_PORT_DISCONNECT_WAKE_EN(number+1);
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(1);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg |= KL3_IT_PORT_WAKEUP_ENABLE(0) |
			       KL3_IT_PORT_RESUME_WAKE_EN(0) |
			       KL3_IT_PORT_CONNECT_WAKE_EN(0) |
			       KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
			reg |= KL3_IT_PORT_WAKEUP_ENABLE(1) |
			       KL3_IT_PORT_RESUME_WAKE_EN(1) |
			       KL3_IT_PORT_CONNECT_WAKE_EN(1) |
			       KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(1);
		}
		/* Disable the cell (except on Intrepid) and suspend
		 * the pads */
		if (number == 0) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 2) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 4) {
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR1);
		}
		udelay(1);
	}
	UNLOCK(flags);

	return 0;
}

/* PMAC_FTR_1394_ENABLE: gate the UniNorth FireWire clock.
 * (body continues on the following source line) */
static long core99_firewire_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		UN_BIS(UNI_N_CLOCK_CNTL,
		       UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);
	} else {
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);
	}
	UNLOCK(flags);
	mdelay(1);
	return 0;
}

/* PMAC_FTR_1394_CABLE_POWER: drive the FireWire cable power GPIO
 * (0 when powering, 4 when unpowering, as written below). */
static long core99_firewire_cable_power(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	/* Trick: we allow NULL node */
	if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
		return -ENODEV;
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
		udelay(10);
	} else {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);
		udelay(10);
	}
	UNLOCK(flags);
	mdelay(1);
	return 0;
}

/* Enable/disable the UniNorth AACK delay (rev >= 0xd2 only).
 * NOTE(review): the on/off decision is keyed on `param`, not `value`
 * -- matches how callers invoke it, but worth confirming. */
static long intrepid_aack_delay_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	if (uninorth_rev < 0xd2)
		return -ENODEV;

	LOCK(flags);
	if (param)
		UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	else
		UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	UNLOCK(flags);

	return 0;
}

#endif /* CONFIG_PPC64 */

/* PMAC_FTR_READ_GPIO: read back the GPIO register at offset `param` */
static long core99_read_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	return MACIO_IN8(param);
}

/* PMAC_FTR_WRITE_GPIO: write the low byte of `value` to the GPIO
 * register at offset `param` */
static long core99_write_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	MACIO_OUT8(param, (u8)(value & 0xff));
	return 0;
}

#ifdef CONFIG_PPC64
/* Gate the K2 GMAC clock; also maintains k2_skiplist[0] so the node is
 * recorded as skipped while its clock is off. */
static long g5_gmac_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
		mb();
		k2_skiplist[0] = NULL;
	} else {
		k2_skiplist[0] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
	}
	UNLOCK(flags);
	mdelay(1);
	return 0;
}

/* Gate the K2 FireWire clock; mirrors g5_gmac_enable() but uses
 * k2_skiplist[1].  (signature continues on the next source line) */
static long
g5_fw_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
		mb();
		k2_skiplist[1] = NULL;
	} else {
		k2_skiplist[1] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
	}
	UNLOCK(flags);
	mdelay(1);
	return 0;
}

/* Take the U3/U4 MPIC out of reset and enable its outputs.  Only acts
 * when the node's parent is named "u3" or "u4". */
static long g5_mpic_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct device_node *parent = of_get_parent(node);
	int is_u3;

	if (parent == NULL)
		return 0;
	is_u3 = of_node_name_eq(parent, "u3") || of_node_name_eq(parent, "u4");
	of_node_put(parent);
	if (!is_u3)
		return 0;

	LOCK(flags);
	UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
	UNLOCK(flags);

	return 0;
}

/* Pulse the ethernet PHY reset GPIO, but only when the PHY child node
 * is compatible "B5221"; combo PHYs are deliberately left alone. */
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	struct device_node *phy;
	int need_reset;

	/*
	 * We must not reset the combo PHYs, only the BCM5221 found in
	 * the iMac G5.
	 */
	phy = of_get_next_child(node, NULL);
	if (!phy)
		return -ENODEV;
	need_reset = of_device_is_compatible(phy, "B5221");
	of_node_put(phy);
	if (!need_reset)
		return 0;

	/* PHY reset is GPIO 29, not in device-tree unfortunately */
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
	/* Thankfully, this is now always called at a time when we can
	 * schedule by sungem.
	 */
	msleep(10);
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
	return 0;
}

/* Power an I2S cell up/down on K2/Shasta.  The cell index comes from
 * the node name ("i2s-a", "i2s-b", "i2s-c"). */
static long g5_i2s_enable(struct device_node *node, long param, long value)
{
	/* Very crude implementation for now */
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;
	int cell;
	/* Per-cell bit sets: [0] = FCR0 bits to clear on enable,
	 * [1] = FCR1 cell/clock bits, [2] = FCR3 clock bit */
	u32 fcrs[3][3] = {
		{ 0,
		  K2_FCR1_I2S0_CELL_ENABLE |
		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE,
		  KL3_I2S0_CLK18_ENABLE
		},
		{ KL0_SCC_A_INTF_ENABLE,
		  K2_FCR1_I2S1_CELL_ENABLE |
		  K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE,
		  KL3_I2S1_CLK18_ENABLE
		},
		{ KL0_SCC_B_INTF_ENABLE,
		  SH_FCR1_I2S2_CELL_ENABLE |
		  SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE,
		  SH_FCR3_I2S2_CLK18_ENABLE
		},
	};

	if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
		return -ENODEV;
	if (strncmp(node->name, "i2s-", 4))
		return -ENODEV;
	cell = node->name[4] - 'a';
	switch(cell) {
	case 0:
	case 1:
		break;
	case 2:
		/* Third cell only exists on Shasta */
		if (macio->type == macio_shasta)
			break;
		fallthrough;
	default:
		return -ENODEV;
	}

	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]);
		MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]);
		MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]);
	} else {
		MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]);
		MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]);
		MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]);
	}
	udelay(10);
	UNLOCK(flags);

	return 0;
}

#ifdef CONFIG_SMP
/* PMAC_FTR_RESET_CPU for K2/Shasta: pulse the CPU's "soft-reset" GPIO
 * from the device tree (no fallback table here, unlike core99). */
static long g5_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
		return -ENODEV;

	for_each_of_cpu_node(np) {
		const u32 *rst = of_get_property(np, "soft-reset", NULL);
		if (!rst)
			continue;
		if (param == of_get_cpu_hwid(np, 0)) {
			of_node_put(np);
			reset_io = *rst;
			break;
		}
	}
	if (np == NULL || reset_io == 0)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP
 */

/*
 * This can be called from pmac_smp so isn't static
 *
 * This takes the second CPU off the bus on dual CPU machines
 * running UP
 */
void __init g5_phy_disable_cpu1(void)
{
	if (uninorth_maj == 3)
		UN_OUT(U3_API_PHY_CONFIG_1, 0);
}

#endif /* CONFIG_PPC64 */

#ifndef CONFIG_PPC64

#ifdef CONFIG_PM
/* GPIO and clock state saved by core99_sleep() for core99_wake_up() */
static u32 save_gpio_levels[2];
static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
static u32 save_unin_clock_ctl;

/* Shut down as many KeyLargo cells, clocks and PLLs as possible;
 * sleep_mode != 0 additionally suspends USB ref and more clocks. */
static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	if (sleep_mode) {
		mdelay(1);
		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
		(void)MACIO_IN32(KEYLARGO_FCR0);
		mdelay(1);
	}

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
				KL0_IRDA_CLK19_ENABLE);

	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
		KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
		KL1_UIDE_ENABLE);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	/* PLL2X shutdown is only safe on rev >= 2 chips */
	if (macio->rev >= 2) {
		temp |= KL3_SHUTDOWN_PLL2X;
		if (sleep_mode)
			temp |= KL3_SHUTDOWN_PLL_TOTAL;
	}

	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	if (sleep_mode)
		temp |= KL3_SHUTDOWN_PLLKW12;
	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
		| KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(1);
}

/* Pangea variant of keylargo_shutdown().
 * (signature continues on the following source line) */
static void pangea_shutdown(struct macio_chip *macio, int
			    sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_UIDE_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
		| KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(1);
}

/* Intrepid variant of keylargo_shutdown() */
static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		  KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		  KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		  KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		  KL1_EIDE0_ENABLE);
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(10);
}

/* Full macio + UniNorth sleep entry: power down stray devices, save
 * GPIO/FCR/DBDMA state, shut the chip down, then put the host bridge
 * to sleep.  (body continues on the following source lines) */
static int core99_sleep(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/* We power off the wireless slot in case it was not done
	 * by the driver. We don't power it on automatically however */
	if (macio->flags & MACIO_FLAG_AIRPORT_ON)
		core99_airport_enable(macio->of_node, 0, 0);

	/* We power off the FW cable. Should be done by the driver... */
	if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
		core99_firewire_enable(NULL, 0, 0);
		core99_firewire_cable_power(NULL, 0, 0);
	}

	/* We make sure int. modem is off (in case driver lost it) */
	if (macio->type == macio_keylargo)
		core99_modem_enable(macio->of_node, 0, 0);
	else
		pangea_modem_enable(macio->of_node, 0, 0);

	/* We make sure the sound is off as well */
	core99_sound_chip_enable(macio->of_node, 0, 0);

	/*
	 * Save various bits of KeyLargo
	 */

	/* Save the state of the various GPIOs */
	save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
	save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);

	/* Save the FCRs (MBCR only exists on KeyLargo, FCR5 only on
	 * Pangea/Intrepid) */
	if (macio->type == macio_keylargo)
		save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
	save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
	save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
	save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
	save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
	save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
	if (macio->type == macio_pangea || macio->type == macio_intrepid)
		save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);

	/* Save state & config of DBDMA channels */
	dbdma_save(macio, save_dbdma);

	/*
	 * Turn off as much as we can
	 */
	if (macio->type == macio_pangea)
		pangea_shutdown(macio, 1);
	else if (macio->type == macio_intrepid)
		intrepid_shutdown(macio, 1);
	else if (macio->type == macio_keylargo)
		keylargo_shutdown(macio, 1);

	/*
	 * Put the host bridge to sleep
	 */
	save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
	/* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it
	 * enabled !
	 */
	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
	       ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
	udelay(100);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
	mdelay(10);

	/*
	 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
	 */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIS(0x506e0, 0x00400000);
		MACIO_BIS(0x506e0, 0x80000000);
	}
	return 0;
}

/* Counterpart of core99_sleep(): wake the host bridge, then restore
 * the KeyLargo FCRs, DBDMA channels and GPIO state saved at sleep. */
static int core99_wake_up(void)
{
	struct macio_chip *macio;
	int i;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/*
	 * Wakeup the host bridge
	 */
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
	udelay(10);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
	udelay(10);

	/*
	 * Restore KeyLargo
	 */
	if (macio->type == macio_keylargo) {
		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
		(void)MACIO_IN32(KEYLARGO_MBCR);
		udelay(10);
	}
	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
	(void)MACIO_IN32(KEYLARGO_FCR0);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
	(void)MACIO_IN32(KEYLARGO_FCR1);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
	(void)MACIO_IN32(KEYLARGO_FCR2);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
	(void)MACIO_IN32(KEYLARGO_FCR3);
	udelay(10);
	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
	(void)MACIO_IN32(KEYLARGO_FCR4);
	udelay(10);
	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
		(void)MACIO_IN32(KEYLARGO_FCR5);
		udelay(10);
	}

	dbdma_restore(macio, save_dbdma);

	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);

	/* FIXME more black magic with OpenPIC ...
	 */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIC(0x506e0, 0x00400000);
		MACIO_BIC(0x506e0, 0x80000000);
	}

	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
	udelay(100);

	return 0;
}
#endif /* CONFIG_PM */

/* PMAC_FTR_SLEEP_STATE for Core99: param == 1 selects the "fake sleep"
 * used by the CPU speed switch; otherwise value 1/0 enters/leaves real
 * sleep via core99_sleep()/core99_wake_up() (CONFIG_PM only). */
static long core99_sleep_state(struct device_node *node, long param, long value)
{
	/* Param == 1 means to enter the "fake sleep" mode that is
	 * used for CPU speed switch
	 */
	if (param == 1) {
		if (value == 1) {
			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
		} else {
			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
			udelay(10);
			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
			udelay(10);
		}
		return 0;
	}
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;

#ifdef CONFIG_PM
	if (value == 1)
		return core99_sleep();
	else if (value == 0)
		return core99_wake_up();

#endif /* CONFIG_PM */
	return 0;
}

#endif /* CONFIG_PPC64 */

/* PMAC_FTR_DEVICE_CAN_WAKE: a wake-capable device was registered, so
 * promote MAY_SLEEP to CAN_SLEEP in the board flags. */
static long generic_dev_can_wake(struct device_node *node, long param, long value)
{
	/* Todo: eventually check we are really dealing with on-board
	 * video device ...
	 */
	if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
		pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
	return 0;
}

/* PMAC_FTR_GET_MB_INFO: query motherboard model id, flags or name.
 * For PMAC_MB_INFO_NAME, `value` is used as a const char ** output. */
static long generic_get_mb_info(struct device_node *node, long param, long value)
{
	switch(param) {
	case PMAC_MB_INFO_MODEL:
		return pmac_mb.model_id;
	case PMAC_MB_INFO_FLAGS:
		return pmac_mb.board_flags;
	case PMAC_MB_INFO_NAME:
		/* hack hack hack... but should work */
		*((const char **)value) = pmac_mb.model_name;
		return 0;
	}
	return -EINVAL;
}

/*
 * Table definitions
 */

/* Used on any machine
 */
static struct feature_table_entry any_features[] = {
	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
	{ PMAC_FTR_DEVICE_CAN_WAKE,	generic_dev_can_wake },
	{ 0, NULL }
};

#ifndef CONFIG_PPC64

/* OHare based motherboards. Currently, we only use these on the
 * 2400,3400 and 3500 series powerbooks.
 * Some older desktops seem
 * to have issues with turning on/off those asic cells
 */
static struct feature_table_entry ohare_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	ohare_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		ohare_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		ohare_ide_enable},
	{ PMAC_FTR_IDE_RESET,		ohare_ide_reset},
	{ PMAC_FTR_SLEEP_STATE,		ohare_sleep_state },
	{ 0, NULL }
};

/* Heathrow desktop machines (Beige G3).
 * Separated as some features couldn't be properly tested
 * and the serial port control bits appear to confuse it.
 */
static struct feature_table_entry heathrow_desktop_features[] = {
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ 0, NULL }
};

/* Heathrow based laptop, that is the Wallstreet and mainstreet
 * powerbooks.
 */
static struct feature_table_entry heathrow_laptop_features[] = {
	{ PMAC_FTR_SCC_ENABLE,		ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE,	heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE,	heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE,		heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE,		heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET,		heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE,		heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE,		heathrow_sleep_state },
	{ 0, NULL }
};

/* Paddington based machines
 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
*/ static struct feature_table_entry paddington_features[] = { { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, { PMAC_FTR_IDE_RESET, heathrow_ide_reset }, { PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable }, { PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable }, { PMAC_FTR_SLEEP_STATE, heathrow_sleep_state }, { 0, NULL } }; /* Core99 & MacRISC 2 machines (all machines released since the * iBook (included), that is all AGP machines, except pangea * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo * used on iBook2 & iMac "flow power". */ static struct feature_table_entry core99_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, core99_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, #ifdef CONFIG_PM { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, #endif #ifdef CONFIG_SMP { PMAC_FTR_RESET_CPU, core99_reset_cpu }, #endif /* CONFIG_SMP */ { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* RackMac */ static struct feature_table_entry rackmac_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { 
PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, #ifdef CONFIG_SMP { PMAC_FTR_RESET_CPU, core99_reset_cpu }, #endif /* CONFIG_SMP */ { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* Pangea features */ static struct feature_table_entry pangea_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { 0, NULL } }; /* Intrepid features */ static struct feature_table_entry intrepid_features[] = { { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, { PMAC_FTR_IDE_RESET, core99_ide_reset }, { PMAC_FTR_GMAC_ENABLE, core99_gmac_enable }, { PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset }, { PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable }, { PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable }, { PMAC_FTR_USB_ENABLE, core99_usb_enable }, { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, { PMAC_FTR_READ_GPIO, core99_read_gpio }, { PMAC_FTR_WRITE_GPIO, core99_write_gpio }, { PMAC_FTR_AACK_DELAY_ENABLE, intrepid_aack_delay_enable }, { 0, NULL } }; #else /* CONFIG_PPC64 */ /* G5 features */ static struct feature_table_entry g5_features[] = { { 
 PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
	{ PMAC_FTR_GMAC_PHY_RESET,	g5_eth_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	g5_i2s_enable },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		g5_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO,		core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		core99_write_gpio },
	{ 0, NULL }
};

#endif /* CONFIG_PPC64 */

/* Motherboard identification table: maps the device-tree "model" (or
 * "compatible") string to a model id, display name, feature table and
 * board flags. Scanned in order by probe_motherboard(), exact-match
 * first, so keep related entries together.
 */
static struct pmac_mb_def pmac_mb_defs[] = {
#ifndef CONFIG_PPC64
	/*
	 * Desktops
	 */
	{ "AAPL,8500", "PowerMac 8500/8600", PMAC_TYPE_PSURGE, NULL, 0 },
	{ "AAPL,9500", "PowerMac 9500/9600", PMAC_TYPE_PSURGE, NULL, 0 },
	{ "AAPL,7200", "PowerMac 7200", PMAC_TYPE_PSURGE, NULL, 0 },
	{ "AAPL,7300", "PowerMac 7200/7300", PMAC_TYPE_PSURGE, NULL, 0 },
	{ "AAPL,7500", "PowerMac 7500", PMAC_TYPE_PSURGE, NULL, 0 },
	{ "AAPL,ShinerESB", "Apple Network Server", PMAC_TYPE_ANS, NULL, 0 },
	{ "AAPL,e407", "Alchemy", PMAC_TYPE_ALCHEMY, NULL, 0 },
	{ "AAPL,e411", "Gazelle", PMAC_TYPE_GAZELLE, NULL, 0 },
	{ "AAPL,Gossamer", "PowerMac G3 (Gossamer)",
	  PMAC_TYPE_GOSSAMER, heathrow_desktop_features, 0 },
	{ "AAPL,PowerMac G3", "PowerMac G3 (Silk)",
	  PMAC_TYPE_SILK, heathrow_desktop_features, 0 },
	{ "PowerMac1,1", "Blue&White G3",
	  PMAC_TYPE_YOSEMITE, paddington_features, 0 },
	{ "PowerMac1,2", "PowerMac G4 PCI Graphics",
	  PMAC_TYPE_YIKES, paddington_features, 0 },
	{ "PowerMac2,1", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 },
	{ "PowerMac2,2", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 },
	{ "PowerMac3,1", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_OLD_CORE99 },
	{ "PowerMac3,2", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 },
	{ "PowerMac3,3", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 },
	{ "PowerMac3,4", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP },
	{ "PowerMac3,5", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP },
	{ "PowerMac3,6", "PowerMac G4 Windtunnel",
	  PMAC_TYPE_WINDTUNNEL, core99_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "PowerMac4,1", "iMac \"Flower Power\"",
	  PMAC_TYPE_PANGEA_IMAC, pangea_features,
	  PMAC_MB_MAY_SLEEP },
	{ "PowerMac4,2", "Flat panel iMac",
	  PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features,
	  PMAC_MB_CAN_SLEEP },
	{ "PowerMac4,4", "eMac",
	  PMAC_TYPE_EMAC, core99_features,
	  PMAC_MB_MAY_SLEEP },
	{ "PowerMac5,1", "PowerMac G4 Cube",
	  PMAC_TYPE_CUBE, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99 },
	{ "PowerMac6,1", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "PowerMac6,3", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "PowerMac6,4", "eMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "PowerMac10,1", "Mac mini",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "PowerMac10,2", "Mac mini (Late 2005)",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP, },
	{ "iMac,1", "iMac (first generation)",
	  PMAC_TYPE_ORIG_IMAC, paddington_features, 0 },

	/*
	 * Xserve's
	 */
	{ "RackMac1,1", "XServe",
	  PMAC_TYPE_RACKMAC, rackmac_features, 0, },
	{ "RackMac1,2", "XServe rev. 2",
	  PMAC_TYPE_RACKMAC, rackmac_features, 0, },

	/*
	 * Laptops
	 */
	{ "AAPL,3400/2400", "PowerBook 3400",
	  PMAC_TYPE_HOOPER, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE },
	{ "AAPL,3500", "PowerBook 3500",
	  PMAC_TYPE_KANGA, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE },
	{ "AAPL,PowerBook1998", "PowerBook Wallstreet",
	  PMAC_TYPE_WALLSTREET, heathrow_laptop_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE },
	{ "PowerBook1,1", "PowerBook 101 (Lombard)",
	  PMAC_TYPE_101_PBOOK, paddington_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE },
	{ "PowerBook2,1", "iBook (first generation)",
	  PMAC_TYPE_ORIG_IBOOK, core99_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE },
	{ "PowerBook2,2", "iBook FireWire",
	  PMAC_TYPE_FW_IBOOK, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE },
	{ "PowerBook3,1", "PowerBook Pismo",
	  PMAC_TYPE_PISMO, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE },
	{ "PowerBook3,2", "PowerBook Titanium",
	  PMAC_TYPE_TITANIUM, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook3,3", "PowerBook Titanium II",
	  PMAC_TYPE_TITANIUM2, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook3,4", "PowerBook Titanium III",
	  PMAC_TYPE_TITANIUM3, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook3,5", "PowerBook Titanium IV",
	  PMAC_TYPE_TITANIUM4, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook4,1", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook4,2", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook4,3", "iBook 2 rev. 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE },
	{ "PowerBook5,1", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,2", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,3", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,4", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,5", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,6", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,7", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook5,8", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, },
	{ "PowerBook5,9", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, },
	{ "PowerBook6,1", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,2", "PowerBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,3", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,4", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,5", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,7", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
	{ "PowerBook6,8", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, },
#else /* CONFIG_PPC64 */
	{ "PowerMac7,2", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features, 0, },
/* NOTE(review): this inner #ifdef is redundant inside the #else branch of
 * #ifndef CONFIG_PPC64 above; kept as-is to stay byte-identical.
 */
#ifdef CONFIG_PPC64
	{ "PowerMac7,3", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features, 0, },
	{ "PowerMac8,1", "iMac G5",
	  PMAC_TYPE_IMAC_G5, g5_features, 0, },
	{ "PowerMac9,1", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, },
	{ "PowerMac11,2", "PowerMac G5 Dual Core",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, },
	{ "PowerMac12,1", "iMac G5 (iSight)",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features, 0, },
	{ "RackMac3,1", "XServe G5",
	  PMAC_TYPE_XSERVE_G5, g5_features, 0, },
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_PPC64 */
};

/*
 * The toplevel feature_call callback
 */

/* Dispatch a PMAC_FTR_* selector to the handler registered in the current
 * board's feature table, falling back to any_features. Variadic arguments
 * are (struct device_node *node, long param, long value).
 */
long pmac_do_feature_call(unsigned int selector, ...)
{
	struct device_node *node;
	long param, value;
	int i;
	feature_call func = NULL;
	va_list args;

	if (pmac_mb.features)
		for (i=0; pmac_mb.features[i].function; i++)
			if (pmac_mb.features[i].selector == selector) {
				func = pmac_mb.features[i].function;
				break;
			}
	if (!func)
		for (i=0; any_features[i].function; i++)
			if (any_features[i].selector == selector) {
				func = any_features[i].function;
				break;
			}
	if (!func)
		return -ENODEV;

	va_start(args, selector);
	node = (struct device_node*)va_arg(args, void*);
	param = va_arg(args, long);
	value = va_arg(args, long);
	va_end(args);

	return func(node, param, value);
}

/* Identify the motherboard and fill in pmac_mb accordingly. */
static int __init probe_motherboard(void)
{
	int i;
	struct macio_chip *macio = &macio_chips[0];
	const char *model = NULL;
	struct device_node *dt;
	int ret = 0;

	/* Lookup known motherboard type in device-tree.
First try an * exact match on the "model" property, then try a "compatible" * match is none is found. */ dt = of_find_node_by_name(NULL, "device-tree"); if (dt != NULL) model = of_get_property(dt, "model", NULL); for(i=0; model && i<ARRAY_SIZE(pmac_mb_defs); i++) { if (strcmp(model, pmac_mb_defs[i].model_string) == 0) { pmac_mb = pmac_mb_defs[i]; goto found; } } for(i=0; i<ARRAY_SIZE(pmac_mb_defs); i++) { if (of_machine_is_compatible(pmac_mb_defs[i].model_string)) { pmac_mb = pmac_mb_defs[i]; goto found; } } /* Fallback to selection depending on mac-io chip type */ switch(macio->type) { #ifndef CONFIG_PPC64 case macio_grand_central: pmac_mb.model_id = PMAC_TYPE_PSURGE; pmac_mb.model_name = "Unknown PowerSurge"; break; case macio_ohare: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE; pmac_mb.model_name = "Unknown OHare-based"; break; case macio_heathrow: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW; pmac_mb.model_name = "Unknown Heathrow-based"; pmac_mb.features = heathrow_desktop_features; break; case macio_paddington: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON; pmac_mb.model_name = "Unknown Paddington-based"; pmac_mb.features = paddington_features; break; case macio_keylargo: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99; pmac_mb.model_name = "Unknown Keylargo-based"; pmac_mb.features = core99_features; break; case macio_pangea: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA; pmac_mb.model_name = "Unknown Pangea-based"; pmac_mb.features = pangea_features; break; case macio_intrepid: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID; pmac_mb.model_name = "Unknown Intrepid-based"; pmac_mb.features = intrepid_features; break; #else /* CONFIG_PPC64 */ case macio_keylargo2: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2; pmac_mb.model_name = "Unknown K2-based"; pmac_mb.features = g5_features; break; case macio_shasta: pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA; pmac_mb.model_name = "Unknown Shasta-based"; pmac_mb.features = g5_features; break; #endif /* CONFIG_PPC64 */ default: ret 
= -ENODEV; goto done; } found: #ifndef CONFIG_PPC64 /* Fixup Hooper vs. Comet */ if (pmac_mb.model_id == PMAC_TYPE_HOOPER) { u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4); if (!mach_id_ptr) { ret = -ENODEV; goto done; } /* Here, I used to disable the media-bay on comet. It * appears this is wrong, the floppy connector is actually * a kind of media-bay and works with the current driver. */ if (__raw_readl(mach_id_ptr) & 0x20000000UL) pmac_mb.model_id = PMAC_TYPE_COMET; iounmap(mach_id_ptr); } /* Set default value of powersave_nap on machines that support it. * It appears that uninorth rev 3 has a problem with it, we don't * enable it on those. In theory, the flush-on-lock property is * supposed to be set when not supported, but I'm not very confident * that all Apple OF revs did it properly, I do it the paranoid way. */ if (uninorth_base && uninorth_rev > 3) { struct device_node *np; for_each_of_cpu_node(np) { int cpu_count = 1; /* Nap mode not supported on SMP */ if (of_property_read_bool(np, "flush-on-lock") || (cpu_count > 1)) { powersave_nap = 0; of_node_put(np); break; } cpu_count++; powersave_nap = 1; } } if (powersave_nap) printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n"); /* On CPUs that support it (750FX), lowspeed by default during * NAP mode */ powersave_lowspeed = 1; #else /* CONFIG_PPC64 */ powersave_nap = 1; #endif /* CONFIG_PPC64 */ /* Check for "mobile" machine */ if (model && (strncmp(model, "PowerBook", 9) == 0 || strncmp(model, "iBook", 5) == 0)) pmac_mb.board_flags |= PMAC_MB_MOBILE; printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name); done: of_node_put(dt); return ret; } /* Initialize the Core99 UniNorth host bridge and memory controller */ static void __init probe_uninorth(void) { struct resource res; unsigned long actrl; /* Locate core99 Uni-N */ uninorth_node = of_find_node_by_name(NULL, "uni-n"); uninorth_maj = 1; /* Locate G5 u3 */ if (uninorth_node == NULL) { uninorth_node = of_find_node_by_name(NULL, "u3"); 
uninorth_maj = 3; } /* Locate G5 u4 */ if (uninorth_node == NULL) { uninorth_node = of_find_node_by_name(NULL, "u4"); uninorth_maj = 4; } if (uninorth_node == NULL) { uninorth_maj = 0; return; } if (of_address_to_resource(uninorth_node, 0, &res)) return; uninorth_base = ioremap(res.start, 0x40000); if (uninorth_base == NULL) return; uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); if (uninorth_maj == 3 || uninorth_maj == 4) { u3_ht_base = ioremap(res.start + U3_HT_CONFIG_BASE, 0x1000); if (u3_ht_base == NULL) { iounmap(uninorth_base); return; } } printk(KERN_INFO "Found %s memory controller & host bridge" " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" : uninorth_maj == 4 ? "U4" : "UniNorth", (unsigned int)res.start, uninorth_rev); printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); /* Set the arbitrer QAck delay according to what Apple does */ if (uninorth_rev < 0x11) { actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK; actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 : UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT; UN_OUT(UNI_N_ARB_CTRL, actrl); } /* Some more magic as done by them in recent MacOS X on UniNorth * revs 1.5 to 2.O and Pangea. 
Seem to toggle the UniN Maxbus/PCI * memory timeout */ if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0) UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff); } static void __init probe_one_macio(const char *name, const char *compat, int type) { struct device_node* node; int i; volatile u32 __iomem *base; const u32 *addrp, *revp; phys_addr_t addr; u64 size; for_each_node_by_name(node, name) { if (!compat) break; if (of_device_is_compatible(node, compat)) break; } if (!node) return; for(i=0; i<MAX_MACIO_CHIPS; i++) { if (!macio_chips[i].of_node) break; if (macio_chips[i].of_node == node) goto out_put; } if (i >= MAX_MACIO_CHIPS) { printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); printk(KERN_ERR "pmac_feature: %pOF skipped\n", node); goto out_put; } addrp = of_get_pci_address(node, 0, &size, NULL); if (addrp == NULL) { printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n", node); goto out_put; } addr = of_translate_address(node, addrp); if (addr == 0) { printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n", node); goto out_put; } base = ioremap(addr, (unsigned long)size); if (!base) { printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n", node); goto out_put; } if (type == macio_keylargo || type == macio_keylargo2) { const u32 *did = of_get_property(node, "device-id", NULL); if (*did == 0x00000025) type = macio_pangea; if (*did == 0x0000003e) type = macio_intrepid; if (*did == 0x0000004f) type = macio_shasta; } macio_chips[i].of_node = node; macio_chips[i].type = type; macio_chips[i].base = base; macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON; macio_chips[i].name = macio_names[type]; revp = of_get_property(node, "revision-id", NULL); if (revp) macio_chips[i].rev = *revp; printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", macio_names[type], macio_chips[i].rev, macio_chips[i].base); return; out_put: of_node_put(node); } static int __init probe_macios(void) { /* Warning, 
 ordering is important */
	probe_one_macio("gc", NULL, macio_grand_central);
	probe_one_macio("ohare", NULL, macio_ohare);
	probe_one_macio("pci106b,7", NULL, macio_ohareII);
	probe_one_macio("mac-io", "keylargo", macio_keylargo);
	probe_one_macio("mac-io", "paddington", macio_paddington);
	probe_one_macio("mac-io", "gatwick", macio_gatwick);
	probe_one_macio("mac-io", "heathrow", macio_heathrow);
	probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);

	/* Make sure the "main" macio chip appear first */
	if (macio_chips[0].type == macio_gatwick
	    && macio_chips[1].type == macio_heathrow) {
		struct macio_chip temp = macio_chips[0];
		macio_chips[0] = macio_chips[1];
		macio_chips[1] = temp;
	}
	if (macio_chips[0].type == macio_ohareII
	    && macio_chips[1].type == macio_ohare) {
		struct macio_chip temp = macio_chips[0];
		macio_chips[0] = macio_chips[1];
		macio_chips[1] = temp;
	}
	macio_chips[0].lbus.index = 0;
	macio_chips[1].lbus.index = 1;

	return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
}

/* Shut down a serial port node at boot: detects IrDA vs modem vs plain
 * async from "slot-names"/"AAPL,connector" and disables the port (and
 * modem power, when applicable) via the feature calls.
 */
static void __init
initial_serial_shutdown(struct device_node *np)
{
	int len;
	const struct slot_names_prop {
		int	count;
		char	name[1];
	} *slots;
	const char *conn;
	int port_type = PMAC_SCC_ASYNC;
	int modem = 0;

	slots = of_get_property(np, "slot-names", &len);
	conn = of_get_property(np, "AAPL,connector", &len);
	if (conn && (strcmp(conn, "infrared") == 0))
		port_type = PMAC_SCC_IRDA;
	else if (of_device_is_compatible(np, "cobalt"))
		modem = 1;
	else if (slots && slots->count > 0) {
		if (strcmp(slots->name, "IrDA") == 0)
			port_type = PMAC_SCC_IRDA;
		else if (strcmp(slots->name, "Modem") == 0)
			modem = 1;
	}
	if (modem)
		pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
	pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
}

/* One-shot hardware bring-up/quiesce performed at boot: enables cells that
 * PCI probing needs (GMAC, FireWire, ATA-100), switches off AirPort, sound
 * and serial ports, and applies board-specific quirks. Order of operations
 * matters here.
 */
static void __init set_initial_features(void)
{
	struct device_node *np;

	/* That hack appears to be necessary for some StarMax motherboards
	 * but I'm not too sure it was audited for side-effects on other
	 * ohare based machines...
	 * Since I still have difficulties figuring the right way to
	 * differentiate them all and since that hack was there for a long
	 * time, I'll keep it around
	 */
	if (macio_chips[0].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[0];
		np = of_find_node_by_name(NULL, "via-pmu");
		if (np)
			MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
		else
			MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
		of_node_put(np);
	} else if (macio_chips[1].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[1];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	}

#ifdef CONFIG_PPC64
	if (macio_chips[0].type == macio_keylargo2 ||
	    macio_chips[0].type == macio_shasta) {
#ifndef CONFIG_SMP
		/* On SMP machines running UP, we have the second CPU eating
		 * bus cycles. We need to take it off the bus. This is done
		 * from pmac_smp for SMP kernels running on one CPU
		 */
		np = of_find_node_by_type(NULL, "cpu");
		if (np != NULL)
			np = of_find_node_by_type(np, "cpu");
		if (np != NULL) {
			g5_phy_disable_cpu1();
			of_node_put(np);
		}
#endif /* CONFIG_SMP */
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		for_each_node_by_name(np, "ethernet")
			if (of_device_is_compatible(np, "K2-GMAC"))
				g5_gmac_enable(np, 0, 1);

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a batter way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		for_each_node_by_name(np, "firewire") {
			if (of_device_is_compatible(np, "pci106b,5811")) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				g5_fw_enable(np, 0, 1);
			}
		}
	}
#else /* CONFIG_PPC64 */

	if (macio_chips[0].type == macio_keylargo ||
	    macio_chips[0].type == macio_pangea ||
	    macio_chips[0].type == macio_intrepid) {
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		for_each_node_by_name(np, "ethernet") {
			if (np->parent
			    && of_device_is_compatible(np->parent, "uni-north")
			    && of_device_is_compatible(np, "gmac"))
				core99_gmac_enable(np, 0, 1);
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a batter way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		for_each_node_by_name(np, "firewire") {
			if (np->parent
			    && of_device_is_compatible(np->parent, "uni-north")
			    && (of_device_is_compatible(np, "pci106b,18") ||
			        of_device_is_compatible(np, "pci106b,30") ||
			        of_device_is_compatible(np, "pci11c1,5811"))) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				core99_firewire_enable(np, 0, 1);
			}
		}

		/* Enable ATA-100 before PCI probe. */
		for_each_node_by_name(np, "ata-6") {
			if (np->parent
			    && of_device_is_compatible(np->parent, "uni-north")
			    && of_device_is_compatible(np, "kauai-ata")) {
				core99_ata100_enable(np, 1);
			}
		}

		/* Switch airport off */
		for_each_node_by_name(np, "radio") {
			if (np->parent == macio_chips[0].of_node) {
				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
				core99_airport_enable(np, 0, 0);
			}
		}
	}

	/* On all machines that support sound PM, switch sound off */
	if (macio_chips[0].of_node)
		pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
				     macio_chips[0].of_node, 0, 0);

	/* While on some desktop G3s, we turn it back on */
	if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
		&& (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
		    pmac_mb.model_id == PMAC_TYPE_SILK)) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
	}

#endif /* CONFIG_PPC64 */

	/* On all machines, switch modem & serial ports off */
	for_each_node_by_name(np, "ch-a")
		initial_serial_shutdown(np);
	for_each_node_by_name(np, "ch-b")
		initial_serial_shutdown(np);
}

void __init
pmac_feature_init(void)
{
	/* Detect the UniNorth memory
 controller */
	probe_uninorth();

	/* Probe mac-io controllers */
	if (probe_macios()) {
		printk(KERN_WARNING "No mac-io chip found\n");
		return;
	}

	/* Probe machine type */
	if (probe_motherboard())
		printk(KERN_WARNING "Unknown PowerMac !\n");

	/* Set some initial features (turn off some chips that will
	 * be later turned on)
	 */
	set_initial_features();
}

#if 0
/* Pretty-print an HT link's width and frequency from its config/freq
 * registers (debug helper, compiled out).
 */
static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
{
	int	freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
	int	bits[8] = { 8,16,0,32,2,4,0,0 };
	int	freq = (frq >> 8) & 0xf;

	if (freqs[freq] == 0)
		printk("%s: Unknown HT link frequency %x\n", name, freq);
	else
		printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
		       name, freqs[freq],
		       bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
}

/* Dump the U3 HyperTransport link and PCI-X bridge up/down link speeds
 * (debug helper, compiled out).
 */
void __init pmac_check_ht_link(void)
{
	u32	ufreq, freq, ucfg, cfg;
	struct device_node *pcix_node;
	u8	px_bus, px_devfn;
	struct pci_controller *px_hose;

	(void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND);
	ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG);
	ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ);
	dump_HT_speeds("U3 HyperTransport", cfg, freq);

	pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
	if (pcix_node == NULL) {
		printk("No PCI-X bridge found\n");
		return;
	}
	if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
		printk("PCI-X bridge found but not matched to pci\n");
		return;
	}
	px_hose = pci_find_hose_for_OF_device(pcix_node);
	if (px_hose == NULL) {
		printk("PCI-X bridge found but not matched to host\n");
		return;
	}
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
	dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
	dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
}
#endif /* 0 */

/*
 * Early video resume hook
 */

static void (*pmac_early_vresume_proc)(void *data);
static void *pmac_early_vresume_data;

/* Register (or clear, with proc == NULL) a callback invoked early during
 * resume, before the console is back. PowerMac-only.
 */
void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
{
	if (!machine_is(powermac))
		return;
	preempt_disable();
	pmac_early_vresume_proc = proc;
	pmac_early_vresume_data = data;
	preempt_enable();
}
EXPORT_SYMBOL(pmac_set_early_video_resume);

/* Invoke the registered early-video-resume callback, if any. */
void pmac_call_early_video_resume(void)
{
	if (pmac_early_vresume_proc)
		pmac_early_vresume_proc(pmac_early_vresume_data);
}

/*
 * AGP related suspend/resume code
 */

static struct pci_dev *pmac_agp_bridge;
static int (*pmac_agp_suspend)(struct pci_dev *bridge);
static int (*pmac_agp_resume)(struct pci_dev *bridge);

/* Register AGP bridge suspend/resume callbacks; passing both as NULL
 * unregisters the previously registered bridge.
 */
void pmac_register_agp_pm(struct pci_dev *bridge,
			  int (*suspend)(struct pci_dev *bridge),
			  int (*resume)(struct pci_dev *bridge))
{
	if (suspend || resume) {
		pmac_agp_bridge = bridge;
		pmac_agp_suspend = suspend;
		pmac_agp_resume = resume;
		return;
	}
	if (bridge != pmac_agp_bridge)
		return;
	pmac_agp_suspend = pmac_agp_resume = NULL;
	return;
}
EXPORT_SYMBOL(pmac_register_agp_pm);

/* Suspend the AGP bridge if `dev` sits on the same bus as it. */
void pmac_suspend_agp_for_card(struct pci_dev *dev)
{
	if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
		return;
	if (pmac_agp_bridge->bus != dev->bus)
		return;
	pmac_agp_suspend(pmac_agp_bridge);
}
EXPORT_SYMBOL(pmac_suspend_agp_for_card);

/* Resume the AGP bridge if `dev` sits on the same bus as it. */
void pmac_resume_agp_for_card(struct pci_dev *dev)
{
	if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
		return;
	if (pmac_agp_bridge->bus != dev->bus)
		return;
	pmac_agp_resume(pmac_agp_bridge);
}
EXPORT_SYMBOL(pmac_resume_agp_for_card);

/* Report the detected UniNorth major revision (0 = none found). */
int pmac_get_uninorth_variant(void)
{
	return uninorth_maj;
}
linux-master
arch/powerpc/platforms/powermac/feature.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PowerMac "platform function" core: parses and executes the byte-coded
 * "platform-do-*" function scripts found in Apple device-tree properties,
 * dispatching each opcode to driver-supplied handlers.
 *
 * FIXME: Properly make this race free with refcounting etc...
 *
 * FIXME: LOCKING !!!
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <asm/pmac_pfunc.h>

/* Debug */
#define LOG_PARSE(fmt...)
#define LOG_ERROR(fmt...)	printk(fmt)
#define LOG_BLOB(t,b,c)

#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...)	printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Command numbers -- opcodes of the byte-code interpreted below.
 * Each opcode's operands are 32-bit big-endian words (smaller values
 * are carried in the low bits of a full word), optionally followed by
 * byte blobs whose lengths are given by preceding words.
 */
#define PMF_CMD_LIST			0
#define PMF_CMD_WRITE_GPIO		1
#define PMF_CMD_READ_GPIO		2
#define PMF_CMD_WRITE_REG32		3
#define PMF_CMD_READ_REG32		4
#define PMF_CMD_WRITE_REG16		5
#define PMF_CMD_READ_REG16		6
#define PMF_CMD_WRITE_REG8		7
#define PMF_CMD_READ_REG8		8
#define PMF_CMD_DELAY			9
#define PMF_CMD_WAIT_REG32		10
#define PMF_CMD_WAIT_REG16		11
#define PMF_CMD_WAIT_REG8		12
#define PMF_CMD_READ_I2C		13
#define PMF_CMD_WRITE_I2C		14
#define PMF_CMD_RMW_I2C			15
#define PMF_CMD_GEN_I2C			16
#define PMF_CMD_SHIFT_BYTES_RIGHT	17
#define PMF_CMD_SHIFT_BYTES_LEFT	18
#define PMF_CMD_READ_CFG		19
#define PMF_CMD_WRITE_CFG		20
#define PMF_CMD_RMW_CFG			21
#define PMF_CMD_READ_I2C_SUBADDR	22
#define PMF_CMD_WRITE_I2C_SUBADDR	23
#define PMF_CMD_SET_I2C_MODE		24
#define PMF_CMD_RMW_I2C_SUBADDR		25
#define PMF_CMD_READ_REG32_MASK_SHR_XOR	26
#define PMF_CMD_READ_REG16_MASK_SHR_XOR	27
#define PMF_CMD_READ_REG8_MASK_SHR_XOR	28
#define PMF_CMD_WRITE_REG32_SHL_MASK	29
#define PMF_CMD_WRITE_REG16_SHL_MASK	30
#define PMF_CMD_WRITE_REG8_SHL_MASK	31
#define PMF_CMD_MASK_AND_COMPARE	32
#define PMF_CMD_COUNT			33

/* This structure holds the state of the parser while walking through
 * a function definition
 */
struct pmf_cmd {
	const void		*cmdptr;	/* current read position */
	const void		*cmdend;	/* one past the last byte */
	struct pmf_function	*func;		/* function being run/parsed */
	void			*instdata;	/* handler "begin" cookie */
	struct pmf_args		*args;		/* caller-supplied arguments */
	int			error;		/* sticky out-of-data flag */
};

#if 0 /* Debug output */
static void print_blob(const char *title, const void *blob, int bytes)
{
	printk("%s", title);
	while(bytes--) {
		printk("%02x ", *((u8 *)blob));
		blob += 1;
	}
	printk("\n");
}
#endif

/*
 * Parser helpers
 */

/* Fetch the next 32-bit word from the command stream. On underrun the
 * sticky cmd->error flag is set and 0 is returned; callers keep going
 * and the error is checked by PMF_PARSE_CALL / pmf_parse_one.
 */
static u32 pmf_next32(struct pmf_cmd *cmd)
{
	u32 value;
	if ((cmd->cmdend - cmd->cmdptr) < 4) {
		cmd->error = 1;
		return 0;
	}
	value = *((u32 *)cmd->cmdptr);
	cmd->cmdptr += 4;
	return value;
}

/* Fetch a pointer to the next "count" raw bytes of the command stream,
 * advancing past them. Sets cmd->error and returns NULL on underrun.
 */
static const void* pmf_next_blob(struct pmf_cmd *cmd, int count)
{
	const void *value;
	if ((cmd->cmdend - cmd->cmdptr) < count) {
		cmd->error = 1;
		return NULL;
	}
	value = cmd->cmdptr;
	cmd->cmdptr += count;
	return value;
}

/*
 * Individual command parsers
 */

/* Common tail of every parser: bail on stream underrun, succeed silently
 * when only parsing (handlers == NULL, initial length-measuring pass),
 * otherwise dispatch to the driver handler if it provides this op.
 */
#define PMF_PARSE_CALL(name, cmd, handlers, p...) \
	do { \
		if (cmd->error) \
			return -ENXIO; \
		if (handlers == NULL) \
			return 0; \
		if (handlers->name) \
			return handlers->name(cmd->func, cmd->instdata, \
					      cmd->args, p); \
		return -1; \
	} while(0) \

static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u8 value = (u8)pmf_next32(cmd);
	u8 mask = (u8)pmf_next32(cmd);

	LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask);

	PMF_PARSE_CALL(write_gpio, cmd, h, value, mask);
}

static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u8 mask = (u8)pmf_next32(cmd);
	int rshift = (int)pmf_next32(cmd);
	u8 xor = (u8)pmf_next32(cmd);

	LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n",
		  mask, rshift, xor);

	PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor);
}

static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 value = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask);
}

static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset);

	PMF_PARSE_CALL(read_reg32, cmd, h, offset);
}

static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u16 value = (u16)pmf_next32(cmd);
	u16 mask = (u16)pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask);
}

static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset);

	PMF_PARSE_CALL(read_reg16, cmd, h, offset);
}

static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	/* NOTE(review): the casts below say (u16) where (u8) was presumably
	 * intended (cf. write_reg16 above); harmless, since assignment to a
	 * u8 truncates to 8 bits anyway -- left as-is.
	 */
	u8 value = (u16)pmf_next32(cmd);
	u8 mask = (u16)pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask);
}

static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset);

	PMF_PARSE_CALL(read_reg8, cmd, h, offset);
}

static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 duration = pmf_next32(cmd);

	LOG_PARSE("pmf: delay(duration: %d us)\n", duration);

	PMF_PARSE_CALL(delay, cmd, h, duration);
}

static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 value = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);

	LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask);
}

static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u16 value = (u16)pmf_next32(cmd);
	u16 mask = (u16)pmf_next32(cmd);

	LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask);
}

static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u8 value = (u8)pmf_next32(cmd);
	u8 mask = (u8)pmf_next32(cmd);

	LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x,mask: %02x)\n",
		  offset, value, mask);

	PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask);
}

static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 bytes = pmf_next32(cmd);

	LOG_PARSE("pmf: read_i2c(bytes: %ud)\n", bytes);

	PMF_PARSE_CALL(read_i2c, cmd, h, bytes);
}

static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 bytes = pmf_next32(cmd);
	const void *blob = pmf_next_blob(cmd, bytes);

	LOG_PARSE("pmf: write_i2c(bytes: %ud) ...\n", bytes);
	LOG_BLOB("pmf: data: \n", blob, bytes);

	PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob);
}

static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 maskbytes = pmf_next32(cmd);
	u32 valuesbytes = pmf_next32(cmd);
	u32 totalbytes = pmf_next32(cmd);
	const void *maskblob = pmf_next_blob(cmd, maskbytes);
	const void *valuesblob = pmf_next_blob(cmd, valuesbytes);

	LOG_PARSE("pmf: rmw_i2c(maskbytes: %ud, valuebytes: %ud, "
		  "totalbytes: %d) ...\n",
		  maskbytes, valuesbytes, totalbytes);
	LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
	LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);

	PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes,
		       maskblob, valuesblob);
}

static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 bytes = pmf_next32(cmd);

	LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %ud)\n", offset, bytes);

	PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes);
}


static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 bytes = pmf_next32(cmd);
	const void *blob = pmf_next_blob(cmd, bytes);

	LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %ud)\n", offset, bytes);
	LOG_BLOB("pmf: data: \n", blob, bytes);

	PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob);
}

static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 maskbytes = pmf_next32(cmd);
	u32 valuesbytes = pmf_next32(cmd);
	u32 totalbytes = pmf_next32(cmd);
	const void *maskblob = pmf_next_blob(cmd, maskbytes);
	const void *valuesblob = pmf_next_blob(cmd, valuesbytes);

	LOG_PARSE("pmf: rmw_cfg(maskbytes: %ud, valuebytes: %ud,"
		  " totalbytes: %d) ...\n",
		  maskbytes, valuesbytes, totalbytes);
	LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
	LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);

	PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes,
		       totalbytes, maskblob, valuesblob);
}


static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u8 subaddr = (u8)pmf_next32(cmd);
	u32 bytes = pmf_next32(cmd);

	LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %ud)\n",
		  subaddr, bytes);

	PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes);
}

static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u8 subaddr = (u8)pmf_next32(cmd);
	u32 bytes = pmf_next32(cmd);
	const void *blob = pmf_next_blob(cmd, bytes);

	LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %ud) ...\n",
		  subaddr, bytes);
	LOG_BLOB("pmf: data: \n", blob, bytes);

	PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob);
}

static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u32 mode = pmf_next32(cmd);

	LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode);

	PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode);
}


static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h)
{
	u8 subaddr = (u8)pmf_next32(cmd);
	u32 maskbytes = pmf_next32(cmd);
	u32 valuesbytes = pmf_next32(cmd);
	u32 totalbytes = pmf_next32(cmd);
	const void *maskblob = pmf_next_blob(cmd, maskbytes);
	const void *valuesblob = pmf_next_blob(cmd, valuesbytes);

	LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %ud, valuebytes: %ud"
		  ", totalbytes: %d) ...\n",
		  subaddr, maskbytes, valuesbytes, totalbytes);
	LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes);
	LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes);

	PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes,
		       totalbytes, maskblob, valuesblob);
}

static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd,
				      struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 xor = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x,"
		  " xor: %x\n", offset, mask, shift, xor);

	PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor);
}

static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd,
				      struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 xor = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x,"
		  " xor: %x\n", offset, mask, shift, xor);

	PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor);
}

static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd,
				     struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 xor = pmf_next32(cmd);

	LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x,"
		  " xor: %x\n", offset, mask, shift, xor);

	PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor);
}

static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd,
				      struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x\n",
		  offset, shift, mask);

	PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask);
}

static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd,
				      struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x\n",
		  offset, shift, mask);

	PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask);
}

static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd,
				     struct pmf_handlers *h)
{
	u32 offset = pmf_next32(cmd);
	u32 shift = pmf_next32(cmd);
	u32 mask = pmf_next32(cmd);

	LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x\n",
		  offset, shift, mask);

	PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask);
}

static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd,
				       struct pmf_handlers *h)
{
	u32 bytes = pmf_next32(cmd);
	const void *maskblob = pmf_next_blob(cmd, bytes);
	const void *valuesblob = pmf_next_blob(cmd, bytes);

	LOG_PARSE("pmf: mask_and_compare(length: %ud ...\n", bytes);
	LOG_BLOB("pmf: mask data: \n", maskblob, bytes);
	LOG_BLOB("pmf: values data: \n", valuesblob, bytes);

	PMF_PARSE_CALL(mask_and_compare, cmd, h,
		       bytes, maskblob, valuesblob);
}


typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h);

/* Opcode -> parser dispatch table, indexed by PMF_CMD_*. NULL entries
 * make pmf_parse_one() fail with -ENXIO for unimplemented opcodes.
 */
static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] =
{
	NULL,
	pmf_parser_write_gpio,
	pmf_parser_read_gpio,
	pmf_parser_write_reg32,
	pmf_parser_read_reg32,
	pmf_parser_write_reg16,
	pmf_parser_read_reg16,
	pmf_parser_write_reg8,
	pmf_parser_read_reg8,
	pmf_parser_delay,
	pmf_parser_wait_reg32,
	pmf_parser_wait_reg16,
	pmf_parser_wait_reg8,
	pmf_parser_read_i2c,
	pmf_parser_write_i2c,
	pmf_parser_rmw_i2c,
	NULL, /* Bogus command */
	NULL, /* Shift bytes right: NYI */
	NULL, /* Shift bytes left: NYI */
	pmf_parser_read_cfg,
	pmf_parser_write_cfg,
	pmf_parser_rmw_cfg,
	pmf_parser_read_i2c_sub,
	pmf_parser_write_i2c_sub,
	pmf_parser_set_i2c_mode,
	pmf_parser_rmw_i2c_sub,
	pmf_parser_read_reg32_msrx,
	pmf_parser_read_reg16_msrx,
	pmf_parser_read_reg8_msrx,
	pmf_parser_write_reg32_slm,
	pmf_parser_write_reg16_slm,
	pmf_parser_write_reg8_slm,
	pmf_parser_mask_and_compare,
};

/* One registered driver: a device-tree node plus the handler ops that
 * execute opcodes for it, and the list of functions parsed out of its
 * "platform-do-*" properties. Refcounted; each function holds a ref.
 */
struct pmf_device {
	struct list_head	link;
	struct device_node	*node;
	struct pmf_handlers	*handlers;
	struct list_head	functions;
	struct kref		ref;
};

static LIST_HEAD(pmf_devices);
/* pmf_lock protects pmf_devices, per-device function lists and the
 * irq_clients lists (also taken from pmf_do_irq); pmf_irq_mutex
 * serializes irq client add/remove against irq_enable/irq_disable.
 */
static DEFINE_SPINLOCK(pmf_lock);
static DEFINE_MUTEX(pmf_irq_mutex);

static void pmf_release_device(struct kref *kref)
{
	struct pmf_device *dev = container_of(kref, struct pmf_device, ref);
	kfree(dev);
}

static inline void pmf_put_device(struct pmf_device *dev)
{
	kref_put(&dev->ref, pmf_release_device);
}

static inline struct pmf_device *pmf_get_device(struct pmf_device *dev)
{
	kref_get(&dev->ref);
	return dev;
}

/* Look up the device registered for "np"; returns a new reference or
 * NULL. Caller must hold pmf_lock.
 */
static inline struct pmf_device *pmf_find_device(struct device_node *np)
{
	struct pmf_device *dev;

	list_for_each_entry(dev, &pmf_devices, link) {
		if (dev->node == np)
			return pmf_get_device(dev);
	}
	return NULL;
}

/* Walk one function's byte-code. Dual purpose: with handlers == NULL it
 * is a pure parse pass that measures the function and trims func->length
 * to the bytes actually consumed; with handlers it executes each opcode
 * through the driver ops. A leading opcode 0 (PMF_CMD_LIST) turns the
 * single command into a counted command list.
 */
static int pmf_parse_one(struct pmf_function *func,
			 struct pmf_handlers *handlers,
			 void *instdata, struct pmf_args *args)
{
	struct pmf_cmd cmd;
	u32 ccode;
	int count, rc;

	cmd.cmdptr = func->data;
	cmd.cmdend = func->data + func->length;
	cmd.func = func;
	cmd.instdata = instdata;
	cmd.args = args;
	cmd.error = 0;
	LOG_PARSE("pmf: func %s, %d bytes, %s...\n",
		  func->name, func->length,
		  handlers ? "executing" : "parsing");

	/* One subcommand to parse for now */
	count = 1;

	while(count-- && cmd.cmdptr < cmd.cmdend) {
		/* Get opcode */
		ccode = pmf_next32(&cmd);
		/* Check if we are hitting a command list, fetch new count */
		if (ccode == 0) {
			count = pmf_next32(&cmd) - 1;
			ccode = pmf_next32(&cmd);
		}
		if (cmd.error) {
			LOG_ERROR("pmf: parse error, not enough data\n");
			return -ENXIO;
		}
		if (ccode >= PMF_CMD_COUNT) {
			LOG_ERROR("pmf: command code %d unknown !\n", ccode);
			return -ENXIO;
		}
		if (pmf_parsers[ccode] == NULL) {
			LOG_ERROR("pmf: no parser for command %d !\n", ccode);
			return -ENXIO;
		}
		rc = pmf_parsers[ccode](&cmd, handlers);
		if (rc != 0) {
			LOG_ERROR("pmf: parser for command %d returned"
				  " error %d\n", ccode, rc);
			return rc;
		}
	}

	/* We are doing an initial parse pass, we need to adjust the size */
	if (handlers == NULL)
		func->length = cmd.cmdptr - func->data;

	return 0;
}

/* Split one "platform-do-<name>" property value into pmf_function
 * entries: each record is a phandle word, a flags word (hence the
 * 12-byte minimum: 8 bytes header + at least one code word), then
 * byte-code whose true length is discovered by a parse pass. Returns
 * the number of functions added; each added function pins a device ref.
 */
static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata,
				 const char *name, u32 *data,
				 unsigned int length)
{
	int count = 0;
	struct pmf_function *func = NULL;

	DBG("pmf: Adding functions for platform-do-%s\n", name);

	while (length >= 12) {
		/* Allocate a structure */
		func = kzalloc(sizeof(*func), GFP_KERNEL);
		if (func == NULL)
			goto bail;
		kref_init(&func->ref);
		INIT_LIST_HEAD(&func->irq_clients);
		func->node = dev->node;
		func->driver_data = driverdata;
		func->name = name;
		func->phandle = data[0];
		func->flags = data[1];
		data += 2;
		length -= 8;
		func->data = data;
		func->length = length;
		func->dev = dev;
		DBG("pmf: idx %d: flags=%08x, phandle=%08x "
		    " %d bytes remaining, parsing...\n",
		    count+1, func->flags, func->phandle, length);
		if (pmf_parse_one(func, NULL, NULL, NULL)) {
			kfree(func);
			goto bail;
		}
		length -= func->length;
		data = (u32 *)(((u8 *)data) + func->length);
		list_add(&func->link, &dev->functions);
		pmf_get_device(dev);
		count++;
	}
 bail:
	DBG("pmf: Added %d functions\n", count);

	return count;
}

/* Scan the node's properties for every "platform-do-*" entry and add
 * its functions. Returns the total number of functions added.
 */
static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
{
	struct property *pp;
#define PP_PREFIX "platform-do-"
	const int plen = strlen(PP_PREFIX);
	int count = 0;

	for_each_property_of_node(dev->node, pp) {
		const char *name;
		if (strncmp(pp->name, PP_PREFIX, plen) != 0)
			continue;
		name = pp->name + plen;
		if (strlen(name) && pp->length >= 12)
			count += pmf_add_function_prop(dev, driverdata, name,
						       pp->value, pp->length);
	}
	return count;
}


/* Register handler ops for a device-tree node. Fails with -EBUSY if the
 * node is already registered, -ENODEV if no platform functions were
 * found on it.
 */
int pmf_register_driver(struct device_node *np,
			struct pmf_handlers *handlers, void *driverdata)
{
	struct pmf_device *dev;
	unsigned long flags;
	int rc = 0;

	if (handlers == NULL)
		return -EINVAL;

	DBG("pmf: registering driver for node %pOF\n", np);

	spin_lock_irqsave(&pmf_lock, flags);
	dev = pmf_find_device(np);
	spin_unlock_irqrestore(&pmf_lock, flags);
	if (dev != NULL) {
		DBG("pmf: already there !\n");
		pmf_put_device(dev);
		return -EBUSY;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		DBG("pmf: no memory !\n");
		return -ENOMEM;
	}
	kref_init(&dev->ref);
	dev->node = of_node_get(np);
	dev->handlers = handlers;
	INIT_LIST_HEAD(&dev->functions);

	rc = pmf_add_functions(dev, driverdata);
	if (rc == 0) {
		DBG("pmf: no functions, disposing.. \n");
		of_node_put(np);
		kfree(dev);
		return -ENODEV;
	}

	spin_lock_irqsave(&pmf_lock, flags);
	list_add(&dev->link, &pmf_devices);
	spin_unlock_irqrestore(&pmf_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_driver);

/* Take a reference on a function, pinning its driver module too.
 * Returns NULL if the module is going away.
 */
struct pmf_function *pmf_get_function(struct pmf_function *func)
{
	if (!try_module_get(func->dev->handlers->owner))
		return NULL;
	kref_get(&func->ref);
	return func;
}
EXPORT_SYMBOL_GPL(pmf_get_function);

static void pmf_release_function(struct kref *kref)
{
	struct pmf_function *func =
		container_of(kref, struct pmf_function, ref);
	pmf_put_device(func->dev);
	kfree(func);
}

static inline void __pmf_put_function(struct pmf_function *func)
{
	kref_put(&func->ref, pmf_release_function);
}

void pmf_put_function(struct pmf_function *func)
{
	if (func == NULL)
		return;
	module_put(func->dev->handlers->owner);
	__pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_put_function);

/* Tear down the registration made by pmf_register_driver(): drop every
 * function (and its device ref), then the device's own initial ref.
 */
void pmf_unregister_driver(struct device_node *np)
{
	struct pmf_device *dev;
	unsigned long flags;

	DBG("pmf: unregistering driver for node %pOF\n", np);

	spin_lock_irqsave(&pmf_lock, flags);
	dev = pmf_find_device(np);
	if (dev == NULL) {
		DBG("pmf: not such driver !\n");
		spin_unlock_irqrestore(&pmf_lock, flags);
		return;
	}
	list_del(&dev->link);

	while(!list_empty(&dev->functions)) {
		struct pmf_function *func =
			list_entry(dev->functions.next, typeof(*func), link);
		list_del(&func->link);
		__pmf_put_function(func);
	}

	pmf_put_device(dev);
	spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_unregister_driver);

/* Resolve (target, name, flags) to a pmf_function. A "platform-<name>"
 * property on the target may redirect the lookup to another node (the
 * "actor") via phandle. Returns the function WITHOUT taking a reference;
 * caller must hold pmf_lock and take its own ref (pmf_get_function).
 */
static struct pmf_function *__pmf_find_function(struct device_node *target,
					 const char *name, u32 flags)
{
	struct device_node *actor = of_node_get(target);
	struct pmf_device *dev;
	struct pmf_function *func, *result = NULL;
	char fname[64];
	const u32 *prop;
	u32 ph;

	/*
	 * Look for a "platform-*" function reference. If we can't find
	 * one, then we fallback to a direct call attempt
	 */
	snprintf(fname, 63, "platform-%s", name);
	prop = of_get_property(target, fname, NULL);
	if (prop == NULL)
		goto find_it;
	ph = *prop;
	if (ph == 0)
		goto find_it;

	/*
	 * Ok, now try to find the actor. If we can't find it, we fail,
	 * there is no point in falling back there
	 */
	of_node_put(actor);
	actor = of_find_node_by_phandle(ph);
	if (actor == NULL)
		return NULL;
 find_it:
	dev = pmf_find_device(actor);
	if (dev == NULL) {
		result = NULL;
		goto out;
	}

	list_for_each_entry(func, &dev->functions, link) {
		if (name && strcmp(name, func->name))
			continue;
		if (func->phandle && target->phandle != func->phandle)
			continue;
		if ((func->flags & flags) == 0)
			continue;
		result = func;
		break;
	}
	pmf_put_device(dev);
 out:
	of_node_put(actor);
	return result;
}


/* Attach an interrupt client to an interrupt-generating function; the
 * first client on a function triggers the driver's irq_enable hook.
 */
int pmf_register_irq_client(struct device_node *target,
			    const char *name,
			    struct pmf_irq_client *client)
{
	struct pmf_function *func;
	unsigned long flags;

	spin_lock_irqsave(&pmf_lock, flags);
	func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
	if (func)
		func = pmf_get_function(func);
	spin_unlock_irqrestore(&pmf_lock, flags);
	if (func == NULL)
		return -ENODEV;

	/* guard against manipulations of list */
	mutex_lock(&pmf_irq_mutex);
	if (list_empty(&func->irq_clients))
		func->dev->handlers->irq_enable(func);

	/* guard against pmf_do_irq while changing list */
	spin_lock_irqsave(&pmf_lock, flags);
	list_add(&client->link, &func->irq_clients);
	spin_unlock_irqrestore(&pmf_lock, flags);

	client->func = func;
	mutex_unlock(&pmf_irq_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(pmf_register_irq_client);

/* Detach an interrupt client; the last client leaving a function
 * triggers the driver's irq_disable hook and drops the function ref
 * taken at registration.
 */
void pmf_unregister_irq_client(struct pmf_irq_client *client)
{
	struct pmf_function *func = client->func;
	unsigned long flags;

	BUG_ON(func == NULL);

	/* guard against manipulations of list */
	mutex_lock(&pmf_irq_mutex);
	client->func = NULL;

	/* guard against pmf_do_irq while changing list */
	spin_lock_irqsave(&pmf_lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&pmf_lock, flags);

	if (list_empty(&func->irq_clients))
		func->dev->handlers->irq_disable(func);
	mutex_unlock(&pmf_irq_mutex);
	pmf_put_function(func);
}
EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);


/* Called by drivers from interrupt context to fan an event out to all
 * registered clients of a function.
 */
void pmf_do_irq(struct pmf_function *func)
{
	unsigned long flags;
	struct pmf_irq_client *client;

	/* For now, using a spinlock over the whole function. Can be made
	 * to drop the lock using 2 lists if necessary
	 */
	spin_lock_irqsave(&pmf_lock, flags);
	list_for_each_entry(client, &func->irq_clients, link) {
		if (!try_module_get(client->owner))
			continue;
		client->handler(client->data);
		module_put(client->owner);
	}
	spin_unlock_irqrestore(&pmf_lock, flags);
}
EXPORT_SYMBOL_GPL(pmf_do_irq);

/* Execute one function through its driver's handlers, bracketed by the
 * optional begin/end hooks (begin may return per-call instance data).
 */
int pmf_call_one(struct pmf_function *func, struct pmf_args *args)
{
	struct pmf_device *dev = func->dev;
	void *instdata = NULL;
	int rc = 0;

	DBG(" ** pmf_call_one(%pOF/%s) **\n", dev->node, func->name);

	if (dev->handlers->begin)
		instdata = dev->handlers->begin(func, args);
	rc = pmf_parse_one(func, dev->handlers, instdata, args);
	if (dev->handlers->end)
		dev->handlers->end(func, instdata);

	return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_one);

/* Run every matching function on a node. The safe iterator plus the
 * per-function reference allow dropping pmf_lock around the actual
 * call, which may sleep in the driver handlers. Returns the last call's
 * result, or -ENODEV when nothing matched.
 */
int pmf_do_functions(struct device_node *np, const char *name,
		     u32 phandle, u32 fflags, struct pmf_args *args)
{
	struct pmf_device *dev;
	struct pmf_function *func, *tmp;
	unsigned long flags;
	int rc = -ENODEV;

	spin_lock_irqsave(&pmf_lock, flags);

	dev = pmf_find_device(np);
	if (dev == NULL) {
		spin_unlock_irqrestore(&pmf_lock, flags);
		return -ENODEV;
	}
	list_for_each_entry_safe(func, tmp, &dev->functions, link) {
		if (name && strcmp(name, func->name))
			continue;
		if (phandle && func->phandle && phandle != func->phandle)
			continue;
		if ((func->flags & fflags) == 0)
			continue;
		if (pmf_get_function(func) == NULL)
			continue;

		/* Unlock to make the call */
		spin_unlock_irqrestore(&pmf_lock, flags);
		rc = pmf_call_one(func, args);
		pmf_put_function(func);

		/* Lock again */
		spin_lock_irqsave(&pmf_lock, flags);
	}
	pmf_put_device(dev);
	spin_unlock_irqrestore(&pmf_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(pmf_do_functions);


/* Look up an on-demand function by node and name; returns it with a
 * reference held (drop with pmf_put_function), or NULL.
 */
struct pmf_function *pmf_find_function(struct device_node *target,
				       const char *name)
{
	struct pmf_function *func;
	unsigned long flags;

	spin_lock_irqsave(&pmf_lock, flags);
	func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND);
	if (func)
		func = pmf_get_function(func);
	spin_unlock_irqrestore(&pmf_lock, flags);
	return func;
}
EXPORT_SYMBOL_GPL(pmf_find_function);

/* Convenience wrapper: find, call once, release. */
int pmf_call_function(struct device_node *target, const char *name,
		      struct pmf_args *args)
{
	struct pmf_function *func = pmf_find_function(target, name);
	int rc;

	if (func == NULL)
		return -ENODEV;
	rc = pmf_call_one(func, args);
	pmf_put_function(func);
	return rc;
}
EXPORT_SYMBOL_GPL(pmf_call_function);
linux-master
arch/powerpc/platforms/powermac/pfunc_core.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * SMP support for power macintosh. * * We support both the old "powersurge" SMP architecture * and the current Core99 (G4 PowerMac) machines. * * Note that we don't support the very first rev. of * Apple/DayStar 2 CPUs board, the one with the funky * watchdog. Hopefully, none of these should be there except * maybe internally to Apple. I should probably still add some * code to detect this card though and disable SMP. --BenH. * * Support Macintosh G4 SMP by Troy Benjegerdes ([email protected]) * and Ben Herrenschmidt <[email protected]>. * * Support for DayStar quad CPU cards * Copyright (C) XLR8, Inc. 1994-2000 */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/hotplug.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/kernel_stat.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/hardirq.h> #include <linux/cpu.h> #include <linux/compiler.h> #include <linux/pgtable.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/code-patching.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/time.h> #include <asm/mpic.h> #include <asm/cacheflush.h> #include <asm/keylargo.h> #include <asm/pmac_low_i2c.h> #include <asm/pmac_pfunc.h> #include <asm/inst.h> #include "pmac.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif extern void __secondary_start_pmac_0(void); static void (*pmac_tb_freeze)(int freeze); static u64 timebase; static int tb_req; #ifdef CONFIG_PPC_PMAC32_PSURGE /* * Powersurge (old powermac SMP) support. 
*/ /* Addresses for powersurge registers */ #define HAMMERHEAD_BASE 0xf8000000 #define HHEAD_CONFIG 0x90 #define HHEAD_SEC_INTR 0xc0 /* register for interrupting the primary processor on the powersurge */ /* N.B. this is actually the ethernet ROM! */ #define PSURGE_PRI_INTR 0xf3019000 /* register for storing the start address for the secondary processor */ /* N.B. this is the PCI config space address register for the 1st bridge */ #define PSURGE_START 0xf2800000 /* Daystar/XLR8 4-CPU card */ #define PSURGE_QUAD_REG_ADDR 0xf8800000 #define PSURGE_QUAD_IRQ_SET 0 #define PSURGE_QUAD_IRQ_CLR 1 #define PSURGE_QUAD_IRQ_PRIMARY 2 #define PSURGE_QUAD_CKSTOP_CTL 3 #define PSURGE_QUAD_PRIMARY_ARB 4 #define PSURGE_QUAD_BOARD_ID 6 #define PSURGE_QUAD_WHICH_CPU 7 #define PSURGE_QUAD_CKSTOP_RDBK 8 #define PSURGE_QUAD_RESET_CTL 11 #define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v))) #define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f) #define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v))) #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v))) /* virtual addresses for the above */ static volatile u8 __iomem *hhead_base; static volatile u8 __iomem *quad_base; static volatile u32 __iomem *psurge_pri_intr; static volatile u8 __iomem *psurge_sec_intr; static volatile u32 __iomem *psurge_start; /* values for psurge_type */ #define PSURGE_NONE -1 #define PSURGE_DUAL 0 #define PSURGE_QUAD_OKEE 1 #define PSURGE_QUAD_COTTON 2 #define PSURGE_QUAD_ICEGRASS 3 /* what sort of powersurge board we have */ static int psurge_type = PSURGE_NONE; /* irq for secondary cpus to report */ static struct irq_domain *psurge_host; int psurge_secondary_virq; /* * Set and clear IPIs for powersurge. 
*/

/* Raise an IPI towards @cpu using whichever mechanism the detected
 * powersurge board variant provides. */
static inline void psurge_set_ipi(int cpu)
{
	if (psurge_type == PSURGE_NONE)
		return;
	if (cpu == 0)
		in_be32(psurge_pri_intr);
	else if (psurge_type == PSURGE_DUAL)
		out_8(psurge_sec_intr, 0);
	else
		PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
}

/* Acknowledge/clear a pending IPI on @cpu (secondary CPUs only). */
static inline void psurge_clr_ipi(int cpu)
{
	if (cpu > 0) {
		switch(psurge_type) {
		case PSURGE_DUAL:
			out_8(psurge_sec_intr, ~0);
			break;
		case PSURGE_NONE:
			break;
		default:
			PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
		}
	}
}

/*
 * On powersurge (old SMP powermac architecture) we don't have
 * separate IPIs for separate messages like openpic does.  Instead
 * use the generic demux helpers
 *  -- paulus.
 */
static irqreturn_t psurge_ipi_intr(int irq, void *d)
{
	psurge_clr_ipi(smp_processor_id());
	smp_ipi_demux();

	return IRQ_HANDLED;
}

static void smp_psurge_cause_ipi(int cpu)
{
	psurge_set_ipi(cpu);
}

/* irq_domain map hook: every psurge IPI is a per-CPU dummy interrupt. */
static int psurge_host_map(struct irq_domain *h, unsigned int virq,
			   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops psurge_host_ops = {
	.map	= psurge_host_map,
};

/* Create the nomap irq domain and request the secondary CPUs' IPI.
 * Returns 0 on success, negative errno otherwise. */
static int __init psurge_secondary_ipi_init(void)
{
	int rc = -ENOMEM;

	psurge_host = irq_domain_add_nomap(NULL, ~0, &psurge_host_ops, NULL);

	if (psurge_host)
		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);

	if (psurge_secondary_virq)
		rc = request_irq(psurge_secondary_virq, psurge_ipi_intr,
			IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);

	if (rc)
		pr_err("Failed to setup secondary cpu IPI\n");

	return rc;
}

/*
 * Determine a quad card presence. We read the board ID register, we
 * force the data bus to change to something else, and we read it again.
 * If it's stable, then the register probably exists (ugh !)
*/ static int __init psurge_quad_probe(void) { int type; unsigned int i; type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID); if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID)) return PSURGE_DUAL; /* looks OK, try a slightly more rigorous test */ /* bogus is not necessarily cacheline-aligned, though I don't suppose that really matters. -- paulus */ for (i = 0; i < 100; i++) { volatile u32 bogus[8]; bogus[(0+i)%8] = 0x00000000; bogus[(1+i)%8] = 0x55555555; bogus[(2+i)%8] = 0xFFFFFFFF; bogus[(3+i)%8] = 0xAAAAAAAA; bogus[(4+i)%8] = 0x33333333; bogus[(5+i)%8] = 0xCCCCCCCC; bogus[(6+i)%8] = 0xCCCCCCCC; bogus[(7+i)%8] = 0x33333333; wmb(); asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory"); mb(); if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID)) return PSURGE_DUAL; } return type; } static void __init psurge_quad_init(void) { int procbits; if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351); procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU); if (psurge_type == PSURGE_QUAD_ICEGRASS) PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits); else PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits); mdelay(33); out_8(psurge_sec_intr, ~0); PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits); PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits); if (psurge_type != PSURGE_QUAD_ICEGRASS) PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits); PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits); mdelay(33); PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits); mdelay(33); PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits); mdelay(33); } static void __init smp_psurge_probe(void) { int i, ncpus; struct device_node *dn; /* * The powersurge cpu board can be used in the generation * of powermacs that have a socket for an upgradeable cpu card, * including the 7500, 8500, 9500, 9600. * The device tree doesn't tell you if you have 2 cpus because * OF doesn't know anything about the 2nd processor. 
* Instead we look for magic bits in magic registers, * in the hammerhead memory controller in the case of the * dual-cpu powersurge board. -- paulus. */ dn = of_find_node_by_name(NULL, "hammerhead"); if (dn == NULL) return; of_node_put(dn); hhead_base = ioremap(HAMMERHEAD_BASE, 0x800); quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024); psurge_sec_intr = hhead_base + HHEAD_SEC_INTR; psurge_type = psurge_quad_probe(); if (psurge_type != PSURGE_DUAL) { psurge_quad_init(); /* All released cards using this HW design have 4 CPUs */ ncpus = 4; /* No sure how timebase sync works on those, let's use SW */ smp_ops->give_timebase = smp_generic_give_timebase; smp_ops->take_timebase = smp_generic_take_timebase; } else { iounmap(quad_base); if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { /* not a dual-cpu card */ iounmap(hhead_base); psurge_type = PSURGE_NONE; return; } ncpus = 2; } if (psurge_secondary_ipi_init()) return; psurge_start = ioremap(PSURGE_START, 4); psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); /* This is necessary because OF doesn't know about the * secondary cpu(s), and thus there aren't nodes in the * device tree for them, and smp_setup_cpu_maps hasn't * set their bits in cpu_present_mask. */ if (ncpus > NR_CPUS) ncpus = NR_CPUS; for (i = 1; i < ncpus ; ++i) set_cpu_present(i, true); if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352); } static int __init smp_psurge_kick_cpu(int nr) { unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; unsigned long a, flags; int i, j; /* Defining this here is evil ... but I prefer hiding that * crap to avoid giving people ideas that they can do the * same. 
*/ extern volatile unsigned int cpu_callin_map[NR_CPUS]; /* may need to flush here if secondary bats aren't setup */ for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32) asm volatile("dcbf 0,%0" : : "r" (a) : "memory"); asm volatile("sync"); if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353); /* This is going to freeze the timeebase, we disable interrupts */ local_irq_save(flags); out_be32(psurge_start, start); mb(); psurge_set_ipi(nr); /* * We can't use udelay here because the timebase is now frozen. */ for (i = 0; i < 2000; ++i) asm volatile("nop" : : : "memory"); psurge_clr_ipi(nr); /* * Also, because the timebase is frozen, we must not return to the * caller which will try to do udelay's etc... Instead, we wait -here- * for the CPU to callin. */ for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) { for (j = 1; j < 10000; j++) asm volatile("nop" : : : "memory"); asm volatile("sync" : : : "memory"); } if (!cpu_callin_map[nr]) goto stuck; /* And we do the TB sync here too for standard dual CPU cards */ if (psurge_type == PSURGE_DUAL) { while(!tb_req) barrier(); tb_req = 0; mb(); timebase = get_tb(); mb(); while (timebase) barrier(); mb(); } stuck: /* now interrupt the secondary, restarting both TBs */ if (psurge_type == PSURGE_DUAL) psurge_set_ipi(1); if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); return 0; } static void __init smp_psurge_setup_cpu(int cpu_nr) { unsigned long flags = IRQF_PERCPU | IRQF_NO_THREAD; int irq; if (cpu_nr != 0 || !psurge_start) return; /* reset the entry point so if we get another intr we won't * try to startup again */ out_be32(psurge_start, 0x100); irq = irq_create_mapping(NULL, 30); if (request_irq(irq, psurge_ipi_intr, flags, "primary IPI", NULL)) printk(KERN_ERR "Couldn't get primary IPI interrupt"); } void __init smp_psurge_take_timebase(void) { if (psurge_type != PSURGE_DUAL) return; tb_req = 1; mb(); while (!timebase) barrier(); mb(); set_tb(timebase >> 32, timebase & 0xffffffff); 
timebase = 0; mb(); set_dec(tb_ticks_per_jiffy/2); } void __init smp_psurge_give_timebase(void) { /* Nothing to do here */ } /* PowerSurge-style Macs */ struct smp_ops_t psurge_smp_ops = { .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ .cause_ipi = smp_psurge_cause_ipi, .cause_nmi_ipi = NULL, .probe = smp_psurge_probe, .kick_cpu = smp_psurge_kick_cpu, .setup_cpu = smp_psurge_setup_cpu, .give_timebase = smp_psurge_give_timebase, .take_timebase = smp_psurge_take_timebase, }; #endif /* CONFIG_PPC_PMAC32_PSURGE */ /* * Core 99 and later support */ static void smp_core99_give_timebase(void) { unsigned long flags; local_irq_save(flags); while(!tb_req) barrier(); tb_req = 0; (*pmac_tb_freeze)(1); mb(); timebase = get_tb(); mb(); while (timebase) barrier(); mb(); (*pmac_tb_freeze)(0); mb(); local_irq_restore(flags); } static void smp_core99_take_timebase(void) { unsigned long flags; local_irq_save(flags); tb_req = 1; mb(); while (!timebase) barrier(); mb(); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; mb(); local_irq_restore(flags); } #ifdef CONFIG_PPC64 /* * G5s enable/disable the timebase via an i2c-connected clock chip. */ static struct pmac_i2c_bus *pmac_tb_clock_chip_host; static u8 pmac_tb_pulsar_addr; static void smp_core99_cypress_tb_freeze(int freeze) { u8 data; int rc; /* Strangely, the device-tree says address is 0xd2, but darwin * accesses 0xd0 ... */ pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_combined); rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, 0xd0 | pmac_i2c_read, 1, 0x81, &data, 1); if (rc != 0) goto bail; data = (data & 0xf3) | (freeze ? 0x00 : 0x0c); pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub); rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, 0xd0 | pmac_i2c_write, 1, 0x81, &data, 1); bail: if (rc != 0) { printk("Cypress Timebase %s rc: %d\n", freeze ? 
"freeze" : "unfreeze", rc); panic("Timebase freeze failed !\n"); } } static void smp_core99_pulsar_tb_freeze(int freeze) { u8 data; int rc; pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_combined); rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, pmac_tb_pulsar_addr | pmac_i2c_read, 1, 0x2e, &data, 1); if (rc != 0) goto bail; data = (data & 0x88) | (freeze ? 0x11 : 0x22); pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub); rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, pmac_tb_pulsar_addr | pmac_i2c_write, 1, 0x2e, &data, 1); bail: if (rc != 0) { printk(KERN_ERR "Pulsar Timebase %s rc: %d\n", freeze ? "freeze" : "unfreeze", rc); panic("Timebase freeze failed !\n"); } } static void __init smp_core99_setup_i2c_hwsync(int ncpus) { struct device_node *cc = NULL; struct device_node *p; const char *name = NULL; const u32 *reg; int ok; /* Look for the clock chip */ for_each_node_by_name(cc, "i2c-hwclock") { p = of_get_parent(cc); ok = p && of_device_is_compatible(p, "uni-n-i2c"); of_node_put(p); if (!ok) continue; pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc); if (pmac_tb_clock_chip_host == NULL) continue; reg = of_get_property(cc, "reg", NULL); if (reg == NULL) continue; switch (*reg) { case 0xd2: if (of_device_is_compatible(cc,"pulsar-legacy-slewing")) { pmac_tb_freeze = smp_core99_pulsar_tb_freeze; pmac_tb_pulsar_addr = 0xd2; name = "Pulsar"; } else if (of_device_is_compatible(cc, "cy28508")) { pmac_tb_freeze = smp_core99_cypress_tb_freeze; name = "Cypress"; } break; case 0xd4: pmac_tb_freeze = smp_core99_pulsar_tb_freeze; pmac_tb_pulsar_addr = 0xd4; name = "Pulsar"; break; } if (pmac_tb_freeze != NULL) break; } if (pmac_tb_freeze != NULL) { /* Open i2c bus for synchronous access */ if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) { printk(KERN_ERR "Failed top open i2c bus for clock" " sync, fallback to software sync !\n"); goto no_i2c_sync; } printk(KERN_INFO "Processor timebase sync using %s i2c clock\n", name); return; } no_i2c_sync: pmac_tb_freeze = 
NULL; pmac_tb_clock_chip_host = NULL; } /* * Newer G5s uses a platform function */ static void smp_core99_pfunc_tb_freeze(int freeze) { struct device_node *cpus; struct pmf_args args; cpus = of_find_node_by_path("/cpus"); BUG_ON(cpus == NULL); args.count = 1; args.u[0].v = !freeze; pmf_call_function(cpus, "cpu-timebase", &args); of_node_put(cpus); } #else /* CONFIG_PPC64 */ /* * SMP G4 use a GPIO to enable/disable the timebase. */ static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */ static void smp_core99_gpio_tb_freeze(int freeze) { if (freeze) pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4); else pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0); pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); } #endif /* !CONFIG_PPC64 */ static void core99_init_caches(int cpu) { #ifndef CONFIG_PPC64 /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */ static long int core99_l2_cache; static long int core99_l3_cache; if (!cpu_has_feature(CPU_FTR_L2CR)) return; if (cpu == 0) { core99_l2_cache = _get_L2CR(); printk("CPU0: L2CR is %lx\n", core99_l2_cache); } else { printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR()); _set_L2CR(0); _set_L2CR(core99_l2_cache); printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache); } if (!cpu_has_feature(CPU_FTR_L3CR)) return; if (cpu == 0){ core99_l3_cache = _get_L3CR(); printk("CPU0: L3CR is %lx\n", core99_l3_cache); } else { printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR()); _set_L3CR(0); _set_L3CR(core99_l3_cache); printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache); } #endif /* !CONFIG_PPC64 */ } static void __init smp_core99_setup(int ncpus) { #ifdef CONFIG_PPC64 /* i2c based HW sync on some G5s */ if (of_machine_is_compatible("PowerMac7,2") || of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("RackMac3,1")) smp_core99_setup_i2c_hwsync(ncpus); /* pfunc based HW sync on recent G5s */ if (pmac_tb_freeze == NULL) { struct device_node *cpus = 
of_find_node_by_path("/cpus"); if (cpus && of_property_read_bool(cpus, "platform-cpu-timebase")) { pmac_tb_freeze = smp_core99_pfunc_tb_freeze; printk(KERN_INFO "Processor timebase sync using" " platform function\n"); } of_node_put(cpus); } #else /* CONFIG_PPC64 */ /* GPIO based HW sync on ppc32 Core99 */ if (pmac_tb_freeze == NULL && !of_machine_is_compatible("MacRISC4")) { struct device_node *cpu; const u32 *tbprop = NULL; core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */ cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { tbprop = of_get_property(cpu, "timebase-enable", NULL); if (tbprop) core99_tb_gpio = *tbprop; of_node_put(cpu); } pmac_tb_freeze = smp_core99_gpio_tb_freeze; printk(KERN_INFO "Processor timebase sync using" " GPIO 0x%02x\n", core99_tb_gpio); } #endif /* CONFIG_PPC64 */ /* No timebase sync, fallback to software */ if (pmac_tb_freeze == NULL) { smp_ops->give_timebase = smp_generic_give_timebase; smp_ops->take_timebase = smp_generic_take_timebase; printk(KERN_INFO "Processor timebase sync using software\n"); } #ifndef CONFIG_PPC64 { int i; /* XXX should get this from reg properties */ for (i = 1; i < ncpus; ++i) set_hard_smp_processor_id(i, i); } #endif /* 32 bits SMP can't NAP */ if (!of_machine_is_compatible("MacRISC4")) powersave_nap = 0; } static void __init smp_core99_probe(void) { struct device_node *cpus; int ncpus = 0; if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345); /* Count CPUs in the device-tree */ for_each_node_by_type(cpus, "cpu") ++ncpus; printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus); /* Nothing more to do if less than 2 of them */ if (ncpus <= 1) return; /* We need to perform some early initialisations before we can start * setting up SMP as we are running before initcalls */ pmac_pfunc_base_install(); pmac_i2c_init(); /* Setup various bits like timebase sync method, ability to nap, ... 
*/ smp_core99_setup(ncpus); /* Install IPIs */ mpic_request_ipis(); /* Collect l2cr and l3cr values from CPU 0 */ core99_init_caches(0); } static int smp_core99_kick_cpu(int nr) { unsigned int save_vector; unsigned long target, flags; unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); if (nr < 0 || nr > 3) return -ENOENT; if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); local_irq_save(flags); /* Save reset vector */ save_vector = *vector; /* Setup fake reset vector that does * b __secondary_start_pmac_0 + nr*8 */ target = (unsigned long) __secondary_start_pmac_0 + nr * 8; patch_branch(vector, target, BRANCH_SET_LINK); /* Put some life in our friend */ pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); /* FIXME: We wait a bit for the CPU to take the exception, I should * instead wait for the entry code to set something for me. Well, * ideally, all that crap will be done in prom.c and the CPU left * in a RAM-based wait loop like CHRP. */ mdelay(1); /* Restore our exception vector */ patch_instruction(vector, ppc_inst(save_vector)); local_irq_restore(flags); if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); return 0; } static void smp_core99_setup_cpu(int cpu_nr) { /* Setup L2/L3 */ if (cpu_nr != 0) core99_init_caches(cpu_nr); /* Setup openpic */ mpic_setup_this_cpu(); } #ifdef CONFIG_PPC64 #ifdef CONFIG_HOTPLUG_CPU static unsigned int smp_core99_host_open; static int smp_core99_cpu_prepare(unsigned int cpu) { int rc; /* Open i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host && !smp_core99_host_open) { rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); if (rc) { pr_err("Failed to open i2c bus for time sync\n"); return notifier_from_errno(rc); } smp_core99_host_open = 1; } return 0; } static int smp_core99_cpu_online(unsigned int cpu) { /* Close i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host && smp_core99_host_open) { pmac_i2c_close(pmac_tb_clock_chip_host); smp_core99_host_open = 0; } 
return 0; } #endif /* CONFIG_HOTPLUG_CPU */ static void __init smp_core99_bringup_done(void) { /* Close i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) pmac_i2c_close(pmac_tb_clock_chip_host); /* If we didn't start the second CPU, we must take * it off the bus. */ if (of_machine_is_compatible("MacRISC4") && num_online_cpus() < 2) { set_cpu_present(1, false); g5_phy_disable_cpu1(); } #ifdef CONFIG_HOTPLUG_CPU cpuhp_setup_state_nocalls(CPUHP_POWERPC_PMAC_PREPARE, "powerpc/pmac:prepare", smp_core99_cpu_prepare, NULL); cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "powerpc/pmac:online", smp_core99_cpu_online, NULL); #endif if (ppc_md.progress) ppc_md.progress("smp_core99_bringup_done", 0x349); } #endif /* CONFIG_PPC64 */ #ifdef CONFIG_HOTPLUG_CPU static int smp_core99_cpu_disable(void) { int rc = generic_cpu_disable(); if (rc) return rc; mpic_cpu_set_priority(0xf); cleanup_cpu_mmu_context(); return 0; } #ifdef CONFIG_PPC32 static void pmac_cpu_offline_self(void) { int cpu = smp_processor_id(); local_irq_disable(); idle_task_exit(); pr_debug("CPU%d offline\n", cpu); generic_set_cpu_dead(cpu); smp_wmb(); mb(); low_cpu_offline_self(); } #else /* CONFIG_PPC32 */ static void pmac_cpu_offline_self(void) { int cpu = smp_processor_id(); local_irq_disable(); idle_task_exit(); /* * turn off as much as possible, we'll be * kicked out as this will only be invoked * on core99 platforms for now ... */ printk(KERN_INFO "CPU#%d offline\n", cpu); generic_set_cpu_dead(cpu); smp_wmb(); /* * Re-enable interrupts. The NAP code needs to enable them * anyways, do it now so we deal with the case where one already * happened while soft-disabled. * We shouldn't get any external interrupts, only decrementer, and the * decrementer handler is safe for use on offline CPUs */ local_irq_enable(); while (1) { /* let's not take timer interrupts too often ... 
*/ set_dec(0x7fffffff); /* Enter NAP mode */ power4_idle(); } } #endif /* else CONFIG_PPC32 */ #endif /* CONFIG_HOTPLUG_CPU */ /* Core99 Macs (dual G4s and G5s) */ static struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, #ifdef CONFIG_PPC64 .bringup_done = smp_core99_bringup_done, #endif .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, .take_timebase = smp_core99_take_timebase, #if defined(CONFIG_HOTPLUG_CPU) .cpu_disable = smp_core99_cpu_disable, .cpu_die = generic_cpu_die, #endif }; void __init pmac_setup_smp(void) { struct device_node *np; /* Check for Core99 */ np = of_find_node_by_name(NULL, "uni-n"); if (!np) np = of_find_node_by_name(NULL, "u3"); if (!np) np = of_find_node_by_name(NULL, "u4"); if (np) { of_node_put(np); smp_ops = &core99_smp_ops; } #ifdef CONFIG_PPC_PMAC32_PSURGE else { /* We have to set bits in cpu_possible_mask here since the * secondary CPU(s) aren't in the device tree. Various * things won't be initialized for CPUs not in the possible * map, so we really need to fix it up here. */ int cpu; for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu) set_cpu_possible(cpu, true); smp_ops = &psurge_smp_ops; } #endif /* CONFIG_PPC_PMAC32_PSURGE */ #ifdef CONFIG_HOTPLUG_CPU smp_ops->cpu_offline_self = pmac_cpu_offline_self; #endif }
linux-master
arch/powerpc/platforms/powermac/smp.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/asp834x.c * * Analogue & Micro ASP8347 board specific routines * clone of mpc834x_itx * * Copyright 2008 Codehermit * * Maintainer: Bryan O'Donoghue <[email protected]> */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include "mpc83xx.h" /* ************************************************************************ * * Setup the architecture * */ static void __init asp834x_setup_arch(void) { mpc83xx_setup_arch(); mpc834x_usb_cfg(); } machine_device_initcall(asp834x, mpc83xx_declare_of_platform_devices); define_machine(asp834x) { .name = "ASP8347E", .compatible = "analogue-and-micro,asp8347e", .setup_arch = asp834x_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/asp834x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/mpc831x_rdb.c * * Description: MPC831x RDB board specific routines. * This file is based on mpc834x_sys.c * Author: Lo Wlison <[email protected]> * * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" /* * Setup the architecture */ static void __init mpc831x_rdb_setup_arch(void) { mpc83xx_setup_arch(); mpc831x_usb_cfg(); } static const char *board[] __initdata = { "MPC8313ERDB", "fsl,mpc8315erdb", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc831x_rdb_probe(void) { return of_device_compatible_match(of_root, board); } machine_device_initcall(mpc831x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc831x_rdb) { .name = "MPC831x RDB", .probe = mpc831x_rdb_probe, .setup_arch = mpc831x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc831x_rdb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/mpc834x_itx.c * * MPC834x ITX board specific routines * * Maintainer: Kumar Gala <[email protected]> */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/of_platform.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/irq.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" static const struct of_device_id mpc834x_itx_ids[] __initconst = { { .compatible = "fsl,pq2pro-localbus", }, {}, }; static int __init mpc834x_itx_declare_of_platform_devices(void) { mpc83xx_declare_of_platform_devices(); return of_platform_bus_probe(NULL, mpc834x_itx_ids, NULL); } machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices); /* ************************************************************************ * * Setup the architecture * */ static void __init mpc834x_itx_setup_arch(void) { mpc83xx_setup_arch(); mpc834x_usb_cfg(); } define_machine(mpc834x_itx) { .name = "MPC834x ITX", .compatible = "MPC834xMITX", .setup_arch = mpc834x_itx_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc834x_itx.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008-2011 DENX Software Engineering GmbH
 * Author: Heiko Schocher <[email protected]>
 *
 * Description:
 * Keymile 83xx platform specific routines.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <linux/atomic.h>
#include <linux/time.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include <soc/fsl/qe/qe.h>

#include "mpc83xx.h"

#define SVR_REV(svr)	(((svr) >> 0) & 0xFFFF)	/* Revision field */

/* Apply errata-mandated RGMII output-delay tweaks to the par_io block.
 * The exact register values depend on the silicon revision (SVR). */
static void __init quirk_mpc8360e_qe_enet10(void)
{
	/*
	 * handle mpc8360E Erratum QE_ENET10:
	 * RGMII AC values do not meet the specification
	 */
	uint svid = mfspr(SPRN_SVR);
	struct device_node *np_par;
	struct resource res;
	void __iomem *base;
	int ret;

	np_par = of_find_node_by_name(NULL, "par_io");
	if (np_par == NULL) {
		pr_warn("%s couldn't find par_io node\n", __func__);
		return;
	}

	/* Map Parallel I/O ports registers */
	ret = of_address_to_resource(np_par, 0, &res);
	if (ret) {
		pr_warn("%s couldn't map par_io registers\n", __func__);
		goto out;
	}
	base = ioremap(res.start, resource_size(&res));
	if (!base)
		goto out;

	/*
	 * set output delay adjustments to default values according
	 * table 5 in Errata Rev. 5, 9/2011:
	 *
	 * write 0b01 to UCC1 bits 18:19
	 * write 0b01 to UCC2 option 1 bits 4:5
	 * write 0b01 to UCC2 option 2 bits 16:17
	 */
	clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000);

	/*
	 * set output delay adjustments to default values according
	 * table 3-13 in Reference Manual Rev.3 05/2010:
	 *
	 * write 0b01 to UCC2 option 2 bits 16:17
	 * write 0b0101 to UCC1 bits 20:23
	 * write 0b0101 to UCC2 option 1 bits 24:27
	 */
	clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550);

	if (SVR_REV(svid) == 0x0021) {
		/*
		 * UCC2 option 1: write 0b1010 to bits 24:27
		 * at address IMMRBAR+0x14AC
		 */
		clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0);
	} else if (SVR_REV(svid) == 0x0020) {
		/*
		 * UCC1: write 0b11 to bits 18:19
		 * at address IMMRBAR+0x14A8
		 */
		setbits32((base + 0xa8), 0x00003000);

		/*
		 * UCC2 option 1: write 0b11 to bits 4:5
		 * at address IMMRBAR+0x14A8
		 */
		setbits32((base + 0xa8), 0x0c000000);

		/*
		 * UCC2 option 2: write 0b11 to bits 16:17
		 * at address IMMRBAR+0x14AC
		 */
		setbits32((base + 0xac), 0x0000c000);
	}
	iounmap(base);

out:
	of_node_put(np_par);
}

/* ************************************************************************
 *
 * Setup the architecture
 *
 */
static void __init mpc83xx_km_setup_arch(void)
{
#ifdef CONFIG_QUICC_ENGINE
	struct device_node *np;
#endif

	mpc83xx_setup_arch();

#ifdef CONFIG_QUICC_ENGINE
	np = of_find_node_by_name(NULL, "par_io");
	if (np != NULL) {
		par_io_init(np);
		of_node_put(np);

		for_each_node_by_name(np, "spi")
			par_io_of_config(np);

		for_each_node_by_name(np, "ucc")
			par_io_of_config(np);

		/* Only apply this quirk when par_io is available */
		np = of_find_compatible_node(NULL, "network", "ucc_geth");
		if (np != NULL) {
			quirk_mpc8360e_qe_enet10();
			of_node_put(np);
		}
	}
#endif	/* CONFIG_QUICC_ENGINE */
}

machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices);

/* list of the supported boards */
static char *board[] __initdata = {
	"Keymile,KMETER1",
	"Keymile,kmpbec8321",
	NULL
};

/*
 * Called very early, MMU is off, device-tree isn't unflattened
 */
static int __init mpc83xx_km_probe(void) { int i = 0; while (board[i]) { if (of_machine_is_compatible(board[i])) break; i++; } return (board[i] != NULL); } define_machine(mpc83xx_km) { .name = "mpc83xx-km-platform", .probe = mpc83xx_km_probe, .setup_arch = mpc83xx_km_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/km83xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/mpc837x_rdb.c * * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * MPC837x RDB board specific routines */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" static void __init mpc837x_rdb_sd_cfg(void) { void __iomem *im; im = ioremap(get_immrbase(), 0x1000); if (!im) { WARN_ON(1); return; } /* * On RDB boards (in contrast to MDS) USBB pins are used for SD only, * so we can safely mux them away from the USB block. */ clrsetbits_be32(im + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USBB_MASK, MPC837X_SICRL_SD); clrsetbits_be32(im + MPC83XX_SICRH_OFFS, MPC837X_SICRH_SPI_MASK, MPC837X_SICRH_SD); iounmap(im); } /* ************************************************************************ * * Setup the architecture * */ static void __init mpc837x_rdb_setup_arch(void) { mpc83xx_setup_arch(); mpc837x_usb_cfg(); mpc837x_rdb_sd_cfg(); } machine_device_initcall(mpc837x_rdb, mpc83xx_declare_of_platform_devices); static const char * const board[] __initconst = { "fsl,mpc8377rdb", "fsl,mpc8378rdb", "fsl,mpc8379rdb", "fsl,mpc8377wlan", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc837x_rdb_probe(void) { return of_device_compatible_match(of_root, board); } define_machine(mpc837x_rdb) { .name = "MPC837x RDB/WLAN", .probe = mpc837x_rdb_probe, .setup_arch = mpc837x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc837x_rdb.c
// SPDX-License-Identifier: GPL-2.0-only /* * MPC83xx suspend support * * Author: Scott Wood <[email protected]> * * Copyright (c) 2006-2007 Freescale Semiconductor, Inc. */ #include <linux/pm.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <linux/sched/signal.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/suspend.h> #include <linux/fsl_devices.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/export.h> #include <asm/reg.h> #include <asm/io.h> #include <asm/time.h> #include <asm/mpc6xx.h> #include <asm/switch_to.h> #include <sysdev/fsl_soc.h> #define PMCCR1_NEXT_STATE 0x0C /* Next state for power management */ #define PMCCR1_NEXT_STATE_SHIFT 2 #define PMCCR1_CURR_STATE 0x03 /* Current state for power management*/ #define IMMR_SYSCR_OFFSET 0x100 #define IMMR_RCW_OFFSET 0x900 #define RCW_PCI_HOST 0x80000000 void mpc83xx_enter_deep_sleep(phys_addr_t immrbase); struct mpc83xx_pmc { u32 config; #define PMCCR_DLPEN 2 /* DDR SDRAM low power enable */ #define PMCCR_SLPEN 1 /* System low power enable */ u32 event; u32 mask; /* All but PMCI are deep-sleep only */ #define PMCER_GPIO 0x100 #define PMCER_PCI 0x080 #define PMCER_USB 0x040 #define PMCER_ETSEC1 0x020 #define PMCER_ETSEC2 0x010 #define PMCER_TIMER 0x008 #define PMCER_INT1 0x004 #define PMCER_INT2 0x002 #define PMCER_PMCI 0x001 #define PMCER_ALL 0x1FF /* deep-sleep only */ u32 config1; #define PMCCR1_USE_STATE 0x80000000 #define PMCCR1_PME_EN 0x00000080 #define PMCCR1_ASSERT_PME 0x00000040 #define PMCCR1_POWER_OFF 0x00000020 /* deep-sleep only */ u32 config2; }; struct mpc83xx_rcw { u32 rcwlr; u32 rcwhr; }; struct mpc83xx_clock { u32 spmr; u32 occr; u32 sccr; }; struct mpc83xx_syscr { __be32 sgprl; __be32 sgprh; __be32 spridr; __be32 :32; __be32 spcr; __be32 sicrl; __be32 sicrh; }; struct mpc83xx_saved { u32 sicrl; u32 sicrh; u32 sccr; }; struct pmc_type { int 
has_deep_sleep; }; static int has_deep_sleep, deep_sleeping; static int pmc_irq; static struct mpc83xx_pmc __iomem *pmc_regs; static struct mpc83xx_clock __iomem *clock_regs; static struct mpc83xx_syscr __iomem *syscr_regs; static struct mpc83xx_saved saved_regs; static int is_pci_agent, wake_from_pci; static phys_addr_t immrbase; static int pci_pm_state; static DECLARE_WAIT_QUEUE_HEAD(agent_wq); int fsl_deep_sleep(void) { return deep_sleeping; } EXPORT_SYMBOL(fsl_deep_sleep); static int mpc83xx_change_state(void) { u32 curr_state; u32 reg_cfg1 = in_be32(&pmc_regs->config1); if (is_pci_agent) { pci_pm_state = (reg_cfg1 & PMCCR1_NEXT_STATE) >> PMCCR1_NEXT_STATE_SHIFT; curr_state = reg_cfg1 & PMCCR1_CURR_STATE; if (curr_state != pci_pm_state) { reg_cfg1 &= ~PMCCR1_CURR_STATE; reg_cfg1 |= pci_pm_state; out_be32(&pmc_regs->config1, reg_cfg1); wake_up(&agent_wq); return 1; } } return 0; } static irqreturn_t pmc_irq_handler(int irq, void *dev_id) { u32 event = in_be32(&pmc_regs->event); int ret = IRQ_NONE; if (mpc83xx_change_state()) ret = IRQ_HANDLED; if (event) { out_be32(&pmc_regs->event, event); ret = IRQ_HANDLED; } return ret; } static void mpc83xx_suspend_restore_regs(void) { out_be32(&syscr_regs->sicrl, saved_regs.sicrl); out_be32(&syscr_regs->sicrh, saved_regs.sicrh); out_be32(&clock_regs->sccr, saved_regs.sccr); } static void mpc83xx_suspend_save_regs(void) { saved_regs.sicrl = in_be32(&syscr_regs->sicrl); saved_regs.sicrh = in_be32(&syscr_regs->sicrh); saved_regs.sccr = in_be32(&clock_regs->sccr); } static int mpc83xx_suspend_enter(suspend_state_t state) { int ret = -EAGAIN; /* Don't go to sleep if there's a race where pci_pm_state changes * between the agent thread checking it and the PM code disabling * interrupts. */ if (wake_from_pci) { if (pci_pm_state != (deep_sleeping ? 
3 : 2)) goto out; out_be32(&pmc_regs->config1, in_be32(&pmc_regs->config1) | PMCCR1_PME_EN); } /* Put the system into low-power mode and the RAM * into self-refresh mode once the core goes to * sleep. */ out_be32(&pmc_regs->config, PMCCR_SLPEN | PMCCR_DLPEN); /* If it has deep sleep (i.e. it's an 831x or compatible), * disable power to the core upon entering sleep mode. This will * require going through the boot firmware upon a wakeup event. */ if (deep_sleeping) { mpc83xx_suspend_save_regs(); out_be32(&pmc_regs->mask, PMCER_ALL); out_be32(&pmc_regs->config1, in_be32(&pmc_regs->config1) | PMCCR1_POWER_OFF); enable_kernel_fp(); mpc83xx_enter_deep_sleep(immrbase); out_be32(&pmc_regs->config1, in_be32(&pmc_regs->config1) & ~PMCCR1_POWER_OFF); out_be32(&pmc_regs->mask, PMCER_PMCI); mpc83xx_suspend_restore_regs(); } else { out_be32(&pmc_regs->mask, PMCER_PMCI); mpc6xx_enter_standby(); } ret = 0; out: out_be32(&pmc_regs->config1, in_be32(&pmc_regs->config1) & ~PMCCR1_PME_EN); return ret; } static void mpc83xx_suspend_end(void) { deep_sleeping = 0; } static int mpc83xx_suspend_valid(suspend_state_t state) { return state == PM_SUSPEND_STANDBY || state == PM_SUSPEND_MEM; } static int mpc83xx_suspend_begin(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: deep_sleeping = 0; return 0; case PM_SUSPEND_MEM: if (has_deep_sleep) deep_sleeping = 1; return 0; default: return -EINVAL; } } static int agent_thread_fn(void *data) { while (1) { wait_event_interruptible(agent_wq, pci_pm_state >= 2); try_to_freeze(); if (signal_pending(current) || pci_pm_state < 2) continue; /* With a preemptible kernel (or SMP), this could race with * a userspace-driven suspend request. It's probably best * to avoid mixing the two with such a configuration (or * else fix it by adding a mutex to state_store that we can * synchronize with). */ wake_from_pci = 1; pm_suspend(pci_pm_state == 3 ? 
PM_SUSPEND_MEM : PM_SUSPEND_STANDBY); wake_from_pci = 0; } return 0; } static void mpc83xx_set_agent(void) { out_be32(&pmc_regs->config1, PMCCR1_USE_STATE); out_be32(&pmc_regs->mask, PMCER_PMCI); kthread_run(agent_thread_fn, NULL, "PCI power mgt"); } static int mpc83xx_is_pci_agent(void) { struct mpc83xx_rcw __iomem *rcw_regs; int ret; rcw_regs = ioremap(get_immrbase() + IMMR_RCW_OFFSET, sizeof(struct mpc83xx_rcw)); if (!rcw_regs) return -ENOMEM; ret = !(in_be32(&rcw_regs->rcwhr) & RCW_PCI_HOST); iounmap(rcw_regs); return ret; } static const struct platform_suspend_ops mpc83xx_suspend_ops = { .valid = mpc83xx_suspend_valid, .begin = mpc83xx_suspend_begin, .enter = mpc83xx_suspend_enter, .end = mpc83xx_suspend_end, }; static struct pmc_type pmc_types[] = { { .has_deep_sleep = 1, }, { .has_deep_sleep = 0, } }; static const struct of_device_id pmc_match[] = { { .compatible = "fsl,mpc8313-pmc", .data = &pmc_types[0], }, { .compatible = "fsl,mpc8349-pmc", .data = &pmc_types[1], }, {} }; static int pmc_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct resource res; const struct pmc_type *type; int ret = 0; type = of_device_get_match_data(&ofdev->dev); if (!type) return -EINVAL; if (!of_device_is_available(np)) return -ENODEV; has_deep_sleep = type->has_deep_sleep; immrbase = get_immrbase(); is_pci_agent = mpc83xx_is_pci_agent(); if (is_pci_agent < 0) return is_pci_agent; ret = of_address_to_resource(np, 0, &res); if (ret) return -ENODEV; pmc_irq = irq_of_parse_and_map(np, 0); if (pmc_irq) { ret = request_irq(pmc_irq, pmc_irq_handler, IRQF_SHARED, "pmc", ofdev); if (ret) return -EBUSY; } pmc_regs = ioremap(res.start, sizeof(*pmc_regs)); if (!pmc_regs) { ret = -ENOMEM; goto out; } ret = of_address_to_resource(np, 1, &res); if (ret) { ret = -ENODEV; goto out_pmc; } clock_regs = ioremap(res.start, sizeof(*clock_regs)); if (!clock_regs) { ret = -ENOMEM; goto out_pmc; } if (has_deep_sleep) { syscr_regs = ioremap(immrbase + 
IMMR_SYSCR_OFFSET, sizeof(*syscr_regs)); if (!syscr_regs) { ret = -ENOMEM; goto out_syscr; } } if (is_pci_agent) mpc83xx_set_agent(); suspend_set_ops(&mpc83xx_suspend_ops); return 0; out_syscr: iounmap(clock_regs); out_pmc: iounmap(pmc_regs); out: if (pmc_irq) free_irq(pmc_irq, ofdev); return ret; } static struct platform_driver pmc_driver = { .driver = { .name = "mpc83xx-pmc", .of_match_table = pmc_match, .suppress_bind_attrs = true, }, .probe = pmc_probe, }; builtin_platform_driver(pmc_driver);
linux-master
arch/powerpc/platforms/83xx/suspend.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale 83xx USB SOC setup code * * Copyright (C) 2007 Freescale Semiconductor, Inc. * Author: Li Yang */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" int __init mpc831x_usb_cfg(void) { u32 temp; void __iomem *immap, *usb_regs; struct device_node *np = NULL; struct device_node *immr_node = NULL; const void *prop; struct resource res; int ret = 0; #ifdef CONFIG_USB_OTG const void *dr_mode; #endif np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); if (!np) return -ENODEV; prop = of_get_property(np, "phy_type", NULL); /* Map IMMR space for pin and clock settings */ immap = ioremap(get_immrbase(), 0x1000); if (!immap) { of_node_put(np); return -ENOMEM; } /* Configure clock */ immr_node = of_get_parent(np); if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") || of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))) clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC8315_SCCR_USB_MASK, MPC8315_SCCR_USB_DRCM_01); else clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC83XX_SCCR_USB_MASK, MPC83XX_SCCR_USB_DRCM_11); /* Configure pin mux for ULPI. 
There is no pin mux for UTMI */ if (prop && !strcmp(prop, "ulpi")) { if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, MPC8308_SICRH_USB_MASK, MPC8308_SICRH_USB_ULPI); } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC8315_SICRL_USB_MASK, MPC8315_SICRL_USB_ULPI); clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, MPC8315_SICRH_USB_MASK, MPC8315_SICRH_USB_ULPI); } else { clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC831X_SICRL_USB_MASK, MPC831X_SICRL_USB_ULPI); clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, MPC831X_SICRH_USB_MASK, MPC831X_SICRH_USB_ULPI); } } iounmap(immap); of_node_put(immr_node); /* Map USB SOC space */ ret = of_address_to_resource(np, 0, &res); if (ret) { of_node_put(np); return ret; } usb_regs = ioremap(res.start, resource_size(&res)); /* Using on-chip PHY */ if (prop && (!strcmp(prop, "utmi_wide") || !strcmp(prop, "utmi"))) { u32 refsel; if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) goto out; if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) refsel = CONTROL_REFSEL_24MHZ; else refsel = CONTROL_REFSEL_48MHZ; /* Set UTMI_PHY_EN and REFSEL */ out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, CONTROL_UTMI_PHY_EN | refsel); /* Using external UPLI PHY */ } else if (prop && !strcmp(prop, "ulpi")) { /* Set PHY_CLK_SEL to ULPI */ temp = CONTROL_PHY_CLK_SEL_ULPI; #ifdef CONFIG_USB_OTG /* Set OTG_PORT */ if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { dr_mode = of_get_property(np, "dr_mode", NULL); if (dr_mode && !strcmp(dr_mode, "otg")) temp |= CONTROL_OTG_PORT; } #endif /* CONFIG_USB_OTG */ out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); } else { pr_warn("831x USB PHY type not supported\n"); ret = -EINVAL; } out: iounmap(usb_regs); of_node_put(np); return ret; }
linux-master
arch/powerpc/platforms/83xx/usb_831x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale 83xx USB SOC setup code * * Copyright (C) 2007 Freescale Semiconductor, Inc. * Author: Li Yang */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" int __init mpc834x_usb_cfg(void) { unsigned long sccr, sicrl, sicrh; void __iomem *immap; struct device_node *np = NULL; int port0_is_dr = 0, port1_is_dr = 0; const void *prop, *dr_mode; immap = ioremap(get_immrbase(), 0x1000); if (!immap) return -ENOMEM; /* Read registers */ /* Note: DR and MPH must use the same clock setting in SCCR */ sccr = in_be32(immap + MPC83XX_SCCR_OFFS) & ~MPC83XX_SCCR_USB_MASK; sicrl = in_be32(immap + MPC83XX_SICRL_OFFS) & ~MPC834X_SICRL_USB_MASK; sicrh = in_be32(immap + MPC83XX_SICRH_OFFS) & ~MPC834X_SICRH_USB_UTMI; np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); if (np) { sccr |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */ prop = of_get_property(np, "phy_type", NULL); port1_is_dr = 1; if (prop && (!strcmp(prop, "utmi") || !strcmp(prop, "utmi_wide"))) { sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; sicrh |= MPC834X_SICRH_USB_UTMI; port0_is_dr = 1; } else if (prop && !strcmp(prop, "serial")) { dr_mode = of_get_property(np, "dr_mode", NULL); if (dr_mode && !strcmp(dr_mode, "otg")) { sicrl |= MPC834X_SICRL_USB0 | MPC834X_SICRL_USB1; port0_is_dr = 1; } else { sicrl |= MPC834X_SICRL_USB1; } } else if (prop && !strcmp(prop, "ulpi")) { sicrl |= MPC834X_SICRL_USB1; } else { pr_warn("834x USB PHY type not supported\n"); } of_node_put(np); } np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph"); if (np) { sccr |= MPC83XX_SCCR_USB_MPHCM_11; /* 1:3 */ prop = of_get_property(np, "port0", NULL); if (prop) { if (port0_is_dr) pr_warn("834x USB port0 can't be used by both DR and MPH!\n"); sicrl &= ~MPC834X_SICRL_USB0; } prop = of_get_property(np, "port1", NULL); if (prop) { if (port1_is_dr) 
pr_warn("834x USB port1 can't be used by both DR and MPH!\n"); sicrl &= ~MPC834X_SICRL_USB1; } of_node_put(np); } /* Write back */ out_be32(immap + MPC83XX_SCCR_OFFS, sccr); out_be32(immap + MPC83XX_SICRL_OFFS, sicrl); out_be32(immap + MPC83XX_SICRH_OFFS, sicrh); iounmap(immap); return 0; }
linux-master
arch/powerpc/platforms/83xx/usb_834x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/mpc830x_rdb.c * * Description: MPC830x RDB board specific routines. * This file is based on mpc831x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2009. All rights reserved. * Copyright (C) 2010. Ilya Yanok, Emcraft Systems, [email protected] */ #include <linux/pci.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <sysdev/fsl_pci.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" /* * Setup the architecture */ static void __init mpc830x_rdb_setup_arch(void) { mpc83xx_setup_arch(); mpc831x_usb_cfg(); } static const char *board[] __initdata = { "MPC8308RDB", "fsl,mpc8308rdb", "denx,mpc8308_p1m", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc830x_rdb_probe(void) { return of_device_compatible_match(of_root, board); } machine_device_initcall(mpc830x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc830x_rdb) { .name = "MPC830x RDB", .probe = mpc830x_rdb_probe, .setup_arch = mpc830x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc830x_rdb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Power Management and GPIO expander driver for MPC8349E-mITX-compatible MCU * * Copyright (c) 2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <[email protected]> */ #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/gpio/driver.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/property.h> #include <linux/reboot.h> #include <asm/machdep.h> /* * I don't have specifications for the MCU firmware, I found this register * and bits positions by the trial&error method. */ #define MCU_REG_CTRL 0x20 #define MCU_CTRL_POFF 0x40 #define MCU_CTRL_BTN 0x80 #define MCU_NUM_GPIO 2 struct mcu { struct mutex lock; struct i2c_client *client; struct gpio_chip gc; u8 reg_ctrl; }; static struct mcu *glob_mcu; struct task_struct *shutdown_thread; static int shutdown_thread_fn(void *data) { int ret; struct mcu *mcu = glob_mcu; while (!kthread_should_stop()) { ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL); if (ret < 0) pr_err("MCU status reg read failed.\n"); mcu->reg_ctrl = ret; if (mcu->reg_ctrl & MCU_CTRL_BTN) { i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl & ~MCU_CTRL_BTN); ctrl_alt_del(); } set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); } return 0; } static ssize_t show_status(struct device *d, struct device_attribute *attr, char *buf) { int ret; struct mcu *mcu = glob_mcu; ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL); if (ret < 0) return -ENODEV; mcu->reg_ctrl = ret; return sprintf(buf, "%02x\n", ret); } static DEVICE_ATTR(status, 0444, show_status, NULL); static void mcu_power_off(void) { struct mcu *mcu = glob_mcu; pr_info("Sending power-off request to the MCU...\n"); mutex_lock(&mcu->lock); i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl | MCU_CTRL_POFF); mutex_unlock(&mcu->lock); } static void 
mcu_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct mcu *mcu = gpiochip_get_data(gc); u8 bit = 1 << (4 + gpio); mutex_lock(&mcu->lock); if (val) mcu->reg_ctrl &= ~bit; else mcu->reg_ctrl |= bit; i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL, mcu->reg_ctrl); mutex_unlock(&mcu->lock); } static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { mcu_gpio_set(gc, gpio, val); return 0; } static int mcu_gpiochip_add(struct mcu *mcu) { struct device *dev = &mcu->client->dev; struct gpio_chip *gc = &mcu->gc; gc->owner = THIS_MODULE; gc->label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(dev)); gc->can_sleep = 1; gc->ngpio = MCU_NUM_GPIO; gc->base = -1; gc->set = mcu_gpio_set; gc->direction_output = mcu_gpio_dir_out; gc->parent = dev; return gpiochip_add_data(gc, mcu); } static void mcu_gpiochip_remove(struct mcu *mcu) { kfree(mcu->gc.label); gpiochip_remove(&mcu->gc); } static int mcu_probe(struct i2c_client *client) { struct mcu *mcu; int ret; mcu = kzalloc(sizeof(*mcu), GFP_KERNEL); if (!mcu) return -ENOMEM; mutex_init(&mcu->lock); mcu->client = client; i2c_set_clientdata(client, mcu); ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL); if (ret < 0) goto err; mcu->reg_ctrl = ret; ret = mcu_gpiochip_add(mcu); if (ret) goto err; /* XXX: this is potentially racy, but there is no lock for pm_power_off */ if (!pm_power_off) { glob_mcu = mcu; pm_power_off = mcu_power_off; dev_info(&client->dev, "will provide power-off service\n"); } if (device_create_file(&client->dev, &dev_attr_status)) dev_err(&client->dev, "couldn't create device file for status\n"); shutdown_thread = kthread_run(shutdown_thread_fn, NULL, "mcu-i2c-shdn"); return 0; err: kfree(mcu); return ret; } static void mcu_remove(struct i2c_client *client) { struct mcu *mcu = i2c_get_clientdata(client); kthread_stop(shutdown_thread); device_remove_file(&client->dev, &dev_attr_status); if (glob_mcu == mcu) { pm_power_off = NULL; glob_mcu = NULL; } 
mcu_gpiochip_remove(mcu); kfree(mcu); } static const struct i2c_device_id mcu_ids[] = { { "mcu-mpc8349emitx", }, {}, }; MODULE_DEVICE_TABLE(i2c, mcu_ids); static const struct of_device_id mcu_of_match_table[] = { { .compatible = "fsl,mcu-mpc8349emitx", }, { }, }; static struct i2c_driver mcu_driver = { .driver = { .name = "mcu-mpc8349emitx", .of_match_table = mcu_of_match_table, }, .probe = mcu_probe, .remove = mcu_remove, .id_table = mcu_ids, }; module_i2c_driver(mcu_driver); MODULE_DESCRIPTION("Power Management and GPIO expander driver for " "MPC8349E-mITX-compatible MCU"); MODULE_AUTHOR("Anton Vorontsov <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC8360E-RDK board file. * * Copyright (c) 2006 Freescale Semiconductor, Inc. * Copyright (c) 2007-2008 MontaVista Software, Inc. * * Author: Anton Vorontsov <[email protected]> */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of_platform.h> #include <linux/io.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <soc/fsl/qe/qe.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices); static void __init mpc836x_rdk_setup_arch(void) { mpc83xx_setup_arch(); } define_machine(mpc836x_rdk) { .name = "MPC836x RDK", .compatible = "fsl,mpc8360rdk", .setup_arch = mpc836x_rdk_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc836x_rdk.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale 83xx USB SOC setup code * * Copyright (C) 2007 Freescale Semiconductor, Inc. * Author: Li Yang */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <sysdev/fsl_soc.h> #include "mpc83xx.h" int __init mpc837x_usb_cfg(void) { void __iomem *immap; struct device_node *np = NULL; const void *prop; int ret = 0; np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr"); if (!np || !of_device_is_available(np)) { of_node_put(np); return -ENODEV; } prop = of_get_property(np, "phy_type", NULL); if (!prop || (strcmp(prop, "ulpi") && strcmp(prop, "serial"))) { pr_warn("837x USB PHY type not supported\n"); of_node_put(np); return -EINVAL; } /* Map IMMR space for pin and clock settings */ immap = ioremap(get_immrbase(), 0x1000); if (!immap) { of_node_put(np); return -ENOMEM; } /* Configure clock */ clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, MPC837X_SCCR_USB_DRCM_11, MPC837X_SCCR_USB_DRCM_11); /* Configure pin mux for ULPI/serial */ clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USB_MASK, MPC837X_SICRL_USB_ULPI); iounmap(immap); of_node_put(np); return ret; }
linux-master
arch/powerpc/platforms/83xx/usb_837x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/powerpc/platforms/83xx/mpc832x_rdb.c * * Copyright (C) Freescale Semiconductor, Inc. 2007. All rights reserved. * * Description: * MPC832x RDB board specific routines. * This file is based on mpc832x_mds.c and mpc8313_rdb.c * Author: Michael Barkowski <[email protected]> */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> #include <linux/mmc/host.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <soc/fsl/qe/qe.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #undef DEBUG #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif #ifdef CONFIG_QUICC_ENGINE static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk, struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *dev, bool on)) { struct device_node *np; unsigned int i = 0; for_each_compatible_node(np, type, compatible) { int ret; unsigned int j; const void *prop; struct resource res[2]; struct platform_device *pdev; struct fsl_spi_platform_data pdata = { .cs_control = cs_control, }; memset(res, 0, sizeof(res)); pdata.sysclk = sysclk; prop = of_get_property(np, "reg", NULL); if (!prop) goto err; pdata.bus_num = *(u32 *)prop; prop = of_get_property(np, "cell-index", NULL); if (prop) i = *(u32 *)prop; prop = of_get_property(np, "mode", NULL); if (prop && !strcmp(prop, "cpu-qe")) pdata.flags = SPI_QE_CPU_MODE; for (j = 0; j < num_board_infos; j++) { if (board_infos[j].bus_num == pdata.bus_num) pdata.max_chipselect++; } if (!pdata.max_chipselect) continue; ret = of_address_to_resource(np, 0, &res[0]); if (ret) goto err; ret = of_irq_to_resource(np, 0, &res[1]); if (ret <= 0) goto err; pdev = 
platform_device_alloc("mpc83xx_spi", i); if (!pdev) goto err; ret = platform_device_add_data(pdev, &pdata, sizeof(pdata)); if (ret) goto unreg; ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); if (ret) goto unreg; ret = platform_device_add(pdev); if (ret) goto unreg; goto next; unreg: platform_device_put(pdev); err: pr_err("%pOF: registration failed\n", np); next: i++; } return i; } static int __init fsl_spi_init(struct spi_board_info *board_infos, unsigned int num_board_infos, void (*cs_control)(struct spi_device *spi, bool on)) { u32 sysclk = -1; int ret; /* SPI controller is either clocked from QE or SoC clock */ sysclk = get_brgfreq(); if (sysclk == -1) { sysclk = fsl_get_sys_freq(); if (sysclk == -1) return -ENODEV; } ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos, num_board_infos, cs_control); if (!ret) of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos, num_board_infos, cs_control); return spi_register_board_info(board_infos, num_board_infos); } static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on) { pr_debug("%s %d %d\n", __func__, spi_get_chipselect(spi, 0), on); par_io_data_set(3, 13, on); } static struct mmc_spi_platform_data mpc832x_mmc_pdata = { .ocr_mask = MMC_VDD_33_34, }; static struct spi_board_info mpc832x_spi_boardinfo = { .bus_num = 0x4c0, .chip_select = 0, .max_speed_hz = 50000000, .modalias = "mmc_spi", .platform_data = &mpc832x_mmc_pdata, }; static int __init mpc832x_spi_init(void) { struct device_node *np; par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ par_io_config_pin(3, 3, 2, 0, 1, 0); /* SPI1 SEL, I */ par_io_config_pin(3, 13, 1, 0, 0, 0); /* !SD_CS, O */ par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ /* * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ np = of_find_compatible_node(NULL, NULL, "mmc-spi-slot"); of_node_put(np); if (np) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); #endif /* CONFIG_QUICC_ENGINE */ /* ************************************************************************ * * Setup the architecture * */ static void __init mpc832x_rdb_setup_arch(void) { #if defined(CONFIG_QUICC_ENGINE) struct device_node *np; #endif mpc83xx_setup_arch(); #ifdef CONFIG_QUICC_ENGINE if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) { par_io_init(np); of_node_put(np); for_each_node_by_name(np, "ucc") par_io_of_config(np); } #endif /* CONFIG_QUICC_ENGINE */ } machine_device_initcall(mpc832x_rdb, mpc83xx_declare_of_platform_devices); define_machine(mpc832x_rdb) { .name = "MPC832x RDB", .compatible = "MPC832xRDB", .setup_arch = mpc832x_rdb_setup_arch, .discover_phbs = mpc83xx_setup_pci, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/83xx/mpc832x_rdb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * misc setup functions for MPC83xx * * Maintainer: Kumar Gala <[email protected]> */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/of_platform.h> #include <linux/pci.h> #include <asm/debug.h> #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/ipic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <mm/mmu_decl.h> #include "mpc83xx.h" static __be32 __iomem *restart_reg_base; static int __init mpc83xx_restart_init(void) { /* map reset restart_reg_baseister space */ restart_reg_base = ioremap(get_immrbase() + 0x900, 0xff); return 0; } arch_initcall(mpc83xx_restart_init); void __noreturn mpc83xx_restart(char *cmd) { #define RST_OFFSET 0x00000900 #define RST_PROT_REG 0x00000018 #define RST_CTRL_REG 0x0000001c local_irq_disable(); if (restart_reg_base) { /* enable software reset "RSTE" */ out_be32(restart_reg_base + (RST_PROT_REG >> 2), 0x52535445); /* set software hard reset */ out_be32(restart_reg_base + (RST_CTRL_REG >> 2), 0x2); } else { printk (KERN_EMERG "Error: Restart registers not mapped, spinning!\n"); } for (;;) ; } long __init mpc83xx_time_init(void) { #define SPCR_OFFSET 0x00000110 #define SPCR_TBEN 0x00400000 __be32 __iomem *spcr = ioremap(get_immrbase() + SPCR_OFFSET, 4); __be32 tmp; tmp = in_be32(spcr); out_be32(spcr, tmp | SPCR_TBEN); iounmap(spcr); return 0; } void __init mpc83xx_ipic_init_IRQ(void) { struct device_node *np; /* looking for fsl,pq2pro-pic which is asl compatible with fsl,ipic */ np = of_find_compatible_node(NULL, NULL, "fsl,ipic"); if (!np) np = of_find_node_by_type(NULL, "ipic"); if (!np) return; ipic_init(np, 0); of_node_put(np); /* Initialize the default interrupt mapping priorities, * in case the boot rom changed something on us. 
*/ ipic_set_default_priority(); } static const struct of_device_id of_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus" }, { .compatible = "gianfar" }, { .compatible = "gpio-leds", }, { .type = "qe", }, { .compatible = "fsl,qe", }, {}, }; int __init mpc83xx_declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } #ifdef CONFIG_PCI void __init mpc83xx_setup_pci(void) { struct device_node *np; for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") mpc83xx_add_bridge(np); for_each_compatible_node(np, "pci", "fsl,mpc8314-pcie") mpc83xx_add_bridge(np); } #endif void __init mpc83xx_setup_arch(void) { phys_addr_t immrbase = get_immrbase(); int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M; unsigned long va = fix_to_virt(FIX_IMMR_BASE); if (ppc_md.progress) ppc_md.progress("mpc83xx_setup_arch()", 0); setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG); update_bats(); } int machine_check_83xx(struct pt_regs *regs) { u32 mask = 1 << (31 - IPIC_MCP_WDT); if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask)) return machine_check_generic(regs); ipic_clear_mcp_status(mask); if (debugger_fault_handler(regs)) return 1; die("Watchdog NMI Reset", regs, 0); return 1; }
linux-master
arch/powerpc/platforms/83xx/misc.c
// SPDX-License-Identifier: GPL-2.0-only /* * The driver for Freescale MPC512x LocalPlus Bus FIFO * (called SCLPC in the Reference Manual). * * Copyright (C) 2013-2015 Alexander Popov <[email protected]>. */ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <asm/mpc5121.h> #include <asm/io.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/dmaengine.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #define DRV_NAME "mpc512x_lpbfifo" struct cs_range { u32 csnum; u32 base; /* must be zero */ u32 addr; u32 size; }; static struct lpbfifo_data { spinlock_t lock; /* for protecting lpbfifo_data */ phys_addr_t regs_phys; resource_size_t regs_size; struct mpc512x_lpbfifo __iomem *regs; int irq; struct cs_range *cs_ranges; size_t cs_n; struct dma_chan *chan; struct mpc512x_lpbfifo_request *req; dma_addr_t ram_bus_addr; bool wait_lpbfifo_irq; bool wait_lpbfifo_callback; } lpbfifo; /* * A data transfer from RAM to some device on LPB is finished * when both mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback() * have been called. We execute the callback registered in * mpc512x_lpbfifo_request just after that. * But for a data transfer from some device on LPB to RAM we don't enable * LPBFIFO interrupt because clearing MPC512X_SCLPC_SUCCESS interrupt flag * automatically disables LPBFIFO reading request to the DMA controller * and the data transfer hangs. So the callback registered in * mpc512x_lpbfifo_request is executed at the end of mpc512x_lpbfifo_callback(). 
*/ /* * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO */ static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param) { struct device *dev = (struct device *)param; struct mpc512x_lpbfifo_request *req = NULL; unsigned long flags; u32 status; spin_lock_irqsave(&lpbfifo.lock, flags); if (!lpbfifo.regs) goto end; req = lpbfifo.req; if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) { dev_err(dev, "bogus LPBFIFO IRQ\n"); goto end; } status = in_be32(&lpbfifo.regs->status); if (status != MPC512X_SCLPC_SUCCESS) { dev_err(dev, "DMA transfer from RAM to peripheral failed\n"); out_be32(&lpbfifo.regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET); goto end; } /* Clear the interrupt flag */ out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS); lpbfifo.wait_lpbfifo_irq = false; if (lpbfifo.wait_lpbfifo_callback) goto end; /* Transfer is finished, set the FIFO as idle */ lpbfifo.req = NULL; spin_unlock_irqrestore(&lpbfifo.lock, flags); if (req->callback) req->callback(req); return IRQ_HANDLED; end: spin_unlock_irqrestore(&lpbfifo.lock, flags); return IRQ_HANDLED; } /* * mpc512x_lpbfifo_callback is called by DMA driver when * DMA transaction is finished. 
*/ static void mpc512x_lpbfifo_callback(void *param) { unsigned long flags; struct mpc512x_lpbfifo_request *req = NULL; enum dma_data_direction dir; spin_lock_irqsave(&lpbfifo.lock, flags); if (!lpbfifo.regs) { spin_unlock_irqrestore(&lpbfifo.lock, flags); return; } req = lpbfifo.req; if (!req) { pr_err("bogus LPBFIFO callback\n"); spin_unlock_irqrestore(&lpbfifo.lock, flags); return; } /* Release the mapping */ if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) dir = DMA_TO_DEVICE; else dir = DMA_FROM_DEVICE; dma_unmap_single(lpbfifo.chan->device->dev, lpbfifo.ram_bus_addr, req->size, dir); lpbfifo.wait_lpbfifo_callback = false; if (!lpbfifo.wait_lpbfifo_irq) { /* Transfer is finished, set the FIFO as idle */ lpbfifo.req = NULL; spin_unlock_irqrestore(&lpbfifo.lock, flags); if (req->callback) req->callback(req); } else { spin_unlock_irqrestore(&lpbfifo.lock, flags); } } static int mpc512x_lpbfifo_kick(void) { u32 bits; bool no_incr = false; u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */ u32 cs = 0; size_t i; struct dma_device *dma_dev = NULL; struct scatterlist sg; enum dma_data_direction dir; struct dma_slave_config dma_conf = {}; struct dma_async_tx_descriptor *dma_tx = NULL; dma_cookie_t cookie; int ret; /* * 1. Fit the requirements: * - the packet size must be a multiple of 4 since FIFO Data Word * Register allows only full-word access according the Reference * Manual; * - the physical address of the device on LPB and the packet size * must be aligned on BPT (bytes per transaction) or 8-bytes * boundary according the Reference Manual; * - but we choose DMA maxburst equal (or very close to) BPT to prevent * DMA controller from overtaking FIFO and causing FIFO underflow * error. 
So we force the packet size to be aligned on BPT boundary * not to confuse DMA driver which requires the packet size to be * aligned on maxburst boundary; * - BPT should be set to the LPB device port size for operation with * disabled auto-incrementing according Reference Manual. */ if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4)) return -EINVAL; if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) { bpt = lpbfifo.req->portsize; no_incr = true; } while (bpt > 1) { if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) && IS_ALIGNED(lpbfifo.req->size, bpt)) { break; } if (no_incr) return -EINVAL; bpt >>= 1; } dma_conf.dst_maxburst = max(bpt, 0x4u) / 4; dma_conf.src_maxburst = max(bpt, 0x4u) / 4; for (i = 0; i < lpbfifo.cs_n; i++) { phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr; phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size; phys_addr_t access_start = lpbfifo.req->dev_phys_addr; phys_addr_t access_end = access_start + lpbfifo.req->size; if (access_start >= cs_start && access_end <= cs_end) { cs = lpbfifo.cs_ranges[i].csnum; break; } } if (i == lpbfifo.cs_n) return -EFAULT; /* 2. 
Prepare DMA */ dma_dev = lpbfifo.chan->device; if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) { dir = DMA_TO_DEVICE; dma_conf.direction = DMA_MEM_TO_DEV; dma_conf.dst_addr = lpbfifo.regs_phys + offsetof(struct mpc512x_lpbfifo, data_word); } else { dir = DMA_FROM_DEVICE; dma_conf.direction = DMA_DEV_TO_MEM; dma_conf.src_addr = lpbfifo.regs_phys + offsetof(struct mpc512x_lpbfifo, data_word); } dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* Make DMA channel work with LPB FIFO data register */ if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) { ret = -EINVAL; goto err_dma_prep; } sg_init_table(&sg, 1); sg_dma_address(&sg) = dma_map_single(dma_dev->dev, lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir); if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg))) return -EFAULT; lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */ sg_dma_len(&sg) = lpbfifo.req->size; dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg, 1, dma_conf.direction, 0); if (!dma_tx) { ret = -ENOSPC; goto err_dma_prep; } dma_tx->callback = mpc512x_lpbfifo_callback; dma_tx->callback_param = NULL; /* 3. Prepare FIFO */ out_be32(&lpbfifo.regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET); out_be32(&lpbfifo.regs->enable, 0x0); /* * Configure the watermarks for write operation (RAM->DMA->FIFO->dev): * - high watermark 7 words according the Reference Manual, * - low watermark 512 bytes (half of the FIFO). * These watermarks don't work for read operation since the * MPC512X_SCLPC_FLUSH bit is set (according the Reference Manual). 
*/ out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7)); out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200)); /* * Start address is a physical address of the region which belongs * to the device on the LocalPlus Bus */ out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr); /* * Configure chip select, transfer direction, address increment option * and bytes per transaction option */ bits = MPC512X_SCLPC_CS(cs); if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH; if (no_incr) bits |= MPC512X_SCLPC_DAI; bits |= MPC512X_SCLPC_BPT(bpt); out_be32(&lpbfifo.regs->ctrl, bits); /* Unmask irqs */ bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE; if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) bits |= MPC512X_SCLPC_NORM_INT_ENABLE; else lpbfifo.wait_lpbfifo_irq = false; out_be32(&lpbfifo.regs->enable, bits); /* 4. Set packet size and kick FIFO off */ bits = lpbfifo.req->size | MPC512X_SCLPC_START; out_be32(&lpbfifo.regs->pkt_size, bits); /* 5. 
Finally kick DMA off */ cookie = dma_tx->tx_submit(dma_tx); if (dma_submit_error(cookie)) { ret = -ENOSPC; goto err_dma_submit; } return 0; err_dma_submit: out_be32(&lpbfifo.regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET); err_dma_prep: dma_unmap_single(dma_dev->dev, sg_dma_address(&sg), lpbfifo.req->size, dir); return ret; } static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req) { int ret = 0; if (!lpbfifo.regs) return -ENODEV; /* Check whether a transfer is in progress */ if (lpbfifo.req) return -EBUSY; lpbfifo.wait_lpbfifo_irq = true; lpbfifo.wait_lpbfifo_callback = true; lpbfifo.req = req; ret = mpc512x_lpbfifo_kick(); if (ret != 0) lpbfifo.req = NULL; /* Set the FIFO as idle */ return ret; } int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req) { unsigned long flags; int ret = 0; spin_lock_irqsave(&lpbfifo.lock, flags); ret = mpc512x_lpbfifo_submit_locked(req); spin_unlock_irqrestore(&lpbfifo.lock, flags); return ret; } EXPORT_SYMBOL(mpc512x_lpbfifo_submit); /* * LPBFIFO driver uses "ranges" property of "localbus" device tree node * for being able to determine the chip select number of a client device * ordering a DMA transfer. 
*/ static int get_cs_ranges(struct device *dev) { int ret = -ENODEV; struct device_node *lb_node; size_t i = 0; struct of_range_parser parser; struct of_range range; lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus"); if (!lb_node) return ret; of_range_parser_init(&parser, lb_node); lpbfifo.cs_n = of_range_count(&parser); lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n, sizeof(struct cs_range), GFP_KERNEL); if (!lpbfifo.cs_ranges) goto end; for_each_of_range(&parser, &range) { u32 base = lower_32_bits(range.bus_addr); if (base) goto end; lpbfifo.cs_ranges[i].csnum = upper_32_bits(range.bus_addr); lpbfifo.cs_ranges[i].base = base; lpbfifo.cs_ranges[i].addr = range.cpu_addr; lpbfifo.cs_ranges[i].size = range.size; i++; } ret = 0; end: of_node_put(lb_node); return ret; } static int mpc512x_lpbfifo_probe(struct platform_device *pdev) { struct resource r; int ret = 0; memset(&lpbfifo, 0, sizeof(struct lpbfifo_data)); spin_lock_init(&lpbfifo.lock); lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx"); if (IS_ERR(lpbfifo.chan)) return PTR_ERR(lpbfifo.chan); if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) { dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n"); ret = -ENODEV; goto err0; } lpbfifo.regs_phys = r.start; lpbfifo.regs_size = resource_size(&r); if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys, lpbfifo.regs_size, DRV_NAME)) { dev_err(&pdev->dev, "unable to request region\n"); ret = -EBUSY; goto err0; } lpbfifo.regs = devm_ioremap(&pdev->dev, lpbfifo.regs_phys, lpbfifo.regs_size); if (!lpbfifo.regs) { dev_err(&pdev->dev, "mapping registers failed\n"); ret = -ENOMEM; goto err0; } out_be32(&lpbfifo.regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET); if (get_cs_ranges(&pdev->dev) != 0) { dev_err(&pdev->dev, "bad '/localbus' device tree node\n"); ret = -ENODEV; goto err0; } lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!lpbfifo.irq) { dev_err(&pdev->dev, "mapping irq failed\n"); ret = 
-ENODEV; goto err0; } if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0, DRV_NAME, &pdev->dev) != 0) { dev_err(&pdev->dev, "requesting irq failed\n"); ret = -ENODEV; goto err1; } dev_info(&pdev->dev, "probe succeeded\n"); return 0; err1: irq_dispose_mapping(lpbfifo.irq); err0: dma_release_channel(lpbfifo.chan); return ret; } static void mpc512x_lpbfifo_remove(struct platform_device *pdev) { unsigned long flags; struct dma_device *dma_dev = lpbfifo.chan->device; struct mpc512x_lpbfifo __iomem *regs = NULL; spin_lock_irqsave(&lpbfifo.lock, flags); regs = lpbfifo.regs; lpbfifo.regs = NULL; spin_unlock_irqrestore(&lpbfifo.lock, flags); dma_dev->device_terminate_all(lpbfifo.chan); out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET); free_irq(lpbfifo.irq, &pdev->dev); irq_dispose_mapping(lpbfifo.irq); dma_release_channel(lpbfifo.chan); } static const struct of_device_id mpc512x_lpbfifo_match[] = { { .compatible = "fsl,mpc512x-lpbfifo", }, {}, }; MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match); static struct platform_driver mpc512x_lpbfifo_driver = { .probe = mpc512x_lpbfifo_probe, .remove_new = mpc512x_lpbfifo_remove, .driver = { .name = DRV_NAME, .of_match_table = mpc512x_lpbfifo_match, }, }; module_platform_driver(mpc512x_lpbfifo_driver); MODULE_AUTHOR("Alexander Popov <[email protected]>"); MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver"); MODULE_LICENSE("GPL v2");
linux-master
arch/powerpc/platforms/512x/mpc512x_lpbfifo.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2013 DENX Software Engineering * * Gerhard Sittig, <[email protected]> * * common clock driver support for the MPC512x platform */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/mpc5121.h> #include <dt-bindings/clock/mpc512x-clock.h> #include "mpc512x.h" /* our public mpc5121_clk_init() API */ /* helpers to keep the MCLK intermediates "somewhere" in our table */ enum { MCLK_IDX_MUX0, MCLK_IDX_EN0, MCLK_IDX_DIV0, MCLK_MAX_IDX, }; #define NR_PSCS 12 #define NR_MSCANS 4 #define NR_SPDIFS 1 #define NR_OUTCLK 4 #define NR_MCLKS (NR_PSCS + NR_MSCANS + NR_SPDIFS + NR_OUTCLK) /* extend the public set of clocks by adding internal slots for management */ enum { /* arrange for adjacent numbers after the public set */ MPC512x_CLK_START_PRIVATE = MPC512x_CLK_LAST_PUBLIC, /* clocks which aren't announced to the public */ MPC512x_CLK_DDR, MPC512x_CLK_MEM, MPC512x_CLK_IIM, /* intermediates in div+gate combos or fractional dividers */ MPC512x_CLK_DDR_UG, MPC512x_CLK_SDHC_x4, MPC512x_CLK_SDHC_UG, MPC512x_CLK_SDHC2_UG, MPC512x_CLK_DIU_x4, MPC512x_CLK_DIU_UG, MPC512x_CLK_MBX_BUS_UG, MPC512x_CLK_MBX_UG, MPC512x_CLK_MBX_3D_UG, MPC512x_CLK_PCI_UG, MPC512x_CLK_NFC_UG, MPC512x_CLK_LPC_UG, MPC512x_CLK_SPDIF_TX_IN, /* intermediates for the mux+gate+div+mux MCLK generation */ MPC512x_CLK_MCLKS_FIRST, MPC512x_CLK_MCLKS_LAST = MPC512x_CLK_MCLKS_FIRST + NR_MCLKS * MCLK_MAX_IDX, /* internal, symbolic spec for the number of slots */ MPC512x_CLK_LAST_PRIVATE, }; /* data required for the OF clock provider registration */ static struct clk *clks[MPC512x_CLK_LAST_PRIVATE]; static struct clk_onecell_data clk_data; /* CCM register access */ static struct mpc512x_ccm __iomem *clkregs; static DEFINE_SPINLOCK(clklock); /* SoC variants {{{ */ /* * 
tell SoC variants apart as they are rather similar yet not identical, * cache the result in an enum to not repeatedly run the expensive OF test * * MPC5123 is an MPC5121 without the MBX graphics accelerator * * MPC5125 has many more differences: no MBX, no AXE, no VIU, no SPDIF, * no PATA, no SATA, no PCI, two FECs (of different compatibility name), * only 10 PSCs (of different compatibility name), two SDHCs, different * NFC IP block, output clocks, system PLL status query, different CPMF * interpretation, no CFM, different fourth PSC/CAN mux0 input -- yet * those differences can get folded into this clock provider support * code and don't warrant a separate highly redundant implementation */ static enum soc_type { MPC512x_SOC_MPC5121, MPC512x_SOC_MPC5123, MPC512x_SOC_MPC5125, } soc; static void __init mpc512x_clk_determine_soc(void) { if (of_machine_is_compatible("fsl,mpc5121")) { soc = MPC512x_SOC_MPC5121; return; } if (of_machine_is_compatible("fsl,mpc5123")) { soc = MPC512x_SOC_MPC5123; return; } if (of_machine_is_compatible("fsl,mpc5125")) { soc = MPC512x_SOC_MPC5125; return; } } static bool __init soc_has_mbx(void) { if (soc == MPC512x_SOC_MPC5121) return true; return false; } static bool __init soc_has_axe(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_viu(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_spdif(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_pata(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_sata(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_pci(void) { if (soc == MPC512x_SOC_MPC5125) return false; return true; } static bool __init soc_has_fec2(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } static int __init soc_max_pscnum(void) { if (soc == MPC512x_SOC_MPC5125) return 10; return 
12; } static bool __init soc_has_sdhc2(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } static bool __init soc_has_nfc_5125(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } static bool __init soc_has_outclk(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } static bool __init soc_has_cpmf_0_bypass(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } static bool __init soc_has_mclk_mux0_canin(void) { if (soc == MPC512x_SOC_MPC5125) return true; return false; } /* }}} SoC variants */ /* common clk API wrappers {{{ */ /* convenience wrappers around the common clk API */ static inline struct clk *mpc512x_clk_fixed(const char *name, int rate) { return clk_register_fixed_rate(NULL, name, NULL, 0, rate); } static inline struct clk *mpc512x_clk_factor( const char *name, const char *parent_name, int mul, int div) { int clkflags; clkflags = CLK_SET_RATE_PARENT; return clk_register_fixed_factor(NULL, name, parent_name, clkflags, mul, div); } static inline struct clk *mpc512x_clk_divider( const char *name, const char *parent_name, u8 clkflags, u32 __iomem *reg, u8 pos, u8 len, int divflags) { divflags |= CLK_DIVIDER_BIG_ENDIAN; return clk_register_divider(NULL, name, parent_name, clkflags, reg, pos, len, divflags, &clklock); } static inline struct clk *mpc512x_clk_divtable( const char *name, const char *parent_name, u32 __iomem *reg, u8 pos, u8 len, const struct clk_div_table *divtab) { u8 divflags; divflags = CLK_DIVIDER_BIG_ENDIAN; return clk_register_divider_table(NULL, name, parent_name, 0, reg, pos, len, divflags, divtab, &clklock); } static inline struct clk *mpc512x_clk_gated( const char *name, const char *parent_name, u32 __iomem *reg, u8 pos) { int clkflags; u8 gateflags; clkflags = CLK_SET_RATE_PARENT; gateflags = CLK_GATE_BIG_ENDIAN; return clk_register_gate(NULL, name, parent_name, clkflags, reg, pos, gateflags, &clklock); } static inline struct clk *mpc512x_clk_muxed(const char *name, const 
char **parent_names, int parent_count, u32 __iomem *reg, u8 pos, u8 len) { int clkflags; u8 muxflags; clkflags = CLK_SET_RATE_PARENT; muxflags = CLK_MUX_BIG_ENDIAN; return clk_register_mux(NULL, name, parent_names, parent_count, clkflags, reg, pos, len, muxflags, &clklock); } /* }}} common clk API wrappers */ /* helper to isolate a bit field from a register */ static inline int get_bit_field(uint32_t __iomem *reg, uint8_t pos, uint8_t len) { uint32_t val; val = in_be32(reg); val >>= pos; val &= (1 << len) - 1; return val; } /* get the SPMF and translate it into the "sys pll" multiplier */ static int __init get_spmf_mult(void) { static int spmf_to_mult[] = { 68, 1, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, }; int spmf; spmf = get_bit_field(&clkregs->spmr, 24, 4); return spmf_to_mult[spmf]; } /* * get the SYS_DIV value and translate it into a divide factor * * values returned from here are a multiple of the real factor since the * divide ratio is fractional */ static int __init get_sys_div_x2(void) { static int sysdiv_code_to_x2[] = { 4, 5, 6, 7, 8, 9, 10, 14, 12, 16, 18, 22, 20, 24, 26, 30, 28, 32, 34, 38, 36, 40, 42, 46, 44, 48, 50, 54, 52, 56, 58, 62, 60, 64, 66, }; int divcode; divcode = get_bit_field(&clkregs->scfr2, 26, 6); return sysdiv_code_to_x2[divcode]; } /* * get the CPMF value and translate it into a multiplier factor * * values returned from here are a multiple of the real factor since the * multiplier ratio is fractional */ static int __init get_cpmf_mult_x2(void) { static int cpmf_to_mult_x36[] = { /* 0b000 is "times 36" */ 72, 2, 2, 3, 4, 5, 6, 7, }; static int cpmf_to_mult_0by[] = { /* 0b000 is "bypass" */ 2, 2, 2, 3, 4, 5, 6, 7, }; int *cpmf_to_mult; int cpmf; cpmf = get_bit_field(&clkregs->spmr, 16, 4); if (soc_has_cpmf_0_bypass()) cpmf_to_mult = cpmf_to_mult_0by; else cpmf_to_mult = cpmf_to_mult_x36; return cpmf_to_mult[cpmf]; } /* * some of the clock dividers do scale in a linear way, yet not all of * their bit combinations are 
legal; use a divider table to get a * resulting set of applicable divider values */ /* applies to the IPS_DIV, and PCI_DIV values */ static const struct clk_div_table divtab_2346[] = { { .val = 2, .div = 2, }, { .val = 3, .div = 3, }, { .val = 4, .div = 4, }, { .val = 6, .div = 6, }, { .div = 0, }, }; /* applies to the MBX_DIV, LPC_DIV, and NFC_DIV values */ static const struct clk_div_table divtab_1234[] = { { .val = 1, .div = 1, }, { .val = 2, .div = 2, }, { .val = 3, .div = 3, }, { .val = 4, .div = 4, }, { .div = 0, }, }; static int __init get_freq_from_dt(char *propname) { struct device_node *np; const unsigned int *prop; int val; val = 0; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-immr"); if (np) { prop = of_get_property(np, propname, NULL); if (prop) val = *prop; of_node_put(np); } return val; } static void __init mpc512x_clk_preset_data(void) { size_t i; for (i = 0; i < ARRAY_SIZE(clks); i++) clks[i] = ERR_PTR(-ENODEV); } /* * - receives the "bus frequency" from the caller (that's the IPS clock * rate, the historical source of clock information) * - fetches the system PLL multiplier and divider values as well as the * IPS divider value from hardware * - determines the REF clock rate either from the XTAL/OSC spec (if * there is a device tree node describing the oscillator) or from the * IPS bus clock (supported for backwards compatibility, such that * setups without XTAL/OSC specs keep working) * - creates the "ref" clock item in the clock tree, such that * subsequent code can create the remainder of the hierarchy (REF -> * SYS -> CSB -> IPS) from the REF clock rate and the returned mul/div * values */ static void __init mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq, int *sys_mul, int *sys_div, int *ips_div) { struct clk *osc_clk; int calc_freq; /* fetch mul/div factors from the hardware */ *sys_mul = get_spmf_mult(); *sys_mul *= 2; /* compensate for the fractional divider */ *sys_div = get_sys_div_x2(); *ips_div = 
get_bit_field(&clkregs->scfr1, 23, 3); /* lookup the oscillator clock for its rate */ osc_clk = of_clk_get_by_name(np, "osc"); /* * either descend from OSC to REF (and in bypassing verify the * IPS rate), or backtrack from IPS and multiplier values that * were fetched from hardware to REF and thus to the OSC value * * in either case the REF clock gets created here and the * remainder of the clock tree can get spanned from there */ if (!IS_ERR(osc_clk)) { clks[MPC512x_CLK_REF] = mpc512x_clk_factor("ref", "osc", 1, 1); calc_freq = clk_get_rate(clks[MPC512x_CLK_REF]); calc_freq *= *sys_mul; calc_freq /= *sys_div; calc_freq /= 2; calc_freq /= *ips_div; if (bus_freq && calc_freq != bus_freq) pr_warn("calc rate %d != OF spec %d\n", calc_freq, bus_freq); } else { calc_freq = bus_freq; /* start with IPS */ calc_freq *= *ips_div; /* IPS -> CSB */ calc_freq *= 2; /* CSB -> SYS */ calc_freq *= *sys_div; /* SYS -> PLL out */ calc_freq /= *sys_mul; /* PLL out -> REF == OSC */ clks[MPC512x_CLK_REF] = mpc512x_clk_fixed("ref", calc_freq); } } /* MCLK helpers {{{ */ /* * helper code for the MCLK subtree setup * * the overview in section 5.2.4 of the MPC5121e Reference Manual rev4 * suggests that all instances of the "PSC clock generation" are equal, * and that one might re-use the PSC setup for MSCAN clock generation * (section 5.2.5) as well, at least the logic if not the data for * description * * the details (starting at page 5-20) show differences in the specific * inputs of the first mux stage ("can clk in", "spdif tx"), and the * factual non-availability of the second mux stage (it's present yet * only one input is valid) * * the MSCAN clock related registers (starting at page 5-35) all * reference "spdif clk" at the first mux stage and don't mention any * "can clk" at all, which somehow is unexpected * * TODO re-check the document, and clarify whether the RM is correct in * the overview or in the details, and whether the difference is a * clipboard induced error or results 
from chip revisions * * it turns out that the RM rev4 as of 2012-06 talks about "can" for the * PSCs while RM rev3 as of 2008-10 talks about "spdif", so I guess that * first a doc update is required which better reflects reality in the * SoC before the implementation should follow while no questions remain */ /* * note that this declaration raises a checkpatch warning, but * it's the very data type dictated by <linux/clk-provider.h>, * "fixing" this warning will break compilation */ static const char *parent_names_mux0_spdif[] = { "sys", "ref", "psc-mclk-in", "spdif-tx", }; static const char *parent_names_mux0_canin[] = { "sys", "ref", "psc-mclk-in", "can-clk-in", }; enum mclk_type { MCLK_TYPE_PSC, MCLK_TYPE_MSCAN, MCLK_TYPE_SPDIF, MCLK_TYPE_OUTCLK, }; struct mclk_setup_data { enum mclk_type type; bool has_mclk1; const char *name_mux0; const char *name_en0; const char *name_div0; const char *parent_names_mux1[2]; const char *name_mclk; }; #define MCLK_SETUP_DATA_PSC(id) { \ MCLK_TYPE_PSC, 0, \ "psc" #id "-mux0", \ "psc" #id "-en0", \ "psc" #id "_mclk_div", \ { "psc" #id "_mclk_div", "dummy", }, \ "psc" #id "_mclk", \ } #define MCLK_SETUP_DATA_MSCAN(id) { \ MCLK_TYPE_MSCAN, 0, \ "mscan" #id "-mux0", \ "mscan" #id "-en0", \ "mscan" #id "_mclk_div", \ { "mscan" #id "_mclk_div", "dummy", }, \ "mscan" #id "_mclk", \ } #define MCLK_SETUP_DATA_SPDIF { \ MCLK_TYPE_SPDIF, 1, \ "spdif-mux0", \ "spdif-en0", \ "spdif_mclk_div", \ { "spdif_mclk_div", "spdif-rx", }, \ "spdif_mclk", \ } #define MCLK_SETUP_DATA_OUTCLK(id) { \ MCLK_TYPE_OUTCLK, 0, \ "out" #id "-mux0", \ "out" #id "-en0", \ "out" #id "_mclk_div", \ { "out" #id "_mclk_div", "dummy", }, \ "out" #id "_clk", \ } static struct mclk_setup_data mclk_psc_data[] = { MCLK_SETUP_DATA_PSC(0), MCLK_SETUP_DATA_PSC(1), MCLK_SETUP_DATA_PSC(2), MCLK_SETUP_DATA_PSC(3), MCLK_SETUP_DATA_PSC(4), MCLK_SETUP_DATA_PSC(5), MCLK_SETUP_DATA_PSC(6), MCLK_SETUP_DATA_PSC(7), MCLK_SETUP_DATA_PSC(8), MCLK_SETUP_DATA_PSC(9), 
MCLK_SETUP_DATA_PSC(10), MCLK_SETUP_DATA_PSC(11), }; static struct mclk_setup_data mclk_mscan_data[] = { MCLK_SETUP_DATA_MSCAN(0), MCLK_SETUP_DATA_MSCAN(1), MCLK_SETUP_DATA_MSCAN(2), MCLK_SETUP_DATA_MSCAN(3), }; static struct mclk_setup_data mclk_spdif_data[] = { MCLK_SETUP_DATA_SPDIF, }; static struct mclk_setup_data mclk_outclk_data[] = { MCLK_SETUP_DATA_OUTCLK(0), MCLK_SETUP_DATA_OUTCLK(1), MCLK_SETUP_DATA_OUTCLK(2), MCLK_SETUP_DATA_OUTCLK(3), }; /* setup the MCLK clock subtree of an individual PSC/MSCAN/SPDIF */ static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx) { size_t clks_idx_pub, clks_idx_int; u32 __iomem *mccr_reg; /* MCLK control register (mux, en, div) */ int div; /* derive a few parameters from the component type and index */ switch (entry->type) { case MCLK_TYPE_PSC: clks_idx_pub = MPC512x_CLK_PSC0_MCLK + idx; clks_idx_int = MPC512x_CLK_MCLKS_FIRST + (idx) * MCLK_MAX_IDX; mccr_reg = &clkregs->psc_ccr[idx]; break; case MCLK_TYPE_MSCAN: clks_idx_pub = MPC512x_CLK_MSCAN0_MCLK + idx; clks_idx_int = MPC512x_CLK_MCLKS_FIRST + (NR_PSCS + idx) * MCLK_MAX_IDX; mccr_reg = &clkregs->mscan_ccr[idx]; break; case MCLK_TYPE_SPDIF: clks_idx_pub = MPC512x_CLK_SPDIF_MCLK; clks_idx_int = MPC512x_CLK_MCLKS_FIRST + (NR_PSCS + NR_MSCANS) * MCLK_MAX_IDX; mccr_reg = &clkregs->spccr; break; case MCLK_TYPE_OUTCLK: clks_idx_pub = MPC512x_CLK_OUT0_CLK + idx; clks_idx_int = MPC512x_CLK_MCLKS_FIRST + (NR_PSCS + NR_MSCANS + NR_SPDIFS + idx) * MCLK_MAX_IDX; mccr_reg = &clkregs->out_ccr[idx]; break; default: return; } /* * this was grabbed from the PPC_CLOCK implementation, which * enforced a specific MCLK divider while the clock was gated * during setup (that's a documented hardware requirement) * * the PPC_CLOCK implementation might even have violated the * "MCLK <= IPS" constraint, the fixed divider value of 1 * results in a divider of 2 and thus MCLK = SYS/2 which equals * CSB which is greater than IPS; the serial port setup may have * adjusted 
the divider which the clock setup might have left in * an undesirable state * * initial setup is: * - MCLK 0 from SYS * - MCLK DIV such to not exceed the IPS clock * - MCLK 0 enabled * - MCLK 1 from MCLK DIV */ div = clk_get_rate(clks[MPC512x_CLK_SYS]); div /= clk_get_rate(clks[MPC512x_CLK_IPS]); out_be32(mccr_reg, (0 << 16)); out_be32(mccr_reg, (0 << 16) | ((div - 1) << 17)); out_be32(mccr_reg, (1 << 16) | ((div - 1) << 17)); /* * create the 'struct clk' items of the MCLK's clock subtree * * note that by design we always create all nodes and won't take * shortcuts here, because * - the "internal" MCLK_DIV and MCLK_OUT signal in turn are * selectable inputs to the CFM while those who "actually use" * the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK * for their bitrate * - in the absence of "aliases" for clocks we need to create * individual 'struct clk' items for whatever might get * referenced or looked up, even if several of those items are * identical from the logical POV (their rate value) * - for easier future maintenance and for better reflection of * the SoC's documentation, it appears appropriate to generate * clock items even for those muxers which actually are NOPs * (those with two inputs of which one is reserved) */ clks[clks_idx_int + MCLK_IDX_MUX0] = mpc512x_clk_muxed( entry->name_mux0, soc_has_mclk_mux0_canin() ? 
&parent_names_mux0_canin[0] : &parent_names_mux0_spdif[0], ARRAY_SIZE(parent_names_mux0_spdif), mccr_reg, 14, 2); clks[clks_idx_int + MCLK_IDX_EN0] = mpc512x_clk_gated( entry->name_en0, entry->name_mux0, mccr_reg, 16); clks[clks_idx_int + MCLK_IDX_DIV0] = mpc512x_clk_divider( entry->name_div0, entry->name_en0, CLK_SET_RATE_GATE, mccr_reg, 17, 15, 0); if (entry->has_mclk1) { clks[clks_idx_pub] = mpc512x_clk_muxed( entry->name_mclk, &entry->parent_names_mux1[0], ARRAY_SIZE(entry->parent_names_mux1), mccr_reg, 7, 1); } else { clks[clks_idx_pub] = mpc512x_clk_factor( entry->name_mclk, entry->parent_names_mux1[0], 1, 1); } } /* }}} MCLK helpers */ static void __init mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq) { int sys_mul, sys_div, ips_div; int mul, div; size_t mclk_idx; int freq; /* * developer's notes: * - consider whether to handle clocks which have both gates and * dividers via intermediates or by means of composites * - fractional dividers appear to not map well to composites * since they can be seen as a fixed multiplier and an * adjustable divider, while composites can only combine at * most one of a mux, div, and gate each into one 'struct clk' * item * - PSC/MSCAN/SPDIF clock generation OTOH already is very * specific and cannot get mapped to composites (at least not * a single one, maybe two of them, but then some of these * intermediate clock signals get referenced elsewhere (e.g. 
* in the clock frequency measurement, CFM) and thus need * publicly available names * - the current source layout appropriately reflects the * hardware setup, and it works, so it's questionable whether * further changes will result in big enough a benefit */ /* regardless of whether XTAL/OSC exists, have REF created */ mpc512x_clk_setup_ref_clock(np, busfreq, &sys_mul, &sys_div, &ips_div); /* now setup the REF -> SYS -> CSB -> IPS hierarchy */ clks[MPC512x_CLK_SYS] = mpc512x_clk_factor("sys", "ref", sys_mul, sys_div); clks[MPC512x_CLK_CSB] = mpc512x_clk_factor("csb", "sys", 1, 2); clks[MPC512x_CLK_IPS] = mpc512x_clk_divtable("ips", "csb", &clkregs->scfr1, 23, 3, divtab_2346); /* now setup anything below SYS and CSB and IPS */ clks[MPC512x_CLK_DDR_UG] = mpc512x_clk_factor("ddr-ug", "sys", 1, 2); /* * the Reference Manual discusses that for SDHC only even divide * ratios are supported because clock domain synchronization * between 'per' and 'ipg' is broken; * keep the divider's bit 0 cleared (per reset value), and only * allow to setup the divider's bits 7:1, which results in that * only even divide ratios can get configured upon rate changes; * keep the "x4" name because this bit shift hack is an internal * implementation detail, the "fractional divider with quarters" * semantics remains */ clks[MPC512x_CLK_SDHC_x4] = mpc512x_clk_factor("sdhc-x4", "csb", 2, 1); clks[MPC512x_CLK_SDHC_UG] = mpc512x_clk_divider("sdhc-ug", "sdhc-x4", 0, &clkregs->scfr2, 1, 7, CLK_DIVIDER_ONE_BASED); if (soc_has_sdhc2()) { clks[MPC512x_CLK_SDHC2_UG] = mpc512x_clk_divider( "sdhc2-ug", "sdhc-x4", 0, &clkregs->scfr2, 9, 7, CLK_DIVIDER_ONE_BASED); } clks[MPC512x_CLK_DIU_x4] = mpc512x_clk_factor("diu-x4", "csb", 4, 1); clks[MPC512x_CLK_DIU_UG] = mpc512x_clk_divider("diu-ug", "diu-x4", 0, &clkregs->scfr1, 0, 8, CLK_DIVIDER_ONE_BASED); /* * the "power architecture PLL" was setup from data which was * sampled from the reset config word, at this point in time the * configuration can be considered 
fixed and read only (i.e. no * longer adjustable, or no longer in need of adjustment), which * is why we don't register a PLL here but assume fixed factors */ mul = get_cpmf_mult_x2(); div = 2; /* compensate for the fractional factor */ clks[MPC512x_CLK_E300] = mpc512x_clk_factor("e300", "csb", mul, div); if (soc_has_mbx()) { clks[MPC512x_CLK_MBX_BUS_UG] = mpc512x_clk_factor( "mbx-bus-ug", "csb", 1, 2); clks[MPC512x_CLK_MBX_UG] = mpc512x_clk_divtable( "mbx-ug", "mbx-bus-ug", &clkregs->scfr1, 14, 3, divtab_1234); clks[MPC512x_CLK_MBX_3D_UG] = mpc512x_clk_factor( "mbx-3d-ug", "mbx-ug", 1, 1); } if (soc_has_pci()) { clks[MPC512x_CLK_PCI_UG] = mpc512x_clk_divtable( "pci-ug", "csb", &clkregs->scfr1, 20, 3, divtab_2346); } if (soc_has_nfc_5125()) { /* * XXX TODO implement 5125 NFC clock setup logic, * with high/low period counters in clkregs->scfr3, * currently there are no users so it's ENOIMPL */ clks[MPC512x_CLK_NFC_UG] = ERR_PTR(-ENOTSUPP); } else { clks[MPC512x_CLK_NFC_UG] = mpc512x_clk_divtable( "nfc-ug", "ips", &clkregs->scfr1, 8, 3, divtab_1234); } clks[MPC512x_CLK_LPC_UG] = mpc512x_clk_divtable("lpc-ug", "ips", &clkregs->scfr1, 11, 3, divtab_1234); clks[MPC512x_CLK_LPC] = mpc512x_clk_gated("lpc", "lpc-ug", &clkregs->sccr1, 30); clks[MPC512x_CLK_NFC] = mpc512x_clk_gated("nfc", "nfc-ug", &clkregs->sccr1, 29); if (soc_has_pata()) { clks[MPC512x_CLK_PATA] = mpc512x_clk_gated( "pata", "ips", &clkregs->sccr1, 28); } /* for PSCs there is a "registers" gate and a bitrate MCLK subtree */ for (mclk_idx = 0; mclk_idx < soc_max_pscnum(); mclk_idx++) { char name[12]; snprintf(name, sizeof(name), "psc%d", mclk_idx); clks[MPC512x_CLK_PSC0 + mclk_idx] = mpc512x_clk_gated( name, "ips", &clkregs->sccr1, 27 - mclk_idx); mpc512x_clk_setup_mclk(&mclk_psc_data[mclk_idx], mclk_idx); } clks[MPC512x_CLK_PSC_FIFO] = mpc512x_clk_gated("psc-fifo", "ips", &clkregs->sccr1, 15); if (soc_has_sata()) { clks[MPC512x_CLK_SATA] = mpc512x_clk_gated( "sata", "ips", &clkregs->sccr1, 14); } 
clks[MPC512x_CLK_FEC] = mpc512x_clk_gated("fec", "ips", &clkregs->sccr1, 13); if (soc_has_pci()) { clks[MPC512x_CLK_PCI] = mpc512x_clk_gated( "pci", "pci-ug", &clkregs->sccr1, 11); } clks[MPC512x_CLK_DDR] = mpc512x_clk_gated("ddr", "ddr-ug", &clkregs->sccr1, 10); if (soc_has_fec2()) { clks[MPC512x_CLK_FEC2] = mpc512x_clk_gated( "fec2", "ips", &clkregs->sccr1, 9); } clks[MPC512x_CLK_DIU] = mpc512x_clk_gated("diu", "diu-ug", &clkregs->sccr2, 31); if (soc_has_axe()) { clks[MPC512x_CLK_AXE] = mpc512x_clk_gated( "axe", "csb", &clkregs->sccr2, 30); } clks[MPC512x_CLK_MEM] = mpc512x_clk_gated("mem", "ips", &clkregs->sccr2, 29); clks[MPC512x_CLK_USB1] = mpc512x_clk_gated("usb1", "csb", &clkregs->sccr2, 28); clks[MPC512x_CLK_USB2] = mpc512x_clk_gated("usb2", "csb", &clkregs->sccr2, 27); clks[MPC512x_CLK_I2C] = mpc512x_clk_gated("i2c", "ips", &clkregs->sccr2, 26); /* MSCAN differs from PSC with just one gate for multiple components */ clks[MPC512x_CLK_BDLC] = mpc512x_clk_gated("bdlc", "ips", &clkregs->sccr2, 25); for (mclk_idx = 0; mclk_idx < ARRAY_SIZE(mclk_mscan_data); mclk_idx++) mpc512x_clk_setup_mclk(&mclk_mscan_data[mclk_idx], mclk_idx); clks[MPC512x_CLK_SDHC] = mpc512x_clk_gated("sdhc", "sdhc-ug", &clkregs->sccr2, 24); /* there is only one SPDIF component, which shares MCLK support code */ if (soc_has_spdif()) { clks[MPC512x_CLK_SPDIF] = mpc512x_clk_gated( "spdif", "ips", &clkregs->sccr2, 23); mpc512x_clk_setup_mclk(&mclk_spdif_data[0], 0); } if (soc_has_mbx()) { clks[MPC512x_CLK_MBX_BUS] = mpc512x_clk_gated( "mbx-bus", "mbx-bus-ug", &clkregs->sccr2, 22); clks[MPC512x_CLK_MBX] = mpc512x_clk_gated( "mbx", "mbx-ug", &clkregs->sccr2, 21); clks[MPC512x_CLK_MBX_3D] = mpc512x_clk_gated( "mbx-3d", "mbx-3d-ug", &clkregs->sccr2, 20); } clks[MPC512x_CLK_IIM] = mpc512x_clk_gated("iim", "csb", &clkregs->sccr2, 19); if (soc_has_viu()) { clks[MPC512x_CLK_VIU] = mpc512x_clk_gated( "viu", "csb", &clkregs->sccr2, 18); } if (soc_has_sdhc2()) { clks[MPC512x_CLK_SDHC2] = 
mpc512x_clk_gated( "sdhc-2", "sdhc2-ug", &clkregs->sccr2, 17); } if (soc_has_outclk()) { size_t idx; /* used as mclk_idx, just to trim line length */ for (idx = 0; idx < ARRAY_SIZE(mclk_outclk_data); idx++) mpc512x_clk_setup_mclk(&mclk_outclk_data[idx], idx); } /* * externally provided clocks (when implemented in hardware, * device tree may specify values which otherwise were unknown) */ freq = get_freq_from_dt("psc_mclk_in"); if (!freq) freq = 25000000; clks[MPC512x_CLK_PSC_MCLK_IN] = mpc512x_clk_fixed("psc_mclk_in", freq); if (soc_has_mclk_mux0_canin()) { freq = get_freq_from_dt("can_clk_in"); clks[MPC512x_CLK_CAN_CLK_IN] = mpc512x_clk_fixed( "can_clk_in", freq); } else { freq = get_freq_from_dt("spdif_tx_in"); clks[MPC512x_CLK_SPDIF_TX_IN] = mpc512x_clk_fixed( "spdif_tx_in", freq); freq = get_freq_from_dt("spdif_rx_in"); clks[MPC512x_CLK_SPDIF_TX_IN] = mpc512x_clk_fixed( "spdif_rx_in", freq); } /* fixed frequency for AC97, always 24.567MHz */ clks[MPC512x_CLK_AC97] = mpc512x_clk_fixed("ac97", 24567000); /* * pre-enable those "internal" clock items which never get * claimed by any peripheral driver, to not have the clock * subsystem disable them late at startup */ clk_prepare_enable(clks[MPC512x_CLK_DUMMY]); clk_prepare_enable(clks[MPC512x_CLK_E300]); /* PowerPC CPU */ clk_prepare_enable(clks[MPC512x_CLK_DDR]); /* DRAM */ clk_prepare_enable(clks[MPC512x_CLK_MEM]); /* SRAM */ clk_prepare_enable(clks[MPC512x_CLK_IPS]); /* SoC periph */ clk_prepare_enable(clks[MPC512x_CLK_LPC]); /* boot media */ } /* * registers the set of public clocks (those listed in the dt-bindings/ * header file) for OF lookups, keeps the intermediates private to us */ static void __init mpc5121_clk_register_of_provider(struct device_node *np) { clk_data.clks = clks; clk_data.clk_num = MPC512x_CLK_LAST_PUBLIC + 1; /* _not_ ARRAY_SIZE() */ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); } /* * temporary support for the period of time between introduction of CCF * support and the 
adjustment of peripheral drivers to OF based lookups */ static void __init mpc5121_clk_provide_migration_support(void) { struct device_node *np; /* * pre-enable those clock items which are not yet appropriately * acquired by their peripheral driver * * the PCI clock cannot get acquired by its peripheral driver, * because for this platform the driver won't probe(), instead * initialization is done from within the .setup_arch() routine * at a point in time where the clock provider has not been * setup yet and thus isn't available yet * * so we "pre-enable" the clock here, to not have the clock * subsystem automatically disable this item in a late init call * * this PCI clock pre-enable workaround only applies when there * are device tree nodes for PCI and thus the peripheral driver * has attached to bridges, otherwise the PCI clock remains * unused and so it gets disabled */ clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */ np = of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"); of_node_put(np); if (np) clk_prepare_enable(clks[MPC512x_CLK_PCI]); } /* * those macros are not exactly pretty, but they encapsulate a lot * of copy'n'paste heavy code which is even more ugly, and reduce * the potential for inconsistencies in those many code copies */ #define FOR_NODES(compatname) \ for_each_compatible_node(np, NULL, compatname) #define NODE_PREP do { \ of_address_to_resource(np, 0, &res); \ snprintf(devname, sizeof(devname), "%pa.%s", &res.start, np->name); \ } while (0) #define NODE_CHK(clkname, clkitem, regnode, regflag) do { \ struct clk *clk; \ clk = of_clk_get_by_name(np, clkname); \ if (IS_ERR(clk)) { \ clk = clkitem; \ clk_register_clkdev(clk, clkname, devname); \ if (regnode) \ clk_register_clkdev(clk, clkname, np->name); \ did_register |= DID_REG_ ## regflag; \ pr_debug("clock alias name '%s' for dev '%s' pointer %p\n", \ clkname, devname, clk); \ } else { \ clk_put(clk); \ } \ } while (0) /* * register source code provided fallback results 
for clock lookups, * these get consulted when OF based clock lookup fails (that is in the * case of not yet adjusted device tree data, where clock related specs * are missing) */ static void __init mpc5121_clk_provide_backwards_compat(void) { enum did_reg_flags { DID_REG_PSC = BIT(0), DID_REG_PSCFIFO = BIT(1), DID_REG_NFC = BIT(2), DID_REG_CAN = BIT(3), DID_REG_I2C = BIT(4), DID_REG_DIU = BIT(5), DID_REG_VIU = BIT(6), DID_REG_FEC = BIT(7), DID_REG_USB = BIT(8), DID_REG_PATA = BIT(9), }; int did_register; struct device_node *np; struct resource res; int idx; char devname[32]; did_register = 0; FOR_NODES(mpc512x_select_psc_compat()) { NODE_PREP; idx = (res.start >> 8) & 0xf; NODE_CHK("ipg", clks[MPC512x_CLK_PSC0 + idx], 0, PSC); NODE_CHK("mclk", clks[MPC512x_CLK_PSC0_MCLK + idx], 0, PSC); } FOR_NODES("fsl,mpc5121-psc-fifo") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_PSC_FIFO], 1, PSCFIFO); } FOR_NODES("fsl,mpc5121-nfc") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_NFC], 0, NFC); } FOR_NODES("fsl,mpc5121-mscan") { NODE_PREP; idx = 0; idx += (res.start & 0x2000) ? 2 : 0; idx += (res.start & 0x0080) ? 
1 : 0; NODE_CHK("ipg", clks[MPC512x_CLK_BDLC], 0, CAN); NODE_CHK("mclk", clks[MPC512x_CLK_MSCAN0_MCLK + idx], 0, CAN); } /* * do register the 'ips', 'sys', and 'ref' names globally * instead of inside each individual CAN node, as there is no * potential for a name conflict (in contrast to 'ipg' and 'mclk') */ if (did_register & DID_REG_CAN) { clk_register_clkdev(clks[MPC512x_CLK_IPS], "ips", NULL); clk_register_clkdev(clks[MPC512x_CLK_SYS], "sys", NULL); clk_register_clkdev(clks[MPC512x_CLK_REF], "ref", NULL); } FOR_NODES("fsl,mpc5121-i2c") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_I2C], 0, I2C); } /* * workaround for the fact that the I2C driver does an "anonymous" * lookup (NULL name spec, which yields the first clock spec) for * which we cannot register an alias -- a _global_ 'ipg' alias that * is not bound to any device name and returns the I2C clock item * is not a good idea * * so we have the lookup in the peripheral driver fail, which is * silent and non-fatal, and pre-enable the clock item here such * that register access is possible * * see commit b3bfce2b "i2c: mpc: cleanup clock API use" for * details, adjusting s/NULL/"ipg"/ in i2c-mpc.c would make this * workaround obsolete */ if (did_register & DID_REG_I2C) clk_prepare_enable(clks[MPC512x_CLK_I2C]); FOR_NODES("fsl,mpc5121-diu") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_DIU], 1, DIU); } FOR_NODES("fsl,mpc5121-viu") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_VIU], 0, VIU); } /* * note that 2771399a "fs_enet: cleanup clock API use" did use the * "per" string for the clock lookup in contrast to the "ipg" name * which most other nodes are using -- this is not a fatal thing * but just something to keep in mind when doing compatibility * registration, it's a non-issue with up-to-date device tree data */ FOR_NODES("fsl,mpc5121-fec") { NODE_PREP; NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC); } FOR_NODES("fsl,mpc5121-fec-mdio") { NODE_PREP; NODE_CHK("per", clks[MPC512x_CLK_FEC], 0, FEC); } /* * 
MPC5125 has two FECs: FEC1 at 0x2800, FEC2 at 0x4800; * the clock items don't "form an array" since FEC2 was * added only later and was not allowed to shift all other * clock item indices, so the numbers aren't adjacent */ FOR_NODES("fsl,mpc5125-fec") { NODE_PREP; if (res.start & 0x4000) idx = MPC512x_CLK_FEC2; else idx = MPC512x_CLK_FEC; NODE_CHK("per", clks[idx], 0, FEC); } FOR_NODES("fsl,mpc5121-usb2-dr") { NODE_PREP; idx = (res.start & 0x4000) ? 1 : 0; NODE_CHK("ipg", clks[MPC512x_CLK_USB1 + idx], 0, USB); } FOR_NODES("fsl,mpc5121-pata") { NODE_PREP; NODE_CHK("ipg", clks[MPC512x_CLK_PATA], 0, PATA); } /* * try to collapse diagnostics into a single line of output yet * provide a full list of what is missing, to avoid noise in the * absence of up-to-date device tree data -- backwards * compatibility to old DTBs is a requirement, updates may be * desirable or preferrable but are not at all mandatory */ if (did_register) { pr_notice("device tree lacks clock specs, adding fallbacks (0x%x,%s%s%s%s%s%s%s%s%s%s)\n", did_register, (did_register & DID_REG_PSC) ? " PSC" : "", (did_register & DID_REG_PSCFIFO) ? " PSCFIFO" : "", (did_register & DID_REG_NFC) ? " NFC" : "", (did_register & DID_REG_CAN) ? " CAN" : "", (did_register & DID_REG_I2C) ? " I2C" : "", (did_register & DID_REG_DIU) ? " DIU" : "", (did_register & DID_REG_VIU) ? " VIU" : "", (did_register & DID_REG_FEC) ? " FEC" : "", (did_register & DID_REG_USB) ? " USB" : "", (did_register & DID_REG_PATA) ? " PATA" : ""); } else { pr_debug("device tree has clock specs, no fallbacks added\n"); } } /* * The "fixed-clock" nodes (which includes the oscillator node if the board's * DT provides one) has already been scanned by the of_clk_init() in * time_init(). 
*/ int __init mpc5121_clk_init(void) { struct device_node *clk_np; int busfreq; /* map the clock control registers */ clk_np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-clock"); if (!clk_np) return -ENODEV; clkregs = of_iomap(clk_np, 0); WARN_ON(!clkregs); /* determine the SoC variant we run on */ mpc512x_clk_determine_soc(); /* invalidate all not yet registered clock slots */ mpc512x_clk_preset_data(); /* * add a dummy clock for those situations where a clock spec is * required yet no real clock is involved */ clks[MPC512x_CLK_DUMMY] = mpc512x_clk_fixed("dummy", 0); /* * have all the real nodes in the clock tree populated from REF * down to all leaves, either starting from the OSC node or from * a REF root that was created from the IPS bus clock input */ busfreq = get_freq_from_dt("bus-frequency"); mpc512x_clk_setup_clock_tree(clk_np, busfreq); /* register as an OF clock provider */ mpc5121_clk_register_of_provider(clk_np); of_node_put(clk_np); /* * unbreak not yet adjusted peripheral drivers during migration * towards fully operational common clock support, and allow * operation in the absence of clock related device tree specs */ mpc5121_clk_provide_migration_support(); mpc5121_clk_provide_backwards_compat(); return 0; }
linux-master
arch/powerpc/platforms/512x/clock-commonclk.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby, <[email protected]> * * Description: * MPC5121ADS CPLD irq handling */ #undef DEBUG #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_irq.h> static struct device_node *cpld_pic_node; static struct irq_domain *cpld_pic_host; /* * Bits to ignore in the misc_status register * 0x10 touch screen pendown is hard routed to irq1 * 0x02 pci status is read from pci status register */ #define MISC_IGNORE 0x12 /* * Nothing to ignore in pci status register */ #define PCI_IGNORE 0x00 struct cpld_pic { u8 pci_mask; u8 pci_status; u8 route; u8 misc_mask; u8 misc_status; u8 misc_control; }; static struct cpld_pic __iomem *cpld_regs; static void __iomem * irq_to_pic_mask(unsigned int irq) { return irq <= 7 ? &cpld_regs->pci_mask : &cpld_regs->misc_mask; } static unsigned int irq_to_pic_bit(unsigned int irq) { return 1 << (irq & 0x7); } static void cpld_mask_irq(struct irq_data *d) { unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, in_8(pic_mask) | irq_to_pic_bit(cpld_irq)); } static void cpld_unmask_irq(struct irq_data *d) { unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); out_8(pic_mask, in_8(pic_mask) & ~irq_to_pic_bit(cpld_irq)); } static struct irq_chip cpld_pic = { .name = "CPLD PIC", .irq_mask = cpld_mask_irq, .irq_ack = cpld_mask_irq, .irq_unmask = cpld_unmask_irq, }; static unsigned int cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, u8 __iomem *maskp) { u8 status = in_8(statusp); u8 mask = in_8(maskp); /* ignore don't cares and masked irqs */ status |= (ignore | mask); if (status == 0xff) return ~0; return ffz(status) + offset; } static void cpld_pic_cascade(struct irq_desc *desc) { unsigned 
int hwirq; hwirq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, &cpld_regs->pci_mask); if (hwirq != ~0) { generic_handle_domain_irq(cpld_pic_host, hwirq); return; } hwirq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, &cpld_regs->misc_mask); if (hwirq != ~0) { generic_handle_domain_irq(cpld_pic_host, hwirq); return; } } static int cpld_pic_host_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { return cpld_pic_node == node; } static int cpld_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); return 0; } static const struct irq_domain_ops cpld_pic_host_ops = { .match = cpld_pic_host_match, .map = cpld_pic_host_map, }; void __init mpc5121_ads_cpld_map(void) { struct device_node *np = NULL; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld-pic"); if (!np) { printk(KERN_ERR "CPLD PIC init: can not find cpld-pic node\n"); return; } cpld_regs = of_iomap(np, 0); of_node_put(np); } void __init mpc5121_ads_cpld_pic_init(void) { unsigned int cascade_irq; struct device_node *np = NULL; pr_debug("cpld_ic_init\n"); np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld-pic"); if (!np) { printk(KERN_ERR "CPLD PIC init: can not find cpld-pic node\n"); return; } if (!cpld_regs) goto end; cascade_irq = irq_of_parse_and_map(np, 0); if (!cascade_irq) goto end; /* * statically route touch screen pendown through 1 * and ignore it here * route all others through our cascade irq */ out_8(&cpld_regs->route, 0xfd); out_8(&cpld_regs->pci_mask, 0xff); /* unmask pci ints in misc mask */ out_8(&cpld_regs->misc_mask, ~(MISC_IGNORE)); cpld_pic_node = of_node_get(np); cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL); if (!cpld_pic_host) { printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); goto end; } irq_set_chained_handler(cascade_irq, cpld_pic_cascade); 
end: of_node_put(np); }
linux-master
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby, <[email protected]>, Thur Mar 29 2007 * * Description: * MPC5121 ADS board setup */ #include <linux/kernel.h> #include <linux/io.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/time.h> #include <sysdev/fsl_pci.h> #include "mpc512x.h" #include "mpc5121_ads.h" static void __init mpc5121_ads_setup_arch(void) { printk(KERN_INFO "MPC5121 ADS board from Freescale Semiconductor\n"); /* * cpld regs are needed early */ mpc5121_ads_cpld_map(); mpc512x_setup_arch(); } static void __init mpc5121_ads_setup_pci(void) { #ifdef CONFIG_PCI struct device_node *np; for_each_compatible_node(np, "pci", "fsl,mpc5121-pci") mpc83xx_add_bridge(np); #endif } static void __init mpc5121_ads_init_IRQ(void) { mpc512x_init_IRQ(); mpc5121_ads_cpld_pic_init(); } /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc5121_ads_probe(void) { mpc512x_init_early(); return 1; } define_machine(mpc5121_ads) { .name = "MPC5121 ADS", .compatible = "fsl,mpc5121ads", .probe = mpc5121_ads_probe, .setup_arch = mpc5121_ads_setup_arch, .discover_phbs = mpc5121_ads_setup_pci, .init = mpc512x_init, .init_IRQ = mpc5121_ads_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc512x_restart, };
linux-master
arch/powerpc/platforms/512x/mpc5121_ads.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2010 DENX Software Engineering * * Anatolij Gustschin, <[email protected]> * * PDM360NG board setup */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_fdt.h> #include <asm/machdep.h> #include <asm/ipic.h> #include "mpc512x.h" #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) #include <linux/interrupt.h> #include <linux/spi/ads7846.h> #include <linux/spi/spi.h> #include <linux/notifier.h> static void *pdm360ng_gpio_base; static int pdm360ng_get_pendown_state(void) { u32 reg; reg = in_be32(pdm360ng_gpio_base + 0xc); if (reg & 0x40) setbits32(pdm360ng_gpio_base + 0xc, 0x40); reg = in_be32(pdm360ng_gpio_base + 0x8); /* return 1 if pen is down */ return (reg & 0x40) == 0; } static struct ads7846_platform_data pdm360ng_ads7846_pdata = { .model = 7845, .get_pendown_state = pdm360ng_get_pendown_state, .irq_flags = IRQF_TRIGGER_LOW, }; static int __init pdm360ng_penirq_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-gpio"); if (!np) { pr_err("%s: Can't find 'mpc5121-gpio' node\n", __func__); return -ENODEV; } pdm360ng_gpio_base = of_iomap(np, 0); of_node_put(np); if (!pdm360ng_gpio_base) { pr_err("%s: Can't map gpio regs.\n", __func__); return -ENODEV; } out_be32(pdm360ng_gpio_base + 0xc, 0xffffffff); setbits32(pdm360ng_gpio_base + 0x18, 0x2000); setbits32(pdm360ng_gpio_base + 0x10, 0x40); return 0; } static int pdm360ng_touchscreen_notifier_call(struct notifier_block *nb, unsigned long event, void *__dev) { struct device *dev = __dev; if ((event == BUS_NOTIFY_ADD_DEVICE) && of_device_is_compatible(dev->of_node, "ti,ads7846")) { dev->platform_data = &pdm360ng_ads7846_pdata; return NOTIFY_OK; } return NOTIFY_DONE; } static struct notifier_block pdm360ng_touchscreen_nb = { .notifier_call = pdm360ng_touchscreen_notifier_call, }; static 
void __init pdm360ng_touchscreen_init(void) { if (pdm360ng_penirq_init()) return; bus_register_notifier(&spi_bus_type, &pdm360ng_touchscreen_nb); } #else static inline void __init pdm360ng_touchscreen_init(void) { } #endif /* CONFIG_TOUCHSCREEN_ADS7846 */ void __init pdm360ng_init(void) { mpc512x_init(); pdm360ng_touchscreen_init(); } static int __init pdm360ng_probe(void) { mpc512x_init_early(); return 1; } define_machine(pdm360ng) { .name = "PDM360NG", .compatible = "ifm,pdm360ng", .probe = pdm360ng_probe, .setup_arch = mpc512x_setup_arch, .init = pdm360ng_init, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc512x_restart, };
linux-master
arch/powerpc/platforms/512x/pdm360ng.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved. * * Author: John Rigby, <[email protected]> * * Description: * MPC512x SoC setup */ #include <linux/kernel.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/time.h> #include "mpc512x.h" /* * list of supported boards */ static const char * const board[] __initconst = { "prt,prtlvt", "fsl,mpc5125ads", "ifm,ac14xx", NULL }; /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc512x_generic_probe(void) { if (!of_device_compatible_match(of_root, board)) return 0; mpc512x_init_early(); return 1; } define_machine(mpc512x_generic) { .name = "MPC512x generic", .probe = mpc512x_generic_probe, .init = mpc512x_init, .setup_arch = mpc512x_setup_arch, .init_IRQ = mpc512x_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc512x_restart, };
linux-master
arch/powerpc/platforms/512x/mpc512x_generic.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: John Rigby <[email protected]>
 *
 * Description:
 * MPC512x Shared code
 */

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/fsl-diu-fb.h>
#include <linux/memblock.h>
#include <sysdev/fsl_soc.h>

#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/time.h>
#include <asm/mpc5121.h>
#include <asm/mpc52xx_psc.h>

#include "mpc512x.h"

/* Mapped by mpc512x_restart_init(); NULL until then. */
static struct mpc512x_reset_module __iomem *reset_module_base;

/*
 * Machine restart hook: unlock the reset module ("RSTE" key) and
 * trigger a software hard reset, then spin until it takes effect.
 */
void __noreturn mpc512x_restart(char *cmd)
{
	if (reset_module_base) {
		/* Enable software reset "RSTE" */
		out_be32(&reset_module_base->rpr, 0x52535445);
		/* Set software hard reset */
		out_be32(&reset_module_base->rcr, 0x2);
	} else {
		pr_err("Restart module not mapped.\n");
	}
	for (;;)
		;
}

/*
 * State carried over from a firmware-initialized DIU: a private copy of
 * the gamma table and area descriptor, plus the location of the splash
 * frame buffer that must survive early boot.
 */
struct fsl_diu_shared_fb {
	u8		gamma[0x300];	/* 32-bit aligned! */
	struct diu_ad	ad0;		/* 32-bit aligned! */
	phys_addr_t	fb_phys;
	size_t		fb_len;
	bool		in_use;
};

/* receives a pixel clock spec in pico seconds, adjusts the DIU clock rate */
static void mpc512x_set_pixel_clock(unsigned int pixclock)
{
	struct device_node *np;
	struct clk *clk_diu;
	unsigned long epsilon, minpixclock, maxpixclock;
	unsigned long offset, want, got, delta;

	/* lookup and enable the DIU clock */
	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu");
	if (!np) {
		pr_err("Could not find DIU device tree node.\n");
		return;
	}
	clk_diu = of_clk_get(np, 0);
	if (IS_ERR(clk_diu)) {
		/* backwards compat with device trees that lack clock specs */
		clk_diu = clk_get_sys(np->name, "ipg");
	}
	of_node_put(np);
	if (IS_ERR(clk_diu)) {
		pr_err("Could not lookup DIU clock.\n");
		return;
	}
	if (clk_prepare_enable(clk_diu)) {
		pr_err("Could not enable DIU clock.\n");
		return;
	}

	/*
	 * convert the picoseconds spec into the desired clock rate,
	 * determine the acceptable clock range for the monitor (+/- 5%),
	 * do the calculation in steps to avoid integer overflow
	 */
	pr_debug("DIU pixclock in ps - %u\n", pixclock);
	pixclock = (1000000000 / pixclock) * 1000;
	pr_debug("DIU pixclock freq - %u\n", pixclock);

	epsilon = pixclock / 20; /* pixclock * 0.05 */
	pr_debug("DIU deviation - %lu\n", epsilon);
	minpixclock = pixclock - epsilon;
	maxpixclock = pixclock + epsilon;
	pr_debug("DIU minpixclock - %lu\n", minpixclock);
	pr_debug("DIU maxpixclock - %lu\n", maxpixclock);

	/*
	 * check whether the DIU supports the desired pixel clock
	 *
	 * - simply request the desired clock and see what the
	 *   platform's clock driver will make of it, assuming that it
	 *   will setup the best approximation of the requested value
	 * - try other candidate frequencies in the order of decreasing
	 *   preference (i.e. with increasing distance from the desired
	 *   pixel clock, and checking the lower frequency before the
	 *   higher frequency to not overload the hardware) until the
	 *   first match is found -- any potential subsequent match
	 *   would only be as good as the former match or typically
	 *   would be less preferable
	 *
	 * the offset increment of pixelclock divided by 64 is an
	 * arbitrary choice -- it's simple to calculate, in the typical
	 * case we expect the first check to succeed already, in the
	 * worst case seven frequencies get tested (the exact center and
	 * three more values each to the left and to the right) before
	 * the 5% tolerance window is exceeded, resulting in fast enough
	 * execution yet high enough probability of finding a suitable
	 * value, while the error rate will be in the order of single
	 * percents
	 */
	for (offset = 0; offset <= epsilon; offset += pixclock / 64) {
		want = pixclock - offset;
		pr_debug("DIU checking clock - %lu\n", want);
		clk_set_rate(clk_diu, want);
		got = clk_get_rate(clk_diu);
		delta = abs(pixclock - got);
		if (delta < epsilon)
			break;
		if (!offset)
			continue;
		want = pixclock + offset;
		pr_debug("DIU checking clock - %lu\n", want);
		clk_set_rate(clk_diu, want);
		got = clk_get_rate(clk_diu);
		delta = abs(pixclock - got);
		if (delta < epsilon)
			break;
	}
	if (offset <= epsilon) {
		pr_debug("DIU clock accepted - %lu\n", want);
		pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
			 pixclock, got, delta, epsilon);
		return;
	}
	pr_warn("DIU pixclock auto search unsuccessful\n");

	/*
	 * what is the most appropriate action to take when the search
	 * for an available pixel clock which is acceptable to the
	 * monitor has failed? disable the DIU (clock) or just provide
	 * a "best effort"? we go with the latter
	 */
	pr_warn("DIU pixclock best effort fallback (backend's choice)\n");
	clk_set_rate(clk_diu, pixclock);
	got = clk_get_rate(clk_diu);
	delta = abs(pixclock - got);
	pr_debug("DIU pixclock want %u, got %lu, delta %lu, eps %lu\n",
		 pixclock, got, delta, epsilon);
}

/* Only DVI is supported on this SoC; ignore the requested port. */
static enum fsl_diu_monitor_port
mpc512x_valid_monitor_port(enum fsl_diu_monitor_port port)
{
	return FSL_DIU_PORT_DVI;
}

static struct fsl_diu_shared_fb __attribute__ ((__aligned__(8))) diu_shared_fb;

/* Return one reserved page to the page allocator. */
static inline void mpc512x_free_bootmem(struct page *page)
{
	BUG_ON(PageTail(page));
	BUG_ON(page_ref_count(page) > 1);
	free_reserved_page(page);
}

/*
 * Called on first fbdev open: the splash frame buffer is no longer
 * needed, so hand its reserved pages back page by page, then clear
 * the hook so this runs only once.
 */
static void mpc512x_release_bootmem(void)
{
	unsigned long addr = diu_shared_fb.fb_phys & PAGE_MASK;
	unsigned long size = diu_shared_fb.fb_len;
	unsigned long start, end;

	if (diu_shared_fb.in_use) {
		start = PFN_UP(addr);
		end = PFN_DOWN(addr + size);

		for (; start < end; start++)
			mpc512x_free_bootmem(pfn_to_page(start));

		diu_shared_fb.in_use = false;
	}
	diu_ops.release_bootmem	= NULL;
}

/*
 * Check if DIU was pre-initialized. If so, perform steps
 * needed to continue displaying through the whole boot process.
 * Move area descriptor and gamma table elsewhere, they are
 * destroyed by bootmem allocator otherwise. The frame buffer
 * address range will be reserved in setup_arch() after bootmem
 * allocator is up.
 */
static void __init mpc512x_init_diu(void)
{
	struct device_node *np;
	struct diu __iomem *diu_reg;
	phys_addr_t desc;
	void __iomem *vaddr;
	unsigned long mode, pix_fmt, res, bpp;
	unsigned long dst;

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-diu");
	if (!np) {
		pr_err("No DIU node\n");
		return;
	}

	diu_reg = of_iomap(np, 0);
	of_node_put(np);
	if (!diu_reg) {
		pr_err("Can't map DIU\n");
		return;
	}

	/* firmware left the DIU off: nothing to preserve */
	mode = in_be32(&diu_reg->diu_mode);
	if (mode == MFB_MODE0) {
		pr_info("%s: DIU OFF\n", __func__);
		goto out;
	}

	/* copy the active area descriptor into our own storage */
	desc = in_be32(&diu_reg->desc[0]);
	vaddr = ioremap(desc, sizeof(struct diu_ad));
	if (!vaddr) {
		pr_err("Can't map DIU area desc.\n");
		goto out;
	}
	memcpy(&diu_shared_fb.ad0, vaddr, sizeof(struct diu_ad));
	/* flush fb area descriptor */
	dst = (unsigned long)&diu_shared_fb.ad0;
	flush_dcache_range(dst, dst + sizeof(struct diu_ad) - 1);

	/* derive frame buffer location/size from the descriptor */
	res = in_be32(&diu_reg->disp_size);
	pix_fmt = in_le32(vaddr);
	bpp = ((pix_fmt >> 16) & 0x3) + 1;
	diu_shared_fb.fb_phys = in_le32(vaddr + 4);
	diu_shared_fb.fb_len = ((res & 0xfff0000) >> 16) * (res & 0xfff) * bpp;
	diu_shared_fb.in_use = true;
	iounmap(vaddr);

	/* likewise preserve the gamma table */
	desc = in_be32(&diu_reg->gamma);
	vaddr = ioremap(desc, sizeof(diu_shared_fb.gamma));
	if (!vaddr) {
		pr_err("Can't map DIU area desc.\n");
		diu_shared_fb.in_use = false;
		goto out;
	}
	memcpy(&diu_shared_fb.gamma, vaddr, sizeof(diu_shared_fb.gamma));
	/* flush gamma table */
	dst = (unsigned long)&diu_shared_fb.gamma;
	flush_dcache_range(dst, dst + sizeof(diu_shared_fb.gamma) - 1);

	iounmap(vaddr);
	/* point the hardware at our copies; unused planes off */
	out_be32(&diu_reg->gamma, virt_to_phys(&diu_shared_fb.gamma));
	out_be32(&diu_reg->desc[1], 0);
	out_be32(&diu_reg->desc[2], 0);
	out_be32(&diu_reg->desc[0], virt_to_phys(&diu_shared_fb.ad0));

out:
	iounmap(diu_reg);
}

/*
 * Reserve the firmware's frame buffer (if one is live) and install the
 * board hooks used by the fsl-diu-fb driver.
 */
static void __init mpc512x_setup_diu(void)
{
	int ret;

	/*
	 * We do not allocate and configure new area for bitmap buffer
	 * because it would require copying bitmap data (splash image)
	 * and so negatively affect boot time. Instead we reserve the
	 * already configured frame buffer area so that it won't be
	 * destroyed. The starting address of the area to reserve and
	 * also it's length is passed to memblock_reserve(). It will be
	 * freed later on first open of fbdev, when splash image is not
	 * needed any more.
	 */
	if (diu_shared_fb.in_use) {
		ret = memblock_reserve(diu_shared_fb.fb_phys,
				       diu_shared_fb.fb_len);
		if (ret) {
			pr_err("%s: reserve bootmem failed\n", __func__);
			diu_shared_fb.in_use = false;
		}
	}

	diu_ops.set_pixel_clock		= mpc512x_set_pixel_clock;
	diu_ops.valid_monitor_port	= mpc512x_valid_monitor_port;
	diu_ops.release_bootmem		= mpc512x_release_bootmem;
}

/* Locate the IPIC node and bring up the interrupt controller. */
void __init mpc512x_init_IRQ(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-ipic");
	if (!np)
		return;

	ipic_init(np, 0);
	of_node_put(np);

	/*
	 * Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	ipic_set_default_priority();
}

/*
 * Nodes to do bus probe on, soc and localbus
 */
static const struct of_device_id of_bus_ids[] __initconst = {
	{ .compatible = "fsl,mpc5121-immr", },
	{ .compatible = "fsl,mpc5121-localbus", },
	{ .compatible = "fsl,mpc5121-mbx", },
	{ .compatible = "fsl,mpc5121-nfc", },
	{ .compatible = "fsl,mpc5121-sram", },
	{ .compatible = "fsl,mpc5121-pci", },
	{ .compatible = "gpio-leds", },
	{},
};

/* Populate platform devices for the buses listed above. */
static void __init mpc512x_declare_of_platform_devices(void)
{
	if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
		printk(KERN_ERR __FILE__ ": "
			"Error while probing of_platform bus\n");
}

/* Fallback PSC FIFO size (bytes) when the DT gives none. */
#define DEFAULT_FIFO_SIZE 16

/* PSC compatible string for the running SoC, or NULL if unknown. */
const char *__init mpc512x_select_psc_compat(void)
{
	if (of_machine_is_compatible("fsl,mpc5121"))
		return "fsl,mpc5121-psc";

	if (of_machine_is_compatible("fsl,mpc5125"))
		return "fsl,mpc5125-psc";

	return NULL;
}

/* Reset-module compatible string for the running SoC, or NULL. */
static const char *__init mpc512x_select_reset_compat(void)
{
	if (of_machine_is_compatible("fsl,mpc5121"))
		return "fsl,mpc5121-reset";

	if (of_machine_is_compatible("fsl,mpc5125"))
		return "fsl,mpc5125-reset";

	return NULL;
}
/*
 * Read a FIFO size property (in bytes) from @np, falling back to
 * DEFAULT_FIFO_SIZE with a warning when the property is absent.
 */
static unsigned int __init get_fifo_size(struct device_node *np,
					 char *prop_name)
{
	const unsigned int *fp;

	fp = of_get_property(np, prop_name, NULL);
	if (fp)
		return *fp;

	pr_warn("no %s property in %pOF node, defaulting to %d\n",
		prop_name, np, DEFAULT_FIFO_SIZE);

	return DEFAULT_FIFO_SIZE;
}

/* FIFO control registers follow the PSC register block. */
#define FIFOC(_base) ((struct mpc512x_psc_fifo __iomem *) \
		    ((u32)(_base) + sizeof(struct mpc52xx_psc)))

/* Init PSC FIFO space for TX and RX slices */
static void __init mpc512x_psc_fifo_init(void)
{
	struct device_node *np;
	void __iomem *psc;
	unsigned int tx_fifo_size;
	unsigned int rx_fifo_size;
	const char *psc_compat;
	int fifobase = 0; /* current fifo address in 32 bit words */

	psc_compat = mpc512x_select_psc_compat();
	if (!psc_compat) {
		pr_err("%s: no compatible devices found\n", __func__);
		return;
	}

	/* carve the shared FIFO RAM into per-PSC tx/rx slices */
	for_each_compatible_node(np, NULL, psc_compat) {
		tx_fifo_size = get_fifo_size(np, "fsl,tx-fifo-size");
		rx_fifo_size = get_fifo_size(np, "fsl,rx-fifo-size");

		/* size in register is in 4 byte units */
		tx_fifo_size /= 4;
		rx_fifo_size /= 4;
		if (!tx_fifo_size)
			tx_fifo_size = 1;
		if (!rx_fifo_size)
			rx_fifo_size = 1;

		psc = of_iomap(np, 0);
		if (!psc) {
			pr_err("%s: Can't map %pOF device\n",
			       __func__, np);
			continue;
		}

		/* FIFO space is 4KiB, check if requested size is available */
		if ((fifobase + tx_fifo_size + rx_fifo_size) > 0x1000) {
			pr_err("%s: no fifo space available for %pOF\n",
			       __func__, np);
			iounmap(psc);
			/*
			 * chances are that another device requests less
			 * fifo space, so we continue.
			 */
			continue;
		}

		/* set tx and rx fifo size registers */
		out_be32(&FIFOC(psc)->txsz, (fifobase << 16) | tx_fifo_size);
		fifobase += tx_fifo_size;
		out_be32(&FIFOC(psc)->rxsz, (fifobase << 16) | rx_fifo_size);
		fifobase += rx_fifo_size;

		/* reset and enable the slices */
		out_be32(&FIFOC(psc)->txcmd, 0x80);
		out_be32(&FIFOC(psc)->txcmd, 0x01);
		out_be32(&FIFOC(psc)->rxcmd, 0x80);
		out_be32(&FIFOC(psc)->rxcmd, 0x01);

		iounmap(psc);
	}
}

/* Map the reset module so mpc512x_restart() can use it later. */
static void __init mpc512x_restart_init(void)
{
	struct device_node *np;
	const char *reset_compat;

	reset_compat = mpc512x_select_reset_compat();
	np = of_find_compatible_node(NULL, NULL, reset_compat);
	if (!np)
		return;

	reset_module_base = of_iomap(np, 0);
	of_node_put(np);
}

/* Early shared init: restart hook and (optionally) DIU takeover. */
void __init mpc512x_init_early(void)
{
	mpc512x_restart_init();
	if (IS_ENABLED(CONFIG_FB_FSL_DIU))
		mpc512x_init_diu();
}

/* Shared machine init: clocks, platform devices, PSC FIFOs. */
void __init mpc512x_init(void)
{
	mpc5121_clk_init();
	mpc512x_declare_of_platform_devices();
	mpc512x_psc_fifo_init();
}

/* setup_arch hook shared by MPC512x boards. */
void __init mpc512x_setup_arch(void)
{
	if (IS_ENABLED(CONFIG_FB_FSL_DIU))
		mpc512x_setup_diu();
}

/**
 * mpc512x_cs_config - Setup chip select configuration
 * @cs: chip select number
 * @val: chip select configuration value
 *
 * Perform chip select configuration for devices on LocalPlus Bus.
 * Intended to dynamically reconfigure the chip select parameters
 * for configurable devices on the bus.
 *
 * Returns 0 on success, -EINVAL for a chip select above 7, or
 * -ENOMEM when the LPC register block cannot be mapped.
 */
int mpc512x_cs_config(unsigned int cs, u32 val)
{
	/* lazily mapped once, then reused across calls */
	static struct mpc512x_lpc __iomem *lpc;
	struct device_node *np;

	if (cs > 7)
		return -EINVAL;

	if (!lpc) {
		np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-lpc");
		lpc = of_iomap(np, 0);
		of_node_put(np);
		if (!lpc)
			return -ENOMEM;
	}

	out_be32(&lpc->cs_cfg[cs], val);
	return 0;
}
EXPORT_SYMBOL(mpc512x_cs_config);
linux-master
arch/powerpc/platforms/512x/mpc512x_shared.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerPC 4xx Clock and Power Management * * Copyright (C) 2010, Applied Micro Circuits Corporation * Victor Gallardo ([email protected]) * * Based on arch/powerpc/platforms/44x/idle.c: * Jerone Young <[email protected]> * Copyright 2008 IBM Corp. * * Based on arch/powerpc/sysdev/fsl_pmc.c: * Anton Vorontsov <[email protected]> * Copyright 2009 MontaVista Software, Inc. * * See file CREDITS for list of people who contributed to this * project. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/sysfs.h> #include <linux/cpu.h> #include <linux/suspend.h> #include <asm/dcr.h> #include <asm/dcr-native.h> #include <asm/machdep.h> #define CPM_ER 0 #define CPM_FR 1 #define CPM_SR 2 #define CPM_IDLE_WAIT 0 #define CPM_IDLE_DOZE 1 struct cpm { dcr_host_t dcr_host; unsigned int dcr_offset[3]; unsigned int powersave_off; unsigned int unused; unsigned int idle_doze; unsigned int standby; unsigned int suspend; }; static struct cpm cpm; struct cpm_idle_mode { unsigned int enabled; const char *name; }; static struct cpm_idle_mode idle_mode[] = { [CPM_IDLE_WAIT] = { 1, "wait" }, /* default */ [CPM_IDLE_DOZE] = { 0, "doze" }, }; static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask) { unsigned int value; /* CPM controller supports 3 different types of sleep interface * known as class 1, 2 and 3. For class 1 units, they are * unconditionally put to sleep when the corresponding CPM bit is * set. For class 2 and 3 units this is not case; if they can be * put to sleep, they will. Here we do not verify, we just * set them and expect them to eventually go off when they can. 
*/ value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]); dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask); /* return old state, to restore later if needed */ return value; } static void cpm_idle_wait(void) { unsigned long msr_save; /* save off initial state */ msr_save = mfmsr(); /* sync required when CPM0_ER[CPU] is set */ mb(); /* set wait state MSR */ mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE); isync(); /* return to initial state */ mtmsr(msr_save); isync(); } static void cpm_idle_sleep(unsigned int mask) { unsigned int er_save; /* update CPM_ER state */ er_save = cpm_set(CPM_ER, mask); /* go to wait state so that CPM0_ER[CPU] can take effect */ cpm_idle_wait(); /* restore CPM_ER state */ dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save); } static void cpm_idle_doze(void) { cpm_idle_sleep(cpm.idle_doze); } static void cpm_idle_config(int mode) { int i; if (idle_mode[mode].enabled) return; for (i = 0; i < ARRAY_SIZE(idle_mode); i++) idle_mode[i].enabled = 0; idle_mode[mode].enabled = 1; } static ssize_t cpm_idle_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { char *s = buf; int i; for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { if (idle_mode[i].enabled) s += sprintf(s, "[%s] ", idle_mode[i].name); else s += sprintf(s, "%s ", idle_mode[i].name); } *(s-1) = '\n'; /* convert the last space to a newline */ return s - buf; } static ssize_t cpm_idle_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { int i; char *p; int len; p = memchr(buf, '\n', n); len = p ? 
p - buf : n; for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { if (strncmp(buf, idle_mode[i].name, len) == 0) { cpm_idle_config(i); return n; } } return -EINVAL; } static struct kobj_attribute cpm_idle_attr = __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store); static void __init cpm_idle_config_sysfs(void) { struct device *dev; unsigned long ret; dev = get_cpu_device(0); ret = sysfs_create_file(&dev->kobj, &cpm_idle_attr.attr); if (ret) printk(KERN_WARNING "cpm: failed to create idle sysfs entry\n"); } static void cpm_idle(void) { if (idle_mode[CPM_IDLE_DOZE].enabled) cpm_idle_doze(); else cpm_idle_wait(); } static int cpm_suspend_valid(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: return !!cpm.standby; case PM_SUSPEND_MEM: return !!cpm.suspend; default: return 0; } } static void cpm_suspend_standby(unsigned int mask) { unsigned long tcr_save; /* disable decrement interrupt */ tcr_save = mfspr(SPRN_TCR); mtspr(SPRN_TCR, tcr_save & ~TCR_DIE); /* go to sleep state */ cpm_idle_sleep(mask); /* restore decrement interrupt */ mtspr(SPRN_TCR, tcr_save); } static int cpm_suspend_enter(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: cpm_suspend_standby(cpm.standby); break; case PM_SUSPEND_MEM: cpm_suspend_standby(cpm.suspend); break; } return 0; } static const struct platform_suspend_ops cpm_suspend_ops = { .valid = cpm_suspend_valid, .enter = cpm_suspend_enter, }; static int __init cpm_get_uint_property(struct device_node *np, const char *name) { int len; const unsigned int *prop = of_get_property(np, name, &len); if (prop == NULL || len < sizeof(u32)) return 0; return *prop; } static int __init cpm_init(void) { struct device_node *np; int dcr_base, dcr_len; int ret = 0; if (!cpm.powersave_off) { cpm_idle_config(CPM_IDLE_WAIT); ppc_md.power_save = &cpm_idle; } np = of_find_compatible_node(NULL, NULL, "ibm,cpm"); if (!np) { ret = -EINVAL; goto out; } dcr_base = dcr_resource_start(np, 0); dcr_len = dcr_resource_len(np, 0); if (dcr_base == 0 
|| dcr_len == 0) { printk(KERN_ERR "cpm: could not parse dcr property for %pOF\n", np); ret = -EINVAL; goto node_put; } cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); if (!DCR_MAP_OK(cpm.dcr_host)) { printk(KERN_ERR "cpm: failed to map dcr property for %pOF\n", np); ret = -EINVAL; goto node_put; } /* All 4xx SoCs with a CPM controller have one of two * different order for the CPM registers. Some have the * CPM registers in the following order (ER,FR,SR). The * others have them in the following order (SR,ER,FR). */ if (cpm_get_uint_property(np, "er-offset") == 0) { cpm.dcr_offset[CPM_ER] = 0; cpm.dcr_offset[CPM_FR] = 1; cpm.dcr_offset[CPM_SR] = 2; } else { cpm.dcr_offset[CPM_ER] = 1; cpm.dcr_offset[CPM_FR] = 2; cpm.dcr_offset[CPM_SR] = 0; } /* Now let's see what IPs to turn off for the following modes */ cpm.unused = cpm_get_uint_property(np, "unused-units"); cpm.idle_doze = cpm_get_uint_property(np, "idle-doze"); cpm.standby = cpm_get_uint_property(np, "standby"); cpm.suspend = cpm_get_uint_property(np, "suspend"); /* If some IPs are unused let's turn them off now */ if (cpm.unused) { cpm_set(CPM_ER, cpm.unused); cpm_set(CPM_FR, cpm.unused); } /* Now let's export interfaces */ if (!cpm.powersave_off && cpm.idle_doze) cpm_idle_config_sysfs(); if (cpm.standby || cpm.suspend) suspend_set_ops(&cpm_suspend_ops); node_put: of_node_put(np); out: return ret; } late_initcall(cpm_init); static int __init cpm_powersave_off(char *arg) { cpm.powersave_off = 1; return 1; } __setup("powersave=off", cpm_powersave_off);
linux-master
arch/powerpc/platforms/4xx/cpm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/uic.c
 *
 * IBM PowerPC 4xx Universal Interrupt Controller
 *
 * Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dcr.h>
#include <asm/uic.h>

#define NR_UIC_INTS	32

/* DCR offsets relative to a UIC's dcrbase. */
#define UIC_SR		0x0	/* status (write 1 to ack) */
#define UIC_ER		0x2	/* enable */
#define UIC_CR		0x3	/* critical */
#define UIC_PR		0x4	/* polarity */
#define UIC_TR		0x5	/* trigger (edge/level) */
#define UIC_MSR		0x6	/* masked status */
#define UIC_VR		0x7
#define UIC_VCR		0x8

/* The top-level UIC, set by uic_init_tree(). */
struct uic *primary_uic;

struct uic {
	int index;
	int dcrbase;

	raw_spinlock_t lock;

	/* The remapper for this UIC */
	struct irq_domain	*irqhost;
};

/* Unmask @d; level-triggered interrupts are acked here (see mask_ack). */
static void uic_unmask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31-src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	/* ack level-triggered interrupts here */
	if (irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er |= sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

/* Clear @d's bit in the enable register. */
static void uic_mask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er;

	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~(1 << (31 - src));
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

/* Acknowledge @d by writing its bit to the status register. */
static void uic_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&uic->lock, flags);
	mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

/* Mask @d and ack it -- but only edge interrupts; see comment below. */
static void uic_mask_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31-src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	/* On the UIC, acking (i.e. clearing the SR bit)
	 * a level irq will have no effect if the interrupt
	 * is still asserted by the device, even if
	 * the interrupt is already masked. Therefore
	 * we only ack the edge interrupts here, while
	 * level interrupts are ack'ed after the actual
	 * isr call in the uic_unmask_irq()
	 */
	if (!irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

/* Program trigger/polarity for @d from the generic flow type. */
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	int trigger, polarity;
	u32 tr, pr, mask;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_NONE:
		uic_mask_irq(d);
		return 0;

	case IRQ_TYPE_EDGE_RISING:
		trigger = 1; polarity = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		trigger = 1; polarity = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		trigger = 0; polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		trigger = 0; polarity = 0;
		break;
	default:
		return -EINVAL;
	}

	mask = ~(1 << (31 - src));

	raw_spin_lock_irqsave(&uic->lock, flags);
	tr = mfdcr(uic->dcrbase + UIC_TR);
	pr = mfdcr(uic->dcrbase + UIC_PR);
	tr = (tr & mask) | (trigger << (31-src));
	pr = (pr & mask) | (polarity << (31-src));

	mtdcr(uic->dcrbase + UIC_PR, pr);
	mtdcr(uic->dcrbase + UIC_TR, tr);
	/* clear any event latched under the old configuration */
	mtdcr(uic->dcrbase + UIC_SR, ~mask);

	raw_spin_unlock_irqrestore(&uic->lock, flags);

	return 0;
}

static struct irq_chip uic_irq_chip = {
	.name		= "UIC",
	.irq_unmask	= uic_unmask_irq,
	.irq_mask	= uic_mask_irq,
	.irq_mask_ack	= uic_mask_ack_irq,
	.irq_ack	= uic_ack_irq,
	.irq_set_type	= uic_set_irq_type,
};

/* irq_domain map callback: attach chip data and default handler/type. */
static int uic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *uic = h->host_data;

	irq_set_chip_data(virq, uic);
	/* Despite the name, handle_level_irq() works for both level
	 * and edge irqs on UIC.  FIXME: check this is correct */
	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops uic_host_ops = {
	.map	= uic_host_map,
	.xlate	= irq_domain_xlate_twocell,
};

/*
 * Chained handler for a cascaded (secondary) UIC: mask/ack the parent
 * line, dispatch the highest-priority pending source, then re-open the
 * parent line.
 */
static void uic_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct uic *uic = irq_desc_get_handler_data(desc);
	u32 msr;
	int src;

	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_mask(idata);
	else
		chip->irq_mask_ack(idata);
	raw_spin_unlock(&desc->lock);

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		goto uic_irq_ret;

	src = 32 - ffs(msr);

	generic_handle_domain_irq(uic->irqhost, src);

uic_irq_ret:
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_ack(idata);
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
	raw_spin_unlock(&desc->lock);
}

/*
 * Allocate and initialize one UIC from its device-tree node: read
 * cell-index and dcr-reg, create the irq domain, and quiesce the
 * hardware. Returns NULL on any failure.
 */
static struct uic * __init uic_init_one(struct device_node *node)
{
	struct uic *uic;
	const u32 *indexp, *dcrreg;
	int len;

	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));

	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
	if (! uic)
		return NULL; /* FIXME: panic? */

	raw_spin_lock_init(&uic->lock);
	indexp = of_get_property(node, "cell-index", &len);
	if (!indexp || (len != sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "cell-index property\n", node);
		return NULL;
	}
	uic->index = *indexp;

	dcrreg = of_get_property(node, "dcr-reg", &len);
	if (!dcrreg || (len != 2*sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "dcr-reg property\n", node);
		return NULL;
	}
	uic->dcrbase = *dcrreg;

	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
					     uic);
	if (! uic->irqhost)
		return NULL; /* FIXME: panic? */

	/* Start with all interrupts disabled, level and non-critical */
	mtdcr(uic->dcrbase + UIC_ER, 0);
	mtdcr(uic->dcrbase + UIC_CR, 0);
	mtdcr(uic->dcrbase + UIC_TR, 0);
	/* Clear any pending interrupts, in case the firmware left some */
	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);

	printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
		NR_UIC_INTS, uic->dcrbase);

	return uic;
}

/*
 * Initialize the whole UIC tree: the interrupt-less node is the
 * top-level controller; any node with an "interrupts" property is a
 * secondary UIC cascaded into its parent.
 */
void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (!interrupts)
			break;
	}

	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
		      * top-level interrupt controller */
	primary_uic = uic_init_one(np);
	if (!primary_uic)
		panic("Unable to initialize primary UIC %pOF\n", np);

	irq_set_default_host(primary_uic->irqhost);
	of_node_put(np);

	/* The scan again for cascaded UICs */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC */
			int cascade_virq;

			uic = uic_init_one(np);
			if (! uic)
				panic("Unable to initialize a secondary UIC %pOF\n",
				      np);

			cascade_virq = irq_of_parse_and_map(np, 0);

			irq_set_handler_data(cascade_virq, uic);
			irq_set_chained_handler(cascade_virq, uic_irq_cascade);

			/* FIXME: setup critical cascade?? */
		}
	}
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(! primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
	src = 32 - ffs(msr);

	return irq_linear_revmap(primary_uic->irqhost, src);
}
linux-master
arch/powerpc/platforms/4xx/uic.c
// SPDX-License-Identifier: GPL-2.0-only /* * PPC4xx gpio driver * * Copyright (c) 2008 Harris Corporation * Copyright (c) 2008 Sascha Hauer <[email protected]>, Pengutronix * Copyright (c) MontaVista Software, Inc. 2008. * * Author: Steve Falco <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/of.h> #include <linux/gpio/legacy-of-mm-gpiochip.h> #include <linux/gpio/driver.h> #include <linux/types.h> #include <linux/slab.h> #define GPIO_MASK(gpio) (0x80000000 >> (gpio)) #define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2)) /* Physical GPIO register layout */ struct ppc4xx_gpio { __be32 or; __be32 tcr; __be32 osrl; __be32 osrh; __be32 tsrl; __be32 tsrh; __be32 odr; __be32 ir; __be32 rr1; __be32 rr2; __be32 rr3; __be32 reserved1; __be32 isr1l; __be32 isr1h; __be32 isr2l; __be32 isr2h; __be32 isr3l; __be32 isr3h; }; struct ppc4xx_gpio_chip { struct of_mm_gpio_chip mm_gc; spinlock_t lock; }; /* * GPIO LIB API implementation for GPIOs * * There are a maximum of 32 gpios in each gpio controller. 
*/ static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct ppc4xx_gpio __iomem *regs = mm_gc->regs; return !!(in_be32(&regs->ir) & GPIO_MASK(gpio)); } static inline void __ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct ppc4xx_gpio __iomem *regs = mm_gc->regs; if (val) setbits32(&regs->or, GPIO_MASK(gpio)); else clrbits32(&regs->or, GPIO_MASK(gpio)); } static void ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); unsigned long flags; spin_lock_irqsave(&chip->lock, flags); __ppc4xx_gpio_set(gc, gpio, val); spin_unlock_irqrestore(&chip->lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); } static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); struct ppc4xx_gpio __iomem *regs = mm_gc->regs; unsigned long flags; spin_lock_irqsave(&chip->lock, flags); /* Disable open-drain function */ clrbits32(&regs->odr, GPIO_MASK(gpio)); /* Float the pin */ clrbits32(&regs->tcr, GPIO_MASK(gpio)); /* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */ if (gpio < 16) { clrbits32(&regs->osrl, GPIO_MASK2(gpio)); clrbits32(&regs->tsrl, GPIO_MASK2(gpio)); } else { clrbits32(&regs->osrh, GPIO_MASK2(gpio)); clrbits32(&regs->tsrh, GPIO_MASK2(gpio)); } spin_unlock_irqrestore(&chip->lock, flags); return 0; } static int ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc); struct ppc4xx_gpio __iomem *regs = mm_gc->regs; unsigned long flags; spin_lock_irqsave(&chip->lock, flags); /* First set initial value */ __ppc4xx_gpio_set(gc, gpio, val); /* Disable open-drain function */ 
clrbits32(&regs->odr, GPIO_MASK(gpio)); /* Drive the pin */ setbits32(&regs->tcr, GPIO_MASK(gpio)); /* Bits 0-15 use TSRL, bits 16-31 use TSRH */ if (gpio < 16) { clrbits32(&regs->osrl, GPIO_MASK2(gpio)); clrbits32(&regs->tsrl, GPIO_MASK2(gpio)); } else { clrbits32(&regs->osrh, GPIO_MASK2(gpio)); clrbits32(&regs->tsrh, GPIO_MASK2(gpio)); } spin_unlock_irqrestore(&chip->lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); return 0; } static int __init ppc4xx_add_gpiochips(void) { struct device_node *np; for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") { int ret; struct ppc4xx_gpio_chip *ppc4xx_gc; struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL); if (!ppc4xx_gc) { ret = -ENOMEM; goto err; } spin_lock_init(&ppc4xx_gc->lock); mm_gc = &ppc4xx_gc->mm_gc; gc = &mm_gc->gc; gc->ngpio = 32; gc->direction_input = ppc4xx_gpio_dir_in; gc->direction_output = ppc4xx_gpio_dir_out; gc->get = ppc4xx_gpio_get; gc->set = ppc4xx_gpio_set; ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc); if (ret) goto err; continue; err: pr_err("%pOF: registration failed with status %d\n", np, ret); kfree(ppc4xx_gc); /* try others anyway */ } return 0; } arch_initcall(ppc4xx_add_gpiochips);
linux-master
arch/powerpc/platforms/4xx/gpio.c