// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/m68k/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* This is wrong for sun3, where PAGE_SIZE is 8Kb, so we would need to
* shift the argument down by 1; m68k mmap64(3) (in libc) expects the
* last argument of mmap2 in 4Kb units.
*/
return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
unsigned long _mmusr, _paddr; \
\
__asm__ __volatile__ (".chip 68040\n\t" \
"ptestr (%1)\n\t" \
"movec %%mmusr,%0\n\t" \
".chip 68k" \
: "=r" (_mmusr) \
: "a" (vaddr)); \
_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
_paddr; \
})
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
/* This nop is needed for some broken versions of the 68040. */
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
if ((paddr = virt_to_phys_040(addr))) {
paddr += addr & ~(PAGE_MASK | 15);
len = (len + (addr & 15) + 15) >> 4;
} else {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
len = (len + 15) >> 4;
}
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* No need to page align here since it is done by
* virt_to_phys_040().
*/
addr += PAGE_SIZE;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_040(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
unsigned long paddr; \
__asm__ __volatile__ (".chip 68060\n\t" \
"plpar (%0)\n\t" \
".chip 68k" \
: "=a" (paddr) \
: "0" (vaddr)); \
(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
/*
* 68060 manual says:
* cpush %dc : flush DC, remains valid (with our %cacr setup)
* cpush %ic : invalidate IC
* cpush %bc : flush DC + invalidate IC
*/
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
len += addr & 15;
addr &= -16;
if (!(paddr = virt_to_phys_060(addr))) {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
}
len = (len + 15) >> 4;
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* We just want to jump to the first cache line
* in the next page.
*/
addr += PAGE_SIZE;
addr &= PAGE_MASK;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
addr &= PAGE_MASK; /* Workaround for bug in some
revisions of the 68060 */
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_060(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
cache & ~FLUSH_CACHE_BOTH)
goto out;
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may explicitly flush the whole cache. */
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
mmap_read_lock(current->mm);
} else {
struct vm_area_struct *vma;
/* Check for overflow. */
if (addr + len < addr)
goto out;
/*
* Verify that the specified address region actually belongs
* to this process.
*/
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, addr);
if (!vma || addr + len > vma->vm_end)
goto out_unlock;
}
if (CPU_IS_020_OR_030) {
if (scope == FLUSH_SCOPE_LINE && len < 256) {
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 4;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x400;
len >>= 2;
while (len--) {
__asm__ __volatile__ ("movec %1, %%caar\n\t"
"movec %0, %%cacr"
: /* no outputs */
: "r" (cacr), "r" (addr));
addr += 4;
}
} else {
/* Flush the whole cache, even if page granularity requested. */
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 8;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x800;
__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
}
ret = 0;
goto out_unlock;
} else {
/*
* 040 or 060: don't blindly trust 'scope', someone could
* try to flush a few megs of memory.
*/
if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
scope = FLUSH_SCOPE_PAGE;
if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
scope = FLUSH_SCOPE_ALL;
if (CPU_IS_040) {
ret = cache_flush_040 (addr, scope, cache, len);
} else if (CPU_IS_060) {
ret = cache_flush_060 (addr, scope, cache, len);
}
}
out_unlock:
mmap_read_unlock(current->mm);
out:
return ret;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
/* This was borrowed from ARM's implementation. */
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned long mem_value;
mmap_read_lock(mm);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
p4d = p4d_offset(pgd, (unsigned long)mem);
if (!p4d_present(*p4d))
goto bad_access;
pud = pud_offset(p4d, (unsigned long)mem);
if (!pud_present(*pud))
goto bad_access;
pmd = pmd_offset(pud, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
if (!pte)
goto bad_access;
if (!pte_present(*pte) || !pte_dirty(*pte)
|| !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
/*
* No need to check for EFAULT; we know that the page is
* present and writable.
*/
__get_user(mem_value, mem);
if (mem_value == oldval)
__put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
mmap_read_unlock(mm);
return mem_value;
bad_access:
mmap_read_unlock(mm);
/* This is not necessarily a bad access; we can get here if
the memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page handling, then re-iterate.
Simulate a write access fault to do that. */
{
/* The first argument of the function corresponds to
D1, which is the first field of struct pt_regs. */
struct pt_regs *fp = (struct pt_regs *)&newval;
/* '3' is an RMW flag. */
if (do_page_fault(fp, (unsigned long)mem, 3))
/* If the do_page_fault() failed, we don't
have anything meaningful to return.
There should be a SIGSEGV pending for
the process. */
return 0xdeadbeef;
}
}
}
#else
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
flush_cache_all();
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
struct mm_struct *mm = current->mm;
unsigned long mem_value;
mmap_read_lock(mm);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
mmap_read_unlock(mm);
return mem_value;
}
#endif /* CONFIG_MMU */
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}
/* ==== linux-master: arch/m68k/kernel/sys_m68k.c ==== */
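/*
 * Usage sketch for the syscalls above: flushing a region after writing
 * code into it, as a JIT would. A minimal userspace sketch, assuming a
 * libc that exposes syscall(2); __NR_cacheflush comes from asm/unistd.h
 * and the scope/cache constants from asm/cachectl.h. The function name
 * and its caller are illustrative, not part of the kernel source.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>

static int flush_code_region(void *addr, unsigned long len)
{
	/* Push dirty data lines to memory and invalidate stale insn lines. */
	return syscall(__NR_cacheflush, (unsigned long)addr,
		       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
}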
/*
* linux/arch/m68k/kernel/ptrace.c
*
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/seccomp.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/processor.h>
/*
* Does not yet catch signals sent when the child dies,
* in exit.c or in signal.c.
*/
/* determines which bits in the SR the user has access to. */
/* 1 = access 0 = no access */
#define SR_MASK 0x001f
/* sets the trace bits. */
#define TRACE_BITS 0xC000
#define T1_BIT 0x8000
#define T0_BIT 0x4000
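/* T1 traces every instruction; T0 traces on change of flow (68020+).
 * user_enable_single_step() sets T1, user_enable_block_step() sets T0. */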
/* Find the stack offset for a register, relative to thread.esp0. */
#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
- sizeof(struct switch_stack))
/* Mapping from PT_xxx to the stack offset at which the register is
saved. Notice that usp has no stack-slot and needs to be treated
specially (see get_reg/put_reg below). */
static const int regoff[] = {
[0] = PT_REG(d1),
[1] = PT_REG(d2),
[2] = PT_REG(d3),
[3] = PT_REG(d4),
[4] = PT_REG(d5),
[5] = SW_REG(d6),
[6] = SW_REG(d7),
[7] = PT_REG(a0),
[8] = PT_REG(a1),
[9] = PT_REG(a2),
[10] = SW_REG(a3),
[11] = SW_REG(a4),
[12] = SW_REG(a5),
[13] = SW_REG(a6),
[14] = PT_REG(d0),
[15] = -1,
[16] = PT_REG(orig_d0),
[17] = PT_REG(sr),
[18] = PT_REG(pc),
};
/*
* Get contents of register REGNO in task TASK.
*/
static inline long get_reg(struct task_struct *task, int regno)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return 0;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR)
return *(unsigned short *)addr;
}
return *addr;
}
/*
* Write contents of register REGNO in task TASK.
*/
static inline int put_reg(struct task_struct *task, int regno,
unsigned long data)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return -1;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR) {
*(unsigned short *)addr = data;
return 0;
}
}
*addr = data;
return 0;
}
/*
* Make sure the single step bit is not set.
*/
static inline void singlestep_disable(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp);
clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T1_BIT);
set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
#ifdef CONFIG_MMU
void user_enable_block_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T0_BIT);
}
#endif
void user_disable_single_step(struct task_struct *child)
{
singlestep_disable(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
int regno = addr >> 2; /* temporary hack. */
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
if (regno >= 0 && regno < 19) {
tmp = get_reg(child, regno);
} else if (regno >= 21 && regno < 49) {
tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
#ifndef CONFIG_MMU
} else if (regno == 49) {
tmp = child->mm->start_code;
} else if (regno == 50) {
tmp = child->mm->start_data;
} else if (regno == 51) {
tmp = child->mm->end_code;
#endif
} else
goto out_eio;
ret = put_user(tmp, datap);
break;
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
if (regno >= 0 && regno < 19) {
if (put_reg(child, regno, data))
goto out_eio;
} else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
ret = put_user(tmp, datap);
if (ret)
break;
datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
tmp &= SR_MASK;
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
out_eio:
return -EIO;
}
asmlinkage int syscall_trace_enter(void)
{
int ret = 0;
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = ptrace_report_syscall_entry(task_pt_regs(current));
if (secure_computing() == -1)
return -1;
return ret;
}
asmlinkage void syscall_trace_leave(void)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(task_pt_regs(current), 0);
}
#if defined(CONFIG_BINFMT_ELF_FDPIC) && defined(CONFIG_ELF_CORE)
/*
* Currently the only thing that needs to use regsets for m68k is the
* coredump support of the elf_fdpic loader. Implement the minimum
* definitions required for that.
*/
static int m68k_regset_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *ptregs = task_pt_regs(target);
u32 uregs[ELF_NGREG];
ELF_CORE_COPY_REGS(uregs, ptregs);
return membuf_write(&to, uregs, sizeof(uregs));
}
enum m68k_regset {
REGSET_GPR,
#ifdef CONFIG_FPU
REGSET_FPU,
#endif
};
static const struct user_regset m68k_user_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(u32),
.align = sizeof(u16),
.regset_get = m68k_regset_get,
},
#ifdef CONFIG_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_m68kfp_struct) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
}
#endif /* CONFIG_FPU */
};
static const struct user_regset_view user_m68k_view = {
.name = "m68k",
.e_machine = EM_68K,
.ei_osabi = ELF_OSABI,
.regsets = m68k_user_regsets,
.n = ARRAY_SIZE(m68k_user_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_m68k_view;
}
#endif /* CONFIG_BINFMT_ELF_FDPIC && CONFIG_ELF_CORE */
/* ==== linux-master: arch/m68k/kernel/ptrace.c ==== */
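/*
 * Usage sketch for arch_ptrace() above: reading a stopped child's
 * program counter with PTRACE_PEEKUSR (PTRACE_PEEKUSER in glibc). A
 * minimal sketch, assuming the child is already traced and stopped;
 * PT_PC is the register index (18 in the regoff[] table above) and
 * arch_ptrace() expects addr to be the byte offset, i.e. the register
 * number shifted left by 2.
 */
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>

static long read_child_pc(pid_t child)
{
	return ptrace(PTRACE_PEEKUSER, child, (void *)(PT_PC << 2), NULL);
}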
/*
* linux/arch/m68k/kernel/ints.c -- Linux/m68k general interrupt handling code
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_Q40
#include <asm/q40ints.h>
#endif
extern u32 auto_irqhandler_fixup[];
extern u16 user_irqvec_fixup[];
static int m68k_first_user_vec;
static struct irq_chip auto_irq_chip = {
.name = "auto",
.irq_startup = m68k_irq_startup,
.irq_shutdown = m68k_irq_shutdown,
};
static struct irq_chip user_irq_chip = {
.name = "user",
.irq_startup = m68k_irq_startup,
.irq_shutdown = m68k_irq_shutdown,
};
/*
* void init_IRQ(void)
*
* Parameters: None
*
* Returns: Nothing
*
* This function should be called during kernel startup to initialize
* the IRQ handling routines.
*/
void __init init_IRQ(void)
{
int i;
for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
mach_init_IRQ();
}
/**
* m68k_setup_auto_interrupt
* @handler: called from auto vector interrupts
*
* Set up the handler to be called for auto vector interrupts instead of the
* standard do_IRQ(); it will be called with irq numbers in the range
* IRQ_AUTO_1 to IRQ_AUTO_7.
*/
void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
{
if (handler)
*auto_irqhandler_fixup = (u32)handler;
flush_icache();
}
/**
* m68k_setup_user_interrupt
* @vec: first user vector interrupt to handle
* @cnt: number of active user vector interrupts
*
* Set up user vector interrupts. This includes activating the specified
* range of interrupts; only then can these interrupts be requested (note:
* this is different from auto vector interrupts).
*/
void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
{
int i;
BUG_ON(IRQ_USER + cnt > NR_IRQS);
m68k_first_user_vec = vec;
for (i = 0; i < cnt; i++)
irq_set_chip_and_handler(IRQ_USER + i, &user_irq_chip, handle_simple_irq);
*user_irqvec_fixup = vec - IRQ_USER;
flush_icache();
}
/**
* m68k_setup_irq_controller
* @chip: irq chip which controls specified irq
* @handle: flow handler which handles specified irq
* @irq: first irq to be managed by the controller
* @cnt: number of irqs to be managed by the controller
*
* Change the controller for the specified range of irqs, which will be used
* to manage them. Auto/user irqs already have a default controller, which
* can be changed as well, but the replacement probably should use
* m68k_irq_startup/m68k_irq_shutdown.
*/
void m68k_setup_irq_controller(struct irq_chip *chip,
irq_flow_handler_t handle, unsigned int irq,
unsigned int cnt)
{
int i;
for (i = 0; i < cnt; i++) {
irq_set_chip(irq + i, chip);
if (handle)
irq_set_handler(irq + i, handle);
}
}
unsigned int m68k_irq_startup_irq(unsigned int irq)
{
if (irq <= IRQ_AUTO_7)
vectors[VEC_SPUR + irq] = auto_inthandler;
else
vectors[m68k_first_user_vec + irq - IRQ_USER] = user_inthandler;
return 0;
}
unsigned int m68k_irq_startup(struct irq_data *data)
{
return m68k_irq_startup_irq(data->irq);
}
void m68k_irq_shutdown(struct irq_data *data)
{
unsigned int irq = data->irq;
if (irq <= IRQ_AUTO_7)
vectors[VEC_SPUR + irq] = bad_inthandler;
else
vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
}
unsigned int irq_canonicalize(unsigned int irq)
{
#ifdef CONFIG_Q40
if (MACH_IS_Q40 && irq == 11)
irq = 10;
#endif
return irq;
}
EXPORT_SYMBOL(irq_canonicalize);
asmlinkage void handle_badint(struct pt_regs *regs)
{
atomic_inc(&irq_err_count);
pr_warn("unexpected interrupt from %u\n", regs->vector);
}
/* ==== linux-master: arch/m68k/kernel/ints.c ==== */
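/*
 * Usage sketch for the helpers above: what a platform's mach_init_IRQ()
 * typically does. The vector base (0x40) and count (32) below are made-up
 * example values; a real platform passes the vector assignment of its
 * hardware (see the per-platform *ints.c files).
 */
static void __init example_platform_init_IRQ(void)
{
	/* activate 32 user vector interrupts starting at hw vector 0x40 */
	m68k_setup_user_interrupt(0x40, 32);
}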
/*
* linux/arch/m68k/kernel/traps.c
*
* Copyright (C) 1993, 1994 by Hamish Macdonald
*
* 68040 fixes by Michael Rausch
* 68040 fixes by Martin Apel
* 68040 fixes and writeback by Richard Zidlicky
* 68060 fixes by Roman Hodek
* 68060 fixes by Jesper Skov
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Sets up all exception vectors
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/extable.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/siginfo.h>
#include <asm/tlbflush.h>
static const char *vec_names[] = {
[VEC_RESETSP] = "RESET SP",
[VEC_RESETPC] = "RESET PC",
[VEC_BUSERR] = "BUS ERROR",
[VEC_ADDRERR] = "ADDRESS ERROR",
[VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
[VEC_ZERODIV] = "ZERO DIVIDE",
[VEC_CHK] = "CHK",
[VEC_TRAP] = "TRAPcc",
[VEC_PRIV] = "PRIVILEGE VIOLATION",
[VEC_TRACE] = "TRACE",
[VEC_LINE10] = "LINE 1010",
[VEC_LINE11] = "LINE 1111",
[VEC_RESV12] = "UNASSIGNED RESERVED 12",
[VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
[VEC_FORMAT] = "FORMAT ERROR",
[VEC_UNINT] = "UNINITIALIZED INTERRUPT",
[VEC_RESV16] = "UNASSIGNED RESERVED 16",
[VEC_RESV17] = "UNASSIGNED RESERVED 17",
[VEC_RESV18] = "UNASSIGNED RESERVED 18",
[VEC_RESV19] = "UNASSIGNED RESERVED 19",
[VEC_RESV20] = "UNASSIGNED RESERVED 20",
[VEC_RESV21] = "UNASSIGNED RESERVED 21",
[VEC_RESV22] = "UNASSIGNED RESERVED 22",
[VEC_RESV23] = "UNASSIGNED RESERVED 23",
[VEC_SPUR] = "SPURIOUS INTERRUPT",
[VEC_INT1] = "LEVEL 1 INT",
[VEC_INT2] = "LEVEL 2 INT",
[VEC_INT3] = "LEVEL 3 INT",
[VEC_INT4] = "LEVEL 4 INT",
[VEC_INT5] = "LEVEL 5 INT",
[VEC_INT6] = "LEVEL 6 INT",
[VEC_INT7] = "LEVEL 7 INT",
[VEC_SYS] = "SYSCALL",
[VEC_TRAP1] = "TRAP #1",
[VEC_TRAP2] = "TRAP #2",
[VEC_TRAP3] = "TRAP #3",
[VEC_TRAP4] = "TRAP #4",
[VEC_TRAP5] = "TRAP #5",
[VEC_TRAP6] = "TRAP #6",
[VEC_TRAP7] = "TRAP #7",
[VEC_TRAP8] = "TRAP #8",
[VEC_TRAP9] = "TRAP #9",
[VEC_TRAP10] = "TRAP #10",
[VEC_TRAP11] = "TRAP #11",
[VEC_TRAP12] = "TRAP #12",
[VEC_TRAP13] = "TRAP #13",
[VEC_TRAP14] = "TRAP #14",
[VEC_TRAP15] = "TRAP #15",
[VEC_FPBRUC] = "FPCP BSUN",
[VEC_FPIR] = "FPCP INEXACT",
[VEC_FPDIVZ] = "FPCP DIV BY 0",
[VEC_FPUNDER] = "FPCP UNDERFLOW",
[VEC_FPOE] = "FPCP OPERAND ERROR",
[VEC_FPOVER] = "FPCP OVERFLOW",
[VEC_FPNAN] = "FPCP SNAN",
[VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
[VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
[VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
[VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
[VEC_RESV59] = "UNASSIGNED RESERVED 59",
[VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
[VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
[VEC_RESV62] = "UNASSIGNED RESERVED 62",
[VEC_RESV63] = "UNASSIGNED RESERVED 63",
};
static const char *space_names[] = {
[0] = "Space 0",
[USER_DATA] = "User Data",
[USER_PROGRAM] = "User Program",
#ifndef CONFIG_SUN3
[3] = "Space 3",
#else
[FC_CONTROL] = "Control",
#endif
[4] = "Space 4",
[SUPER_DATA] = "Super Data",
[SUPER_PROGRAM] = "Super Program",
[CPU_SPACE] = "CPU"
};
void die_if_kernel(char *,struct pt_regs *,int);
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
int send_fault_sig(struct pt_regs *regs);
asmlinkage void trap_c(struct frame *fp);
#if defined (CONFIG_M68060)
static inline void access_error060 (struct frame *fp)
{
unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
if (fslw & MMU060_BPE) {
/* branch prediction error -> clear branch cache */
__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
"orl #0x00400000,%/d0\n\t"
"movec %/d0,%/cacr"
: : : "d0" );
/* return if there's no other error */
if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
return;
}
if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
unsigned long errorcode;
unsigned long addr = fp->un.fmt4.effaddr;
if (fslw & MMU060_MA)
addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
errorcode = 1;
if (fslw & MMU060_DESC_ERR) {
__flush_tlb040_one(addr);
errorcode = 0;
}
if (fslw & MMU060_W)
errorcode |= 2;
pr_debug("errorcode = %ld\n", errorcode);
do_page_fault(&fp->ptregs, addr, errorcode);
} else if (fslw & (MMU060_SEE)){
/* Software Emulation Error.
* fault during mem_read/mem_write in ifpsp060/os.S
*/
send_fault_sig(&fp->ptregs);
} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
send_fault_sig(&fp->ptregs) > 0) {
pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc,
fp->un.fmt4.effaddr);
pr_err("68060 access error, fslw=%lx\n", fslw);
trap_c( fp );
}
}
#endif /* CONFIG_M68060 */
#if defined (CONFIG_M68040)
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
unsigned long mmusr;
set_fc(wbs);
if (iswrite)
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
else
asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
set_fc(USER_DATA);
return mmusr;
}
static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
unsigned long wbd)
{
int res = 0;
set_fc(wbs);
switch (wbs & WBSIZ_040) {
case BA_SIZE_BYTE:
res = put_user(wbd & 0xff, (char __user *)wba);
break;
case BA_SIZE_WORD:
res = put_user(wbd & 0xffff, (short __user *)wba);
break;
case BA_SIZE_LONG:
res = put_user(wbd, (int __user *)wba);
break;
}
set_fc(USER_DATA);
pr_debug("do_040writeback1, res=%d\n", res);
return res;
}
/* After an exception in a writeback, the stack frame corresponding
* to that exception is discarded; set a few bits in the old frame
* to simulate what it should look like.
*/
static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
{
fp->un.fmt7.faddr = wba;
fp->un.fmt7.ssw = wbs & 0xff;
if (wba != current->thread.faddr)
fp->un.fmt7.ssw |= MA_040;
}
static inline void do_040writebacks(struct frame *fp)
{
int res = 0;
#if 0
if (fp->un.fmt7.wb1s & WBV_040)
pr_err("access_error040: cannot handle 1st writeback. oops.\n");
#endif
if ((fp->un.fmt7.wb2s & WBV_040) &&
!(fp->un.fmt7.wb2s & WBTT_040)) {
res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
fp->un.fmt7.wb2d);
if (res)
fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
else
fp->un.fmt7.wb2s = 0;
}
/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
fp->un.fmt7.wb3d);
if (res)
{
fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
fp->un.fmt7.wb3s &= (~WBV_040);
fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
}
else
fp->un.fmt7.wb3s = 0;
}
if (res)
send_fault_sig(&fp->ptregs);
}
/*
* Called from sigreturn(); must ensure userspace code didn't
* manipulate the exception frame to circumvent protection, then
* complete the pending writebacks.
* We just clear TM2 to turn them into userspace accesses.
*/
asmlinkage void berr_040cleanup(struct frame *fp)
{
fp->un.fmt7.wb2s &= ~4;
fp->un.fmt7.wb3s &= ~4;
do_040writebacks(fp);
}
static inline void access_error040(struct frame *fp)
{
unsigned short ssw = fp->un.fmt7.ssw;
unsigned long mmusr;
pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
if (ssw & ATC_040) {
unsigned long addr = fp->un.fmt7.faddr;
unsigned long errorcode;
/*
* The MMU status has to be determined AFTER the address
* has been corrected if there was a misaligned access (MA).
*/
if (ssw & MA_040)
addr = (addr + 7) & -8;
/* MMU error, get the MMUSR info for this access */
mmusr = probe040(!(ssw & RW_040), addr, ssw);
pr_debug("mmusr = %lx\n", mmusr);
errorcode = 1;
if (!(mmusr & MMU_R_040)) {
/* clear the invalid atc entry */
__flush_tlb040_one(addr);
errorcode = 0;
}
/* despite what the documentation seems to say, RMW
* accesses always have both the LK and RW bits set */
if (!(ssw & RW_040) || (ssw & LK_040))
errorcode |= 2;
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
pr_debug("do_page_fault() !=0\n");
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
pr_debug(".. was usermode - return\n");
return;
}
/* disable writeback into user space from kernel
* (if do_page_fault didn't fix the mapping,
* the writeback won't do any good)
*/
disable_wb:
pr_debug(".. disabling wb2\n");
if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040;
if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
fp->un.fmt7.wb3s &= ~WBV_040;
}
} else {
/* In case of a bus error we either kill the process or expect
* the kernel to catch the fault, which then is also responsible
* for cleaning up the mess.
*/
current->thread.signo = SIGBUS;
current->thread.faddr = fp->un.fmt7.faddr;
if (send_fault_sig(&fp->ptregs) >= 0)
pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
fp->un.fmt7.faddr);
goto disable_wb;
}
do_040writebacks(fp);
}
#endif /* CONFIG_M68040 */
#if defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
extern int mmu_emu_handle_fault (unsigned long, int, int);
/* sun3 version of bus_error030 */
static inline void bus_error030 (struct frame *fp)
{
unsigned char buserr_type = sun3_get_buserr ();
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
if (ssw & (FC | FB))
pr_debug("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
/*
* Check if this page should be demand-mapped. This needs to go before
* the testing for a bad kernel-space access (demand-mapping applies
* to kernel accesses too).
*/
if ((ssw & DF)
&& (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
return;
}
/* Check for kernel-space pagefault (BAD). */
if (fp->ptregs.sr & PS_S) {
/* kernel fault must be a data fault to user space */
if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
// try checking the kernel mappings before surrender
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
return;
/* instruction fault or kernel data fault! */
if (ssw & (FC | FB))
pr_err("Instruction fault at %#010lx\n",
fp->ptregs.pc);
if (ssw & DF) {
/* was this fault incurred testing bus mappings? */
if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
(fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
send_fault_sig(&fp->ptregs);
return;
}
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
}
pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops", &fp->ptregs,0);
force_sig(SIGKILL);
return;
}
} else {
/* user fault */
if (!(ssw & (FC | FB)) && !(ssw & DF))
/* not an instruction fault or data fault! BAD */
panic ("USER BUSERR w/o instruction or data fault");
}
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
// errorcode bit 0: 0 -> no page 1 -> protection fault
// errorcode bit 1: 0 -> read fault 1 -> write fault
// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
if (buserr_type & SUN3_BUSERR_PROTERR)
errorcode = 0x01;
else if (buserr_type & SUN3_BUSERR_INVALID)
errorcode = 0x00;
else {
pr_debug("*** unexpected busfault type=%#04x\n",
buserr_type);
pr_debug("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
die_if_kernel ("Oops", &fp->ptregs, buserr_type);
force_sig (SIGBUS);
return;
}
//todo: wtf is RM bit? --m
if (!(ssw & RW) || ssw & RM)
errorcode |= 0x02;
/* Handle page fault. */
do_page_fault (&fp->ptregs, addr, errorcode);
/* Retry the data fault now. */
return;
}
/* Now handle the instruction fault. */
/* Get the fault address. */
if (fp->ptregs.format == 0xA)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if (buserr_type & SUN3_BUSERR_INVALID) {
if (!mmu_emu_handle_fault(addr, 1, 0))
do_page_fault (&fp->ptregs, addr, 0);
} else {
pr_debug("protection fault on insn access (segv).\n");
force_sig (SIGSEGV);
}
}
#else
#if defined(CPU_M68020_OR_M68030)
static inline void bus_error030 (struct frame *fp)
{
volatile unsigned short temp;
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
#ifdef DEBUG
unsigned long desc;
#endif
pr_debug("pid = %x ", current->pid);
pr_debug("SSW=%#06x ", ssw);
if (ssw & (FC | FB))
pr_debug("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
/* ++andreas: If a data fault and an instruction fault happen
at the same time map in both pages. */
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
#ifdef DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1"
: "=a&" (desc), "=m" (temp)
: "a" (addr), "d" (ssw));
pr_debug("mmusr is %#x for addr %#lx in task %p\n",
temp, addr, current);
pr_debug("descriptor address is 0x%p, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#else
asm volatile ("ptestr %2,%1@,#7\n\t"
"pmove %%psr,%0"
: "=m" (temp) : "a" (addr), "d" (ssw));
#endif
mmusr = temp;
errorcode = (mmusr & MMU_I) ? 0 : 1;
if (!(ssw & RW) || (ssw & RM))
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
/* We might have an exception table for this PC */
if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
goto buserr;
}
/* Don't try to do anything further if an exception was
handled. */
if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
return;
} else if (!(mmusr & MMU_I)) {
/* probably a 020 cas fault */
if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
pr_err("unexpected bus error (%#x,%#x)\n", ssw,
mmusr);
} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
pr_err("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV);
return;
} else {
#if 0
static volatile long tlong;
#endif
pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc, ssw);
asm volatile ("ptestr #1,%1@,#0\n\t"
"pmove %%psr,%0"
: "=m" (temp)
: "a" (addr));
mmusr = temp;
pr_err("level 0 mmusr is %#x\n", mmusr);
#if 0
asm volatile ("pmove %%tt0,%0"
: "=m" (tlong));
pr_debug("tt0 is %#lx, ", tlong);
asm volatile ("pmove %%tt1,%0"
: "=m" (tlong));
pr_debug("tt1 is %#lx\n", tlong);
#endif
pr_debug("Unknown SIGSEGV - 1\n");
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV);
return;
}
/* setup an ATC entry for the access about to be retried */
if (!(ssw & RW) || (ssw & RM))
asm volatile ("ploadw %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
else
asm volatile ("ploadr %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
}
/* Now handle the instruction fault. */
if (!(ssw & (FC|FB)))
return;
if (fp->ptregs.sr & PS_S) {
pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
buserr:
pr_err("BAD KERNEL BUSERR\n");
die_if_kernel("Oops",&fp->ptregs,0);
force_sig(SIGKILL);
return;
}
/* get the fault address */
if (fp->ptregs.format == 10)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
/* Insn fault on same page as data fault. But we
should still create the ATC entry. */
goto create_atc_entry;
#ifdef DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1"
: "=a&" (desc), "=m" (temp)
: "a" (addr));
pr_debug("mmusr is %#x for addr %#lx in task %p\n",
temp, addr, current);
pr_debug("descriptor address is 0x%p, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#else
asm volatile ("ptestr #1,%1@,#7\n\t"
"pmove %%psr,%0"
: "=m" (temp) : "a" (addr));
#endif
mmusr = temp;
if (mmusr & MMU_I)
do_page_fault (&fp->ptregs, addr, 0);
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
pr_err("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
pr_debug("Unknown SIGSEGV - 2\n");
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV);
return;
}
create_atc_entry:
/* setup an ATC entry for the access about to be retried */
asm volatile ("ploadr #2,%0@" : /* no outputs */
: "a" (addr));
}
#endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
#include <asm/mcfmmu.h>
/*
* The following table converts the FS encoding of a ColdFire
* exception stack frame into the error_code value needed by
* do_page_fault().
*/
static const unsigned char fs_err_code[] = {
0, /* 0000 */
0, /* 0001 */
0, /* 0010 */
0, /* 0011 */
1, /* 0100 */
0, /* 0101 */
0, /* 0110 */
0, /* 0111 */
2, /* 1000 */
3, /* 1001 */
2, /* 1010 */
0, /* 1011 */
1, /* 1100 */
1, /* 1101 */
0, /* 1110 */
0 /* 1111 */
};
static inline void access_errorcf(unsigned int fs, struct frame *fp)
{
unsigned long mmusr, addr;
unsigned int err_code;
int need_page_fault;
mmusr = mmu_read(MMUSR);
addr = mmu_read(MMUAR);
/*
* error_code:
* bit 0 == 0 means no page found, 1 means protection fault
* bit 1 == 0 means read, 1 means write
*/
switch (fs) {
case 5: /* 0101 TLB opword X miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
addr = fp->ptregs.pc;
break;
case 6: /* 0110 TLB extension word X miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
addr = fp->ptregs.pc + sizeof(long);
break;
case 10: /* 1010 TLB W miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
break;
case 14: /* 1110 TLB R miss */
need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
break;
default:
/* 0000 Normal */
/* 0001 Reserved */
/* 0010 Interrupt during debug service routine */
/* 0011 Reserved */
/* 0100 X Protection */
/* 0111 IFP in emulator mode */
/* 1000 W Protection*/
/* 1001 Write error*/
/* 1011 Reserved*/
/* 1100 R Protection*/
/* 1101 R Protection*/
/* 1111 OEP in emulator mode*/
need_page_fault = 1;
break;
}
if (need_page_fault) {
err_code = fs_err_code[fs];
if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
err_code |= 2; /* bit1 - write, bit0 - protection */
do_page_fault(&fp->ptregs, addr, err_code);
}
}
#endif /* CONFIG_COLDFIRE CONFIG_MMU */
asmlinkage void buserr_c(struct frame *fp)
{
/* Only set esp0 if coming from user mode */
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
if (CPU_IS_COLDFIRE) {
unsigned int fs;
fs = (fp->ptregs.vector & 0x3) |
((fp->ptregs.vector & 0xc00) >> 8);
switch (fs) {
case 0x5:
case 0x6:
case 0x7:
case 0x9:
case 0xa:
case 0xd:
case 0xe:
case 0xf:
access_errorcf(fs, fp);
return;
default:
break;
}
}
#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
switch (fp->ptregs.format) {
#if defined (CONFIG_M68060)
case 4: /* 68060 access error */
access_error060 (fp);
break;
#endif
#if defined (CONFIG_M68040)
case 0x7: /* 68040 access error */
access_error040 (fp);
break;
#endif
#if defined (CPU_M68020_OR_M68030)
case 0xa:
case 0xb:
bus_error030 (fp);
break;
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
pr_debug("Unknown SIGSEGV - 4\n");
force_sig(SIGSEGV);
}
}
static int kstack_depth_to_print = 48;
static void show_trace(unsigned long *stack, const char *loglvl)
{
unsigned long *endstack;
unsigned long addr;
int i;
printk("%sCall Trace:", loglvl);
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
pr_cont("\n ");
#endif
pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
}
pr_cont("\n");
}
void show_registers(struct pt_regs *regs)
{
struct frame *fp = (struct frame *)regs;
u16 c, *cp;
unsigned long addr;
int i;
print_modules();
pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
pr_info("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
pr_info("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
regs->d0, regs->d1, regs->d2, regs->d3);
pr_info("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
regs->d4, regs->d5, regs->a0, regs->a1);
pr_info("Process %s (pid: %d, task=%p)\n",
current->comm, task_pid_nr(current), current);
addr = (unsigned long)&fp->un;
pr_info("Frame format=%X ", regs->format);
switch (regs->format) {
case 0x2:
pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr);
addr += sizeof(fp->un.fmt2);
break;
case 0x3:
pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr);
addr += sizeof(fp->un.fmt3);
break;
case 0x4:
if (CPU_IS_060)
pr_cont("fault addr=%08lx fslw=%08lx\n",
fp->un.fmt4.effaddr, fp->un.fmt4.pc);
else
pr_cont("eff addr=%08lx pc=%08lx\n",
fp->un.fmt4.effaddr, fp->un.fmt4.pc);
addr += sizeof(fp->un.fmt4);
break;
case 0x7:
pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n",
fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
pr_info("push data: %08lx %08lx %08lx %08lx\n",
fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
fp->un.fmt7.pd3);
addr += sizeof(fp->un.fmt7);
break;
case 0x9:
pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr);
addr += sizeof(fp->un.fmt9);
break;
case 0xa:
pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
fp->un.fmta.daddr, fp->un.fmta.dobuf);
addr += sizeof(fp->un.fmta);
break;
case 0xb:
pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
pr_info("baddr=%08lx dibuf=%08lx ver=%x\n",
fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
addr += sizeof(fp->un.fmtb);
break;
default:
pr_cont("\n");
}
show_stack(NULL, (unsigned long *)addr, KERN_INFO);
pr_info("Code:");
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
if (get_kernel_nofault(c, cp + i) && i >= 0) {
pr_cont(" Bad PC value.");
break;
}
if (i)
pr_cont(" %04x", c);
else
pr_cont(" <%04x>", c);
}
pr_cont("\n");
}
void show_stack(struct task_struct *task, unsigned long *stack,
const char *loglvl)
{
unsigned long *p;
unsigned long *endstack;
int i;
if (!stack) {
if (task)
stack = (unsigned long *)task->thread.esp0;
else
stack = (unsigned long *)&stack;
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
break;
if (i % 8 == 0)
pr_cont("\n ");
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
show_trace(stack, loglvl);
}
/*
* The vector number returned in the frame pointer may also contain
* the "fs" (Fault Status) bits on ColdFire. These are in the bottom
* 2 bits, and upper 2 bits. So we need to mask out the real vector
* number before using it in comparisons. You don't need to do this on
* real 68k parts, but it won't hurt either.
*/
void bad_super_trap (struct frame *fp)
{
int vector = (fp->ptregs.vector >> 2) & 0xff;
console_verbose();
if (vector < ARRAY_SIZE(vec_names))
pr_err("*** %s *** FORMAT=%X\n",
vec_names[vector],
fp->ptregs.format);
else
pr_err("*** Exception %d *** FORMAT=%X\n",
vector, fp->ptregs.format);
if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
unsigned short ssw = fp->un.fmtb.ssw;
pr_err("SSW=%#06x ", ssw);
if (ssw & RC)
pr_err("Pipe stage C instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
if (ssw & RB)
pr_err("Pipe stage B instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr, space_names[ssw & DFC],
fp->ptregs.pc);
}
pr_err("Current process id is %d\n", task_pid_nr(current));
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
asmlinkage void trap_c(struct frame *fp)
{
int sig, si_code;
void __user *addr;
int vector = (fp->ptregs.vector >> 2) & 0xff;
if (fp->ptregs.sr & PS_S) {
if (vector == VEC_TRACE) {
/* traced a trapping instruction on a 68020/30,
* real exception will be executed afterwards.
*/
return;
}
#ifdef CONFIG_MMU
if (fixup_exception(&fp->ptregs))
return;
#endif
bad_super_trap(fp);
return;
}
/* send the appropriate signal to the user program */
switch (vector) {
case VEC_ADDRERR:
si_code = BUS_ADRALN;
sig = SIGBUS;
break;
case VEC_ILLEGAL:
case VEC_LINE10:
case VEC_LINE11:
si_code = ILL_ILLOPC;
sig = SIGILL;
break;
case VEC_PRIV:
si_code = ILL_PRVOPC;
sig = SIGILL;
break;
case VEC_COPROC:
si_code = ILL_COPROC;
sig = SIGILL;
break;
case VEC_TRAP1:
case VEC_TRAP2:
case VEC_TRAP3:
case VEC_TRAP4:
case VEC_TRAP5:
case VEC_TRAP6:
case VEC_TRAP7:
case VEC_TRAP8:
case VEC_TRAP9:
case VEC_TRAP10:
case VEC_TRAP11:
case VEC_TRAP12:
case VEC_TRAP13:
case VEC_TRAP14:
si_code = ILL_ILLTRP;
sig = SIGILL;
break;
case VEC_FPBRUC:
case VEC_FPOE:
case VEC_FPNAN:
si_code = FPE_FLTINV;
sig = SIGFPE;
break;
case VEC_FPIR:
si_code = FPE_FLTRES;
sig = SIGFPE;
break;
case VEC_FPDIVZ:
si_code = FPE_FLTDIV;
sig = SIGFPE;
break;
case VEC_FPUNDER:
si_code = FPE_FLTUND;
sig = SIGFPE;
break;
case VEC_FPOVER:
si_code = FPE_FLTOVF;
sig = SIGFPE;
break;
case VEC_ZERODIV:
si_code = FPE_INTDIV;
sig = SIGFPE;
break;
case VEC_CHK:
case VEC_TRAP:
si_code = FPE_INTOVF;
sig = SIGFPE;
break;
case VEC_TRACE: /* ptrace single step */
si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_TRAP15: /* breakpoint */
si_code = TRAP_BRKPT;
sig = SIGTRAP;
break;
default:
si_code = ILL_ILLOPC;
sig = SIGILL;
break;
}
switch (fp->ptregs.format) {
default:
addr = (void __user *) fp->ptregs.pc;
break;
case 2:
addr = (void __user *) fp->un.fmt2.iaddr;
break;
case 7:
addr = (void __user *) fp->un.fmt7.effaddr;
break;
case 9:
addr = (void __user *) fp->un.fmt9.iaddr;
break;
case 10:
addr = (void __user *) fp->un.fmta.daddr;
break;
case 11:
addr = (void __user*) fp->un.fmtb.daddr;
break;
}
force_sig_fault(sig, si_code, addr);
}
void die_if_kernel (char *str, struct pt_regs *fp, int nr)
{
if (!(fp->sr & PS_S))
return;
console_verbose();
pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
make_task_dead(SIGSEGV);
}
asmlinkage void set_esp0(unsigned long ssp)
{
current->thread.esp0 = ssp;
}
/*
* This function is called if an error occurs while accessing
* user-space from the fpsp040 code.
*/
asmlinkage void fpsp040_die(void)
{
force_exit_sig(SIGSEGV);
}
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
force_sig_fault(signal, code, addr);
}
#endif
/* ==== linux-master: arch/m68k/kernel/traps.c ==== */
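/*
 * Worked example of the ColdFire fault-status decode used in buserr_c()
 * above: the 4-bit "fs" field is split across the exception vector word,
 * bits [1:0] and bits [11:10]. Extracted here as a standalone helper for
 * illustration only.
 */
static inline unsigned int cf_fault_status(unsigned int vector)
{
	/* low two bits, plus bits 11:10 shifted down to bits 3:2 */
	return (vector & 0x3) | ((vector & 0xc00) >> 8);
}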
// SPDX-License-Identifier: GPL-2.0
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define ASM_OFFSETS_C
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the thread_info struct */
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
#ifdef CONFIG_COLDFIRE
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
#else
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
#endif
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* signal defines */
DEFINE(LSIGSEGV, SIGSEGV);
DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
DEFINE(LSIGTRAP, SIGTRAP);
DEFINE(LTRAP_TRACE, TRAP_TRACE);
#ifdef CONFIG_MMU
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
DEFINE(BIR_DATA, offsetof(struct bi_record, data));
/* offsets into the font_desc struct */
DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
DEFINE(CIAABASE, &ciaa);
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
/* enum m68k_fixup_type */
DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset);
#endif
return 0;
}
/* ==== linux-master: arch/m68k/kernel/asm-offsets.c ==== */
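/*
 * Sketch of the mechanism described above: DEFINE() from linux/kbuild.h
 * emits an .ascii marker into the compiler's assembly output, and the
 * kbuild sed script turns each "->SYM value" marker into a #define in
 * the generated asm-offsets.h. This is a simplified rendering of the
 * real macro, shown for illustration.
 */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))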
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef CONFIG_MODULES
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
pr_err("module %s: Unknown relocation: %u\n", me->name,
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
pr_err("module %s: Unknown relocation: %u\n", me->name,
ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
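/*
 * Worked example (illustrative addresses, not from a real module): with
 * sym->st_value = 0x1000 and location = (uint32_t *)0x2000,
 *
 *   R_68K_32  (REL):   *location += 0x1000;           an absolute address
 *   R_68K_PC32 (REL):  *location += 0x1000 - 0x2000;  i.e. -0x1000, a
 *                      PC-relative displacement from the patched word
 *
 * The RELA variants in apply_relocate_add() compute the same values but
 * start from the explicit r_addend instead of the bytes already stored
 * at the location.
 */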
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
return 0;
}
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end)
{
#ifdef CONFIG_MMU
struct m68k_fixup_info *fixup;
for (fixup = start; fixup < end; fixup++) {
switch (fixup->type) {
case m68k_fixup_memoffset:
*(u32 *)fixup->addr = m68k_memoffset;
break;
case m68k_fixup_vnode_shift:
*(u16 *)fixup->addr += m68k_virt_to_node_shift;
break;
}
}
#endif
}
| linux-master | arch/m68k/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/m68k/kernel/setup.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/nvram.h>
#include <linux/initrd.h>
#include <linux/random.h>
#include <asm/bootinfo.h>
#include <asm/byteorder.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
#include <asm/atarihw.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
#include <asm/macintosh.h>
#include <asm/natfeat.h>
#include <asm/config.h>
#if !FPSTATESIZE || !NR_IRQS
#warning No CPU/platform type selected, your kernel will not work!
#warning Are you building an allnoconfig kernel?
#endif
unsigned long m68k_machtype;
EXPORT_SYMBOL(m68k_machtype);
unsigned long m68k_cputype;
EXPORT_SYMBOL(m68k_cputype);
unsigned long m68k_fputype;
unsigned long m68k_mmutype;
EXPORT_SYMBOL(m68k_mmutype);
#ifdef CONFIG_VME
unsigned long vme_brdtype;
EXPORT_SYMBOL(vme_brdtype);
#endif
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
extern unsigned long availmem;
int m68k_num_memory;
EXPORT_SYMBOL(m68k_num_memory);
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
unsigned long m68k_memoffset;
struct m68k_mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
static struct m68k_mem_info m68k_ramdisk __initdata;
static char m68k_command_line[CL_SIZE] __initdata;
void (*mach_sched_init) (void) __initdata = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
void (*mach_reset)( void );
void (*mach_halt)( void );
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
#endif
#ifdef CONFIG_M68K_L2_CACHE
void (*mach_l2_flush) (int);
#endif
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
EXPORT_SYMBOL(isa_type);
EXPORT_SYMBOL(isa_sex);
#endif
#define MASK_256K 0xfffc0000
extern void paging_init(void);
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
const struct bi_record *first_record = record;
uint16_t tag;
while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
int unknown = 0;
const void *data = record->data;
uint16_t size = be16_to_cpu(record->size);
switch (tag) {
case BI_MACHTYPE:
case BI_CPUTYPE:
case BI_FPUTYPE:
case BI_MMUTYPE:
/* Already set up by head.S */
break;
case BI_MEMCHUNK:
if (m68k_num_memory < NUM_MEMINFO) {
const struct mem_info *m = data;
m68k_memory[m68k_num_memory].addr =
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
__func__);
break;
case BI_RAMDISK:
{
const struct mem_info *m = data;
m68k_ramdisk.addr = be32_to_cpu(m->addr);
m68k_ramdisk.size = be32_to_cpu(m->size);
}
break;
case BI_COMMAND_LINE:
strscpy(m68k_command_line, data,
sizeof(m68k_command_line));
break;
case BI_RNG_SEED: {
u16 len = be16_to_cpup(data);
add_bootloader_randomness(data + 2, len);
/*
* Zero the data to preserve forward secrecy, and zero the
* length to prevent kexec from using it.
*/
memzero_explicit((void *)data, len + 2);
break;
}
default:
if (MACH_IS_AMIGA)
unknown = amiga_parse_bootinfo(record);
else if (MACH_IS_ATARI)
unknown = atari_parse_bootinfo(record);
else if (MACH_IS_MAC)
unknown = mac_parse_bootinfo(record);
else if (MACH_IS_Q40)
unknown = q40_parse_bootinfo(record);
else if (MACH_IS_BVME6000)
unknown = bvme6000_parse_bootinfo(record);
else if (MACH_IS_MVME16x)
unknown = mvme16x_parse_bootinfo(record);
else if (MACH_IS_MVME147)
unknown = mvme147_parse_bootinfo(record);
else if (MACH_IS_HP300)
unknown = hp300_parse_bootinfo(record);
else if (MACH_IS_APOLLO)
unknown = apollo_parse_bootinfo(record);
else if (MACH_IS_VIRT)
unknown = virt_parse_bootinfo(record);
else
unknown = 1;
}
if (unknown)
pr_warn("%s: unknown tag 0x%04x ignored\n", __func__,
tag);
record = (struct bi_record *)((unsigned long)record + size);
}
save_bootinfo(first_record);
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
pr_warn("%s: ignoring last %i chunks of physical memory\n",
__func__, (m68k_num_memory - 1));
m68k_num_memory = 1;
}
#endif
}
void __init setup_arch(char **cmdline_p)
{
/* The bootinfo is located right after the kernel */
if (!CPU_IS_COLDFIRE)
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
else if (CPU_IS_060)
m68k_is040or060 = 6;
/* FIXME: m68k_fputype is passed in by Penguin booter, which can
* be confused by software FPU emulation. BEWARE.
* We should really do our own FPU check at startup.
* [what do we do with buggy 68LC040s? if we have problems
* with them, we should add a test to check_bugs() below] */
#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU_ONLY)
/* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero));
}
#endif
if (CPU_IS_060) {
u32 pcr;
asm (".chip 68060; movec %%pcr,%0; .chip 68k"
: "=d" (pcr));
if (((pcr >> 8) & 0xff) <= 5) {
pr_warn("Enabling workaround for errata I14\n");
asm (".chip 68060; movec %0,%%pcr; .chip 68k"
: : "d" (pcr | 0x20));
}
}
setup_initial_init_mm((void *)PAGE_OFFSET, _etext, _edata, _end);
#if defined(CONFIG_BOOTPARAM)
strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
m68k_command_line[CL_SIZE - 1] = 0;
#endif /* CONFIG_BOOTPARAM */
process_uboot_commandline(&m68k_command_line[0], CL_SIZE);
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
parse_early_param();
switch (m68k_machtype) {
#ifdef CONFIG_AMIGA
case MACH_AMIGA:
config_amiga();
break;
#endif
#ifdef CONFIG_ATARI
case MACH_ATARI:
config_atari();
break;
#endif
#ifdef CONFIG_MAC
case MACH_MAC:
config_mac();
break;
#endif
#ifdef CONFIG_SUN3
case MACH_SUN3:
config_sun3();
break;
#endif
#ifdef CONFIG_APOLLO
case MACH_APOLLO:
config_apollo();
break;
#endif
#ifdef CONFIG_MVME147
case MACH_MVME147:
config_mvme147();
break;
#endif
#ifdef CONFIG_MVME16x
case MACH_MVME16x:
config_mvme16x();
break;
#endif
#ifdef CONFIG_BVME6000
case MACH_BVME6000:
config_bvme6000();
break;
#endif
#ifdef CONFIG_HP300
case MACH_HP300:
config_hp300();
break;
#endif
#ifdef CONFIG_Q40
case MACH_Q40:
config_q40();
break;
#endif
#ifdef CONFIG_SUN3X
case MACH_SUN3X:
config_sun3x();
break;
#endif
#ifdef CONFIG_COLDFIRE
case MACH_M54XX:
case MACH_M5441X:
cf_bootmem_alloc();
cf_mmu_context_init();
config_BSP(NULL, 0);
break;
#endif
#ifdef CONFIG_VIRT
case MACH_VIRT:
config_virt();
break;
#endif
default:
panic("No configuration setup");
}
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size)
memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
paging_init();
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) {
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#ifdef CONFIG_NATFEAT
nf_init();
#endif
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_reserve_pages((void *)availmem);
#endif
#ifdef CONFIG_SUN3X
if (MACH_IS_SUN3X) {
dvma_init();
}
#endif
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
if (MACH_IS_Q40) {
isa_type = ISA_TYPE_Q40;
isa_sex = 0;
}
#ifdef CONFIG_AMIGA_PCMCIA
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
isa_type = ISA_TYPE_AG;
isa_sex = 1;
}
#endif
#ifdef CONFIG_ATARI_ROM_ISA
if (MACH_IS_ATARI) {
isa_type = ISA_TYPE_ENEC;
isa_sex = 0;
}
#endif
#endif
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *cpu, *mmu, *fpu;
unsigned long clockfreq, clockfactor;
#define LOOP_CYCLES_68020 (8)
#define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1)
#define LOOP_CYCLES_COLDFIRE (2)
if (CPU_IS_020) {
cpu = "68020";
clockfactor = LOOP_CYCLES_68020;
} else if (CPU_IS_030) {
cpu = "68030";
clockfactor = LOOP_CYCLES_68030;
} else if (CPU_IS_040) {
cpu = "68040";
clockfactor = LOOP_CYCLES_68040;
} else if (CPU_IS_060) {
cpu = "68060";
clockfactor = LOOP_CYCLES_68060;
} else if (CPU_IS_COLDFIRE) {
cpu = "ColdFire";
clockfactor = LOOP_CYCLES_COLDFIRE;
} else {
cpu = "680x0";
clockfactor = 0;
}
#ifdef CONFIG_M68KFPU_EMU_ONLY
fpu = "none(soft float)";
#else
if (m68k_fputype & FPU_68881)
fpu = "68881";
else if (m68k_fputype & FPU_68882)
fpu = "68882";
else if (m68k_fputype & FPU_68040)
fpu = "68040";
else if (m68k_fputype & FPU_68060)
fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA";
else if (m68k_fputype & FPU_COLDFIRE)
fpu = "ColdFire";
else
fpu = "none";
#endif
if (m68k_mmutype & MMU_68851)
mmu = "68851";
else if (m68k_mmutype & MMU_68030)
mmu = "68030";
else if (m68k_mmutype & MMU_68040)
mmu = "68040";
else if (m68k_mmutype & MMU_68060)
mmu = "68060";
else if (m68k_mmutype & MMU_SUN3)
mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo";
else if (m68k_mmutype & MMU_COLDFIRE)
mmu = "ColdFire";
else
mmu = "unknown";
clockfreq = loops_per_jiffy * HZ * clockfactor;
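	/*
	 * Illustrative numbers: with HZ=100, loops_per_jiffy=125000 and a
	 * 68040 (clockfactor 3), clockfreq = 125000*100*3 = 37500000,
	 * printed below as "37.5MHz".  The estimate assumes the delay loop
	 * costs exactly LOOP_CYCLES_* cycles per iteration.
	 */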
seq_printf(m, "CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FPU:\t\t%s\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu, mmu, fpu,
clockfreq/1000000,(clockfreq/100000)%10,
loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
loops_per_jiffy);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#ifdef CONFIG_PROC_HARDWARE
static int hardware_proc_show(struct seq_file *m, void *v)
{
char model[80];
unsigned long mem;
int i;
if (mach_get_model)
mach_get_model(model);
else
strcpy(model, "Unknown m68k");
seq_printf(m, "Model:\t\t%s\n", model);
for (mem = 0, i = 0; i < m68k_num_memory; i++)
mem += m68k_memory[i].size;
seq_printf(m, "System Memory:\t%ldK\n", mem >> 10);
if (mach_get_hardware_list)
mach_get_hardware_list(m);
return 0;
}
static int __init proc_hardware_init(void)
{
proc_create_single("hardware", 0, NULL, hardware_proc_show);
return 0;
}
module_init(proc_hardware_init);
#endif
void __init arch_cpu_finalize_init(void)
{
#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
if (m68k_fputype == 0) {
pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
pr_emerg("Upgrade your hardware or join the FPU "
"emulation project\n");
panic("no FPU");
}
#endif /* !CONFIG_M68KFPU_EMU */
}
#ifdef CONFIG_ADB
static int __init adb_probe_sync_enable(char *str)
{
extern int __adb_probe_sync;
__adb_probe_sync = 1;
return 1;
}
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
#if IS_ENABLED(CONFIG_NVRAM)
#ifdef CONFIG_MAC
static unsigned char m68k_nvram_read_byte(int addr)
{
if (MACH_IS_MAC)
return mac_pram_read_byte(addr);
return 0xff;
}
static void m68k_nvram_write_byte(unsigned char val, int addr)
{
if (MACH_IS_MAC)
mac_pram_write_byte(val, addr);
}
#endif /* CONFIG_MAC */
#ifdef CONFIG_ATARI
static ssize_t m68k_nvram_read(char *buf, size_t count, loff_t *ppos)
{
if (MACH_IS_ATARI)
return atari_nvram_read(buf, count, ppos);
else if (MACH_IS_MAC)
return nvram_read_bytes(buf, count, ppos);
return -EINVAL;
}
static ssize_t m68k_nvram_write(char *buf, size_t count, loff_t *ppos)
{
if (MACH_IS_ATARI)
return atari_nvram_write(buf, count, ppos);
else if (MACH_IS_MAC)
return nvram_write_bytes(buf, count, ppos);
return -EINVAL;
}
static long m68k_nvram_set_checksum(void)
{
if (MACH_IS_ATARI)
return atari_nvram_set_checksum();
return -EINVAL;
}
static long m68k_nvram_initialize(void)
{
if (MACH_IS_ATARI)
return atari_nvram_initialize();
return -EINVAL;
}
#endif /* CONFIG_ATARI */
static ssize_t m68k_nvram_get_size(void)
{
if (MACH_IS_ATARI)
return atari_nvram_get_size();
else if (MACH_IS_MAC)
return mac_pram_get_size();
return -ENODEV;
}
/* Atari device drivers call .read (to get checksum validation) whereas
* Mac and PowerMac device drivers just use .read_byte.
*/
const struct nvram_ops arch_nvram_ops = {
#ifdef CONFIG_MAC
.read_byte = m68k_nvram_read_byte,
.write_byte = m68k_nvram_write_byte,
#endif
#ifdef CONFIG_ATARI
.read = m68k_nvram_read,
.write = m68k_nvram_write,
.set_checksum = m68k_nvram_set_checksum,
.initialize = m68k_nvram_initialize,
#endif
.get_size = m68k_nvram_get_size,
};
EXPORT_SYMBOL(arch_nvram_ops);
#endif /* CONFIG_NVRAM */
| linux-master | arch/m68k/kernel/setup_mm.c |
// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_MMU
#include "setup_mm.c"
#else
#include "setup_no.c"
#endif
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
void (*mach_beep)(unsigned int, unsigned int);
EXPORT_SYMBOL(mach_beep);
#endif
| linux-master | arch/m68k/kernel/setup.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#undef DEBUG
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
void arch_dma_prep_coherent(struct page *page, size_t size)
{
cache_push(page_to_phys(page), size);
}
pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
if (CPU_IS_040_OR_060) {
pgprot_val(prot) &= ~_PAGE_CACHE040;
pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
} else {
pgprot_val(prot) |= _PAGE_NOCACHE030;
}
return prot;
}
#else
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
void *ret;
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_phys(ret);
}
return ret;
}
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
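/*
 * Direction semantics for the cache maintenance below: before the device
 * reads memory (TO_DEVICE / BIDIRECTIONAL), dirty CPU cache lines must be
 * written back to RAM, hence cache_push(); before the CPU reads what the
 * device wrote (FROM_DEVICE), stale lines must be discarded, hence
 * cache_clear().
 */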
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_TO_DEVICE:
cache_push(handle, size);
break;
case DMA_FROM_DEVICE:
cache_clear(handle, size);
break;
default:
pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
dir);
break;
}
}
| linux-master | arch/m68k/kernel/dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/m68k/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* This file contains the m68k-specific time handling details.
* Most of the stuff is located in the machine specific files.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/profile.h>
unsigned long (*mach_random_get_entropy)(void);
EXPORT_SYMBOL_GPL(mach_random_get_entropy);
#ifdef CONFIG_HEARTBEAT
void timer_heartbeat(void)
{
	/* Use the power LED as a heartbeat instead -- much more useful
	   for debugging -- based on the version for PReP by Cort. */
	/* It acts like an actual heartbeat -- i.e. thump-thump-pause... */
if (mach_heartbeat) {
static unsigned cnt = 0, period = 0, dist = 0;
if (cnt == 0 || cnt == dist)
mach_heartbeat( 1 );
else if (cnt == 7 || cnt == dist+7)
mach_heartbeat( 0 );
if (++cnt > period) {
cnt = 0;
			/* The hyperbolic function below adjusts the heartbeat
			 * period depending on the current (5 min) load
			 * average.  It goes through the points f(0)=126,
			 * f(1)=86, f(5)=51, f(inf)->30. */
period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
dist = period / 4;
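			/* Worked values of the formula above: at load 0 the
			 * period is 672/7+30 = 126 ticks, at load 1.0 it is
			 * 672/12+30 = 86, at 5.0 it is 672/32+30 = 51, and
			 * it approaches 30 as the load grows -- matching the
			 * f() points quoted in the comment. */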
}
}
}
#endif /* CONFIG_HEARTBEAT */
#ifdef CONFIG_M68KCLASSIC
/* machine dependent timer functions */
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
void read_persistent_clock64(struct timespec64 *ts)
{
struct rtc_time time;
ts->tv_sec = 0;
ts->tv_nsec = 0;
if (!mach_hwclk)
return;
mach_hwclk(0, &time);
ts->tv_sec = mktime64(time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
}
#endif
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
mach_hwclk(0, tm);
return 0;
}
static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
if (mach_hwclk(1, tm) < 0)
return -EOPNOTSUPP;
return 0;
}
static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct rtc_pll_info pll;
struct rtc_pll_info __user *argp = (void __user *)arg;
switch (cmd) {
case RTC_PLL_GET:
if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
return -EINVAL;
return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
case RTC_PLL_SET:
if (!mach_set_rtc_pll)
return -EINVAL;
if (!capable(CAP_SYS_TIME))
return -EACCES;
if (copy_from_user(&pll, argp, sizeof(pll)))
return -EFAULT;
return mach_set_rtc_pll(&pll);
}
return -ENOIOCTLCMD;
}
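/*
 * Sketch of the matching userspace call (hypothetical device node name;
 * any RTC char device bound to rtc-generic behaves the same; error
 * handling and <fcntl.h>/<linux/rtc.h> includes omitted):
 *
 *	struct rtc_pll_info pll;
 *	int fd = open("/dev/rtc0", O_RDONLY);
 *	if (ioctl(fd, RTC_PLL_GET, &pll) == 0)
 *		printf("pll value %d\n", pll.pll_value);
 */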
static const struct rtc_class_ops generic_rtc_ops = {
.ioctl = rtc_ioctl,
.read_time = rtc_generic_get_time,
.set_time = rtc_generic_set_time,
};
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!mach_hwclk)
return -ENODEV;
pdev = platform_device_register_data(NULL, "rtc-generic", -1,
&generic_rtc_ops,
sizeof(generic_rtc_ops));
return PTR_ERR_OR_ZERO(pdev);
}
module_init(rtc_init);
#endif /* CONFIG_RTC_DRV_GENERIC */
#endif /* CONFIG_M68KCLASSIC */
void __init time_init(void)
{
mach_sched_init();
}
| linux-master | arch/m68k/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0
/*
* machine_kexec.c - handle transition of Linux booting another kernel
*/
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/setup.h>
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
int machine_kexec_prepare(struct kimage *kimage)
{
return 0;
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void machine_shutdown(void)
{
}
void machine_crash_shutdown(struct pt_regs *regs)
{
}
typedef void (*relocate_kernel_t)(unsigned long ptr,
unsigned long start,
unsigned long cpu_mmu_flags) __noreturn;
void machine_kexec(struct kimage *image)
{
void *reboot_code_buffer;
unsigned long cpu_mmu_flags;
reboot_code_buffer = page_address(image->control_code_page);
memcpy(reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
/*
* we do not want to be bothered.
*/
local_irq_disable();
pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start);
__flush_cache_all();
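	/*
	 * Pack both IDs into one argument for the relocation stub: CPU
	 * type in the low byte, MMU type in the next one.  Both are the
	 * one-bit flags from <asm/setup.h>, so e.g. a 68040 system would
	 * pass 0x0404 (illustrative, assuming the usual flag values).
	 */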
cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8;
((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK,
image->start,
cpu_mmu_flags);
}
| linux-master | arch/m68k/kernel/machine_kexec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Based on arch/arm/kernel/atags_proc.c
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/bootinfo.h>
#include <asm/byteorder.h>
static char bootinfo_tmp[1536] __initdata;
static void *bootinfo_copy;
static size_t bootinfo_size;
static ssize_t bootinfo_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, bootinfo_copy,
bootinfo_size);
}
static const struct proc_ops bootinfo_proc_ops = {
.proc_read = bootinfo_read,
.proc_lseek = default_llseek,
};
void __init save_bootinfo(const struct bi_record *bi)
{
const void *start = bi;
size_t size = sizeof(bi->tag);
while (be16_to_cpu(bi->tag) != BI_LAST) {
uint16_t n = be16_to_cpu(bi->size);
size += n;
bi = (struct bi_record *)((unsigned long)bi + n);
}
if (size > sizeof(bootinfo_tmp)) {
pr_err("Cannot save %zu bytes of bootinfo\n", size);
return;
}
pr_info("Saving %zu bytes of bootinfo\n", size);
memcpy(bootinfo_tmp, start, size);
bootinfo_size = size;
}
static int __init init_bootinfo_procfs(void)
{
/*
* This cannot go into save_bootinfo() because kmalloc and proc don't
* work yet when it is called.
*/
struct proc_dir_entry *pde;
if (!bootinfo_size)
return -EINVAL;
bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL);
if (!bootinfo_copy)
return -ENOMEM;
pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_proc_ops, NULL);
if (!pde) {
kfree(bootinfo_copy);
return -ENOMEM;
}
return 0;
}
arch_initcall(init_bootinfo_procfs);
| linux-master | arch/m68k/kernel/bootinfo_proc.c |
/*
* linux/arch/m68k/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
* 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
*
* mathemu support by Roman Zippel
* (Note: fpstate in the signal context is completely ignored for the emulator
* and the internal floating point format is put on stack)
*/
/*
* ++roman (07/09/96): implemented signal stacks (specially for tosemu on
* Atari :-) Current limitation: Only one sigstack can be active at one time.
* If a second signal with SA_ONSTACK set arrives while working on a sigstack,
* SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
* signal handlers!
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/extable.h>
#include <linux/resume_user_mode.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU
/*
* Handle the slight differences in classic 68k and ColdFire trap frames.
*/
#ifdef CONFIG_COLDFIRE
#define FORMAT 4
#define FMT4SIZE 0
#else
#define FORMAT 0
#define FMT4SIZE sizeof_field(struct frame, un.fmt4)
#endif
static const int frame_size_change[16] = {
[1] = -1, /* sizeof_field(struct frame, un.fmt1), */
[2] = sizeof_field(struct frame, un.fmt2),
[3] = sizeof_field(struct frame, un.fmt3),
[4] = FMT4SIZE,
[5] = -1, /* sizeof_field(struct frame, un.fmt5), */
[6] = -1, /* sizeof_field(struct frame, un.fmt6), */
[7] = sizeof_field(struct frame, un.fmt7),
[8] = -1, /* sizeof_field(struct frame, un.fmt8), */
[9] = sizeof_field(struct frame, un.fmt9),
[10] = sizeof_field(struct frame, un.fmta),
[11] = sizeof_field(struct frame, un.fmtb),
[12] = -1, /* sizeof_field(struct frame, un.fmtc), */
[13] = -1, /* sizeof_field(struct frame, un.fmtd), */
[14] = -1, /* sizeof_field(struct frame, un.fmte), */
[15] = -1, /* sizeof_field(struct frame, un.fmtf), */
};
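/*
 * Each entry is the number of exception-frame bytes beyond the basic
 * 8-byte (four-word) frame for that format; -1 marks formats a signal
 * frame can never legitimately carry, which mangle_kernel_stack() below
 * rejects.
 */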
static inline int frame_extra_sizes(int f)
{
return frame_size_change[f];
}
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
struct pt_regs *tregs;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (!fixup)
return 0;
/* Create a new four word stack frame, discarding the old one. */
regs->stkadj = frame_extra_sizes(regs->format);
tregs = (struct pt_regs *)((long)regs + regs->stkadj);
tregs->vector = regs->vector;
tregs->format = FORMAT;
tregs->pc = fixup->fixup;
tregs->sr = regs->sr;
return 1;
}
static inline void push_cache (unsigned long vaddr)
{
/*
* Using the old cache_push_v() was really a big waste.
*
* What we are trying to do is to flush 8 bytes to ram.
* Flushing 2 cache lines of 16 bytes is much cheaper than
* flushing 1 or 2 pages, as previously done in
* cache_push_v().
* Jes
*/
if (CPU_IS_040) {
unsigned long temp;
__asm__ __volatile__ (".chip 68040\n\t"
"nop\n\t"
"ptestr (%1)\n\t"
"movec %%mmusr,%0\n\t"
".chip 68k"
: "=r" (temp)
: "a" (vaddr));
temp &= PAGE_MASK;
temp |= vaddr & ~PAGE_MASK;
__asm__ __volatile__ (".chip 68040\n\t"
"nop\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (temp));
}
else if (CPU_IS_060) {
unsigned long temp;
__asm__ __volatile__ (".chip 68060\n\t"
"plpar (%0)\n\t"
".chip 68k"
: "=a" (temp)
: "0" (vaddr));
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (temp));
} else if (!CPU_IS_COLDFIRE) {
/*
* 68030/68020 have no writeback cache;
* still need to clear icache.
* Note that vaddr is guaranteed to be long word aligned.
*/
unsigned long temp;
asm volatile ("movec %%cacr,%0" : "=r" (temp));
temp += 4;
asm volatile ("movec %0,%%caar\n\t"
"movec %1,%%cacr"
: : "r" (vaddr), "r" (temp));
asm volatile ("movec %0,%%caar\n\t"
"movec %1,%%cacr"
: : "r" (vaddr + 4), "r" (temp));
} else {
/* CPU_IS_COLDFIRE */
#if defined(CONFIG_CACHE_COPYBACK)
flush_cf_dcache(0, DCACHE_MAX_ADDR);
#endif
/* Invalidate instruction cache for the pushed bytes */
clear_cf_icache(vaddr, vaddr + 8);
}
}
static inline void adjustformat(struct pt_regs *regs)
{
}
static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
}
#else /* CONFIG_MMU */
void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);
static inline int frame_extra_sizes(int f)
{
/* No frame size adjustments required on non-MMU CPUs */
return 0;
}
static inline void adjustformat(struct pt_regs *regs)
{
/*
* set format byte to make stack appear modulo 4, which it will
* be when doing the rte
*/
regs->format = 0x4;
}
static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
}
static inline void push_cache(unsigned long vaddr)
{
}
#endif /* CONFIG_MMU */
/*
* Do a signal return; undo the signal stack.
*
* Keep the return code on the stack quadword aligned!
* That makes the cache flush below easier.
*/
struct sigframe
{
char __user *pretcode;
int sig;
int code;
struct sigcontext __user *psc;
char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
};
struct rt_sigframe
{
char __user *pretcode;
int sig;
struct siginfo __user *pinfo;
void __user *puc;
char retcode[8];
struct siginfo info;
struct ucontext uc;
};
#define FPCONTEXT_SIZE 216
#define uc_fpstate uc_filler[0]
#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
#ifdef CONFIG_FPU
static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
static inline int restore_fpu_state(struct sigcontext *sc)
{
int err = 1;
if (FPU_IS_EMU) {
/* restore registers */
memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
memcpy(current->thread.fp, sc->sc_fpregs, 24);
return 0;
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */
if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(sc->sc_fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(sc->sc_fpstate[1] == 0x00 ||
sc->sc_fpstate[1] == 0x28 ||
sc->sc_fpstate[1] == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(sc->sc_fpstate[3] == 0x00 ||
sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0))
goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(sc->sc_fpstate[0] == 0x00 ||
sc->sc_fpstate[0] == 0x05 ||
sc->sc_fpstate[0] == 0xe5))
goto out;
} else
goto out;
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
"fmovel %1,%%fpcr\n\t"
"fmovel %2,%%fpsr\n\t"
"fmovel %3,%%fpiar"
: /* no outputs */
: "m" (sc->sc_fpregs[0]),
"m" (sc->sc_fpcntl[0]),
"m" (sc->sc_fpcntl[1]),
"m" (sc->sc_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs),
"m" (*sc->sc_fpcntl));
}
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate));
}
err = 0;
out:
return err;
}
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
fpregset_t fpregs;
int err = 1;
if (FPU_IS_EMU) {
/* restore fpu control register */
if (__copy_from_user(current->thread.fpcntl,
uc->uc_mcontext.fpregs.f_fpcntl, 12))
goto out;
		/* restore all other fpu registers */
if (__copy_from_user(current->thread.fp,
uc->uc_mcontext.fpregs.f_fpregs, 96))
goto out;
return 0;
}
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
/* Verify the frame format. */
if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
(fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(context_size == 0x18 || context_size == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(context_size == 0x38 || context_size == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(context_size == 0x00 ||
context_size == 0x28 ||
context_size == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x60 ||
fpstate[3] == 0xe0))
goto out;
} else if (CPU_IS_COLDFIRE) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x05 ||
fpstate[3] == 0xe5))
goto out;
} else
goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs)))
goto out;
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
"fmovel %1,%%fpcr\n\t"
"fmovel %2,%%fpsr\n\t"
"fmovel %3,%%fpiar"
: /* no outputs */
: "m" (fpregs.f_fpregs[0]),
"m" (fpregs.f_fpcntl[0]),
"m" (fpregs.f_fpcntl[1]),
"m" (fpregs.f_fpcntl[2]));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (*fpregs.f_fpcntl));
}
}
if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size))
goto out;
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("frestore %0" : : "m" (*fpstate));
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k"
: : "m" (*fpstate));
}
err = 0;
out:
return err;
}
/*
* Set up a signal frame.
*/
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
if (FPU_IS_EMU) {
/* save registers */
memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
memcpy(sc->sc_fpregs, current->thread.fp, 24);
return;
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fsave %0"
: : "m" (*sc->sc_fpstate) : "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate) : "memory");
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0];
if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
sc->sc_fpstate[0x38] |= 1 << 3;
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
"fmovel %%fpcr,%1\n\t"
"fmovel %%fpsr,%2\n\t"
"fmovel %%fpiar,%3"
: "=m" (sc->sc_fpregs[0]),
"=m" (sc->sc_fpcntl[0]),
"=m" (sc->sc_fpcntl[1]),
"=m" (sc->sc_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*sc->sc_fpregs),
"=m" (*sc->sc_fpcntl)
: /* no inputs */
: "memory");
}
}
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
int err = 0;
if (FPU_IS_EMU) {
/* save fpu control register */
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
current->thread.fpcntl, 12);
		/* save all other fpu registers */
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
current->thread.fp, 96);
return err;
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*fpstate) : "memory");
}
err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs;
if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
if (*(unsigned short *) fpstate == 0x1f38)
fpstate[0x38] |= 1 << 3;
}
if (CPU_IS_COLDFIRE) {
__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
"fmovel %%fpcr,%1\n\t"
"fmovel %%fpsr,%2\n\t"
"fmovel %%fpiar,%3"
: "=m" (fpregs.f_fpregs[0]),
"=m" (fpregs.f_fpcntl[0]),
"=m" (fpregs.f_fpcntl[1]),
"=m" (fpregs.f_fpcntl[2])
: /* no inputs */
: "memory");
} else {
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*fpregs.f_fpregs),
"=m" (*fpregs.f_fpcntl)
: /* no inputs */
: "memory");
}
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs));
}
if (context_size)
err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
context_size);
return err;
}
#else /* CONFIG_FPU */
/*
* For the case with no FPU configured these all do nothing.
*/
static inline int restore_fpu_state(struct sigcontext *sc)
{
return 0;
}
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
return 0;
}
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_FPU */
static inline void siginfo_build_tests(void)
{
/*
* This needs to be tested on m68k as it has a lesser
* alignment requirement than x86 and that can cause surprises.
*/
/* This is part of the ABI and can never change in size: */
BUILD_BUG_ON(sizeof(siginfo_t) != 128);
/* Ensure the known fields never change in location */
BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8);
/* _kill */
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);
/* _timer */
BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14);
/* _rt */
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14);
/* _sigchld */
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x1c);
/* _sigfault */
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0c);
/* _sigfault._mcerr */
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x10);
/* _sigfault._addr_bnd */
BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x12);
BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x16);
/* _sigfault._addr_pkey */
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
/* _sigfault._perf */
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_perf_flags) != 0x18);
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x10);
/* _sigsys */
BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x14);
/* any new si_fields should be added here */
}
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp)
{
int extra = frame_extra_sizes(formatvec >> 12);
char buf[sizeof_field(struct frame, un)];
if (extra < 0) {
/*
* user process trying to return with weird frame format
*/
pr_debug("user process returning with weird frame format\n");
return -1;
}
if (extra && copy_from_user(buf, fp, extra))
return -1;
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
if (extra) {
void *p = (struct switch_stack *)regs - 1;
struct frame *new = (void *)regs - extra;
int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
memmove(p - extra, p, size);
memcpy(p - extra + size, buf, extra);
current->thread.esp0 = (unsigned long)&new->ptregs;
#ifdef CONFIG_M68040
/* on 68040 complete pending writebacks if any */
if (new->ptregs.format == 7) // bus error frame
berr_040cleanup(new);
#endif
}
return extra;
}
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
int formatvec;
struct sigcontext context;
siginfo_build_tests();
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
return -1;
/* restore passed registers */
regs->d0 = context.sc_d0;
regs->d1 = context.sc_d1;
regs->a0 = context.sc_a0;
regs->a1 = context.sc_a1;
regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
regs->pc = context.sc_pc;
regs->orig_d0 = -1; /* disable syscall checks */
wrusp(context.sc_usp);
formatvec = context.sc_formatvec;
if (restore_fpu_state(&context))
return -1;
return mangle_kernel_stack(regs, formatvec, fp);
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
struct ucontext __user *uc)
{
int temp;
greg_t __user *gregs = uc->uc_mcontext.gregs;
unsigned long usp;
int err;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
return -1;
/* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]);
err |= __get_user(regs->d2, &gregs[2]);
err |= __get_user(regs->d3, &gregs[3]);
err |= __get_user(regs->d4, &gregs[4]);
err |= __get_user(regs->d5, &gregs[5]);
err |= __get_user(sw->d6, &gregs[6]);
err |= __get_user(sw->d7, &gregs[7]);
err |= __get_user(regs->a0, &gregs[8]);
err |= __get_user(regs->a1, &gregs[9]);
err |= __get_user(regs->a2, &gregs[10]);
err |= __get_user(sw->a3, &gregs[11]);
err |= __get_user(sw->a4, &gregs[12]);
err |= __get_user(sw->a5, &gregs[13]);
err |= __get_user(sw->a6, &gregs[14]);
err |= __get_user(usp, &gregs[15]);
wrusp(usp);
err |= __get_user(regs->pc, &gregs[16]);
err |= __get_user(temp, &gregs[17]);
regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
regs->orig_d0 = -1; /* disable syscall checks */
err |= __get_user(temp, &uc->uc_formatvec);
err |= rt_restore_fpu_state(uc);
err |= restore_altstack(&uc->uc_stack);
if (err)
return -1;
return mangle_kernel_stack(regs, temp, &uc->uc_extra);
}
asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
(_NSIG_WORDS > 1 &&
__copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
size = restore_sigcontext(regs, &frame->sc, frame + 1);
if (size < 0)
goto badframe;
return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
return sw;
}
asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
size = rt_restore_ucontext(regs, sw, &frame->uc);
if (size < 0)
goto badframe;
return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
return sw;
}
static inline struct pt_regs *rte_regs(struct pt_regs *regs)
{
return (void *)regs + regs->stkadj;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
unsigned long mask)
{
struct pt_regs *tregs = rte_regs(regs);
sc->sc_mask = mask;
sc->sc_usp = rdusp();
sc->sc_d0 = regs->d0;
sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1;
sc->sc_sr = tregs->sr;
sc->sc_pc = tregs->pc;
sc->sc_formatvec = tregs->format << 12 | tregs->vector;
save_a5_state(sc, regs);
save_fpu_state(sc, regs);
}
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
struct pt_regs *tregs = rte_regs(regs);
greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0;
err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
err |= __put_user(regs->d0, &gregs[0]);
err |= __put_user(regs->d1, &gregs[1]);
err |= __put_user(regs->d2, &gregs[2]);
err |= __put_user(regs->d3, &gregs[3]);
err |= __put_user(regs->d4, &gregs[4]);
err |= __put_user(regs->d5, &gregs[5]);
err |= __put_user(sw->d6, &gregs[6]);
err |= __put_user(sw->d7, &gregs[7]);
err |= __put_user(regs->a0, &gregs[8]);
err |= __put_user(regs->a1, &gregs[9]);
err |= __put_user(regs->a2, &gregs[10]);
err |= __put_user(sw->a3, &gregs[11]);
err |= __put_user(sw->a4, &gregs[12]);
err |= __put_user(sw->a5, &gregs[13]);
err |= __put_user(sw->a6, &gregs[14]);
err |= __put_user(rdusp(), &gregs[15]);
err |= __put_user(tregs->pc, &gregs[16]);
err |= __put_user(tregs->sr, &gregs[17]);
err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
err |= rt_save_fpu_state(uc, regs);
return err;
}
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
{
unsigned long usp = sigsp(rdusp(), ksig);
unsigned long gap = 0;
if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
/* USP is unreliable so use worst-case value */
gap = 256;
}
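	/* Round the frame start down to an 8-byte boundary; the sigreturn
	 * trampoline written into the frame relies on this alignment (see
	 * the quadword-alignment comment above struct sigframe). */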
return (void __user *)((usp - gap - frame_size) & -8UL);
}
static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
struct pt_regs *tregs = rte_regs(regs);
int fsize = frame_extra_sizes(tregs->format);
struct sigcontext context;
int err = 0, sig = ksig->sig;
if (fsize < 0) {
pr_debug("setup_frame: Unknown frame format %#x\n",
tregs->format);
return -EFAULT;
}
frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
err |= __put_user(sig, &frame->sig);
err |= __put_user(tregs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc);
if (_NSIG_WORDS > 1)
err |= copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
setup_sigcontext(&context, regs, set->sig[0]);
err |= copy_to_user (&frame->sc, &context, sizeof(context));
/* Set up to return from userspace. */
#ifdef CONFIG_MMU
err |= __put_user(frame->retcode, &frame->pretcode);
/* moveq #,d0; trap #0 */
err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
(long __user *)(frame->retcode));
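	/*
	 * Decoded: 0x70xx is "moveq #xx,%d0" and 0x4e40 is "trap #0", so
	 * the longword written above is the two-instruction trampoline
	 * "moveq #__NR_sigreturn,%d0; trap #0" placed on the user stack.
	 */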
#else
err |= __put_user((long) ret_from_user_signal,
(long __user *) &frame->pretcode);
#endif
if (err)
return -EFAULT;
push_cache ((unsigned long) &frame->retcode);
/*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
if (fsize) {
regs->stkadj = fsize;
tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
tregs->vector = 0;
tregs->format = 0;
tregs->sr = regs->sr;
}
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
return 0;
}
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
struct pt_regs *tregs = rte_regs(regs);
int fsize = frame_extra_sizes(tregs->format);
int err = 0, sig = ksig->sig;
if (fsize < 0) {
pr_debug("setup_frame: Unknown frame format %#x\n",
regs->format);
return -EFAULT;
}
frame = get_sigframe(ksig, tregs, sizeof(*frame));
if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
err |= __put_user(sig, &frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, rdusp());
err |= rt_setup_ucontext(&frame->uc, regs);
err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. */
#ifdef CONFIG_MMU
err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
/* movel #__NR_rt_sigreturn,d0; trap #0 */
err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
(long __user *)(frame->retcode + 4));
#else
/* moveq #,d0; notb d0; trap #0 */
err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
(long __user *)(frame->retcode + 0));
err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
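	/*
	 * __NR_rt_sigreturn does not fit a sign-extended 8-bit moveq
	 * immediate, so the trampoline loads its one's complement and
	 * fixes it up: 0x70xx is "moveq #(nr^0xff),%d0", 0x4600 is
	 * "not.b %d0" (restoring nr in the low byte), and 0x4e40 is
	 * "trap #0".
	 */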
#endif
#else
err |= __put_user((long) ret_from_user_rt_signal,
(long __user *) &frame->pretcode);
#endif /* CONFIG_MMU */
if (err)
return -EFAULT;
push_cache ((unsigned long) &frame->retcode);
/*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
if (fsize) {
regs->stkadj = fsize;
tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
tregs->vector = 0;
tregs->format = 0;
tregs->sr = regs->sr;
}
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
adjustformat(regs);
return 0;
}
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
switch (regs->d0) {
case -ERESTARTNOHAND:
if (!has_handler)
goto do_restart;
regs->d0 = -EINTR;
break;
case -ERESTART_RESTARTBLOCK:
if (!has_handler) {
regs->d0 = __NR_restart_syscall;
regs->pc -= 2;
break;
}
regs->d0 = -EINTR;
break;
case -ERESTARTSYS:
if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
regs->d0 = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
do_restart:
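		/*
		 * Undo the syscall: d0 currently holds the error code, so
		 * put the original syscall number back and step the PC back
		 * over the 2-byte trap instruction so it re-executes.
		 */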
regs->d0 = regs->orig_d0;
regs->pc -= 2;
break;
}
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int err;
/* are we from a system call? */
if (regs->orig_d0 >= 0)
/* If so, check system call restarting.. */
handle_restart(regs, &ksig->ka, 1);
/* set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err = setup_rt_frame(ksig, oldset, regs);
else
err = setup_frame(ksig, oldset, regs);
signal_setup_done(err, ksig, 0);
if (test_thread_flag(TIF_DELAYED_TRACE)) {
regs->sr &= ~0x8000;
send_sig(SIGTRAP, current, 1);
}
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
current->thread.esp0 = (unsigned long) regs;
if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
/* Did we come from a system call? */
if (regs->orig_d0 >= 0)
/* Restart the system call - no handlers present */
handle_restart(regs, NULL, 0);
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();
}
void do_notify_resume(struct pt_regs *regs)
{
if (test_thread_flag(TIF_NOTIFY_SIGNAL) ||
test_thread_flag(TIF_SIGPENDING))
do_signal(regs);
if (test_thread_flag(TIF_NOTIFY_RESUME))
resume_user_mode_work(regs);
}
| linux-master | arch/m68k/kernel/signal.c |
/*
* vectors.c
*
* Copyright (C) 1993, 1994 by Hamish Macdonald
*
* 68040 fixes by Michael Rausch
* 68040 fixes by Martin Apel
* 68040 fixes and writeback by Richard Zidlicky
* 68060 fixes by Roman Hodek
* 68060 fixes by Jesper Skov
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Sets up all exception vectors
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/traps.h>
/* assembler routines */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void nmihandler(void);
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpu_emu(void);
#endif
e_vector vectors[256];
/* nmi handler for the Amiga */
asm(".text\n"
__ALIGN_STR "\n"
"nmihandler: rte");
/*
 * This must be called very early, as the kernel may use instructions
 * that are emulated on the 68060, so we are prepared for early probe
 * attempts (e.g. nf_init).
*/
void __init base_trap_init(void)
{
if (MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
__asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
}
/* setup the exception vector table */
__asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
if (CPU_IS_060) {
/* set up ISP entry points */
asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
vectors[VEC_UNIMPII] = unimp_vec;
}
vectors[VEC_BUSERR] = buserr;
vectors[VEC_ILLEGAL] = trap;
vectors[VEC_SYS] = system_call;
}
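/*
 * For orientation: VEC_BUSERR is vector 2, VEC_ILLEGAL is vector 4, and
 * VEC_SYS is vector 32 (trap #0), which is how the "trap #0" syscall
 * trampolines built in signal.c end up in system_call() above.
 */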
void __init trap_init (void)
{
int i;
for (i = VEC_SPUR; i <= VEC_INT7; i++)
vectors[i] = bad_inthandler;
for (i = 0; i < VEC_USER; i++)
if (!vectors[i])
vectors[i] = trap;
for (i = VEC_USER; i < 256; i++)
vectors[i] = bad_inthandler;
#ifdef CONFIG_M68KFPU_EMU
if (FPU_IS_EMU)
vectors[VEC_LINE11] = fpu_emu;
#endif
if (CPU_IS_040 && !FPU_IS_EMU) {
/* set up FPSP entry points */
asmlinkage void dz_vec(void) asm ("dz");
asmlinkage void inex_vec(void) asm ("inex");
asmlinkage void ovfl_vec(void) asm ("ovfl");
asmlinkage void unfl_vec(void) asm ("unfl");
asmlinkage void snan_vec(void) asm ("snan");
asmlinkage void operr_vec(void) asm ("operr");
asmlinkage void bsun_vec(void) asm ("bsun");
asmlinkage void fline_vec(void) asm ("fline");
asmlinkage void unsupp_vec(void) asm ("unsupp");
vectors[VEC_FPDIVZ] = dz_vec;
vectors[VEC_FPIR] = inex_vec;
vectors[VEC_FPOVER] = ovfl_vec;
vectors[VEC_FPUNDER] = unfl_vec;
vectors[VEC_FPNAN] = snan_vec;
vectors[VEC_FPOE] = operr_vec;
vectors[VEC_FPBRUC] = bsun_vec;
vectors[VEC_LINE11] = fline_vec;
vectors[VEC_FPUNSUP] = unsupp_vec;
}
if (CPU_IS_060 && !FPU_IS_EMU) {
/* set up IFPSP entry points */
asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
vectors[VEC_FPNAN] = snan_vec6;
vectors[VEC_FPOE] = operr_vec6;
vectors[VEC_FPOVER] = ovfl_vec6;
vectors[VEC_FPUNDER] = unfl_vec6;
vectors[VEC_FPDIVZ] = dz_vec6;
vectors[VEC_FPIR] = inex_vec6;
vectors[VEC_LINE11] = fline_vec6;
vectors[VEC_FPUNSUP] = unsupp_vec6;
vectors[VEC_UNIMPEA] = effadd_vec6;
}
/* if running on an amiga, make the NMI interrupt do nothing */
if (MACH_IS_AMIGA) {
vectors[VEC_INT7] = nmihandler;
}
}
| linux-master | arch/m68k/kernel/vectors.c |
/*
* uboot.c -- uboot arguments support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/rtc.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/sections.h>
/*
* parse_uboot_commandline
*
 * Copies the u-boot command-line arguments and stores them in the proper
 * Linux variables.
*
* Assumes:
* _init_sp global contains the address in the stack pointer when the
* kernel starts (see head.S::_start)
*
* U-Boot calling convention:
* (*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
*
* _init_sp can be parsed as such
*
* _init_sp+00 = u-boot cmd after jsr into kernel (skip)
* _init_sp+04 = &kernel board_info (residual data)
* _init_sp+08 = &initrd_start
* _init_sp+12 = &initrd_end
* _init_sp+16 = &cmd_start
* _init_sp+20 = &cmd_end
*
* This also assumes that the memory locations pointed to are still
* unmodified. U-boot places them near the end of external SDRAM.
*
* Argument(s):
* commandp = the linux commandline arg container to fill.
* size = the sizeof commandp.
*
 * Returns: nothing; the parsed command line is written into commandp.
*/
static void __init parse_uboot_commandline(char *commandp, int size)
{
extern unsigned long _init_sp;
unsigned long *sp;
unsigned long uboot_kbd;
unsigned long uboot_initrd_start, uboot_initrd_end;
unsigned long uboot_cmd_start, uboot_cmd_end;
sp = (unsigned long *)_init_sp;
uboot_kbd = sp[1];
uboot_initrd_start = sp[2];
uboot_initrd_end = sp[3];
uboot_cmd_start = sp[4];
uboot_cmd_end = sp[5];
if (uboot_cmd_start && uboot_cmd_end)
strncpy(commandp, (const char *)uboot_cmd_start, size);
#if defined(CONFIG_BLK_DEV_INITRD)
if (uboot_initrd_start && uboot_initrd_end &&
(uboot_initrd_end > uboot_initrd_start)) {
initrd_start = uboot_initrd_start;
initrd_end = uboot_initrd_end;
ROOT_DEV = Root_RAM0;
pr_info("initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
}
#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
}
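/*
 * Worked example (editor's illustration; the strings are hypothetical):
 * if the built-in command line is "console=ttyS0" and U-Boot passed
 * "root=/dev/ram", process_uboot_commandline() below appends a space
 * plus the U-Boot string, producing
 *
 *	"console=ttyS0 root=/dev/ram"
 *
 * and finally forces NUL termination at index size - 1 of the buffer.
 */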
__init void process_uboot_commandline(char *commandp, int size)
{
int len, n;
n = strnlen(commandp, size);
commandp += n;
len = size - n;
if (len) {
/* Add the whitespace separator */
*commandp++ = ' ';
len--;
}
parse_uboot_commandline(commandp, len);
commandp[len - 1] = 0;
}
| linux-master | arch/m68k/kernel/uboot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2001-2020 Helge Deller <[email protected]>
* Copyright (C) 2001-2002 Thomas Bogendoerfer <[email protected]>
*/
#include <linux/fb.h>
#include <linux/module.h>
#include <video/sticore.h>
int fb_is_primary_device(struct fb_info *info)
{
struct sti_struct *sti;
sti = sti_get_rom(0);
	/* if no built-in graphics card is found, allow any fb driver as default */
if (!sti)
return true;
/* return true if it's the default built-in framebuffer driver */
return (sti->info == info);
}
EXPORT_SYMBOL(fb_is_primary_device);
| linux-master | arch/parisc/video/fbdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfmpy.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Multiply
*
* External Interfaces:
* dbl_fmpy(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 * <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Multiply
*/
int
dbl_fmpy(
dbl_floating_point *srcptr1,
dbl_floating_point *srcptr2,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* set sign bit of result
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/* invalid since operands are zero & infinity */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(opnd2p1,opnd2p2);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) -DBL_BIAS;
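	/*
	 * Editor's check of the biased arithmetic above: for 8.0 * 2.0 the
	 * biased exponents are 1026 and 1024, so dest_exponent becomes
	 * 1026 + 1024 - 1023 = 1027 = 1023 + 4, i.e. 2^4, matching
	 * 8.0 * 2.0 = 16.0.
	 */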
/*
* Generate mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_normalize(opnd2p1,opnd2p2,dest_exponent);
}
/* Multiply two source mantissas together */
/* make room for guard bits */
Dbl_leftshiftby7(opnd2p1,opnd2p2);
Dbl_setzero(opnd3p1,opnd3p2);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
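	/*
	 * Editor's sketch -- a single-word analogue of the loop below,
	 * meant only to show the shape of the algorithm (the real code
	 * works on two-word mantissas and shifts the accumulator right
	 * so the dropped low-order bits can be folded into stickybit):
	 *
	 *	unsigned long long shift_add_mul(unsigned int a,
	 *					 unsigned int b)
	 *	{
	 *		unsigned long long acc = 0, m = a;
	 *
	 *		while (b) {
	 *			if (b & 1)		// multiplier bit set:
	 *				acc += m;	// add multiplicand
	 *			m <<= 1;	// next bit doubles the weight
	 *			b >>= 1;
	 *		}
	 *		return acc;	// exact 64-bit product a * b
	 *	}
	 *
	 * The loop below does the same work four multiplier bits per
	 * iteration, adding the multiplicand at four different shifts
	 * (the Dbit28p2..Dbit31p2 cases).
	 */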
for (count=1;count<=DBL_P;count+=4) {
stickybit |= Dlow4p2(opnd3p2);
Dbl_rightshiftby4(opnd3p1,opnd3p2);
if (Dbit28p2(opnd1p2)) {
/* Twoword_add should be an ADDC followed by an ADD. */
Twoword_add(opnd3p1, opnd3p2, opnd2p1<<3 | opnd2p2>>29,
opnd2p2<<3);
}
if (Dbit29p2(opnd1p2)) {
Twoword_add(opnd3p1, opnd3p2, opnd2p1<<2 | opnd2p2>>30,
opnd2p2<<2);
}
if (Dbit30p2(opnd1p2)) {
Twoword_add(opnd3p1, opnd3p2, opnd2p1<<1 | opnd2p2>>31,
opnd2p2<<1);
}
if (Dbit31p2(opnd1p2)) {
Twoword_add(opnd3p1, opnd3p2, opnd2p1, opnd2p2);
}
Dbl_rightshiftby4(opnd1p1,opnd1p2);
}
if (Dbit3p1(opnd3p1)==0) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
}
else {
/* result mantissa >= 2. */
dest_exponent++;
}
/* check for denormalized result */
while (Dbit3p1(opnd3p1)==0) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
dest_exponent--;
}
/*
* check for guard, sticky and inexact bits
*/
stickybit |= Dallp2(opnd3p2) << 25;
guardbit = (Dallp2(opnd3p2) << 24) >> 31;
inexact = guardbit | stickybit;
/* align result mantissa */
Dbl_rightshiftby8(opnd3p1,opnd3p2);
/*
* round result
*/
if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
Dbl_clear_signexponent(opnd3p1);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDNEAREST:
if (guardbit) {
if (stickybit || Dbl_isone_lowmantissap2(opnd3p2))
Dbl_increment(opnd3p1,opnd3p2);
}
}
if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
}
Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
/*
* Test for overflow
*/
if (dest_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return (OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return (UNDERFLOWEXCEPTION);
}
/* Determine if should set underflow flag */
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
stickybit,inexact);
/* return zero or smallest number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
}
else Dbl_set_exponent(resultp1,dest_exponent);
/* check for inexact */
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/dfmpy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/fp/fpudispatch.c $Revision: 1.1 $
*
* Purpose:
* <<please update with a synopsis of the functionality provided by this file>>
*
* External Interfaces:
* <<the following list was autogenerated, please review>>
* emfpudispatch(ir, dummy1, dummy2, fpregs)
* fpudispatch(ir, excp_code, holder, fpregs)
*
* Internal Interfaces:
* <<the following list was autogenerated, please review>>
* static u_int decode_06(u_int, u_int *)
* static u_int decode_0c(u_int, u_int, u_int, u_int *)
* static u_int decode_0e(u_int, u_int, u_int, u_int *)
* static u_int decode_26(u_int, u_int *)
* static u_int decode_2e(u_int, u_int *)
* static void update_status_cbit(u_int *, u_int, u_int, u_int)
*
* Theory:
 * <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#define FPUDEBUG 0
#include "float.h"
#include <linux/bug.h>
#include <linux/kernel.h>
#include <asm/processor.h>
/* #include <sys/debug.h> */
/* #include <machine/sys/mdep_private.h> */
#define COPR_INST 0x30000000
/*
* definition of extru macro. If pos and len are constants, the compiler
* will generate an extru instruction when optimized
*/
#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
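/*
 * Editor's example of the PA bit numbering (illustration only): bits are
 * counted 0..31 from the left, so extru(r,pos,len) extracts the len-bit
 * field whose rightmost bit is at position pos.  The 6-bit major opcode
 * occupies bits 0-5, so for the COPR_INST pattern above:
 *
 *	extru(0x30000000, 5, 6) == (0x30000000 >> 26) & 0x3f == 0x0c
 *
 * which is why major opcode 0x0C is dispatched to decode_0c() below.
 */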
/* definitions of bit field locations in the instruction */
#define fpmajorpos 5
#define fpr1pos 10
#define fpr2pos 15
#define fptpos 31
#define fpsubpos 18
#define fpclass1subpos 16
#define fpclasspos 22
#define fpfmtpos 20
#define fpdfpos 18
#define fpnulpos 26
/*
* the following are the extra bits for the 0E major op
*/
#define fpxr1pos 24
#define fpxr2pos 19
#define fpxtpos 25
#define fpxpos 23
#define fp0efmtpos 20
/*
* the following are for the multi-ops
*/
#define fprm1pos 10
#define fprm2pos 15
#define fptmpos 31
#define fprapos 25
#define fptapos 20
#define fpmultifmt 26
/*
* the following are for the fused FP instructions
*/
/* fprm1pos 10 */
/* fprm2pos 15 */
#define fpraupos 18
#define fpxrm2pos 19
/* fpfmtpos 20 */
#define fpralpos 23
#define fpxrm1pos 24
/* fpxtpos 25 */
#define fpfusedsubop 26
/* fptpos 31 */
/*
 * offset to constant zero in the FP emulation registers (u_int index 64,
 * one slot past %fr31)
*/
#define fpzeroreg (32*sizeof(double)/sizeof(u_int))
/*
* extract the major opcode from the instruction
*/
#define get_major(op) extru(op,fpmajorpos,6)
/*
* extract the two bit class field from the FP instruction. The class is at bit
* positions 21-22
*/
#define get_class(op) extru(op,fpclasspos,2)
/*
* extract the 3 bit subop field. For all but class 1 instructions, it is
* located at bit positions 16-18
*/
#define get_subop(op) extru(op,fpsubpos,3)
/*
* extract the 2 or 3 bit subop field from class 1 instructions. It is located
* at bit positions 15-16 (PA1.1) or 14-16 (PA2.0)
*/
#define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2) /* PA89 (1.1) fmt */
#define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3) /* PA 2.0 fmt */
/* definitions of unimplemented exceptions */
#define MAJOR_0C_EXCP 0x09
#define MAJOR_0E_EXCP 0x0b
#define MAJOR_06_EXCP 0x03
#define MAJOR_26_EXCP 0x23
#define MAJOR_2E_EXCP 0x2b
#define PA83_UNIMP_EXCP 0x01
/*
* Special Defines for TIMEX specific code
*/
#define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2)
#define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG)
/*
* Static function definitions
*/
#define _PROTOTYPES
#if defined(_PROTOTYPES) || defined(_lint)
static u_int decode_0c(u_int, u_int, u_int, u_int *);
static u_int decode_0e(u_int, u_int, u_int, u_int *);
static u_int decode_06(u_int, u_int *);
static u_int decode_26(u_int, u_int *);
static u_int decode_2e(u_int, u_int *);
static void update_status_cbit(u_int *, u_int, u_int, u_int);
#else /* !_PROTOTYPES&&!_lint */
static u_int decode_0c();
static u_int decode_0e();
static u_int decode_06();
static u_int decode_26();
static u_int decode_2e();
static void update_status_cbit();
#endif /* _PROTOTYPES&&!_lint */
#define VASSERT(x)
static void parisc_linux_get_fpu_type(u_int fpregs[])
{
/* on pa-linux the fpu type is not filled in by the
* caller; it is constructed here
*/
if (boot_cpu_data.cpu_type == pcxs)
fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG;
else if (boot_cpu_data.cpu_type == pcxt ||
boot_cpu_data.cpu_type == pcxt_)
fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG;
else if (boot_cpu_data.cpu_type >= pcxu)
fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG;
}
/*
* this routine will decode the excepting floating point instruction and
* call the appropriate emulation routine.
* It is called by decode_fpu with the following parameters:
* fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register)
* where current_ir is the instruction to be emulated,
* unimplemented_code is the exception_code that the hardware generated
* and &Fpu_register is the address of emulated FP reg 0.
*/
u_int
fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[])
{
u_int class, subop;
u_int fpu_type_flags;
/* All FP emulation code assumes that ints are 4-bytes in length */
VASSERT(sizeof(int) == 4);
parisc_linux_get_fpu_type(fpregs);
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
class = get_class(ir);
if (class == 1) {
if (fpu_type_flags & PA2_0_FPU_FLAG)
subop = get_subop1_PA2_0(ir);
else
subop = get_subop1_PA1_1(ir);
}
else
subop = get_subop(ir);
if (FPUDEBUG) printk("class %d subop %d\n", class, subop);
switch (excp_code) {
case MAJOR_0C_EXCP:
case PA83_UNIMP_EXCP:
return(decode_0c(ir,class,subop,fpregs));
case MAJOR_0E_EXCP:
return(decode_0e(ir,class,subop,fpregs));
case MAJOR_06_EXCP:
return(decode_06(ir,fpregs));
case MAJOR_26_EXCP:
return(decode_26(ir,fpregs));
case MAJOR_2E_EXCP:
return(decode_2e(ir,fpregs));
default:
/* "crashme Night Gallery painting nr 2. (asm_crash.s).
* This was fixed for multi-user kernels, but
* workstation kernels had a panic here. This allowed
* any arbitrary user to panic the kernel by executing
* setting the FP exception registers to strange values
* and generating an emulation trap. The emulation and
* exception code must never be able to panic the
* kernel.
*/
return(UNIMPLEMENTEDEXCEPTION);
}
}
/*
* this routine is called by $emulation_trap to emulate a coprocessor
* instruction if one doesn't exist
*/
u_int
emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[])
{
u_int class, subop, major;
u_int fpu_type_flags;
/* All FP emulation code assumes that ints are 4-bytes in length */
VASSERT(sizeof(int) == 4);
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
major = get_major(ir);
class = get_class(ir);
if (class == 1) {
if (fpu_type_flags & PA2_0_FPU_FLAG)
subop = get_subop1_PA2_0(ir);
else
subop = get_subop1_PA1_1(ir);
}
else
subop = get_subop(ir);
switch (major) {
case 0x0C:
return(decode_0c(ir,class,subop,fpregs));
case 0x0E:
return(decode_0e(ir,class,subop,fpregs));
case 0x06:
return(decode_06(ir,fpregs));
case 0x26:
return(decode_26(ir,fpregs));
case 0x2E:
return(decode_2e(ir,fpregs));
default:
return(PA83_UNIMP_EXCP);
}
}
static u_int
decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[])
{
u_int r1,r2,t; /* operand register offsets */
u_int fmt; /* also sf for class 1 conversions */
u_int df; /* for class 1 conversions */
u_int *status;
u_int retval, local_status;
u_int fpu_type_flags;
if (ir == COPR_INST) {
fpregs[0] = EMULATION_VERSION << 11;
return(NOEXCEPTION);
}
status = &fpregs[0]; /* fp status register */
local_status = fpregs[0]; /* and local copy */
r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int);
if (r1 == 0) /* map fr0 source to constant zero */
r1 = fpzeroreg;
t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
if (t == 0 && class != 2) /* don't allow fr0 as a dest */
return(MAJOR_0C_EXCP);
fmt = extru(ir,fpfmtpos,2); /* get fmt completer */
switch (class) {
case 0:
switch (subop) {
case 0: /* COPR 0,0 emulated above*/
case 1:
return(MAJOR_0C_EXCP);
case 2: /* FCPY */
switch (fmt) {
case 2: /* illegal */
return(MAJOR_0C_EXCP);
case 3: /* quad */
t &= ~3; /* force to even reg #s */
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2: /* illegal */
return(MAJOR_0C_EXCP);
case 3: /* quad */
t &= ~3; /* force to even reg #s */
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and clear sign bit */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2: /* illegal */
return(MAJOR_0C_EXCP);
case 3: /* quad */
t &= ~3; /* force to even reg #s */
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and invert sign bit */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2: /* illegal */
return(MAJOR_0C_EXCP);
case 3: /* quad */
t &= ~3; /* force to even reg #s */
r1 &= ~3;
fpregs[t+3] = fpregs[r1+3];
fpregs[t+2] = fpregs[r1+2];
fallthrough;
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
/* copy and set sign bit */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
return(sgl_fsqrt(&fpregs[r1],0,
&fpregs[t],status));
case 1:
return(dbl_fsqrt(&fpregs[r1],0,
&fpregs[t],status));
case 2:
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 5: /* FRND */
switch (fmt) {
case 0:
return(sgl_frnd(&fpregs[r1],0,
&fpregs[t],status));
case 1:
return(dbl_frnd(&fpregs[r1],0,
&fpregs[t],status));
case 2:
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
} /* end of switch (subop) */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
if ((df & 2) || (fmt & 2)) {
/*
			 * fmts 2 and 3 are either illegal or not-implemented
			 * quad conversions
*/
return(MAJOR_0C_EXCP);
}
/*
* encode source and dest formats into 2 bits.
* high bit is source, low bit is dest.
* bit = 1 --> double precision
*/
fmt = (fmt << 1) | df;
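		/*
		 * Editor's note: e.g. a single-to-double conversion has
		 * fmt = 0 (sgl source) and df = 1 (dbl dest), so the
		 * combined value is 0b01 == 1, the sgl/dbl case in the
		 * switches below.
		 */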
switch (subop) {
case 0: /* FCNVFF */
switch(fmt) {
case 0: /* sgl/sgl */
return(MAJOR_0C_EXCP);
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int);
if (r2 == 0)
r2 = fpzeroreg;
if (fpu_type_flags & PA2_0_FPU_FLAG) {
/* FTEST if nullify bit set, otherwise FCMP */
if (extru(ir, fpnulpos, 1)) { /* FTEST */
switch (fmt) {
case 0:
/*
* arg0 is not used
* second param is the t field used for
* ftest,acc and ftest,rej
* third param is the subop (y-field)
*/
BUG();
/* Unsupported
* return(ftest(0L,extru(ir,fptpos,5),
* &fpregs[0],subop));
*/
case 1:
case 2:
case 3:
return(MAJOR_0C_EXCP);
}
} else { /* FCMP */
switch (fmt) {
case 0:
retval = sgl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 1:
retval = dbl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
}
} /* end of if for PA2.0 */
else { /* PA1.0 & PA1.1 */
switch (subop) {
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
return(MAJOR_0C_EXCP);
case 0: /* FCMP */
switch (fmt) {
case 0:
retval = sgl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 1:
retval = dbl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FTEST */
switch (fmt) {
case 0:
/*
* arg0 is not used
* second param is the t field used for
* ftest,acc and ftest,rej
* third param is the subop (y-field)
*/
BUG();
/* unsupported
* return(ftest(0L,extru(ir,fptpos,5),
* &fpregs[0],subop));
*/
case 1:
case 2:
case 3:
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int);
if (r2 == 0)
r2 = fpzeroreg;
switch (subop) {
case 5:
case 6:
case 7:
return(MAJOR_0C_EXCP);
case 0: /* FADD */
switch (fmt) {
case 0:
return(sgl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
return(sgl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 2: /* FMPY */
switch (fmt) {
case 0:
return(sgl_fmpy(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fmpy(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:
return(sgl_frem(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_frem(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 2: /* illegal */
case 3: /* quad not implemented */
return(MAJOR_0C_EXCP);
}
BUG();
} /* end of class 3 switch */
} /* end of switch(class) */
/* If we get here, something is really wrong! */
return(MAJOR_0C_EXCP);
}
static u_int
decode_0e(u_int ir, u_int class, u_int subop, u_int fpregs[])
{
u_int r1,r2,t; /* operand register offsets */
u_int fmt; /* also sf for class 1 conversions */
u_int df; /* dest format for class 1 conversions */
u_int *status;
u_int retval, local_status;
u_int fpu_type_flags;
status = &fpregs[0];
local_status = fpregs[0];
r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1)));
if (r1 == 0)
r1 = fpzeroreg;
t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
if (t == 0 && class != 2)
return(MAJOR_0E_EXCP);
if (class < 2) /* class 0 or 1 has 2 bit fmt */
fmt = extru(ir,fpfmtpos,2);
else /* class 2 and 3 have 1 bit fmt */
fmt = extru(ir,fp0efmtpos,1);
/*
* An undefined combination, double precision accessing the
* right half of a FPR, can get us into trouble.
* Let's just force proper alignment on it.
*/
if (fmt == DBL) {
r1 &= ~1;
if (class != 1)
t &= ~1;
}
switch (class) {
case 0:
switch (subop) {
case 0: /* unimplemented */
case 1:
return(MAJOR_0E_EXCP);
case 2: /* FCPY */
switch (fmt) {
case 2:
case 3:
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1];
return(NOEXCEPTION);
}
BUG();
case 3: /* FABS */
switch (fmt) {
case 2:
case 3:
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] & 0x7fffffff;
return(NOEXCEPTION);
}
BUG();
case 6: /* FNEG */
switch (fmt) {
case 2:
case 3:
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] ^ 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 7: /* FNEGABS */
switch (fmt) {
case 2:
case 3:
return(MAJOR_0E_EXCP);
case 1: /* double */
fpregs[t+1] = fpregs[r1+1];
fallthrough;
case 0: /* single */
fpregs[t] = fpregs[r1] | 0x80000000;
return(NOEXCEPTION);
}
BUG();
case 4: /* FSQRT */
switch (fmt) {
case 0:
return(sgl_fsqrt(&fpregs[r1],0,
&fpregs[t], status));
case 1:
return(dbl_fsqrt(&fpregs[r1],0,
&fpregs[t], status));
case 2:
case 3:
return(MAJOR_0E_EXCP);
}
BUG();
		case 5: /* FRND */
switch (fmt) {
case 0:
return(sgl_frnd(&fpregs[r1],0,
&fpregs[t], status));
case 1:
return(dbl_frnd(&fpregs[r1],0,
&fpregs[t], status));
case 2:
case 3:
return(MAJOR_0E_EXCP);
}
} /* end of switch (subop */
BUG();
case 1: /* class 1 */
df = extru(ir,fpdfpos,2); /* get dest format */
/*
* Fix Crashme problem (writing to 31R in double precision)
* here too.
*/
if (df == DBL) {
t &= ~1;
}
if ((df & 2) || (fmt & 2))
return(MAJOR_0E_EXCP);
fmt = (fmt << 1) | df;
switch (subop) {
case 0: /* FCNVFF */
switch(fmt) {
case 0: /* sgl/sgl */
return(MAJOR_0E_EXCP);
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvff(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvff(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(MAJOR_0E_EXCP);
}
BUG();
case 1: /* FCNVXF */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvxf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 2: /* FCNVFX */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfx(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 3: /* FCNVFXT */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 5: /* FCNVUF (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvuf(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 6: /* FCNVFU (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfu(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 7: /* FCNVFUT (PA2.0 only) */
switch(fmt) {
case 0: /* sgl/sgl */
return(sgl_to_sgl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 1: /* sgl/dbl */
return(sgl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 2: /* dbl/sgl */
return(dbl_to_sgl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
case 3: /* dbl/dbl */
return(dbl_to_dbl_fcnvfut(&fpregs[r1],0,
&fpregs[t],status));
}
BUG();
case 4: /* undefined */
return(MAJOR_0C_EXCP);
} /* end of switch subop */
BUG();
case 2: /* class 2 */
/*
* Be careful out there.
* Crashme can generate cases where FR31R is specified
* as the source or target of a double precision operation.
* Since we just pass the address of the floating-point
* register to the emulation routines, this can cause
* corruption of fpzeroreg.
*/
if (fmt == DBL)
r2 = (extru(ir,fpr2pos,5)<<1);
else
r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS];
if (r2 == 0)
r2 = fpzeroreg;
if (fpu_type_flags & PA2_0_FPU_FLAG) {
/* FTEST if nullify bit set, otherwise FCMP */
if (extru(ir, fpnulpos, 1)) { /* FTEST */
/* not legal */
return(MAJOR_0E_EXCP);
} else { /* FCMP */
switch (fmt) {
/*
* fmt is only 1 bit long
*/
case 0:
retval = sgl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 1:
retval = dbl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
}
}
} /* end of if for PA2.0 */
else { /* PA1.0 & PA1.1 */
switch (subop) {
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
return(MAJOR_0E_EXCP);
case 0: /* FCMP */
switch (fmt) {
/*
* fmt is only 1 bit long
*/
case 0:
retval = sgl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
case 1:
retval = dbl_fcmp(&fpregs[r1],
&fpregs[r2],extru(ir,fptpos,5),
&local_status);
update_status_cbit(status,local_status,
fpu_type_flags, subop);
return(retval);
}
} /* end of switch subop */
} /* end of else for PA1.0 & PA1.1 */
BUG();
case 3: /* class 3 */
/*
* Be careful out there.
* Crashme can generate cases where FR31R is specified
* as the source or target of a double precision operation.
* Since we just pass the address of the floating-point
* register to the emulation routines, this can cause
* corruption of fpzeroreg.
*/
if (fmt == DBL)
r2 = (extru(ir,fpr2pos,5)<<1);
else
r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1)));
if (r2 == 0)
r2 = fpzeroreg;
switch (subop) {
case 5:
case 6:
case 7:
return(MAJOR_0E_EXCP);
		/*
		 * Note that fmt is only 1 bit for class 3
		 */
case 0: /* FADD */
switch (fmt) {
case 0:
return(sgl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fadd(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 1: /* FSUB */
switch (fmt) {
case 0:
return(sgl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fsub(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 2: /* FMPY or XMPYU */
/*
* check for integer multiply (x bit set)
*/
if (extru(ir,fpxpos,1)) {
/*
* emulate XMPYU
*/
switch (fmt) {
case 0:
/*
* bad instruction if t specifies
* the right half of a register
*/
if (t & 1)
return(MAJOR_0E_EXCP);
BUG();
/* unsupported
* impyu(&fpregs[r1],&fpregs[r2],
* &fpregs[t]);
*/
return(NOEXCEPTION);
case 1:
return(MAJOR_0E_EXCP);
}
}
else { /* FMPY */
switch (fmt) {
case 0:
return(sgl_fmpy(&fpregs[r1],
&fpregs[r2],&fpregs[t],status));
case 1:
return(dbl_fmpy(&fpregs[r1],
&fpregs[r2],&fpregs[t],status));
}
}
BUG();
case 3: /* FDIV */
switch (fmt) {
case 0:
return(sgl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_fdiv(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
BUG();
case 4: /* FREM */
switch (fmt) {
case 0:
return(sgl_frem(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
case 1:
return(dbl_frem(&fpregs[r1],&fpregs[r2],
&fpregs[t],status));
}
} /* end of class 3 switch */
} /* end of switch(class) */
/* If we get here, something is really wrong! */
return(MAJOR_0E_EXCP);
}
/*
* routine to decode the 06 (FMPYADD and FMPYCFXT) instruction
*/
static u_int
decode_06(u_int ir, u_int fpregs[])
{
u_int rm1, rm2, tm, ra, ta; /* operands */
u_int fmt;
u_int error = 0;
u_int status;
u_int fpu_type_flags;
union {
double dbl;
float flt;
struct { u_int i1; u_int i2; } ints;
} mtmp, atmp;
status = fpregs[0]; /* use a local copy of status reg */
fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */
fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */
if (fmt == 0) { /* DBL */
rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
if (rm1 == 0)
rm1 = fpzeroreg;
rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
if (rm2 == 0)
rm2 = fpzeroreg;
tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
if (tm == 0)
return(MAJOR_06_EXCP);
ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
if (ta == 0)
return(MAJOR_06_EXCP);
if (fpu_type_flags & TIMEX_ROLEX_FPU_MASK) {
if (ra == 0) {
/* special case FMPYCFXT, see sgl case below */
if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],
&mtmp.ints.i1,&status))
error = 1;
if (dbl_to_sgl_fcnvfxt(&fpregs[ta],
&atmp.ints.i1,&atmp.ints.i1,&status))
error = 1;
}
else {
if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
&status))
error = 1;
if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
&status))
error = 1;
}
}
else
{
if (ra == 0)
ra = fpzeroreg;
if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
&status))
error = 1;
if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
&status))
error = 1;
}
if (error)
return(MAJOR_06_EXCP);
else {
/* copy results */
fpregs[tm] = mtmp.ints.i1;
fpregs[tm+1] = mtmp.ints.i2;
fpregs[ta] = atmp.ints.i1;
fpregs[ta+1] = atmp.ints.i2;
fpregs[0] = status;
return(NOEXCEPTION);
}
}
else { /* SGL */
/*
* calculate offsets for single precision numbers
* See table 6-14 in PA-89 architecture for mapping
*/
rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */
rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */
rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */
rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */
tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */
tm |= extru(ir,fptmpos-4,1); /* add right word offset */
ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */
ra |= extru(ir,fprapos-4,1); /* add right word offset */
ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */
ta |= extru(ir,fptapos-4,1); /* add right word offset */
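		/*
		 * Editor's worked example (as read from the code above,
		 * not from table 6-14 itself): a 5-bit field of 0b00101
		 * has low nibble 5 and leftmost bit 0, so the offset is
		 * ((5 | 0x10) << 1) | 0 == 42, the left word of %fr21;
		 * 0b10101 yields 43, the right word of %fr21.  The | 0x10
		 * means single operands here can only name %fr16..%fr31.
		 */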
if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) {
/* special case FMPYCFXT (really 0)
			 * This instruction is only present on the Timex and
			 * Rolex FPUs, so if this is the special case on one
			 * of those FPUs we run the FMPYCFXT instruction.
*/
if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
&status))
error = 1;
if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1,
&atmp.ints.i1,&status))
error = 1;
}
else {
if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,
&status))
error = 1;
if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,
&status))
error = 1;
}
if (error)
return(MAJOR_06_EXCP);
else {
/* copy results */
fpregs[tm] = mtmp.ints.i1;
fpregs[ta] = atmp.ints.i1;
fpregs[0] = status;
return(NOEXCEPTION);
}
}
}
/*
* routine to decode the 26 (FMPYSUB) instruction
*/
static u_int
decode_26(u_int ir, u_int fpregs[])
{
u_int rm1, rm2, tm, ra, ta; /* operands */
u_int fmt;
u_int error = 0;
u_int status;
union {
double dbl;
float flt;
struct { u_int i1; u_int i2; } ints;
} mtmp, atmp;
status = fpregs[0];
fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */
if (fmt == 0) { /* DBL */
rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int);
if (rm1 == 0)
rm1 = fpzeroreg;
rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int);
if (rm2 == 0)
rm2 = fpzeroreg;
tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int);
if (tm == 0)
return(MAJOR_26_EXCP);
ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int);
if (ra == 0)
return(MAJOR_26_EXCP);
ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int);
if (ta == 0)
return(MAJOR_26_EXCP);
if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
error = 1;
if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
error = 1;
if (error)
return(MAJOR_26_EXCP);
else {
/* copy results */
fpregs[tm] = mtmp.ints.i1;
fpregs[tm+1] = mtmp.ints.i2;
fpregs[ta] = atmp.ints.i1;
fpregs[ta+1] = atmp.ints.i2;
fpregs[0] = status;
return(NOEXCEPTION);
}
}
else { /* SGL */
/*
* calculate offsets for single precision numbers
* See table 6-14 in PA-89 architecture for mapping
*/
rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */
rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */
rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */
rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */
tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */
tm |= extru(ir,fptmpos-4,1); /* add right word offset */
ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */
ra |= extru(ir,fprapos-4,1); /* add right word offset */
ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */
ta |= extru(ir,fptapos-4,1); /* add right word offset */
if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status))
error = 1;
if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status))
error = 1;
if (error)
return(MAJOR_26_EXCP);
else {
/* copy results */
fpregs[tm] = mtmp.ints.i1;
fpregs[ta] = atmp.ints.i1;
fpregs[0] = status;
return(NOEXCEPTION);
}
}
}
/*
* routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions
*/
static u_int
decode_2e(u_int ir, u_int fpregs[])
{
u_int rm1, rm2, ra, t; /* operands */
u_int fmt;
fmt = extru(ir,fpfmtpos,1); /* get fmt completer */
if (fmt == DBL) { /* DBL */
rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int);
if (rm1 == 0)
rm1 = fpzeroreg;
rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int);
if (rm2 == 0)
rm2 = fpzeroreg;
ra = ((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) *
sizeof(double)/sizeof(u_int);
if (ra == 0)
ra = fpzeroreg;
t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int);
if (t == 0)
return(MAJOR_2E_EXCP);
if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
&fpregs[ra], &fpregs[0], &fpregs[t]));
} else {
return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
&fpregs[ra], &fpregs[0], &fpregs[t]));
}
} /* end DBL */
else { /* SGL */
rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1));
if (rm1 == 0)
rm1 = fpzeroreg;
rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1));
if (rm2 == 0)
rm2 = fpzeroreg;
ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3);
if (ra == 0)
ra = fpzeroreg;
t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1)));
if (t == 0)
return(MAJOR_2E_EXCP);
if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */
return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2],
&fpregs[ra], &fpregs[0], &fpregs[t]));
} else {
return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2],
&fpregs[ra], &fpregs[0], &fpregs[t]));
}
} /* end SGL */
}
/*
* update_status_cbit
*
* This routine returns the correct FP status register value in
* *status, based on the C-bit & V-bit returned by the FCMP
* emulation routine in new_status. The architecture type
* (PA83, PA89 or PA2.0) is available in fpu_type. The y_field
* and the architecture type are used to determine what flavor
* of FCMP is being emulated.
*/
static void
update_status_cbit(u_int *status, u_int new_status, u_int fpu_type,
		   u_int y_field)
{
/*
* For PA89 FPU's which implement the Compare Queue and
* for PA2.0 FPU's, update the Compare Queue if the y-field = 0,
* otherwise update the specified bit in the Compare Array.
* Note that the y-field will always be 0 for non-PA2.0 FPU's.
*/
if ((fpu_type & TIMEX_EXTEN_FLAG) ||
(fpu_type & ROLEX_EXTEN_FLAG) ||
(fpu_type & PA2_0_FPU_FLAG)) {
if (y_field == 0) {
*status = ((*status & 0x04000000) >> 5) | /* old Cbit */
((*status & 0x003ff000) >> 1) | /* old CQ */
(new_status & 0xffc007ff); /* all other bits*/
} else {
*status = (*status & 0x04000000) | /* old Cbit */
((new_status & 0x04000000) >> (y_field+4)) |
(new_status & ~0x04000000 & /* other bits */
~(0x04000000 >> (y_field+4)));
}
}
/* if PA83, just update the C-bit */
else {
*status = new_status;
}
}
| linux-master | arch/parisc/math-emu/fpudispatch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfdiv.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Divide
*
* External Interfaces:
* dbl_fdiv(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 * <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Divide
*/
int
dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
dbl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* set sign bit of result
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
/*
* invalid since both operands
* are infinity
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* return zero
*/
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* check for division by zero
*/
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/* invalid since both operands are zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
if (Is_divisionbyzerotrap_enabled())
return(DIVISIONBYZEROEXCEPTION);
Set_divisionbyzeroflag();
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
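	/*
	 * Editor's check of the rebiasing above: for 8.0 / 2.0 the biased
	 * exponents are 1026 and 1024, so dest_exponent becomes
	 * 1026 - 1024 + 1023 = 1025 = 1023 + 2, i.e. 2^2, matching
	 * 8.0 / 2.0 = 4.0.
	 */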
/*
* Generate mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, want to normalize */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
dest_exponent+=8;
Dbl_leftshiftby8(opnd2p1,opnd2p2);
}
if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
dest_exponent+=4;
Dbl_leftshiftby4(opnd2p1,opnd2p2);
}
while (Dbl_iszero_hidden(opnd2p1)) {
dest_exponent++;
Dbl_leftshiftby1(opnd2p1,opnd2p2);
}
}
/* Divide the source mantissas */
/*
* A non-restoring divide algorithm is used.
*/
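	/*
	 * Editor's sketch -- a single-word analogue of the loop below
	 * (illustration only; the real code runs on two-word mantissas
	 * with Twoword_add/Twoword_subtract and reads the sign through
	 * Dbl_iszero_sign):
	 *
	 *	unsigned int nonrestoring_div(unsigned int n, unsigned int d,
	 *				      int k, unsigned int *rem)
	 *	{
	 *		long long r = n;	// signed partial remainder
	 *		unsigned int q = 0;
	 *		int i;
	 *
	 *		for (i = k - 1; i >= 0; i--) {
	 *			if (r >= 0)	// positive: subtract d * 2^i
	 *				r -= (long long)d << i;
	 *			else		// negative: add it back instead
	 *				r += (long long)d << i;
	 *			q <<= 1;
	 *			if (r >= 0)	// quotient bit = new sign
	 *				q |= 1;
	 *		}
	 *		if (r < 0)	// final restoring correction
	 *			r += d;
	 *		*rem = (unsigned int)r;
	 *		return q;	// assumes d != 0 and n < (d << k)
	 *	}
	 *
	 * Unlike restoring division, a negative partial remainder is not
	 * corrected on the spot: the next iteration adds instead of
	 * subtracting, which is what the Twoword_add branch below does.
	 */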
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
Dbl_setzero(opnd3p1,opnd3p2);
for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
}
}
if (count <= DBL_P) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_setone_lowmantissap2(opnd3p2);
Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
if (Dbl_iszero_hidden(opnd3p1)) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
dest_exponent--;
}
}
else {
if (Dbl_iszero_hidden(opnd3p1)) {
/* need to get one more bit of result */
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
dest_exponent--;
}
if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
}
inexact = guardbit | stickybit;
/*
* round result
*/
if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
Dbl_clear_signexponent(opnd3p1);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
}
if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
}
Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
/*
* Test for overflow
*/
if (dest_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
inexact = TRUE;
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(UNDERFLOWEXCEPTION);
}
/* Determine if should set underflow flag */
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
stickybit,inexact);
/* return rounded number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
}
else Dbl_set_exponent(resultp1,dest_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/dfdiv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvff.c $Revision: 1.1 $
*
* Purpose:
* Single Floating-point to Double Floating-point
* Double Floating-point to Single Floating-point
*
* External Interfaces:
* dbl_to_sgl_fcnvff(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvff(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 * <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Single Floating-point to Double Floating-point
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvff(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int src, resultp1, resultp2;
register int src_exponent;
src = *srcptr;
src_exponent = Sgl_exponent(src);
Dbl_allp1(resultp1) = Sgl_all(src); /* set sign of result */
/*
* Test for NaN or infinity
*/
if (src_exponent == SGL_INFINITY_EXPONENT) {
/*
* determine if NaN or infinity
*/
if (Sgl_iszero_mantissa(src)) {
/*
* is infinity; want to return double infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(src)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
else {
Set_invalidflag();
Sgl_set_quiet(src);
}
}
/*
* NaN is quiet, return as double NaN
*/
Dbl_setinfinity_exponent(resultp1);
Sgl_to_dbl_mantissa(src,resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Test for zero or denormalized
*/
if (src_exponent == 0) {
/*
* determine if zero or denormalized
*/
if (Sgl_isnotzero_mantissa(src)) {
/*
* is denormalized; want to normalize
*/
Sgl_clear_signexponent(src);
Sgl_leftshiftby1(src);
Sgl_normalize(src,src_exponent);
Sgl_to_dbl_exponent(src_exponent,resultp1);
Sgl_to_dbl_mantissa(src,resultp1,resultp2);
}
else {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* No special cases, just complete the conversion
*/
Sgl_to_dbl_exponent(src_exponent, resultp1);
Sgl_to_dbl_mantissa(Sgl_mantissa(src), resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
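/*
 * Editor's sanity check for the conversion above: single-precision 1.0f
 * is 0x3f800000 (biased exponent 127) and converts to the double bit
 * pattern 0x3ff0000000000000 (biased exponent 1023 = 127 - 127 + 1023,
 * assuming the usual IEEE 754 biases), with the 23-bit mantissa
 * left-justified in the 52-bit double mantissa.
 */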
/*
* Double Floating-point to Single Floating-point
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvff(
dbl_floating_point *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, result;
register int src_exponent, dest_exponent, dest_mantissa;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
register boolean lsb_odd = FALSE;
boolean is_tiny = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1);
Sgl_all(result) = Dbl_allp1(srcp1); /* set sign of result */
/*
* Test for NaN or infinity
*/
if (src_exponent == DBL_INFINITY_EXPONENT) {
/*
* determine if NaN or infinity
*/
if (Dbl_iszero_mantissa(srcp1,srcp2)) {
/*
* is infinity; want to return single infinity
*/
Sgl_setinfinity_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(srcp1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
else {
Set_invalidflag();
/* make NaN quiet */
Dbl_set_quiet(srcp1);
}
}
/*
* NaN is quiet, return as single NaN
*/
Sgl_setinfinity_exponent(result);
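/*
* Build the single mantissa from the high-order 23 bits of the
* 52-bit double mantissa: 20 bits from the first word plus the
* top 3 bits of the second.
*/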
Sgl_set_mantissa(result,Dallp1(srcp1)<<3 | Dallp2(srcp2)>>29);
if (Sgl_iszero_mantissa(result)) Sgl_set_quiet(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate result
*/
Dbl_to_sgl_exponent(src_exponent,dest_exponent);
if (dest_exponent > 0) {
Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,guardbit,
stickybit,lsb_odd);
}
else {
if (Dbl_iszero_exponentmantissa(srcp1,srcp2)){
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
if (Is_underflowtrap_enabled()) {
Dbl_to_sgl_mantissa(srcp1,srcp2,dest_mantissa,inexact,
guardbit,stickybit,lsb_odd);
}
else {
/* compute result, determine inexact info,
* and set Underflowflag if appropriate
*/
Dbl_to_sgl_denormalized(srcp1,srcp2,dest_exponent,
dest_mantissa,inexact,guardbit,stickybit,lsb_odd,
is_tiny);
}
}
/*
* Now round result if not exact
*/
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result)) dest_mantissa++;
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result)) dest_mantissa++;
break;
case ROUNDNEAREST:
if (guardbit) {
if (stickybit || lsb_odd) dest_mantissa++;
}
}
}
Sgl_set_exponentmantissa(result,dest_mantissa);
/*
* check for mantissa overflow after rounding
*/
if ((dest_exponent>0 || Is_underflowtrap_enabled()) &&
Sgl_isone_hidden(result)) dest_exponent++;
/*
* Test for overflow
*/
if (dest_exponent >= SGL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Check for gross overflow
*/
if (dest_exponent >= SGL_INFINITY_EXPONENT+SGL_WRAP)
return(UNIMPLEMENTEDEXCEPTION);
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION|INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
Set_overflowflag();
inexact = TRUE;
/* set result to infinity or largest number */
Sgl_setoverflow(result);
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Check for gross underflow
*/
if (dest_exponent <= -(SGL_WRAP))
return(UNIMPLEMENTEDEXCEPTION);
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,unfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(UNDERFLOWEXCEPTION|INEXACTEXCEPTION);
else Set_inexactflag();
return(UNDERFLOWEXCEPTION);
}
/*
* result is denormalized or signed zero
*/
if (inexact && is_tiny) Set_underflowflag();
}
else Sgl_set_exponent(result,dest_exponent);
*dstptr = result;
/*
* Trap if inexact trap is enabled
*/
if (inexact)
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvff.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* Purpose:
* Single Floating-point Round to Integer
* Double Floating-point Round to Integer
* Quad Floating-point Round to Integer (returns unimplemented)
*
* External Interfaces:
* dbl_frnd(srcptr,nullptr,dstptr,status)
* sgl_frnd(srcptr,nullptr,dstptr,status)
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Single Floating-point Round to Integer
*/
/*ARGSUSED*/
int
sgl_frnd(sgl_floating_point *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int src, result;
register int src_exponent;
register boolean inexact = FALSE;
src = *srcptr;
/*
* check source operand for NaN or infinity
*/
if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
/*
* is signaling NaN?
*/
if (Sgl_isone_signaling(src)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(src);
}
/*
* return quiet NaN or infinity
*/
*dstptr = src;
return(NOEXCEPTION);
}
/*
* Need to round?
*/
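/*
* If the unbiased exponent is at least SGL_P-1, every mantissa bit
* lies at or above the binary point, so the value is already an
* integer.
*/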
if ((src_exponent -= SGL_BIAS) >= SGL_P - 1) {
*dstptr = src;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
Sgl_clear_exponent_set_hidden(src);
result = src;
Sgl_rightshift(result,(SGL_P-1) - (src_exponent));
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) Sgl_increment(result);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) Sgl_increment(result);
break;
case ROUNDNEAREST:
if (Sgl_isone_roundbit(src,src_exponent))
if (Sgl_isone_stickybit(src,src_exponent)
|| (Sgl_isone_lowmantissa(result)))
Sgl_increment(result);
}
}
Sgl_leftshift(result,(SGL_P-1) - (src_exponent));
if (Sgl_isone_hiddenoverflow(result))
Sgl_set_exponent(result,src_exponent + (SGL_BIAS+1));
else Sgl_set_exponent(result,src_exponent + SGL_BIAS);
}
else {
result = src; /* set sign */
Sgl_setzero_exponentmantissa(result);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src))
Sgl_set_exponent(result,SGL_BIAS);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src))
Sgl_set_exponent(result,SGL_BIAS);
break;
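/*
* Magnitudes in (1/2, 1) round to +/-1; exactly 1/2 (zero
* mantissa) ties to the even value, which is zero.
*/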
case ROUNDNEAREST:
if (src_exponent == -1)
if (Sgl_isnotzero_mantissa(src))
Sgl_set_exponent(result,SGL_BIAS);
}
}
}
*dstptr = result;
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
/*
* Double Floating-point Round to Integer
*/
/*ARGSUSED*/
int
dbl_frnd(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, resultp1, resultp2;
register int src_exponent;
register boolean inexact = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
/*
* check source operand for NaN or infinity
*/
if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
/*
* is signaling NaN?
*/
if (Dbl_isone_signaling(srcp1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(srcp1);
}
/*
* return quiet NaN or infinity
*/
Dbl_copytoptr(srcp1,srcp2,dstptr);
return(NOEXCEPTION);
}
/*
* Need to round?
*/
if ((src_exponent -= DBL_BIAS) >= DBL_P - 1) {
Dbl_copytoptr(srcp1,srcp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
Dbl_clear_exponent_set_hidden(srcp1);
resultp1 = srcp1;
resultp2 = srcp2;
Dbl_rightshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1))
Dbl_increment(resultp1,resultp2);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1))
Dbl_increment(resultp1,resultp2);
break;
case ROUNDNEAREST:
if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent)
|| (Dbl_isone_lowmantissap2(resultp2)))
Dbl_increment(resultp1,resultp2);
}
}
Dbl_leftshift(resultp1,resultp2,(DBL_P-1) - (src_exponent));
if (Dbl_isone_hiddenoverflow(resultp1))
Dbl_set_exponent(resultp1,src_exponent + (DBL_BIAS+1));
else Dbl_set_exponent(resultp1,src_exponent + DBL_BIAS);
}
else {
resultp1 = srcp1; /* set sign */
Dbl_setzero_exponentmantissa(resultp1,resultp2);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1))
Dbl_set_exponent(resultp1,DBL_BIAS);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1))
Dbl_set_exponent(resultp1,DBL_BIAS);
break;
case ROUNDNEAREST:
if (src_exponent == -1)
if (Dbl_isnotzero_mantissa(srcp1,srcp2))
Dbl_set_exponent(resultp1,DBL_BIAS);
}
}
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/frnd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfrem.c $Revision: 1.1 $
*
* Purpose:
* Single Precision Floating-point Remainder
*
* External Interfaces:
* sgl_frem(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single Precision Floating-point Remainder
*/
int
sgl_frem (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
sgl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1, opnd2, result;
register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
register boolean roundup = FALSE;
opnd1 = *srcptr1;
opnd2 = *srcptr2;
/*
* check first operand for NaN's or infinity
*/
if ((opnd1_exponent = Sgl_exponent(opnd1)) == SGL_INFINITY_EXPONENT) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2)) {
/* invalid since first operand is infinity */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = opnd1;
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if ((opnd2_exponent = Sgl_exponent(opnd2)) == SGL_INFINITY_EXPONENT) {
if (Sgl_iszero_mantissa(opnd2)) {
/*
* return first operand
*/
*dstptr = opnd1;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* return quiet NaN
*/
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* check second operand for zero
*/
if (Sgl_iszero_exponentmantissa(opnd2)) {
/* invalid since second operand is zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* get sign of result
*/
result = opnd1;
/*
* check for denormalized operands
*/
if (opnd1_exponent == 0) {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
*dstptr = opnd1;
return(NOEXCEPTION);
}
/* normalize, then continue */
opnd1_exponent = 1;
Sgl_normalize(opnd1,opnd1_exponent);
}
else {
Sgl_clear_signexponent_set_hidden(opnd1);
}
if (opnd2_exponent == 0) {
/* normalize, then continue */
opnd2_exponent = 1;
Sgl_normalize(opnd2,opnd2_exponent);
}
else {
Sgl_clear_signexponent_set_hidden(opnd2);
}
/* find result exponent and divide step loop count */
dest_exponent = opnd2_exponent - 1;
stepcount = opnd1_exponent - opnd2_exponent;
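/*
* Each subtract-and-shift step below produces one quotient bit,
* so the exponent difference counts the steps needed to reduce
* the remainder below opnd2.
*/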
/*
* check for opnd1/opnd2 < 1
*/
if (stepcount < 0) {
/*
* check for opnd1/opnd2 > 1/2
*
* In this case n will round to 1, so
* r = opnd1 - opnd2
*/
if (stepcount == -1 && Sgl_isgreaterthan(opnd1,opnd2)) {
Sgl_all(result) = ~Sgl_all(result); /* set sign */
/* align opnd2 with opnd1 */
Sgl_leftshiftby1(opnd2);
Sgl_subtract(opnd2,opnd1,opnd2);
/* now normalize */
while (Sgl_iszero_hidden(opnd2)) {
Sgl_leftshiftby1(opnd2);
dest_exponent--;
}
Sgl_set_exponentmantissa(result,opnd2);
goto testforunderflow;
}
/*
* opnd1/opnd2 <= 1/2
*
* In this case n will round to zero, so
* r = opnd1
*/
Sgl_set_exponentmantissa(result,opnd1);
dest_exponent = opnd1_exponent;
goto testforunderflow;
}
/*
* Generate result
*
* Do iterative subtract until remainder is less than operand 2.
*/
while (stepcount-- > 0 && Sgl_all(opnd1)) {
if (Sgl_isnotlessthan(opnd1,opnd2))
Sgl_subtract(opnd1,opnd2,opnd1);
Sgl_leftshiftby1(opnd1);
}
/*
* Do last subtract, then determine which way to round if remainder
* is exactly 1/2 of opnd2
*/
if (Sgl_isnotlessthan(opnd1,opnd2)) {
Sgl_subtract(opnd1,opnd2,opnd1);
roundup = TRUE;
}
if (stepcount > 0 || Sgl_iszero(opnd1)) {
/* division is exact, remainder is zero */
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Check for cases where opnd1/opnd2 < n
*
* In this case the result's sign will be opposite that of
* opnd1. The mantissa also needs some correction.
*/
Sgl_leftshiftby1(opnd1);
if (Sgl_isgreaterthan(opnd1,opnd2)) {
Sgl_invert_sign(result);
Sgl_subtract((opnd2<<1),opnd1,opnd1);
}
/* check for remainder being exactly 1/2 of opnd2 */
else if (Sgl_isequal(opnd1,opnd2) && roundup) {
Sgl_invert_sign(result);
}
/* normalize result's mantissa */
while (Sgl_iszero_hidden(opnd1)) {
dest_exponent--;
Sgl_leftshiftby1(opnd1);
}
Sgl_set_exponentmantissa(result,opnd1);
/*
* Test for underflow
*/
testforunderflow:
if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,unfl);
*dstptr = result;
/* frem is always exact */
return(UNDERFLOWEXCEPTION);
}
/*
* denormalize result or set to signed zero
*/
if (dest_exponent >= (1 - SGL_P)) {
Sgl_rightshift_exponentmantissa(result,1-dest_exponent);
}
else {
Sgl_setzero_exponentmantissa(result);
}
}
else Sgl_set_exponent(result,dest_exponent);
*dstptr = result;
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/sfrem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/fp/decode_exc.c $ Revision: $
*
* Purpose:
* <<please update with a synopsis of the functionality provided by this file>>
*
* External Interfaces:
* <<the following list was autogenerated, please review>>
* decode_fpu(Fpu_register, trap_counts)
*
* Internal Interfaces:
* <<please update>>
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include <linux/kernel.h>
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/* #include "types.h" */
#include <asm/signal.h>
#include <asm/siginfo.h>
/* #include <machine/sys/mdep_private.h> */
#undef Fpustatus_register
#define Fpustatus_register Fpu_register[0]
/* General definitions */
#define DOESTRAP 1
#define NOTRAP 0
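/*
* Pack a signal number (high-order byte) and si_code (low-order
* 24 bits) into the single value returned by decode_fpu().
*/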
#define SIGNALCODE(signal, code) ((signal) << 24 | (code))
#define copropbit (1<<(31-2)) /* PA bit position 2 (msb is bit 0) */
#define opclass 9 /* bits 21 & 22 */
#define fmtbits 11 /* bits 19 & 20 */
#define df 13 /* bits 17 & 18 */
#define twobits 3 /* mask low-order 2 bits */
#define fivebits 31 /* mask low-order 5 bits */
#define MAX_EXCP_REG 7 /* number of exception registers to check */
/* Exception register definitions */
#define Excp_type(index) Exceptiontype(Fpu_register[index])
#define Excp_instr(index) Instructionfield(Fpu_register[index])
#define Clear_excp_register(index) Allexception(Fpu_register[index]) = 0
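/*
* Extract the format field of the faulting instruction: opclass-1
* instructions (the conversions, which carry a separate destination
* format) keep it in bits 17-18 (df), all others in bits 19-20
* (fmtbits).
*/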
#define Excp_format() \
(current_ir >> ((current_ir>>opclass & twobits) == 1 ? df : fmtbits) & twobits)
/* Miscellaneous definitions */
#define Fpu_sgl(index) Fpu_register[index*2]
#define Fpu_dblp1(index) Fpu_register[index*2]
#define Fpu_dblp2(index) Fpu_register[(index*2)+1]
#define Fpu_quadp1(index) Fpu_register[index*2]
#define Fpu_quadp2(index) Fpu_register[(index*2)+1]
#define Fpu_quadp3(index) Fpu_register[(index*2)+2]
#define Fpu_quadp4(index) Fpu_register[(index*2)+3]
/* Single precision floating-point definitions */
#ifndef Sgl_decrement
# define Sgl_decrement(sgl_value) Sall(sgl_value)--
#endif
/* Double precision floating-point definitions */
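/*
* Decrement a two-word (64-bit) value: when the low word wraps
* past zero, borrow one from the high word.
*/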
#ifndef Dbl_decrement
# define Dbl_decrement(dbl_valuep1,dbl_valuep2) \
if ((Dallp2(dbl_valuep2)--) == 0) Dallp1(dbl_valuep1)--
#endif
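/*
* Snapshot the accumulated status flags (top five bits of the
* status register) into aflags and OR the bits saved in bflags
* back in, so no flags are lost on any exit path.
*/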
#define update_trap_counts(Fpu_register, aflags, bflags, trap_counts) { \
aflags=(Fpu_register[0])>>27; /* assumes zero fill. 32 bit */ \
Fpu_register[0] |= bflags; \
}
u_int
decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[])
{
unsigned int current_ir, excp;
int target, exception_index = 1;
boolean inexact;
unsigned int aflags;
unsigned int bflags;
unsigned int excptype;
/* Keep stats on how many floating point exceptions (based on type)
* that happen. Want to keep this overhead low, but still provide
* some information to the customer. All exits from this routine
* need to restore Fpu_register[0]
*/
bflags=(Fpu_register[0] & 0xf8000000);
Fpu_register[0] &= 0x07ffffff;
/* exception_index is used to index the exception register queue. It
* always points at the last register that contains a valid exception. A
* zero value implies no exceptions (also the initialized value). Setting
* the T-bit resets the exception_index to zero.
*/
/*
* Check for reserved-op exception. A reserved-op exception does not
* set any exception registers nor does it set the T-bit. If the T-bit
* is not set then a reserved-op exception occurred.
*
* At some point, we may want to report reserved op exceptions as
* illegal instructions.
*/
if (!Is_tbit_set()) {
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
return SIGNALCODE(SIGILL, ILL_COPROC);
}
/*
* Is a coprocessor op.
*
* Now we need to determine what type of exception occurred.
*/
for (exception_index=1; exception_index<=MAX_EXCP_REG; exception_index++) {
current_ir = Excp_instr(exception_index);
/*
* On PA89: there are 5 different unimplemented exception
* codes: 0x1, 0x9, 0xb, 0x3, and 0x23. PA-RISC 2.0 adds
* another, 0x2b. Only these have the low order bit set.
*/
excptype = Excp_type(exception_index);
if (excptype & UNIMPLEMENTEDEXCEPTION) {
/*
* Clear T-bit and exception register so that
* we can tell if a trap really occurs while
* emulating the instruction.
*/
Clear_tbit();
Clear_excp_register(exception_index);
/*
* Now emulate this instruction. If a trap occurs,
* fpudispatch will return a non-zero number
*/
excp = fpudispatch(current_ir,excptype,0,Fpu_register);
/* accumulate the status flags, don't lose them as in hpux */
if (excp) {
/*
* We now need to make sure that the T-bit and the
* exception register contain the correct values
* before continuing.
*/
/*
* Set t-bit since it might still be needed for a
* subsequent real trap (I don't understand fully -PB)
*/
Set_tbit();
/* some of the following code uses
* Excp_type(exception_index) so fix that up */
Set_exceptiontype_and_instr_field(excp,current_ir,
Fpu_register[exception_index]);
if (excp == UNIMPLEMENTEDEXCEPTION) {
/*
* it is really unimplemented, so restore the
* TIMEX extended unimplemented exception code
*/
excp = excptype;
update_trap_counts(Fpu_register, aflags, bflags,
trap_counts);
return SIGNALCODE(SIGILL, ILL_COPROC);
}
/* some of the following code uses excptype, so
* fix that up too */
excptype = excp;
}
/* handle exceptions other than the real UNIMPLEMENTED the
* same way as if the hardware had caused them */
if (excp == NOEXCEPTION)
/* For now use 'break', should technically be 'continue' */
break;
}
/*
* In PA89, the underflow exception has been extended to encode
* additional information. The exception looks like pp01x0,
* where x is 1 if the result is inexact, and pp represents the
* inexact bit (I) and the round-away bit (RA).
*/
if (excptype & UNDERFLOWEXCEPTION) {
/* check for underflow trap enabled */
if (Is_underflowtrap_enabled()) {
update_trap_counts(Fpu_register, aflags, bflags,
trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTUND);
} else {
/*
* Isn't a real trap; we need to
* return the default value.
*/
target = current_ir & fivebits;
#ifndef lint
if (Ibit(Fpu_register[exception_index])) inexact = TRUE;
else inexact = FALSE;
#endif
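/*
* Recompute the untrapped default result: undo the hardware's
* round-away increment if RA is set, then denormalize the
* mantissa.
*/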
switch (Excp_format()) {
case SGL:
/*
* If ra (round-away) is set, will
* want to undo the rounding done
* by the hardware.
*/
if (Rabit(Fpu_register[exception_index]))
Sgl_decrement(Fpu_sgl(target));
/* now denormalize */
sgl_denormalize(&Fpu_sgl(target),&inexact,Rounding_mode());
break;
case DBL:
/*
* If ra (round-away) is set, will
* want to undo the rounding done
* by the hardware.
*/
if (Rabit(Fpu_register[exception_index]))
Dbl_decrement(Fpu_dblp1(target),Fpu_dblp2(target));
/* now denormalize */
dbl_denormalize(&Fpu_dblp1(target),&Fpu_dblp2(target),
&inexact,Rounding_mode());
break;
}
if (inexact) Set_underflowflag();
/*
* Underflow can generate an inexact
* exception. If inexact trap is enabled,
* want to do an inexact trap, otherwise
* set inexact flag.
*/
if (inexact && Is_inexacttrap_enabled()) {
/*
* Set exception field of exception register
* to inexact, parm field to zero.
* Underflow bit should be cleared.
*/
Set_exceptiontype(Fpu_register[exception_index],
INEXACTEXCEPTION);
Set_parmfield(Fpu_register[exception_index],0);
update_trap_counts(Fpu_register, aflags, bflags,
trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTRES);
}
else {
/*
* Exception register needs to be cleared.
* Inexact flag needs to be set if inexact.
*/
Clear_excp_register(exception_index);
if (inexact) Set_inexactflag();
}
}
continue;
}
switch(Excp_type(exception_index)) {
case OVERFLOWEXCEPTION:
case OVERFLOWEXCEPTION | INEXACTEXCEPTION:
/* check for overflow trap enabled */
if (Is_overflowtrap_enabled()) {
update_trap_counts(Fpu_register, aflags, bflags,
trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTOVF);
} else {
/*
* Isn't a real trap; we need to
* return the default value.
*/
target = current_ir & fivebits;
switch (Excp_format()) {
case SGL:
Sgl_setoverflow(Fpu_sgl(target));
break;
case DBL:
Dbl_setoverflow(Fpu_dblp1(target),Fpu_dblp2(target));
break;
}
Set_overflowflag();
/*
* Overflow always generates an inexact
* exception. If inexact trap is enabled,
* want to do an inexact trap, otherwise
* set inexact flag.
*/
if (Is_inexacttrap_enabled()) {
/*
* Set exception field of exception
* register to inexact. Overflow
* bit should be cleared.
*/
Set_exceptiontype(Fpu_register[exception_index],
INEXACTEXCEPTION);
update_trap_counts(Fpu_register, aflags, bflags,
trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTRES);
}
else {
/*
* Exception register needs to be cleared.
* Inexact flag needs to be set.
*/
Clear_excp_register(exception_index);
Set_inexactflag();
}
}
break;
case INVALIDEXCEPTION:
case OPC_2E_INVALIDEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTINV);
case DIVISIONBYZEROEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
Clear_excp_register(exception_index);
return SIGNALCODE(SIGFPE, FPE_FLTDIV);
case INEXACTEXCEPTION:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
return SIGNALCODE(SIGFPE, FPE_FLTRES);
default:
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
printk("%s(%d) Unknown FPU exception 0x%x\n", __FILE__,
__LINE__, Excp_type(exception_index));
return SIGNALCODE(SIGILL, ILL_COPROC);
case NOEXCEPTION: /* no exception */
/*
* Clear exception register in case
* other fields are non-zero.
*/
Clear_excp_register(exception_index);
break;
}
}
/*
* No real exceptions occurred.
*/
Clear_tbit();
update_trap_counts(Fpu_register, aflags, bflags, trap_counts);
return(NOTRAP);
}
| linux-master | arch/parisc/math-emu/decode_exc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfut.c $Revision: 1.1 $
*
* Purpose:
* Floating-point to Unsigned Fixed-point Converts with Truncation
*
* External Interfaces:
* dbl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfut(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfut(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/************************************************************************
* Floating-point to Unsigned Fixed-point Converts with Truncation *
************************************************************************/
/*
* Convert single floating-point to single fixed-point format
* with truncated result
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int src, result;
register int src_exponent;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
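/*
* An unsigned result has one more magnitude bit than a signed
* one, hence the SGL_FX_MAX_EXP + 1 bound.
*/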
if (src_exponent > SGL_FX_MAX_EXP + 1) {
if (Sgl_isone_sign(src)) {
result = 0;
} else {
result = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, the conversion is invalid.
*/
if (Sgl_isone_sign(src)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
Sgl_clear_signexponent_set_hidden(src);
Suint_from_sgl_mantissa(src,src_exponent,result);
*dstptr = result;
/* check for inexact */
if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Single Floating-point to Double Unsigned Fixed
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfut (sgl_floating_point * srcptr, unsigned int *nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
register unsigned int src, resultp1, resultp2;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP + 1) {
if (Sgl_isone_sign(src)) {
resultp1 = resultp2 = 0;
} else {
resultp1 = resultp2 = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, the conversion is invalid.
*/
if (Sgl_isone_sign(src)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Sgl_clear_signexponent_set_hidden(src);
Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
Duint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Duint_setzero(resultp1,resultp2);
Duint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Unsigned Fixed
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int srcp1, srcp2, result;
register int src_exponent;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP + 1) {
if (Dbl_isone_sign(srcp1)) {
result = 0;
} else {
result = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, the conversion is invalid.
*/
if (Dbl_isone_sign(srcp1)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
Dbl_clear_signexponent_set_hidden(srcp1);
Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
*dstptr = result;
/* check for inexact */
if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Double Unsigned Fixed
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfut (dbl_floating_point * srcptr, unsigned int *nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
register unsigned int srcp1, srcp2, resultp1, resultp2;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP + 1) {
if (Dbl_isone_sign(srcp1)) {
resultp1 = resultp2 = 0;
} else {
resultp1 = resultp2 = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, the conversion is invalid.
*/
if (Dbl_isone_sign(srcp1)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dbl_clear_signexponent_set_hidden(srcp1);
Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,
resultp1,resultp2);
Duint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Duint_setzero(resultp1,resultp2);
Duint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvfut.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvuf.c $Revision: 1.1 $
*
* Purpose:
* Fixed point to Floating-point Converts
*
* External Interfaces:
* dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/************************************************************************
* Fixed point to Floating-point Converts *
************************************************************************/
/*
* Convert Single Unsigned Fixed to Single Floating-point format
*/
int
sgl_to_sgl_fcnvuf(
unsigned int *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int src, result = 0;
register int dst_exponent;
src = *srcptr;
/* Check for zero */
if (src == 0) {
Sgl_setzero(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(src,dst_exponent);
/* left justify source, with msb at bit position 0 */
src <<= dst_exponent+1;
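/*
* Find_ms_one_bit left the msb position in dst_exponent (-1 means
* bit 31 was set), so the left-justified value reads 1.f with
* weight 2^(30 - dst_exponent), matching the exponent field set
* below.
*/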
Sgl_set_mantissa(result, src >> SGL_EXP_LENGTH);
Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
/* check for inexact */
if (Suint_isinexact_to_sgl(src)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
Sgl_increment(result);
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
Sgl_roundnearest_from_suint(src,result);
break;
}
if (Is_inexacttrap_enabled()) {
*dstptr = result;
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Single Unsigned Fixed to Double Floating-point
*/
int
sgl_to_dbl_fcnvuf(
unsigned int *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register int dst_exponent;
register unsigned int src, resultp1 = 0, resultp2 = 0;
src = *srcptr;
/* Check for zero */
if (src == 0) {
Dbl_setzero(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(src,dst_exponent);
/* left justify source, with msb at bit position 0 */
src <<= dst_exponent+1;
Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH);
Dbl_set_mantissap2(resultp2, src << (32-DBL_EXP_LENGTH));
Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Double Unsigned Fixed to Single Floating-point
*/
int
dbl_to_sgl_fcnvuf(
dbl_unsigned *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
int dst_exponent;
unsigned int srcp1, srcp2, result = 0;
Duint_copyfromptr(srcptr,srcp1,srcp2);
/* Check for zero */
if (srcp1 == 0 && srcp2 == 0) {
Sgl_setzero(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
if (srcp1 == 0) {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp2,dst_exponent);
/* left justify source, with msb at bit position 0 */
srcp1 = srcp2 << (dst_exponent + 1);
srcp2 = 0;
/*
* since msb set is in second word, need to
* adjust bit position count
*/
dst_exponent += 32;
}
else {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*
*/
Find_ms_one_bit(srcp1,dst_exponent);
/* left justify source, with msb at bit position 0 */
if (dst_exponent >= 0) {
Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
srcp1);
srcp2 <<= dst_exponent+1;
}
}
Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH);
Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
/* check for inexact */
if (Duint_isinexact_to_sgl(srcp1,srcp2)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
Sgl_increment(result);
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
Sgl_roundnearest_from_duint(srcp1,srcp2,result);
break;
}
if (Is_inexacttrap_enabled()) {
*dstptr = result;
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Double Unsigned Fixed to Double Floating-point
*/
int
dbl_to_dbl_fcnvuf(
dbl_unsigned *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register int dst_exponent;
register unsigned int srcp1, srcp2, resultp1 = 0, resultp2 = 0;
Duint_copyfromptr(srcptr,srcp1,srcp2);
/* Check for zero */
if (srcp1 == 0 && srcp2 ==0) {
Dbl_setzero(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
if (srcp1 == 0) {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp2,dst_exponent);
/* left justify source, with msb at bit position 0 */
srcp1 = srcp2 << (dst_exponent + 1);
srcp2 = 0;
/*
* since msb set is in second word, need to
* adjust bit position count
*/
dst_exponent += 32;
}
else {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp1,dst_exponent);
/* left justify source, with msb at bit position 0 */
if (dst_exponent >= 0) {
Variable_shift_double(srcp1,srcp2,(31-dst_exponent),
srcp1);
srcp2 <<= dst_exponent+1;
}
}
Dbl_set_mantissap1(resultp1, srcp1 >> DBL_EXP_LENGTH);
Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH,resultp2);
Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
/* check for inexact */
if (Duint_isinexact_to_dbl(srcp2)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
Dbl_increment(resultp1,resultp2);
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
Dbl_roundnearest_from_duint(srcp2,resultp1,
resultp2);
break;
}
if (Is_inexacttrap_enabled()) {
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvuf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fmpyfadd.c $Revision: 1.1 $
*
* Purpose:
* Double Floating-point Multiply Fused Add
* Double Floating-point Multiply Negate Fused Add
* Single Floating-point Multiply Fused Add
* Single Floating-point Multiply Negate Fused Add
*
* External Interfaces:
* dbl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* dbl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
* sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
/*
* Double Floating-point Multiply Fused Add
*/
int
dbl_fmpyfadd(
dbl_floating_point *src1ptr,
dbl_floating_point *src2ptr,
dbl_floating_point *src3ptr,
unsigned int *status,
dbl_floating_point *dstptr)
{
unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
unsigned int rightp1, rightp2, rightp3, rightp4;
unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
/*
* set sign bit of result of multiply
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(opnd2p1,opnd2p2);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd3p1)) {
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* return infinity */
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
* which needs enough room for 106 bits of mantissa,
* so lets call it a Double extended.
*/
Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
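/*
* Per iteration: shift the 106-bit accumulator right four places,
* then add opnd2 scaled by 8, 4, 2 and 1 for each of the low four
* mantissa bits of opnd1 that is set, consuming opnd1 a nibble at
* a time.
*/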
for (count = DBL_P-1; count >= 0; count -= 4) {
Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
if (Dbit28p2(opnd1p2)) {
/* Fourword_add should be an ADD followed by 3 ADDC's */
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
}
if (Dbit29p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
}
if (Dbit30p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
}
if (Dbit31p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1, opnd2p2, 0, 0);
}
Dbl_rightshiftby4(opnd1p1,opnd1p2);
}
if (Is_dexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Dbl_exponent(opnd3p1);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* right is zero */
/* The left operand (the product) can't be zero here, so it is the result.
*
* The final result is now in tmpres and mpy_exponent,
* and needs to be rounded and squeezed back into
* double precision format from double extended.
*/
result_exponent = mpy_exponent;
Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
Dbl_clear_signexponent(opnd3p1);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
} else {
Dbl_clear_exponent_set_hidden(opnd3p1);
}
/*
* Copy opnd3 to the double extended variable called right.
*/
Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
if (mpy_exponent < add_exponent || (mpy_exponent == add_exponent &&
Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1))) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
rightp2,rightp3,rightp4);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > DBLEXT_THRESHOLD) {
diff_exponent = DBLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Dblext_clear_sign(rightp1);
Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
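/*
* The sign bit of "save" (the XOR of the operand sign words,
* computed above) is set exactly when the operand signs differ.
*/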
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if(Dblext_iszero(resultp1,resultp2,resultp3,resultp4)){
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Dbl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Dblext_leftshiftby3(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 3;
break;
case 2:
case 3:
Dblext_leftshiftby2(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
/*to*/resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
/* Round the result. If the extension and lower two words are
* all zeros, then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
result_exponent,is_tiny);
}
Dbl_set_sign(resultp1,/*using*/sign_save);
if (Dblext_isnotzero_mantissap3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Dblext_isone_highp3(resultp3)) {
/* at least 1/2 ulp */
if (Dblext_isnotzero_low31p3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4) ||
Dblext_isone_lowp2(resultp2)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
/* Round down negative results */
Dbl_increment(resultp1,resultp2);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
* Double Floating-point Multiply Negate Fused Add
*/
int
dbl_fmpynfadd(
dbl_floating_point *src1ptr,
dbl_floating_point *src2ptr,
dbl_floating_point *src3ptr,
unsigned int *status,
dbl_floating_point *dstptr)
{
unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2, opnd3p1, opnd3p2;
register unsigned int tmpresp1, tmpresp2, tmpresp3, tmpresp4;
unsigned int rightp1, rightp2, rightp3, rightp4;
unsigned int resultp1, resultp2 = 0, resultp3 = 0, resultp4 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Dbl_copyfromptr(src1ptr,opnd1p1,opnd1p2);
Dbl_copyfromptr(src2ptr,opnd2p1,opnd2p2);
Dbl_copyfromptr(src3ptr,opnd3p1,opnd3p2);
/*
* set sign bit of result of multiply
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setzerop1(resultp1);
else
Dbl_setnegativezerop1(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Dbl_exponent(opnd1p1) + Dbl_exponent(opnd2p1) - DBL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2) &&
Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
if (Dbl_isnotnan(opnd3p1,opnd3p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(opnd2p1,opnd2p2);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Dbl_isinfinity(opnd3p1,opnd3p2) &&
(Dbl_sign(resultp1) ^ Dbl_sign(opnd3p1))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* is third operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd3p1)) {
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* return infinity */
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd3p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd3p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Dbl_iszero_exponentmantissa(opnd3p1,opnd3p2)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Dbl_or_signs(opnd3p1,resultp1);
} else {
Dbl_and_signs(opnd3p1,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Dbl_iszero_exponent(opnd3p1) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Dbl_signextendedsign(opnd3p1);
result_exponent = 0;
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,result_exponent);
Dbl_set_sign(opnd3p1,/*using*/sign_save);
Dbl_setwrapped_exponent(opnd3p1,result_exponent,
unfl);
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(opnd3p1,opnd3p2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_normalize(opnd2p1,opnd2p2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
* which needs enough room for 106 bits of mantissa,
	 * so let's call it a Double extended.
*/
Dblext_setzero(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = DBL_P-1; count >= 0; count -= 4) {
Dblext_rightshiftby4(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
if (Dbit28p2(opnd1p2)) {
/* Fourword_add should be an ADD followed by 3 ADDC's */
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<3 | opnd2p2>>29, opnd2p2<<3, 0, 0);
}
if (Dbit29p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<2 | opnd2p2>>30, opnd2p2<<2, 0, 0);
}
if (Dbit30p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1<<1 | opnd2p2>>31, opnd2p2<<1, 0, 0);
}
if (Dbit31p2(opnd1p2)) {
Fourword_add(tmpresp1, tmpresp2, tmpresp3, tmpresp4,
opnd2p1, opnd2p2, 0, 0);
}
Dbl_rightshiftby4(opnd1p1,opnd1p2);
}
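	/*
	 * Illustrative sketch (editorial addition, not part of the original
	 * emulation): the loop above is a radix-16 shift-and-add multiply.
	 * A freestanding portable equivalent for 32-bit operands:
	 *
	 *	static uint64_t shift_add_mpy(uint32_t a, uint32_t b)
	 *	{
	 *		uint64_t acc = 0;
	 *		int shift;
	 *
	 *		for (shift = 0; shift < 32; shift += 4) {
	 *			if (a & 1) acc += (uint64_t)b << shift;
	 *			if (a & 2) acc += (uint64_t)b << (shift + 1);
	 *			if (a & 4) acc += (uint64_t)b << (shift + 2);
	 *			if (a & 8) acc += (uint64_t)b << (shift + 3);
	 *			a >>= 4;
	 *		}
	 *		return acc;
	 *	}
	 *
	 * The emulation instead shifts the 128-bit accumulator right and
	 * adds the multiplicand near the top, which keeps every addition
	 * within the reach of Fourword_add.
	 */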
if (Is_dexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Dblext_rightshiftby1(tmpresp1,tmpresp2,tmpresp3,tmpresp4);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Dblext_set_sign(tmpresp1,Dbl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Dbl_exponent(opnd3p1);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd3p1,opnd3p2)) {
/* right is zero */
			/* Left can't be zero, so it must be the result.
*
* The final result is now in tmpres and mpy_exponent,
* and needs to be rounded and squeezed back into
* double precision format from double extended.
*/
result_exponent = mpy_exponent;
Dblext_copy(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Dbl_signextendedsign(opnd3p1); /* save sign */
Dbl_clear_signexponent(opnd3p1);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_normalize(opnd3p1,opnd3p2,add_exponent);
Dbl_set_sign(opnd3p1,sign_save); /* restore sign */
} else {
Dbl_clear_exponent_set_hidden(opnd3p1);
}
/*
* Copy opnd3 to the double extended variable called right.
*/
Dbl_copyto_dblext(opnd3p1,opnd3p2,rightp1,rightp2,rightp3,rightp4);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Dblext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Dblext_copytoint_exponentmantissap1(tmpresp1,signlessleft1);
Dblext_copytoint_exponentmantissap1(rightp1,signlessright1);
	if (mpy_exponent < add_exponent || (mpy_exponent == add_exponent &&
	    Dblext_ismagnitudeless(tmpresp2,rightp2,signlessleft1,signlessright1))) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Dblext_xorfromintp1(save,rightp1,/*to*/rightp1);
Dblext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Dblext_swap_lower(tmpresp2,tmpresp3,tmpresp4,
rightp2,rightp3,rightp4);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
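	/*
	 * Editorial note: the swap above is the classic XOR swap, reusing
	 * "save", which already holds tmpresp1 ^ rightp1 from the setup:
	 *
	 *	save = a ^ b;
	 *	b = save ^ b;	b now holds the original a
	 *	a = save ^ a;	a now holds the original b
	 *
	 * No temporary is needed for the first words; the lower words are
	 * exchanged with Dblext_swap_lower.
	 */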
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > DBLEXT_THRESHOLD) {
diff_exponent = DBLEXT_THRESHOLD;
}
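	/*
	 * Editorial note (assumption): clamping is harmless because any bits
	 * shifted more than DBLEXT_THRESHOLD places could only contribute
	 * sticky information, which the right alignment below preserves in
	 * the extension, so the rounded result is unchanged.
	 */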
/* Align right operand by shifting it to the right */
Dblext_clear_sign(rightp1);
Dblext_right_align(rightp1,rightp2,rightp3,rightp4,
/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Dblext_subtract(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if (Dblext_iszero(resultp1,resultp2,resultp3,resultp4)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Dbl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Dbl_iszero_hiddenhigh7mantissa(resultp1)) {
Dblext_leftshiftby8(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Dbl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Dblext_leftshiftby4(resultp1,resultp2,resultp3,resultp4);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Dbl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Dblext_leftshiftby3(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 3;
break;
case 2:
case 3:
Dblext_leftshiftby2(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Dblext_leftshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
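	/*
	 * Illustrative sketch (editorial addition): the normalization above
	 * is a coarse count-leading-zeros, in byte steps, then one nibble
	 * step, then a three-bit lookup.  A freestanding equivalent for a
	 * single nonzero word:
	 *
	 *	static int shift_to_normalize(uint32_t m)
	 *	{
	 *		int s = 0;
	 *
	 *		while (!(m & 0xff000000)) { m <<= 8; s += 8; }
	 *		if (!(m & 0xf0000000)) { m <<= 4; s += 4; }
	 *		while (!(m & 0x80000000)) { m <<= 1; s += 1; }
	 *		return s;
	 *	}
	 *
	 * The switch on the top three mantissa bits above plays the role of
	 * the final loop, so the last shift amount is known without
	 * iterating.
	 */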
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Dblext_addition(tmpresp1,tmpresp2,tmpresp3,tmpresp4,
rightp1,rightp2,rightp3,rightp4,
/*to*/resultp1,resultp2,resultp3,resultp4);
sign_save = Dbl_signextendedsign(resultp1);
if (Dbl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Dblext_arithrightshiftby1(resultp1,resultp2,resultp3,
resultp4);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
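	/*
	 * Worked example (editorial addition): 1.1b * 2^0 + 1.0b * 2^0 =
	 * 10.1b * 2^0; the carry out of the hidden bit is absorbed by one
	 * arithmetic right shift and an exponent increment, giving
	 * 1.01b * 2^1, i.e. 2.5.
	 */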
/* Round the result. If the extension and lower two words are
* all zeros, then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Dblext_denormalize(resultp1,resultp2,resultp3,resultp4,
result_exponent,is_tiny);
}
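	/*
	 * Editorial note (assumption): Dblext_denormalize also records in
	 * is_tiny whether the value was tiny before rounding; combined with
	 * inexact below, this implements the IEEE 754 "tiny and inexact"
	 * condition for raising the underflow flag.
	 */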
Dbl_set_sign(resultp1,/*using*/sign_save);
if (Dblext_isnotzero_mantissap3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Dblext_isone_highp3(resultp3)) {
/* at least 1/2 ulp */
if (Dblext_isnotzero_low31p3(resultp3) ||
Dblext_isnotzero_mantissap4(resultp4) ||
Dblext_isone_lowp2(resultp2)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
		case ROUNDMINUS:
			if (Dbl_isone_sign(resultp1)) {
				/* Round down negative results */
				Dbl_increment(resultp1,resultp2);
			}
			break;
		case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
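	/*
	 * Editorial note: the ROUNDNEAREST arm above is the classic
	 * guard/sticky round-to-nearest-even test.  With guard the first
	 * bit below the kept result, sticky the OR of everything below
	 * guard, and lsb the low kept bit, the increment condition is
	 *
	 *	round_up = guard && (sticky || lsb);
	 *
	 * where guard is the high bit of resultp3, sticky covers the low
	 * 31 bits of resultp3 plus all of resultp4, and lsb is the low bit
	 * of resultp2.
	 */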
if (result_exponent >= DBL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
Dbl_setoverflow(resultp1,resultp2);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
* Single Floating-point Multiply Fused Add
*/
sgl_fmpyfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
unsigned int *status;
{
unsigned int opnd1, opnd2, opnd3;
register unsigned int tmpresp1, tmpresp2;
unsigned int rightp1, rightp2;
unsigned int resultp1, resultp2 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Sgl_copyfromptr(src1ptr,opnd1);
Sgl_copyfromptr(src2ptr,opnd2);
Sgl_copyfromptr(src3ptr,opnd3);
/*
* set sign bit of result of multiply
*/
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
Sgl_setnegativezero(resultp1);
else Sgl_setzero(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd1,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
if (Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd3)) {
if (Sgl_iszero_mantissa(opnd3)) {
/* return infinity */
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
Sgl_normalize(opnd2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
	 * which needs enough room for 48 bits of mantissa,
	 * so let's call it a Single extended.
*/
Sglext_setzero(tmpresp1,tmpresp2);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = SGL_P-1; count >= 0; count -= 4) {
Sglext_rightshiftby4(tmpresp1,tmpresp2);
if (Sbit28(opnd1)) {
			/* Twoword_add should be an ADD followed by 1 ADDC */
Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
}
if (Sbit29(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
}
if (Sbit30(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
}
if (Sbit31(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
}
Sgl_rightshiftby4(opnd1);
}
if (Is_sexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Sglext_rightshiftby4(tmpresp1,tmpresp2);
} else {
Sglext_rightshiftby3(tmpresp1,tmpresp2);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Sgl_exponent(opnd3);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Sgl_iszero_mantissa(opnd3)) {
/* right is zero */
			/* Left can't be zero, so it must be the result.
			 *
			 * The final result is now in tmpres and mpy_exponent,
			 * and needs to be rounded and squeezed back into
			 * single precision format from single extended.
*/
result_exponent = mpy_exponent;
Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Sgl_signextendedsign(opnd3); /* save sign */
Sgl_clear_signexponent(opnd3);
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,add_exponent);
Sgl_set_sign(opnd3,sign_save); /* restore sign */
} else {
Sgl_clear_exponent_set_hidden(opnd3);
}
/*
	 * Copy opnd3 to the single extended variable called right.
*/
Sgl_copyto_sglext(opnd3,rightp1,rightp2);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
	if (mpy_exponent < add_exponent || (mpy_exponent == add_exponent &&
	    Sglext_ismagnitudeless(signlessleft1,signlessright1))) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Sglext_swap_lower(tmpresp2,rightp2);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > SGLEXT_THRESHOLD) {
diff_exponent = SGLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Sglext_clear_sign(rightp1);
Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Sglext_leftshiftby1(resultp1,resultp2);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if (Sglext_iszero(resultp1,resultp2)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Sgl_setone_sign(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Sgl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
Sglext_leftshiftby8(resultp1,resultp2);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Sglext_leftshiftby4(resultp1,resultp2);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Sglext_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
case 2:
case 3:
Sglext_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Sglext_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Sglext_addition(tmpresp1,tmpresp2,
rightp1,rightp2, /*to*/resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Sglext_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
	/* Round the result. If the extension word is all zeros,
	 * then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
}
Sgl_set_sign(resultp1,/*using*/sign_save);
if (Sglext_isnotzero_mantissap2(resultp2)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Sglext_isone_highp2(resultp2)) {
/* at least 1/2 ulp */
if (Sglext_isnotzero_low31p2(resultp2) ||
Sglext_isone_lowp1(resultp1)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Sgl_increment(resultp1);
}
}
break;
case ROUNDPLUS:
if (Sgl_iszero_sign(resultp1)) {
/* Round up positive results */
Sgl_increment(resultp1);
}
break;
		case ROUNDMINUS:
			if (Sgl_isone_sign(resultp1)) {
				/* Round down negative results */
				Sgl_increment(resultp1);
			}
			break;
		case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= SGL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
Sgl_setoverflow(resultp1);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Sgl_set_exponent(resultp1,result_exponent);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
/*
* Single Floating-point Multiply Negate Fused Add
*/
sgl_fmpynfadd(src1ptr,src2ptr,src3ptr,status,dstptr)
sgl_floating_point *src1ptr, *src2ptr, *src3ptr, *dstptr;
unsigned int *status;
{
unsigned int opnd1, opnd2, opnd3;
register unsigned int tmpresp1, tmpresp2;
unsigned int rightp1, rightp2;
unsigned int resultp1, resultp2 = 0;
register int mpy_exponent, add_exponent, count;
boolean inexact = FALSE, is_tiny = FALSE;
unsigned int signlessleft1, signlessright1, save;
register int result_exponent, diff_exponent;
int sign_save, jumpsize;
Sgl_copyfromptr(src1ptr,opnd1);
Sgl_copyfromptr(src2ptr,opnd2);
Sgl_copyfromptr(src3ptr,opnd3);
/*
* set sign bit of result of multiply
*/
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2))
Sgl_setzero(resultp1);
else
Sgl_setnegativezero(resultp1);
/*
* Generate multiply exponent
*/
mpy_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2) && Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd1,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
if (Sgl_isnotnan(opnd3)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/*
* invalid since multiply operands are
* zero & infinity
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(opnd2);
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
/*
* Check third operand for infinity with a
* sign opposite of the multiply result
*/
if (Sgl_isinfinity(opnd3) &&
(Sgl_sign(resultp1) ^ Sgl_sign(opnd3))) {
/*
* invalid since attempting a magnitude
* subtraction of infinities
*/
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* is third operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check third operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd3)) {
if (Sgl_iszero_mantissa(opnd3)) {
/* return infinity */
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
} else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd3)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(OPC_2E_INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd3);
}
/*
* return quiet NaN
*/
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate multiply mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,mpy_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd2)) {
/*
* Perform the add opnd3 with zero here.
*/
if (Sgl_iszero_exponentmantissa(opnd3)) {
if (Is_rounding_mode(ROUNDMINUS)) {
Sgl_or_signs(opnd3,resultp1);
} else {
Sgl_and_signs(opnd3,resultp1);
}
}
/*
* Now let's check for trapped underflow case.
*/
else if (Sgl_iszero_exponent(opnd3) &&
Is_underflowtrap_enabled()) {
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(opnd3);
result_exponent = 0;
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,result_exponent);
Sgl_set_sign(opnd3,/*using*/sign_save);
Sgl_setwrapped_exponent(opnd3,result_exponent,
unfl);
Sgl_copytoptr(opnd3,dstptr);
/* inexact = FALSE */
return(OPC_2E_UNDERFLOWEXCEPTION);
}
Sgl_copytoptr(opnd3,dstptr);
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
Sgl_normalize(opnd2,mpy_exponent);
}
/* Multiply the first two source mantissas together */
/*
* The intermediate result will be kept in tmpres,
	 * which needs enough room for 48 bits of mantissa,
	 * so let's call it a Single extended.
*/
Sglext_setzero(tmpresp1,tmpresp2);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
for (count = SGL_P-1; count >= 0; count -= 4) {
Sglext_rightshiftby4(tmpresp1,tmpresp2);
if (Sbit28(opnd1)) {
			/* Twoword_add should be an ADD followed by 1 ADDC */
Twoword_add(tmpresp1, tmpresp2, opnd2<<3, 0);
}
if (Sbit29(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<2, 0);
}
if (Sbit30(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2<<1, 0);
}
if (Sbit31(opnd1)) {
Twoword_add(tmpresp1, tmpresp2, opnd2, 0);
}
Sgl_rightshiftby4(opnd1);
}
if (Is_sexthiddenoverflow(tmpresp1)) {
/* result mantissa >= 2 (mantissa overflow) */
mpy_exponent++;
Sglext_rightshiftby4(tmpresp1,tmpresp2);
} else {
Sglext_rightshiftby3(tmpresp1,tmpresp2);
}
/*
* Restore the sign of the mpy result which was saved in resultp1.
* The exponent will continue to be kept in mpy_exponent.
*/
Sglext_set_sign(tmpresp1,Sgl_sign(resultp1));
/*
* No rounding is required, since the result of the multiply
* is exact in the extended format.
*/
/*
* Now we are ready to perform the add portion of the operation.
*
* The exponents need to be kept as integers for now, since the
* multiply result might not fit into the exponent field. We
* can't overflow or underflow because of this yet, since the
* add could bring the final result back into range.
*/
add_exponent = Sgl_exponent(opnd3);
/*
* Check for denormalized or zero add operand.
*/
if (add_exponent == 0) {
/* check for zero */
if (Sgl_iszero_mantissa(opnd3)) {
/* right is zero */
			/* Left can't be zero, so it must be the result.
			 *
			 * The final result is now in tmpres and mpy_exponent,
			 * and needs to be rounded and squeezed back into
			 * single precision format from single extended.
*/
result_exponent = mpy_exponent;
Sglext_copy(tmpresp1,tmpresp2,resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);/*save sign*/
goto round;
}
/*
* Neither are zeroes.
* Adjust exponent and normalize add operand.
*/
sign_save = Sgl_signextendedsign(opnd3); /* save sign */
Sgl_clear_signexponent(opnd3);
Sgl_leftshiftby1(opnd3);
Sgl_normalize(opnd3,add_exponent);
Sgl_set_sign(opnd3,sign_save); /* restore sign */
} else {
Sgl_clear_exponent_set_hidden(opnd3);
}
/*
	 * Copy opnd3 to the single extended variable called right.
*/
Sgl_copyto_sglext(opnd3,rightp1,rightp2);
/*
* A zero "save" helps discover equal operands (for later),
* and is used in swapping operands (if needed).
*/
Sglext_xortointp1(tmpresp1,rightp1,/*to*/save);
/*
* Compare magnitude of operands.
*/
Sglext_copytoint_exponentmantissa(tmpresp1,signlessleft1);
Sglext_copytoint_exponentmantissa(rightp1,signlessright1);
	if (mpy_exponent < add_exponent || (mpy_exponent == add_exponent &&
	    Sglext_ismagnitudeless(signlessleft1,signlessright1))) {
/*
* Set the left operand to the larger one by XOR swap.
* First finish the first word "save".
*/
Sglext_xorfromintp1(save,rightp1,/*to*/rightp1);
Sglext_xorfromintp1(save,tmpresp1,/*to*/tmpresp1);
Sglext_swap_lower(tmpresp2,rightp2);
/* also setup exponents used in rest of routine */
diff_exponent = add_exponent - mpy_exponent;
result_exponent = add_exponent;
} else {
/* also setup exponents used in rest of routine */
diff_exponent = mpy_exponent - add_exponent;
result_exponent = mpy_exponent;
}
/* Invariant: left is not smaller than right. */
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for
* this infrequent case.
*/
if (diff_exponent > SGLEXT_THRESHOLD) {
diff_exponent = SGLEXT_THRESHOLD;
}
/* Align right operand by shifting it to the right */
Sglext_clear_sign(rightp1);
Sglext_right_align(rightp1,rightp2,/*shifted by*/diff_exponent);
/* Treat sum and difference of the operands separately. */
if ((int)save < 0) {
/*
* Difference of the two operands. Overflow can occur if the
* multiply overflowed. A borrow can occur out of the hidden
* bit and force a post normalization phase.
*/
Sglext_subtract(tmpresp1,tmpresp2, rightp1,rightp2,
resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_iszero_hidden(resultp1)) {
/* Handle normalization */
/* A straightforward algorithm would now shift the
* result and extension left until the hidden bit
* becomes one. Not all of the extension bits need
* participate in the shift. Only the two most
* significant bits (round and guard) are needed.
* If only a single shift is needed then the guard
* bit becomes a significant low order bit and the
* extension must participate in the rounding.
* If more than a single shift is needed, then all
* bits to the right of the guard bit are zeros,
* and the guard bit may or may not be zero. */
Sglext_leftshiftby1(resultp1,resultp2);
/* Need to check for a zero result. The sign and
* exponent fields have already been zeroed. The more
* efficient test of the full object can be used.
*/
if (Sglext_iszero(resultp1,resultp2)) {
/* Must have been "x-x" or "x+(-x)". */
if (Is_rounding_mode(ROUNDMINUS))
Sgl_setone_sign(resultp1);
Sgl_copytoptr(resultp1,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if (Sgl_isone_hidden(resultp1)) {
/* No further normalization is needed */
goto round;
}
/* Discover first one bit to determine shift amount.
* Use a modified binary search. We have already
* shifted the result one position right and still
* not found a one so the remainder of the extension
* must be zero and simplifies rounding. */
/* Scan bytes */
while (Sgl_iszero_hiddenhigh7mantissa(resultp1)) {
Sglext_leftshiftby8(resultp1,resultp2);
result_exponent -= 8;
}
/* Now narrow it down to the nibble */
if (Sgl_iszero_hiddenhigh3mantissa(resultp1)) {
/* The lower nibble contains the
* normalizing one */
Sglext_leftshiftby4(resultp1,resultp2);
result_exponent -= 4;
}
/* Select case where first bit is set (already
* normalized) otherwise select the proper shift. */
jumpsize = Sgl_hiddenhigh3mantissa(resultp1);
if (jumpsize <= 7) switch(jumpsize) {
case 1:
Sglext_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
case 2:
case 3:
Sglext_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
case 4:
case 5:
case 6:
case 7:
Sglext_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
} /* end if (hidden...)... */
/* Fall through and round */
} /* end if (save < 0)... */
else {
/* Add magnitudes */
Sglext_addition(tmpresp1,tmpresp2,
rightp1,rightp2, /*to*/resultp1,resultp2);
sign_save = Sgl_signextendedsign(resultp1);
if (Sgl_isone_hiddenoverflow(resultp1)) {
/* Prenormalization required. */
Sglext_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
	/* Round the result. If the extension word is all zeros,
	 * then the result is exact. Otherwise round in the
* correct direction. Underflow is possible. If a postnormalization
* is necessary, then the mantissa is all zeros so no shift is needed.
*/
round:
if (result_exponent <= 0 && !Is_underflowtrap_enabled()) {
Sglext_denormalize(resultp1,resultp2,result_exponent,is_tiny);
}
Sgl_set_sign(resultp1,/*using*/sign_save);
if (Sglext_isnotzero_mantissap2(resultp2)) {
inexact = TRUE;
switch(Rounding_mode()) {
case ROUNDNEAREST: /* The default. */
if (Sglext_isone_highp2(resultp2)) {
/* at least 1/2 ulp */
if (Sglext_isnotzero_low31p2(resultp2) ||
Sglext_isone_lowp1(resultp1)) {
/* either exactly half way and odd or
* more than 1/2ulp */
Sgl_increment(resultp1);
}
}
break;
case ROUNDPLUS:
if (Sgl_iszero_sign(resultp1)) {
/* Round up positive results */
Sgl_increment(resultp1);
}
break;
		case ROUNDMINUS:
			if (Sgl_isone_sign(resultp1)) {
				/* Round down negative results */
				Sgl_increment(resultp1);
			}
			break;
		case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if (Sgl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if (result_exponent >= SGL_INFINITY_EXPONENT) {
/* Overflow */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_OVERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return (OPC_2E_OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
Sgl_setoverflow(resultp1);
} else if (result_exponent <= 0) { /* underflow case */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(resultp1,result_exponent,unfl);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return (OPC_2E_UNDERFLOWEXCEPTION |
OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(OPC_2E_UNDERFLOWEXCEPTION);
}
else if (inexact && is_tiny) Set_underflowflag();
}
else Sgl_set_exponent(resultp1,result_exponent);
Sgl_copytoptr(resultp1,dstptr);
if (inexact)
if (Is_inexacttrap_enabled()) return(OPC_2E_INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fmpyfadd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/fp/denormal.c $ Revision: $
*
* Purpose:
 *	Denormalize single and double precision results after a trapped
 *	underflow, rounding according to the current rounding mode.
*
* External Interfaces:
* dbl_denormalize(dbl_opndp1,dbl_opndp2,inexactflag,rmode)
* sgl_denormalize(sgl_opnd,inexactflag,rmode)
*
* Internal Interfaces:
 *	None.
*
* Theory:
 *	On a trapped underflow the delivered exponent has been wrapped by a
 *	fixed bias; these routines undo the wrap, shift the mantissa right
 *	into denormalized alignment, and round using the guard and sticky
 *	bits.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "hppa.h"
#include <linux/kernel.h>
/* #include <machine/sys/mdep_private.h> */
#undef Fpustatus_register
#define Fpustatus_register Fpu_register[0]
void
sgl_denormalize(unsigned int *sgl_opnd, boolean *inexactflag, int rmode)
{
unsigned int opnd;
int sign, exponent;
boolean guardbit = FALSE, stickybit, inexact;
opnd = *sgl_opnd;
stickybit = *inexactflag;
exponent = Sgl_exponent(opnd) - SGL_WRAP;
sign = Sgl_sign(opnd);
Sgl_denormalize(opnd,exponent,guardbit,stickybit,inexact);
if (inexact) {
switch (rmode) {
case ROUNDPLUS:
if (sign == 0) {
Sgl_increment(opnd);
}
break;
case ROUNDMINUS:
if (sign != 0) {
Sgl_increment(opnd);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Sgl_isone_lowmantissa(opnd))) {
Sgl_increment(opnd);
}
break;
}
}
Sgl_set_sign(opnd,sign);
*sgl_opnd = opnd;
*inexactflag = inexact;
return;
}
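/*
 * Usage sketch (editorial addition; names are illustrative): callers pass
 * the wrapped result and the inexact status accumulated so far, e.g.
 *
 *	boolean inexact = prior_inexact;
 *
 *	sgl_denormalize(&result, &inexact, Rounding_mode());
 *	if (inexact)
 *		Set_inexactflag();
 */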
void
dbl_denormalize(unsigned int *dbl_opndp1,
unsigned int * dbl_opndp2,
boolean *inexactflag,
int rmode)
{
unsigned int opndp1, opndp2;
int sign, exponent;
boolean guardbit = FALSE, stickybit, inexact;
opndp1 = *dbl_opndp1;
opndp2 = *dbl_opndp2;
stickybit = *inexactflag;
exponent = Dbl_exponent(opndp1) - DBL_WRAP;
sign = Dbl_sign(opndp1);
Dbl_denormalize(opndp1,opndp2,exponent,guardbit,stickybit,inexact);
if (inexact) {
switch (rmode) {
case ROUNDPLUS:
if (sign == 0) {
Dbl_increment(opndp1,opndp2);
}
break;
case ROUNDMINUS:
if (sign != 0) {
Dbl_increment(opndp1,opndp2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opndp2))) {
Dbl_increment(opndp1,opndp2);
}
break;
}
}
Dbl_set_sign(opndp1,sign);
*dbl_opndp1 = opndp1;
*dbl_opndp2 = opndp2;
*inexactflag = inexact;
return;
}
| linux-master | arch/parisc/math-emu/denormal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfsub.c $Revision: 1.1 $
*
* Purpose:
* Single_subtract: subtract two single precision values.
*
* External Interfaces:
* sgl_fsub(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
* Theory:
 *	The operands are compared by magnitude and swapped if necessary so
 *	the larger is on the left; the smaller is aligned by right shifting,
 *	and the magnitudes are added or subtracted according to the operand
 *	signs.  The result is then normalized and rounded.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single_subtract: subtract two single precision values.
*/
int
sgl_fsub(
sgl_floating_point *leftptr,
sgl_floating_point *rightptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int left, right, result, extent;
register unsigned int signless_upper_left, signless_upper_right, save;
register int result_exponent, right_exponent, diff_exponent;
register int sign_save, jumpsize;
register boolean inexact = FALSE, underflowtrap;
/* Create local copies of the numbers */
left = *leftptr;
right = *rightptr;
/* A zero "save" helps discover equal operands (for later), *
* and is used in swapping operands (if needed). */
Sgl_xortointp1(left,right,/*to*/save);
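    /*
     * Editorial note: "save" holds left ^ right, so its sign bit is set
     * exactly when the operand signs differ.  For subtraction, differing
     * signs mean the magnitudes are added and matching signs mean they
     * are subtracted; the tests of (int)save below select between the
     * two cases.
     */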
/*
* check first operand for NaN's or infinity
*/
if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
{
if (Sgl_iszero_mantissa(left))
{
if (Sgl_isnotnan(right))
{
if (Sgl_isinfinity(right) && save==0)
{
/*
* invalid since operands are same signed infinity's
*/
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* return infinity
*/
*dstptr = left;
return(NOEXCEPTION);
}
}
else
{
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(left))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(left);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
*dstptr = right;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = left;
return(NOEXCEPTION);
}
} /* End left NaN or Infinity processing */
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(right))
{
if (Sgl_iszero_mantissa(right))
{
/* return infinity */
Sgl_invert_sign(right);
*dstptr = right;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
}
/*
* return quiet NaN
*/
*dstptr = right;
return(NOEXCEPTION);
} /* End right NaN or Infinity processing */
/* Invariant: Must be dealing with finite numbers */
/* Compare operands by removing the sign */
Sgl_copytoint_exponentmantissa(left,signless_upper_left);
Sgl_copytoint_exponentmantissa(right,signless_upper_right);
/* sign difference selects sub or add operation. */
if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
{
/* Set the left operand to the larger one by XOR swap *
* First finish the first word using "save" */
Sgl_xorfromintp1(save,right,/*to*/right);
Sgl_xorfromintp1(save,left,/*to*/left);
result_exponent = Sgl_exponent(left);
Sgl_invert_sign(left);
}
/* Invariant: left is not smaller than right. */
if((right_exponent = Sgl_exponent(right)) == 0)
{
/* Denormalized operands. First look for zeroes */
if(Sgl_iszero_mantissa(right))
{
/* right is zero */
if(Sgl_iszero_exponentmantissa(left))
{
/* Both operands are zeros */
Sgl_invert_sign(right);
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_or_signs(left,/*with*/right);
}
else
{
Sgl_and_signs(left,/*with*/right);
}
}
else
{
/* Left is not a zero and must be the result. Trapped
* underflows are signaled if left is denormalized. Result
* is always exact. */
if( (result_exponent == 0) && Is_underflowtrap_enabled() )
{
/* need to normalize results mantissa */
sign_save = Sgl_signextendedsign(left);
Sgl_leftshiftby1(left);
Sgl_normalize(left,result_exponent);
Sgl_set_sign(left,/*using*/sign_save);
Sgl_setwrapped_exponent(left,result_exponent,unfl);
*dstptr = left;
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
}
*dstptr = left;
return(NOEXCEPTION);
}
/* Neither are zeroes */
Sgl_clear_sign(right); /* Exponent is already cleared */
if(result_exponent == 0 )
{
/* Both operands are denormalized. The result must be exact
* and is simply calculated. A sum could become normalized and a
* difference could cancel to a true zero. */
if( (/*signed*/int) save >= 0 )
{
Sgl_subtract(left,/*minus*/right,/*into*/result);
if(Sgl_iszero_mantissa(result))
{
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_setone_sign(result);
}
else
{
Sgl_setzero_sign(result);
}
*dstptr = result;
return(NOEXCEPTION);
}
}
else
{
Sgl_addition(left,right,/*into*/result);
if(Sgl_isone_hidden(result))
{
*dstptr = result;
return(NOEXCEPTION);
}
}
if(Is_underflowtrap_enabled())
{
/* need to normalize result */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1(result);
Sgl_normalize(result,result_exponent);
Sgl_set_sign(result,/*using*/sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
*dstptr = result;
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denormalized numbers. */
}
else
{
Sgl_clear_signexponent_set_hidden(right);
}
Sgl_clear_exponent_set_hidden(left);
diff_exponent = result_exponent - right_exponent;
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for this
* infrequent case.
*/
if(diff_exponent > SGL_THRESHOLD)
{
diff_exponent = SGL_THRESHOLD;
}
/* Align right operand by shifting to right */
Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
/*and lower to*/extent);
/* Treat sum and difference of the operands separately. */
if( (/*signed*/int) save >= 0 )
{
/*
	 * Difference of the two operands. There can be no overflow. A
* borrow can occur out of the hidden bit and force a post
* normalization phase.
*/
Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
if(Sgl_iszero_hidden(result))
{
/* Handle normalization */
/* A straightforward algorithm would now shift the result
* and extension left until the hidden bit becomes one. Not
* all of the extension bits need participate in the shift.
* Only the two most significant bits (round and guard) are
* needed. If only a single shift is needed then the guard
* bit becomes a significant low order bit and the extension
* must participate in the rounding. If more than a single
* shift is needed, then all bits to the right of the guard
* bit are zeros, and the guard bit may or may not be zero. */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1_withextent(result,extent,result);
/* Need to check for a zero result. The sign and exponent
* fields have already been zeroed. The more efficient test
* of the full object can be used.
*/
if(Sgl_iszero(result))
/* Must have been "x-x" or "x+(-x)". */
{
if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
*dstptr = result;
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if(Sgl_isone_hidden(result))
{
if(result_exponent==0)
{
/* Denormalized, exponent should be zero. Left operand *
* was normalized, so extent (guard, round) was zero */
goto underflow;
}
else
{
/* No further normalization is needed. */
Sgl_set_sign(result,/*using*/sign_save);
Ext_leftshiftby1(extent);
goto round;
}
}
/* Check for denormalized, exponent should be zero. Left *
* operand was normalized, so extent (guard, round) was zero */
if(!(underflowtrap = Is_underflowtrap_enabled()) &&
result_exponent==0) goto underflow;
/* Shift extension to complete one bit of normalization and
* update exponent. */
Ext_leftshiftby1(extent);
/* Discover first one bit to determine shift amount. Use a
	 * modified binary search. We have already shifted the result
	 * one position left and still not found a one, so the remainder
	 * of the extension must be zero, which simplifies rounding. */
/* Scan bytes */
while(Sgl_iszero_hiddenhigh7mantissa(result))
{
Sgl_leftshiftby8(result);
if((result_exponent -= 8) <= 0 && !underflowtrap)
goto underflow;
}
/* Now narrow it down to the nibble */
if(Sgl_iszero_hiddenhigh3mantissa(result))
{
/* The lower nibble contains the normalizing one */
Sgl_leftshiftby4(result);
if((result_exponent -= 4) <= 0 && !underflowtrap)
goto underflow;
}
	    /* Select case where the first bit is set (already normalized)
* otherwise select the proper shift. */
if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
{
/* Already normalized */
if(result_exponent <= 0) goto underflow;
Sgl_set_sign(result,/*using*/sign_save);
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result;
return(NOEXCEPTION);
}
Sgl_sethigh4bits(result,/*using*/sign_save);
switch(jumpsize)
{
case 1:
{
Sgl_leftshiftby3(result);
result_exponent -= 3;
break;
}
case 2:
case 3:
{
Sgl_leftshiftby2(result);
result_exponent -= 2;
break;
}
case 4:
case 5:
case 6:
case 7:
{
Sgl_leftshiftby1(result);
result_exponent -= 1;
break;
}
}
if(result_exponent > 0)
{
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result; /* Sign bit is already set */
return(NOEXCEPTION);
}
/* Fixup potential underflows */
underflow:
if(Is_underflowtrap_enabled())
{
Sgl_set_sign(result,sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
/*
* Since we cannot get an inexact denormalized result,
* we can now return.
*/
Sgl_right_align(result,/*by*/(1-result_exponent),extent);
Sgl_clear_signexponent(result);
Sgl_set_sign(result,sign_save);
*dstptr = result;
return(NOEXCEPTION);
} /* end if(hidden...)... */
/* Fall through and round */
} /* end if(save >= 0)... */
else
{
/* Add magnitudes */
Sgl_addition(left,right,/*to*/result);
if(Sgl_isone_hiddenoverflow(result))
{
/* Prenormalization required. */
Sgl_rightshiftby1_withextent(result,extent,extent);
Sgl_arithrightshiftby1(result);
result_exponent++;
} /* end if hiddenoverflow... */
	} /* end else ...add magnitudes... */
	/* Round the result. If the extension is all zeros, then the result is
* exact. Otherwise round in the correct direction. No underflow is
* possible. If a postnormalization is necessary, then the mantissa is
* all zeros so no shift is needed. */
round:
if(Ext_isnotzero(extent))
{
inexact = TRUE;
switch(Rounding_mode())
{
case ROUNDNEAREST: /* The default. */
if(Ext_isone_sign(extent))
{
/* at least 1/2 ulp */
if(Ext_isnotzero_lower(extent) ||
Sgl_isone_lowmantissa(result))
{
		/* either exactly halfway and odd, or more than 1/2 ulp */
Sgl_increment(result);
}
}
break;
case ROUNDPLUS:
if(Sgl_iszero_sign(result))
{
/* Round up positive results */
Sgl_increment(result);
}
break;
case ROUNDMINUS:
if(Sgl_isone_sign(result))
{
/* Round down negative results */
Sgl_increment(result);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
}
if(result_exponent == SGL_INFINITY_EXPONENT)
{
/* Overflow */
if(Is_overflowtrap_enabled())
{
Sgl_setwrapped_exponent(result,result_exponent,ovfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
else
{
Set_overflowflag();
inexact = TRUE;
Sgl_setoverflow(result);
}
}
else Sgl_set_exponent(result,result_exponent);
*dstptr = result;
if(inexact)
if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
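/*
 * A minimal, stand-alone sketch (guarded out of compilation, not part of
 * the emulator) of the round-to-nearest-even decision made at the "round:"
 * label above, assuming the guard bit sits in the extension's sign position
 * (what Ext_isone_sign() tests) and the sticky information in the remaining
 * bits (Ext_isnotzero_lower()). The helper name and layout are illustrative
 * assumptions only.
 */
#if 0
static unsigned int round_nearest_even(unsigned int mantissa,
				       unsigned int extension)
{
	unsigned int guard  = extension >> 31;		/* first bit beyond the result */
	unsigned int sticky = (extension << 1) != 0;	/* anything beyond the guard */

	/* Round up when more than halfway, or exactly halfway and odd. */
	if (guard && (sticky || (mantissa & 1)))
		mantissa++;
	return mantissa;
}
#endif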
| linux-master | arch/parisc/math-emu/sfsub.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfadd.c $Revision: 1.1 $
*
* Purpose:
* Single_add: add two single precision values.
*
* External Interfaces:
* sgl_fadd(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
* Theory:
 *	Aligns the smaller operand to the larger, adds or subtracts the
 *	mantissas according to the operand signs, renormalizes the result,
 *	and rounds per the current rounding mode.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single_add: add two single precision values.
*/
int
sgl_fadd(
sgl_floating_point *leftptr,
sgl_floating_point *rightptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int left, right, result, extent;
register unsigned int signless_upper_left, signless_upper_right, save;
register int result_exponent, right_exponent, diff_exponent;
register int sign_save, jumpsize;
register boolean inexact = FALSE;
register boolean underflowtrap;
/* Create local copies of the numbers */
left = *leftptr;
right = *rightptr;
/* A zero "save" helps discover equal operands (for later), *
* and is used in swapping operands (if needed). */
Sgl_xortointp1(left,right,/*to*/save);
/*
* check first operand for NaN's or infinity
*/
if ((result_exponent = Sgl_exponent(left)) == SGL_INFINITY_EXPONENT)
{
if (Sgl_iszero_mantissa(left))
{
if (Sgl_isnotnan(right))
{
if (Sgl_isinfinity(right) && save!=0)
{
/*
	 * invalid since the operands are opposite-signed infinities
*/
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* return infinity
*/
*dstptr = left;
return(NOEXCEPTION);
}
}
else
{
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(left))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(left);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
*dstptr = right;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = left;
return(NOEXCEPTION);
}
} /* End left NaN or Infinity processing */
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(right))
{
if (Sgl_iszero_mantissa(right))
{
/* return infinity */
*dstptr = right;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(right))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(right);
}
/*
* return quiet NaN
*/
*dstptr = right;
return(NOEXCEPTION);
} /* End right NaN or Infinity processing */
/* Invariant: Must be dealing with finite numbers */
/* Compare operands by removing the sign */
Sgl_copytoint_exponentmantissa(left,signless_upper_left);
Sgl_copytoint_exponentmantissa(right,signless_upper_right);
/* sign difference selects add or sub operation. */
if(Sgl_ismagnitudeless(signless_upper_left,signless_upper_right))
{
/* Set the left operand to the larger one by XOR swap *
* First finish the first word using "save" */
Sgl_xorfromintp1(save,right,/*to*/right);
Sgl_xorfromintp1(save,left,/*to*/left);
result_exponent = Sgl_exponent(left);
}
/* Invariant: left is not smaller than right. */
if((right_exponent = Sgl_exponent(right)) == 0)
{
/* Denormalized operands. First look for zeroes */
if(Sgl_iszero_mantissa(right))
{
/* right is zero */
if(Sgl_iszero_exponentmantissa(left))
{
/* Both operands are zeros */
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_or_signs(left,/*with*/right);
}
else
{
Sgl_and_signs(left,/*with*/right);
}
}
else
{
/* Left is not a zero and must be the result. Trapped
* underflows are signaled if left is denormalized. Result
* is always exact. */
if( (result_exponent == 0) && Is_underflowtrap_enabled() )
{
		/* need to normalize the result's mantissa */
sign_save = Sgl_signextendedsign(left);
Sgl_leftshiftby1(left);
Sgl_normalize(left,result_exponent);
Sgl_set_sign(left,/*using*/sign_save);
Sgl_setwrapped_exponent(left,result_exponent,unfl);
*dstptr = left;
return(UNDERFLOWEXCEPTION);
}
}
*dstptr = left;
return(NOEXCEPTION);
}
/* Neither are zeroes */
Sgl_clear_sign(right); /* Exponent is already cleared */
if(result_exponent == 0 )
{
/* Both operands are denormalized. The result must be exact
* and is simply calculated. A sum could become normalized and a
* difference could cancel to a true zero. */
if( (/*signed*/int) save < 0 )
{
Sgl_subtract(left,/*minus*/right,/*into*/result);
if(Sgl_iszero_mantissa(result))
{
if(Is_rounding_mode(ROUNDMINUS))
{
Sgl_setone_sign(result);
}
else
{
Sgl_setzero_sign(result);
}
*dstptr = result;
return(NOEXCEPTION);
}
}
else
{
Sgl_addition(left,right,/*into*/result);
if(Sgl_isone_hidden(result))
{
*dstptr = result;
return(NOEXCEPTION);
}
}
if(Is_underflowtrap_enabled())
{
/* need to normalize result */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1(result);
Sgl_normalize(result,result_exponent);
Sgl_set_sign(result,/*using*/sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
return(UNDERFLOWEXCEPTION);
}
*dstptr = result;
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denormalized numbers. */
}
else
{
Sgl_clear_signexponent_set_hidden(right);
}
Sgl_clear_exponent_set_hidden(left);
diff_exponent = result_exponent - right_exponent;
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for this
* infrequent case.
*/
if(diff_exponent > SGL_THRESHOLD)
{
diff_exponent = SGL_THRESHOLD;
}
/* Align right operand by shifting to right */
Sgl_right_align(/*operand*/right,/*shifted by*/diff_exponent,
/*and lower to*/extent);
/* Treat sum and difference of the operands separately. */
if( (/*signed*/int) save < 0 )
{
/*
	 * Difference of the two operands. There can be no overflow. A
* borrow can occur out of the hidden bit and force a post
* normalization phase.
*/
Sgl_subtract_withextension(left,/*minus*/right,/*with*/extent,/*into*/result);
if(Sgl_iszero_hidden(result))
{
/* Handle normalization */
/* A straightforward algorithm would now shift the result
* and extension left until the hidden bit becomes one. Not
* all of the extension bits need participate in the shift.
* Only the two most significant bits (round and guard) are
* needed. If only a single shift is needed then the guard
* bit becomes a significant low order bit and the extension
* must participate in the rounding. If more than a single
* shift is needed, then all bits to the right of the guard
* bit are zeros, and the guard bit may or may not be zero. */
sign_save = Sgl_signextendedsign(result);
Sgl_leftshiftby1_withextent(result,extent,result);
/* Need to check for a zero result. The sign and exponent
* fields have already been zeroed. The more efficient test
* of the full object can be used.
*/
if(Sgl_iszero(result))
/* Must have been "x-x" or "x+(-x)". */
{
if(Is_rounding_mode(ROUNDMINUS)) Sgl_setone_sign(result);
*dstptr = result;
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if(Sgl_isone_hidden(result))
{
if(result_exponent==0)
{
/* Denormalized, exponent should be zero. Left operand *
* was normalized, so extent (guard, round) was zero */
goto underflow;
}
else
{
/* No further normalization is needed. */
Sgl_set_sign(result,/*using*/sign_save);
Ext_leftshiftby1(extent);
goto round;
}
}
/* Check for denormalized, exponent should be zero. Left *
* operand was normalized, so extent (guard, round) was zero */
if(!(underflowtrap = Is_underflowtrap_enabled()) &&
result_exponent==0) goto underflow;
/* Shift extension to complete one bit of normalization and
* update exponent. */
Ext_leftshiftby1(extent);
/* Discover first one bit to determine shift amount. Use a
	 * modified binary search. We have already shifted the result
	 * one position left and still not found a one, so the remainder
	 * of the extension must be zero, which simplifies rounding. */
/* Scan bytes */
while(Sgl_iszero_hiddenhigh7mantissa(result))
{
Sgl_leftshiftby8(result);
if((result_exponent -= 8) <= 0 && !underflowtrap)
goto underflow;
}
/* Now narrow it down to the nibble */
if(Sgl_iszero_hiddenhigh3mantissa(result))
{
/* The lower nibble contains the normalizing one */
Sgl_leftshiftby4(result);
if((result_exponent -= 4) <= 0 && !underflowtrap)
goto underflow;
}
	    /* Select case where the first bit is set (already normalized)
* otherwise select the proper shift. */
if((jumpsize = Sgl_hiddenhigh3mantissa(result)) > 7)
{
/* Already normalized */
if(result_exponent <= 0) goto underflow;
Sgl_set_sign(result,/*using*/sign_save);
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result;
return(NOEXCEPTION);
}
Sgl_sethigh4bits(result,/*using*/sign_save);
switch(jumpsize)
{
case 1:
{
Sgl_leftshiftby3(result);
result_exponent -= 3;
break;
}
case 2:
case 3:
{
Sgl_leftshiftby2(result);
result_exponent -= 2;
break;
}
case 4:
case 5:
case 6:
case 7:
{
Sgl_leftshiftby1(result);
result_exponent -= 1;
break;
}
}
if(result_exponent > 0)
{
Sgl_set_exponent(result,/*using*/result_exponent);
*dstptr = result;
return(NOEXCEPTION); /* Sign bit is already set */
}
/* Fixup potential underflows */
underflow:
if(Is_underflowtrap_enabled())
{
Sgl_set_sign(result,sign_save);
Sgl_setwrapped_exponent(result,result_exponent,unfl);
*dstptr = result;
/* inexact = FALSE; */
return(UNDERFLOWEXCEPTION);
}
/*
* Since we cannot get an inexact denormalized result,
* we can now return.
*/
Sgl_right_align(result,/*by*/(1-result_exponent),extent);
Sgl_clear_signexponent(result);
Sgl_set_sign(result,sign_save);
*dstptr = result;
return(NOEXCEPTION);
} /* end if(hidden...)... */
/* Fall through and round */
} /* end if(save < 0)... */
else
{
/* Add magnitudes */
Sgl_addition(left,right,/*to*/result);
if(Sgl_isone_hiddenoverflow(result))
{
/* Prenormalization required. */
Sgl_rightshiftby1_withextent(result,extent,extent);
Sgl_arithrightshiftby1(result);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
	/* Round the result. If the extension is all zeros, then the result is
* exact. Otherwise round in the correct direction. No underflow is
* possible. If a postnormalization is necessary, then the mantissa is
* all zeros so no shift is needed. */
round:
if(Ext_isnotzero(extent))
{
inexact = TRUE;
switch(Rounding_mode())
{
case ROUNDNEAREST: /* The default. */
if(Ext_isone_sign(extent))
{
/* at least 1/2 ulp */
if(Ext_isnotzero_lower(extent) ||
Sgl_isone_lowmantissa(result))
{
		/* either exactly halfway and odd, or more than 1/2 ulp */
Sgl_increment(result);
}
}
break;
case ROUNDPLUS:
if(Sgl_iszero_sign(result))
{
/* Round up positive results */
Sgl_increment(result);
}
break;
case ROUNDMINUS:
if(Sgl_isone_sign(result))
{
/* Round down negative results */
Sgl_increment(result);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if(Sgl_isone_hiddenoverflow(result)) result_exponent++;
}
if(result_exponent == SGL_INFINITY_EXPONENT)
{
/* Overflow */
if(Is_overflowtrap_enabled())
{
Sgl_setwrapped_exponent(result,result_exponent,ovfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
else
{
Set_overflowflag();
inexact = TRUE;
Sgl_setoverflow(result);
}
}
else Sgl_set_exponent(result,result_exponent);
*dstptr = result;
if(inexact)
if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
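/*
 * A hedged illustration (compiled out) of the XOR-swap idiom used above:
 * "save" is left^right, so XORing it back into either word yields the
 * other, letting the larger-magnitude operand move into "left" without a
 * temporary, while the sign of (int)save records whether the operand signs
 * differed. The helper below is an assumption for illustration, not an
 * emulator interface.
 */
#if 0
static int order_by_magnitude(unsigned int *left, unsigned int *right)
{
	unsigned int save = *left ^ *right;	/* also records the sign difference */

	if ((*left & 0x7fffffff) < (*right & 0x7fffffff)) {
		*right ^= save;			/* *right becomes the old *left */
		*left  ^= save;			/* *left becomes the old *right */
	}
	return (int)save < 0;			/* nonzero iff effective subtraction */
}
#endif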
| linux-master | arch/parisc/math-emu/sfadd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfcmp.c $Revision: 1.1 $
*
* Purpose:
* sgl_cmp: compare two values
*
* External Interfaces:
* sgl_fcmp(leftptr, rightptr, cond, status)
*
* Internal Interfaces:
*
* Theory:
 *	Screens NaN operands per IEEE-754, then compares the operands as
 *	sign-magnitude integers and sets the status C-bit according to the
 *	requested predicate.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* sgl_cmp: compare two values
*/
int
sgl_fcmp (sgl_floating_point * leftptr, sgl_floating_point * rightptr,
unsigned int cond, unsigned int *status)
/* The predicate to be tested */
{
register unsigned int left, right;
register int xorresult;
/* Create local copies of the numbers */
left = *leftptr;
right = *rightptr;
/*
* Test for NaN
*/
if( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
|| (Sgl_exponent(right) == SGL_INFINITY_EXPONENT) )
{
/* Check if a NaN is involved. Signal an invalid exception when
* comparing a signaling NaN or when comparing quiet NaNs and the
* low bit of the condition is set */
if( ( (Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
&& Sgl_isnotzero_mantissa(left)
&& (Exception(cond) || Sgl_isone_signaling(left)))
||
( (Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
&& Sgl_isnotzero_mantissa(right)
&& (Exception(cond) || Sgl_isone_signaling(right)) ) )
{
if( Is_invalidtrap_enabled() ) {
Set_status_cbit(Unordered(cond));
return(INVALIDEXCEPTION);
}
else Set_invalidflag();
Set_status_cbit(Unordered(cond));
return(NOEXCEPTION);
}
/* All the exceptional conditions are handled, now special case
NaN compares */
else if( ((Sgl_exponent(left) == SGL_INFINITY_EXPONENT)
&& Sgl_isnotzero_mantissa(left))
||
((Sgl_exponent(right) == SGL_INFINITY_EXPONENT)
&& Sgl_isnotzero_mantissa(right)) )
{
/* NaNs always compare unordered. */
Set_status_cbit(Unordered(cond));
return(NOEXCEPTION);
}
/* infinities will drop down to the normal compare mechanisms */
}
/* First compare for unequal signs => less or greater or
* special equal case */
Sgl_xortointp1(left,right,xorresult);
if( xorresult < 0 )
{
/* left negative => less, left positive => greater.
* equal is possible if both operands are zeros. */
if( Sgl_iszero_exponentmantissa(left)
&& Sgl_iszero_exponentmantissa(right) )
{
Set_status_cbit(Equal(cond));
}
else if( Sgl_isone_sign(left) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
/* Signs are the same. Treat negative numbers separately
* from the positives because of the reversed sense. */
else if( Sgl_all(left) == Sgl_all(right) )
{
Set_status_cbit(Equal(cond));
}
else if( Sgl_iszero_sign(left) )
{
/* Positive compare */
if( Sgl_all(left) < Sgl_all(right) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
else
{
/* Negative compare. Signed or unsigned compares
* both work the same. That distinction is only
* important when the sign bits differ. */
if( Sgl_all(left) > Sgl_all(right) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
return(NOEXCEPTION);
}
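/*
 * A compiled-out sketch of why the raw integer compares above are valid:
 * IEEE-754 singles of equal sign order the same way as their bit patterns,
 * with the sense reversed when both are negative, and +0/-0 comparing
 * equal. The helper (returning -1/0/+1, NaNs excluded) is a hypothetical
 * illustration only.
 */
#if 0
static int sgl_order(unsigned int left, unsigned int right)
{
	if ((left ^ right) & 0x80000000) {	/* signs differ */
		if (!((left | right) << 1))	/* both are zeros: +0 == -0 */
			return 0;
		return (left & 0x80000000) ? -1 : 1;
	}
	if (left == right)
		return 0;
	if (left & 0x80000000)			/* both negative: reversed sense */
		return (left > right) ? -1 : 1;
	return (left < right) ? -1 : 1;		/* both positive */
}
#endif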
| linux-master | arch/parisc/math-emu/sfcmp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfsqrt.c $Revision: 1.1 $
*
* Purpose:
* Double Floating-point Square Root
*
* External Interfaces:
* dbl_fsqrt(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	Screens NaN, infinity, zero and negative operands, then computes
 *	the root mantissa one bit at a time with a restoring shift-subtract
 *	recurrence and rounds the result.
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Floating-point Square Root
*/
/*ARGSUSED*/
unsigned int
dbl_fsqrt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, resultp1, resultp2;
register unsigned int newbitp1, newbitp2, sump1, sump2;
register int src_exponent;
register boolean guardbit = FALSE, even_exponent;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
/*
* check source operand for NaN or infinity
*/
if ((src_exponent = Dbl_exponent(srcp1)) == DBL_INFINITY_EXPONENT) {
/*
* is signaling NaN?
*/
if (Dbl_isone_signaling(srcp1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(srcp1);
}
/*
* Return quiet NaN or positive infinity.
* Fall through to negative test if negative infinity.
*/
if (Dbl_iszero_sign(srcp1) ||
Dbl_isnotzero_mantissa(srcp1,srcp2)) {
Dbl_copytoptr(srcp1,srcp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check for zero source operand
*/
if (Dbl_iszero_exponentmantissa(srcp1,srcp2)) {
Dbl_copytoptr(srcp1,srcp2,dstptr);
return(NOEXCEPTION);
}
/*
* check for negative source operand
*/
if (Dbl_isone_sign(srcp1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_makequietnan(srcp1,srcp2);
Dbl_copytoptr(srcp1,srcp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent > 0) {
even_exponent = Dbl_hidden(srcp1);
Dbl_clear_signexponent_set_hidden(srcp1);
}
else {
/* normalize operand */
Dbl_clear_signexponent(srcp1);
src_exponent++;
Dbl_normalize(srcp1,srcp2,src_exponent);
even_exponent = src_exponent & 1;
}
if (even_exponent) {
		/* The unbiased exponent is even. Double the operand here
		 * and halve the root again after the loop so that the
		 * binary point of the bit-serial root lands in the same
		 * place for either exponent parity; the exponent itself
		 * is halved at the end by (src_exponent-DBL_BIAS)>>1. */
Dbl_leftshiftby1(srcp1,srcp2);
}
	/*
	 * Restoring, bit-serial square root: propose each result bit in
	 * turn (newbit), and whenever the trial sum (result + newbit)
	 * still fits under the remainder, accept the bit and subtract;
	 * the remainder is shifted left each iteration so every mantissa
	 * bit is eventually consumed.
	 */
Dbl_setzero(resultp1,resultp2);
Dbl_allp1(newbitp1) = 1 << (DBL_P - 32);
Dbl_setzero_mantissap2(newbitp2);
while (Dbl_isnotzero(newbitp1,newbitp2) && Dbl_isnotzero(srcp1,srcp2)) {
Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,sump1,sump2);
if(Dbl_isnotgreaterthan(sump1,sump2,srcp1,srcp2)) {
Dbl_leftshiftby1(newbitp1,newbitp2);
/* update result */
Dbl_addition(resultp1,resultp2,newbitp1,newbitp2,
resultp1,resultp2);
Dbl_subtract(srcp1,srcp2,sump1,sump2,srcp1,srcp2);
Dbl_rightshiftby2(newbitp1,newbitp2);
}
else {
Dbl_rightshiftby1(newbitp1,newbitp2);
}
Dbl_leftshiftby1(srcp1,srcp2);
}
/* correct exponent for pre-shift */
if (even_exponent) {
Dbl_rightshiftby1(resultp1,resultp2);
}
/* check for inexact */
if (Dbl_isnotzero(srcp1,srcp2)) {
if (!even_exponent && Dbl_islessthan(resultp1,resultp2,srcp1,srcp2)) {
Dbl_increment(resultp1,resultp2);
}
guardbit = Dbl_lowmantissap2(resultp2);
Dbl_rightshiftby1(resultp1,resultp2);
/* now round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
Dbl_increment(resultp1,resultp2);
break;
case ROUNDNEAREST:
/* stickybit is always true, so guardbit
* is enough to determine rounding */
if (guardbit) {
Dbl_increment(resultp1,resultp2);
}
break;
}
/* increment result exponent by 1 if mantissa overflowed */
if (Dbl_isone_hiddenoverflow(resultp1)) src_exponent+=2;
if (Is_inexacttrap_enabled()) {
Dbl_set_exponent(resultp1,
((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
else {
Dbl_rightshiftby1(resultp1,resultp2);
}
Dbl_set_exponent(resultp1,((src_exponent-DBL_BIAS)>>1)+DBL_BIAS);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
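/*
 * The main loop above is the classic restoring, bit-serial square root. A
 * compiled-out 32-bit integer version of the same recurrence is sketched
 * below for illustration (the emulator's variant left-shifts the remainder
 * across the two mantissa words instead of right-shifting the trial bit);
 * the helper name is an assumption.
 */
#if 0
static unsigned int isqrt32(unsigned int n)
{
	unsigned int result = 0;
	unsigned int bit = 1u << 30;	/* highest even-weight bit */

	while (bit) {
		if (n >= result + bit) {
			n -= result + bit;		/* accept this root bit */
			result = (result >> 1) + bit;
		} else {
			result >>= 1;			/* reject it */
		}
		bit >>= 2;
	}
	return result;	/* floor(sqrt(n)) */
}
#endif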
| linux-master | arch/parisc/math-emu/dfsqrt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfcmp.c $Revision: 1.1 $
*
* Purpose:
* dbl_cmp: compare two values
*
* External Interfaces:
* dbl_fcmp(leftptr, rightptr, cond, status)
*
* Internal Interfaces:
*
* Theory:
 *	Screens NaN operands per IEEE-754, then compares the two-word
 *	operands as sign-magnitude integers and sets the status C-bit
 *	according to the requested predicate.
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* dbl_cmp: compare two values
*/
int
dbl_fcmp (dbl_floating_point * leftptr, dbl_floating_point * rightptr,
unsigned int cond, unsigned int *status)
/* The predicate to be tested */
{
register unsigned int leftp1, leftp2, rightp1, rightp2;
register int xorresult;
/* Create local copies of the numbers */
Dbl_copyfromptr(leftptr,leftp1,leftp2);
Dbl_copyfromptr(rightptr,rightp1,rightp2);
/*
* Test for NaN
*/
if( (Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
|| (Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT) )
{
/* Check if a NaN is involved. Signal an invalid exception when
* comparing a signaling NaN or when comparing quiet NaNs and the
* low bit of the condition is set */
if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
&& Dbl_isnotzero_mantissa(leftp1,leftp2)
&& (Exception(cond) || Dbl_isone_signaling(leftp1)))
||
((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
&& Dbl_isnotzero_mantissa(rightp1,rightp2)
&& (Exception(cond) || Dbl_isone_signaling(rightp1))) )
{
if( Is_invalidtrap_enabled() ) {
Set_status_cbit(Unordered(cond));
return(INVALIDEXCEPTION);
}
else Set_invalidflag();
Set_status_cbit(Unordered(cond));
return(NOEXCEPTION);
}
/* All the exceptional conditions are handled, now special case
NaN compares */
else if( ((Dbl_exponent(leftp1) == DBL_INFINITY_EXPONENT)
&& Dbl_isnotzero_mantissa(leftp1,leftp2))
||
((Dbl_exponent(rightp1) == DBL_INFINITY_EXPONENT)
&& Dbl_isnotzero_mantissa(rightp1,rightp2)) )
{
/* NaNs always compare unordered. */
Set_status_cbit(Unordered(cond));
return(NOEXCEPTION);
}
/* infinities will drop down to the normal compare mechanisms */
}
/* First compare for unequal signs => less or greater or
* special equal case */
Dbl_xortointp1(leftp1,rightp1,xorresult);
if( xorresult < 0 )
{
/* left negative => less, left positive => greater.
* equal is possible if both operands are zeros. */
if( Dbl_iszero_exponentmantissa(leftp1,leftp2)
&& Dbl_iszero_exponentmantissa(rightp1,rightp2) )
{
Set_status_cbit(Equal(cond));
}
else if( Dbl_isone_sign(leftp1) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
/* Signs are the same. Treat negative numbers separately
* from the positives because of the reversed sense. */
else if(Dbl_isequal(leftp1,leftp2,rightp1,rightp2))
{
Set_status_cbit(Equal(cond));
}
else if( Dbl_iszero_sign(leftp1) )
{
/* Positive compare */
if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
{
Set_status_cbit(Lessthan(cond));
}
else if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
{
Set_status_cbit(Greaterthan(cond));
}
else
{
/* Equal first parts. Now we must use unsigned compares to
* resolve the two possibilities. */
if( Dbl_allp2(leftp2) < Dbl_allp2(rightp2) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
}
else
{
/* Negative compare. Signed or unsigned compares
* both work the same. That distinction is only
* important when the sign bits differ. */
if( Dbl_allp1(leftp1) > Dbl_allp1(rightp1) )
{
Set_status_cbit(Lessthan(cond));
}
else if( Dbl_allp1(leftp1) < Dbl_allp1(rightp1) )
{
Set_status_cbit(Greaterthan(cond));
}
else
{
/* Equal first parts. Now we must use unsigned compares to
* resolve the two possibilities. */
if( Dbl_allp2(leftp2) > Dbl_allp2(rightp2) )
{
Set_status_cbit(Lessthan(cond));
}
else
{
Set_status_cbit(Greaterthan(cond));
}
}
}
return(NOEXCEPTION);
}
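/*
 * A compiled-out sketch of the two-word ordering used above: the upper
 * (sign-stripped) words decide the compare, and only on a tie do the lower
 * words matter, always compared unsigned because they hold pure mantissa
 * bits. Hypothetical helper for illustration.
 */
#if 0
static int dbl_magnitude_less(unsigned int ap1, unsigned int ap2,
			      unsigned int bp1, unsigned int bp2)
{
	ap1 &= 0x7fffffff;	/* strip the sign bits */
	bp1 &= 0x7fffffff;
	return (ap1 < bp1) || (ap1 == bp1 && ap2 < bp2);
}
#endif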
| linux-master | arch/parisc/math-emu/dfcmp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfadd.c $Revision: 1.1 $
*
* Purpose:
* Double_add: add two double precision values.
*
* External Interfaces:
* dbl_fadd(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
* Theory:
 *	Aligns the smaller operand to the larger, adds or subtracts the
 *	two-word mantissas according to the operand signs, renormalizes
 *	the result, and rounds per the current rounding mode.
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double_add: add two double precision values.
*/
int
dbl_fadd(
dbl_floating_point *leftptr,
dbl_floating_point *rightptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int signless_upper_left, signless_upper_right, save;
register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
register unsigned int resultp1 = 0, resultp2 = 0;
register int result_exponent, right_exponent, diff_exponent;
register int sign_save, jumpsize;
register boolean inexact = FALSE;
register boolean underflowtrap;
/* Create local copies of the numbers */
Dbl_copyfromptr(leftptr,leftp1,leftp2);
Dbl_copyfromptr(rightptr,rightp1,rightp2);
/* A zero "save" helps discover equal operands (for later), *
* and is used in swapping operands (if needed). */
Dbl_xortointp1(leftp1,rightp1,/*to*/save);
/*
* check first operand for NaN's or infinity
*/
if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
{
if (Dbl_iszero_mantissa(leftp1,leftp2))
{
if (Dbl_isnotnan(rightp1,rightp2))
{
if (Dbl_isinfinity(rightp1,rightp2) && save!=0)
{
/*
	 * invalid since the operands are opposite-signed infinities
*/
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
}
else
{
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(leftp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(leftp1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(rightp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(rightp1);
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
} /* End left NaN or Infinity processing */
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(rightp1))
{
if (Dbl_iszero_mantissa(rightp1,rightp2))
{
/* return infinity */
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(rightp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(rightp1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
} /* End right NaN or Infinity processing */
/* Invariant: Must be dealing with finite numbers */
/* Compare operands by removing the sign */
Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
/* sign difference selects add or sub operation. */
if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
{
/* Set the left operand to the larger one by XOR swap *
* First finish the first word using "save" */
Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
Dbl_swap_lower(leftp2,rightp2);
result_exponent = Dbl_exponent(leftp1);
}
/* Invariant: left is not smaller than right. */
if((right_exponent = Dbl_exponent(rightp1)) == 0)
{
/* Denormalized operands. First look for zeroes */
if(Dbl_iszero_mantissa(rightp1,rightp2))
{
/* right is zero */
if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
{
/* Both operands are zeros */
if(Is_rounding_mode(ROUNDMINUS))
{
Dbl_or_signs(leftp1,/*with*/rightp1);
}
else
{
Dbl_and_signs(leftp1,/*with*/rightp1);
}
}
else
{
/* Left is not a zero and must be the result. Trapped
* underflows are signaled if left is denormalized. Result
* is always exact. */
if( (result_exponent == 0) && Is_underflowtrap_enabled() )
{
		/* need to normalize the result's mantissa */
sign_save = Dbl_signextendedsign(leftp1);
Dbl_leftshiftby1(leftp1,leftp2);
Dbl_normalize(leftp1,leftp2,result_exponent);
Dbl_set_sign(leftp1,/*using*/sign_save);
Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
Dbl_copytoptr(leftp1,leftp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
}
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
/* Neither are zeroes */
Dbl_clear_sign(rightp1); /* Exponent is already cleared */
if(result_exponent == 0 )
{
/* Both operands are denormalized. The result must be exact
* and is simply calculated. A sum could become normalized and a
* difference could cancel to a true zero. */
if( (/*signed*/int) save < 0 )
{
Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
/*into*/resultp1,resultp2);
if(Dbl_iszero_mantissa(resultp1,resultp2))
{
if(Is_rounding_mode(ROUNDMINUS))
{
Dbl_setone_sign(resultp1);
}
else
{
Dbl_setzero_sign(resultp1);
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else
{
Dbl_addition(leftp1,leftp2,rightp1,rightp2,
/*into*/resultp1,resultp2);
if(Dbl_isone_hidden(resultp1))
{
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
if(Is_underflowtrap_enabled())
{
/* need to normalize result */
sign_save = Dbl_signextendedsign(resultp1);
Dbl_leftshiftby1(resultp1,resultp2);
Dbl_normalize(resultp1,resultp2,result_exponent);
Dbl_set_sign(resultp1,/*using*/sign_save);
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denormalized numbers. */
}
else
{
Dbl_clear_signexponent_set_hidden(rightp1);
}
Dbl_clear_exponent_set_hidden(leftp1);
diff_exponent = result_exponent - right_exponent;
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for this
* infrequent case.
*/
if(diff_exponent > DBL_THRESHOLD)
{
diff_exponent = DBL_THRESHOLD;
}
/* Align right operand by shifting to right */
Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
/*and lower to*/extent);
/* Treat sum and difference of the operands separately. */
if( (/*signed*/int) save < 0 )
{
/*
	 * Difference of the two operands. There can be no overflow. A
* borrow can occur out of the hidden bit and force a post
* normalization phase.
*/
Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
/*with*/extent,/*into*/resultp1,resultp2);
if(Dbl_iszero_hidden(resultp1))
{
/* Handle normalization */
	 * A straightforward algorithm would now shift the result
* and extension left until the hidden bit becomes one. Not
* all of the extension bits need participate in the shift.
* Only the two most significant bits (round and guard) are
* needed. If only a single shift is needed then the guard
* bit becomes a significant low order bit and the extension
* must participate in the rounding. If more than a single
* shift is needed, then all bits to the right of the guard
* bit are zeros, and the guard bit may or may not be zero. */
sign_save = Dbl_signextendedsign(resultp1);
Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
/* Need to check for a zero result. The sign and exponent
* fields have already been zeroed. The more efficient test
* of the full object can be used.
*/
if(Dbl_iszero(resultp1,resultp2))
/* Must have been "x-x" or "x+(-x)". */
{
if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if(Dbl_isone_hidden(resultp1))
{
if(result_exponent==0)
{
/* Denormalized, exponent should be zero. Left operand *
* was normalized, so extent (guard, round) was zero */
goto underflow;
}
else
{
/* No further normalization is needed. */
Dbl_set_sign(resultp1,/*using*/sign_save);
Ext_leftshiftby1(extent);
goto round;
}
}
/* Check for denormalized, exponent should be zero. Left *
* operand was normalized, so extent (guard, round) was zero */
if(!(underflowtrap = Is_underflowtrap_enabled()) &&
result_exponent==0) goto underflow;
/* Shift extension to complete one bit of normalization and
* update exponent. */
Ext_leftshiftby1(extent);
/* Discover first one bit to determine shift amount. Use a
	 * modified binary search. We have already shifted the result
	 * one position left and still not found a one, so the remainder
	 * of the extension must be zero, which simplifies rounding. */
/* Scan bytes */
while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
{
Dbl_leftshiftby8(resultp1,resultp2);
if((result_exponent -= 8) <= 0 && !underflowtrap)
goto underflow;
}
/* Now narrow it down to the nibble */
if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
{
/* The lower nibble contains the normalizing one */
Dbl_leftshiftby4(resultp1,resultp2);
if((result_exponent -= 4) <= 0 && !underflowtrap)
goto underflow;
}
	    /* Select case where the first bit is set (already normalized)
* otherwise select the proper shift. */
if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
{
/* Already normalized */
if(result_exponent <= 0) goto underflow;
Dbl_set_sign(resultp1,/*using*/sign_save);
Dbl_set_exponent(resultp1,/*using*/result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dbl_sethigh4bits(resultp1,/*using*/sign_save);
switch(jumpsize)
{
case 1:
{
Dbl_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
}
case 2:
case 3:
{
Dbl_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
}
case 4:
case 5:
case 6:
case 7:
{
Dbl_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
}
if(result_exponent > 0)
{
Dbl_set_exponent(resultp1,/*using*/result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION); /* Sign bit is already set */
}
/* Fixup potential underflows */
underflow:
if(Is_underflowtrap_enabled())
{
Dbl_set_sign(resultp1,sign_save);
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
/*
* Since we cannot get an inexact denormalized result,
* we can now return.
*/
Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
Dbl_clear_signexponent(resultp1);
Dbl_set_sign(resultp1,sign_save);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
} /* end if(hidden...)... */
/* Fall through and round */
} /* end if(save < 0)... */
else
{
/* Add magnitudes */
Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
if(Dbl_isone_hiddenoverflow(resultp1))
{
/* Prenormalization required. */
Dbl_rightshiftby1_withextent(resultp2,extent,extent);
Dbl_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
} /* end else ...add magnitudes... */
	/* Round the result. If the extension is all zeros, then the result is
* exact. Otherwise round in the correct direction. No underflow is
* possible. If a postnormalization is necessary, then the mantissa is
* all zeros so no shift is needed. */
round:
if(Ext_isnotzero(extent))
{
inexact = TRUE;
switch(Rounding_mode())
{
case ROUNDNEAREST: /* The default. */
if(Ext_isone_sign(extent))
{
/* at least 1/2 ulp */
if(Ext_isnotzero_lower(extent) ||
Dbl_isone_lowmantissap2(resultp2))
{
		/* either exactly halfway and odd, or more than 1/2 ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if(Dbl_iszero_sign(resultp1))
{
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if(Dbl_isone_sign(resultp1))
{
/* Round down negative results */
Dbl_increment(resultp1,resultp2);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if(result_exponent == DBL_INFINITY_EXPONENT)
{
/* Overflow */
if(Is_overflowtrap_enabled())
{
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
else
{
inexact = TRUE;
Set_overflowflag();
Dbl_setoverflow(resultp1,resultp2);
}
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if(inexact)
if(Is_inexacttrap_enabled())
return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
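/*
 * A compiled-out, single-word sketch of the byte/nibble/bit "modified
 * binary search" normalization above: scan in byte steps, narrow to a
 * nibble, then finish bit by bit. The word must be nonzero (the zero
 * result was already returned earlier); the helper is an illustrative
 * assumption.
 */
#if 0
static int leading_zero_shift(unsigned int mantissa)
{
	int shift = 0;

	while (!(mantissa & 0xff000000)) {	/* scan bytes */
		mantissa <<= 8;
		shift += 8;
	}
	if (!(mantissa & 0xf0000000)) {		/* narrow to the nibble */
		mantissa <<= 4;
		shift += 4;
	}
	while (!(mantissa & 0x80000000)) {	/* finish bit by bit */
		mantissa <<= 1;
		shift++;
	}
	return shift;
}
#endif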
| linux-master | arch/parisc/math-emu/dfadd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfsub.c $Revision: 1.1 $
*
* Purpose:
* Double_subtract: subtract two double precision values.
*
* External Interfaces:
* dbl_fsub(leftptr, rightptr, dstptr, status)
*
* Internal Interfaces:
*
 *	Identical in structure to dbl_fadd, but with the effective sign of
 *	the right operand inverted before the magnitude add or subtract.
* <<please update with a overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double_subtract: subtract two double precision values.
*/
int
dbl_fsub(
dbl_floating_point *leftptr,
dbl_floating_point *rightptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int signless_upper_left, signless_upper_right, save;
register unsigned int leftp1, leftp2, rightp1, rightp2, extent;
register unsigned int resultp1 = 0, resultp2 = 0;
register int result_exponent, right_exponent, diff_exponent;
register int sign_save, jumpsize;
register boolean inexact = FALSE, underflowtrap;
/* Create local copies of the numbers */
Dbl_copyfromptr(leftptr,leftp1,leftp2);
Dbl_copyfromptr(rightptr,rightp1,rightp2);
/* A zero "save" helps discover equal operands (for later), *
* and is used in swapping operands (if needed). */
Dbl_xortointp1(leftp1,rightp1,/*to*/save);
/*
* check first operand for NaN's or infinity
*/
if ((result_exponent = Dbl_exponent(leftp1)) == DBL_INFINITY_EXPONENT)
{
if (Dbl_iszero_mantissa(leftp1,leftp2))
{
if (Dbl_isnotnan(rightp1,rightp2))
{
if (Dbl_isinfinity(rightp1,rightp2) && save==0)
{
/*
	 * invalid since the operands are same-signed infinities
*/
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
}
else
{
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(leftp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(leftp1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(rightp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(rightp1);
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
} /* End left NaN or Infinity processing */
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(rightp1))
{
if (Dbl_iszero_mantissa(rightp1,rightp2))
{
/* return infinity */
Dbl_invert_sign(rightp1);
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(rightp1))
{
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(rightp1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(rightp1,rightp2,dstptr);
return(NOEXCEPTION);
} /* End right NaN or Infinity processing */
/* Invariant: Must be dealing with finite numbers */
/* Compare operands by removing the sign */
Dbl_copytoint_exponentmantissap1(leftp1,signless_upper_left);
Dbl_copytoint_exponentmantissap1(rightp1,signless_upper_right);
/* sign difference selects add or sub operation. */
if(Dbl_ismagnitudeless(leftp2,rightp2,signless_upper_left,signless_upper_right))
{
/* Set the left operand to the larger one by XOR swap *
* First finish the first word using "save" */
Dbl_xorfromintp1(save,rightp1,/*to*/rightp1);
Dbl_xorfromintp1(save,leftp1,/*to*/leftp1);
Dbl_swap_lower(leftp2,rightp2);
result_exponent = Dbl_exponent(leftp1);
Dbl_invert_sign(leftp1);
}
/* Invariant: left is not smaller than right. */
if((right_exponent = Dbl_exponent(rightp1)) == 0)
{
/* Denormalized operands. First look for zeroes */
if(Dbl_iszero_mantissa(rightp1,rightp2))
{
/* right is zero */
if(Dbl_iszero_exponentmantissa(leftp1,leftp2))
{
/* Both operands are zeros */
Dbl_invert_sign(rightp1);
if(Is_rounding_mode(ROUNDMINUS))
{
Dbl_or_signs(leftp1,/*with*/rightp1);
}
else
{
Dbl_and_signs(leftp1,/*with*/rightp1);
}
}
else
{
/* Left is not a zero and must be the result. Trapped
* underflows are signaled if left is denormalized. Result
* is always exact. */
if( (result_exponent == 0) && Is_underflowtrap_enabled() )
{
		/* need to normalize the result's mantissa */
sign_save = Dbl_signextendedsign(leftp1);
Dbl_leftshiftby1(leftp1,leftp2);
Dbl_normalize(leftp1,leftp2,result_exponent);
Dbl_set_sign(leftp1,/*using*/sign_save);
Dbl_setwrapped_exponent(leftp1,result_exponent,unfl);
Dbl_copytoptr(leftp1,leftp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
}
Dbl_copytoptr(leftp1,leftp2,dstptr);
return(NOEXCEPTION);
}
/* Neither are zeroes */
Dbl_clear_sign(rightp1); /* Exponent is already cleared */
if(result_exponent == 0 )
{
/* Both operands are denormalized. The result must be exact
* and is simply calculated. A sum could become normalized and a
* difference could cancel to a true zero. */
if( (/*signed*/int) save >= 0 )
{
Dbl_subtract(leftp1,leftp2,/*minus*/rightp1,rightp2,
/*into*/resultp1,resultp2);
if(Dbl_iszero_mantissa(resultp1,resultp2))
{
if(Is_rounding_mode(ROUNDMINUS))
{
Dbl_setone_sign(resultp1);
}
else
{
Dbl_setzero_sign(resultp1);
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else
{
Dbl_addition(leftp1,leftp2,rightp1,rightp2,
/*into*/resultp1,resultp2);
if(Dbl_isone_hidden(resultp1))
{
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
if(Is_underflowtrap_enabled())
{
/* need to normalize result */
sign_save = Dbl_signextendedsign(resultp1);
Dbl_leftshiftby1(resultp1,resultp2);
Dbl_normalize(resultp1,resultp2,result_exponent);
Dbl_set_sign(resultp1,/*using*/sign_save);
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denormalized numbers. */
}
else
{
Dbl_clear_signexponent_set_hidden(rightp1);
}
Dbl_clear_exponent_set_hidden(leftp1);
diff_exponent = result_exponent - right_exponent;
/*
* Special case alignment of operands that would force alignment
* beyond the extent of the extension. A further optimization
* could special case this but only reduces the path length for this
* infrequent case.
*/
if(diff_exponent > DBL_THRESHOLD)
{
diff_exponent = DBL_THRESHOLD;
}
/* Align right operand by shifting to right */
Dbl_right_align(/*operand*/rightp1,rightp2,/*shifted by*/diff_exponent,
/*and lower to*/extent);
/* Treat sum and difference of the operands separately. */
if( (/*signed*/int) save >= 0 )
{
/*
	 * Difference of the two operands. There can be no overflow. A
* borrow can occur out of the hidden bit and force a post
* normalization phase.
*/
Dbl_subtract_withextension(leftp1,leftp2,/*minus*/rightp1,rightp2,
/*with*/extent,/*into*/resultp1,resultp2);
if(Dbl_iszero_hidden(resultp1))
{
/* Handle normalization */
	 * A straightforward algorithm would now shift the result
* and extension left until the hidden bit becomes one. Not
* all of the extension bits need participate in the shift.
* Only the two most significant bits (round and guard) are
* needed. If only a single shift is needed then the guard
* bit becomes a significant low order bit and the extension
* must participate in the rounding. If more than a single
* shift is needed, then all bits to the right of the guard
* bit are zeros, and the guard bit may or may not be zero. */
sign_save = Dbl_signextendedsign(resultp1);
Dbl_leftshiftby1_withextent(resultp1,resultp2,extent,resultp1,resultp2);
/* Need to check for a zero result. The sign and exponent
* fields have already been zeroed. The more efficient test
* of the full object can be used.
*/
if(Dbl_iszero(resultp1,resultp2))
/* Must have been "x-x" or "x+(-x)". */
{
if(Is_rounding_mode(ROUNDMINUS)) Dbl_setone_sign(resultp1);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
result_exponent--;
/* Look to see if normalization is finished. */
if(Dbl_isone_hidden(resultp1))
{
if(result_exponent==0)
{
/* Denormalized, exponent should be zero. Left operand *
* was normalized, so extent (guard, round) was zero */
goto underflow;
}
else
{
/* No further normalization is needed. */
Dbl_set_sign(resultp1,/*using*/sign_save);
Ext_leftshiftby1(extent);
goto round;
}
}
/* Check for denormalized, exponent should be zero. Left *
* operand was normalized, so extent (guard, round) was zero */
if(!(underflowtrap = Is_underflowtrap_enabled()) &&
result_exponent==0) goto underflow;
/* Shift extension to complete one bit of normalization and
* update exponent. */
Ext_leftshiftby1(extent);
/* Discover first one bit to determine shift amount. Use a
	 * modified binary search. We have already shifted the result
	 * one position left and still not found a one, so the remainder
	 * of the extension must be zero, which simplifies rounding. */
/* Scan bytes */
while(Dbl_iszero_hiddenhigh7mantissa(resultp1))
{
Dbl_leftshiftby8(resultp1,resultp2);
if((result_exponent -= 8) <= 0 && !underflowtrap)
goto underflow;
}
/* Now narrow it down to the nibble */
if(Dbl_iszero_hiddenhigh3mantissa(resultp1))
{
/* The lower nibble contains the normalizing one */
Dbl_leftshiftby4(resultp1,resultp2);
if((result_exponent -= 4) <= 0 && !underflowtrap)
goto underflow;
}
	    /* Select case where the first bit is set (already normalized)
* otherwise select the proper shift. */
if((jumpsize = Dbl_hiddenhigh3mantissa(resultp1)) > 7)
{
/* Already normalized */
if(result_exponent <= 0) goto underflow;
Dbl_set_sign(resultp1,/*using*/sign_save);
Dbl_set_exponent(resultp1,/*using*/result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dbl_sethigh4bits(resultp1,/*using*/sign_save);
switch(jumpsize)
{
case 1:
{
Dbl_leftshiftby3(resultp1,resultp2);
result_exponent -= 3;
break;
}
case 2:
case 3:
{
Dbl_leftshiftby2(resultp1,resultp2);
result_exponent -= 2;
break;
}
case 4:
case 5:
case 6:
case 7:
{
Dbl_leftshiftby1(resultp1,resultp2);
result_exponent -= 1;
break;
}
}
if(result_exponent > 0)
{
Dbl_set_exponent(resultp1,/*using*/result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION); /* Sign bit is already set */
}
/* Fixup potential underflows */
underflow:
if(Is_underflowtrap_enabled())
{
Dbl_set_sign(resultp1,sign_save);
Dbl_setwrapped_exponent(resultp1,result_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* inexact = FALSE */
return(UNDERFLOWEXCEPTION);
}
/*
* Since we cannot get an inexact denormalized result,
* we can now return.
*/
Dbl_fix_overshift(resultp1,resultp2,(1-result_exponent),extent);
Dbl_clear_signexponent(resultp1);
Dbl_set_sign(resultp1,sign_save);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
} /* end if(hidden...)... */
/* Fall through and round */
} /* end if(save >= 0)... */
else
{
	    /* Add magnitudes */
Dbl_addition(leftp1,leftp2,rightp1,rightp2,/*to*/resultp1,resultp2);
if(Dbl_isone_hiddenoverflow(resultp1))
{
/* Prenormalization required. */
Dbl_rightshiftby1_withextent(resultp2,extent,extent);
Dbl_arithrightshiftby1(resultp1,resultp2);
result_exponent++;
} /* end if hiddenoverflow... */
	    } /* end else ...add magnitudes... */
	/* Round the result. If the extension is all zeros, then the result is
* exact. Otherwise round in the correct direction. No underflow is
* possible. If a postnormalization is necessary, then the mantissa is
* all zeros so no shift is needed. */
round:
if(Ext_isnotzero(extent))
{
inexact = TRUE;
switch(Rounding_mode())
{
case ROUNDNEAREST: /* The default. */
if(Ext_isone_sign(extent))
{
/* at least 1/2 ulp */
if(Ext_isnotzero_lower(extent) ||
Dbl_isone_lowmantissap2(resultp2))
{
		/* either exactly halfway and odd, or more than 1/2 ulp */
Dbl_increment(resultp1,resultp2);
}
}
break;
case ROUNDPLUS:
if(Dbl_iszero_sign(resultp1))
{
/* Round up positive results */
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if(Dbl_isone_sign(resultp1))
{
/* Round down negative results */
Dbl_increment(resultp1,resultp2);
}
case ROUNDZERO:;
/* truncate is simple */
} /* end switch... */
if(Dbl_isone_hiddenoverflow(resultp1)) result_exponent++;
}
if(result_exponent == DBL_INFINITY_EXPONENT)
{
/* Overflow */
if(Is_overflowtrap_enabled())
{
Dbl_setwrapped_exponent(resultp1,result_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
else
{
inexact = TRUE;
Set_overflowflag();
Dbl_setoverflow(resultp1,resultp2);
}
}
else Dbl_set_exponent(resultp1,result_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if(inexact)
if(Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
return(NOEXCEPTION);
}
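/*
 * A compiled-out, one-word sketch of the alignment step above: the bits
 * shifted out of the smaller operand are captured in an extension word
 * whose top bits become guard and round and whose remainder is sticky.
 * Assumes 1 <= shift <= 31; the callers clamp larger shifts with the
 * SGL_THRESHOLD/DBL_THRESHOLD test before aligning. Illustration only;
 * the real Dbl_right_align spans two mantissa words.
 */
#if 0
static void right_align(unsigned int *mantissa, int shift,
			unsigned int *extension)
{
	*extension = *mantissa << (32 - shift);	/* guard, round, sticky bits */
	*mantissa >>= shift;
}
#endif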
| linux-master | arch/parisc/math-emu/dfsub.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfx.c $Revision: 1.1 $
*
* Purpose:
* Single Floating-point to Single Fixed-point
* Single Floating-point to Double Fixed-point
* Double Floating-point to Single Fixed-point
* Double Floating-point to Double Fixed-point
*
* External Interfaces:
* dbl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfx(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfx(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	Converts floating-point operands to fixed-point integers, screening
 *	for out-of-range values (which saturate and raise the invalid
 *	exception) and rounding inexact results per the current mode.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Single Floating-point to Single Fixed-point
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfx(
sgl_floating_point *srcptr,
sgl_floating_point *nullptr,
int *dstptr,
sgl_floating_point *status)
{
register unsigned int src, temp;
register int src_exponent, result;
register boolean inexact = FALSE;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Int_from_sgl_mantissa(temp,src_exponent);
if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
else result = Sgl_all(temp);
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) result++;
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) result--;
break;
case ROUNDNEAREST:
if (Sgl_isone_roundbit(src,src_exponent)) {
if (Sgl_isone_stickybit(src,src_exponent)
|| (Sgl_isone_lowmantissa(temp)))
if (Sgl_iszero_sign(src)) result++;
else result--;
}
}
}
}
else {
result = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) result++;
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) result--;
break;
case ROUNDNEAREST:
if (src_exponent == -1)
if (Sgl_isnotzero_mantissa(src))
if (Sgl_iszero_sign(src)) result++;
else result--;
}
}
}
*dstptr = result;
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
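/*
 * Illustrative sketch (not part of the original kernel file): the same
 * conversion expressed with hosted C99 primitives, assuming traps are
 * left disabled.  rintf() rounds in the current <fenv.h> mode, which
 * plays the role of Rounding_mode() above; out-of-range values and
 * NaNs correspond to the invalid-operation path.
 */
#include <fenv.h>
#include <limits.h>
#include <math.h>

static int fcnvfx_sketch(float x, int *invalid)
{
	float r = rintf(x);	/* round to integral, current mode */

	/* 2147483520.0f is the largest float below 2^31; a NaN also
	   fails this range test */
	if (!(r >= (float)INT_MIN && r <= 2147483520.0f)) {
		*invalid = 1;
		return x < 0 ? INT_MIN : INT_MAX;
	}
	*invalid = 0;
	return (int)r;
}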
/*
* Single Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfx(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int src, temp, resultp2;
register boolean inexact = FALSE;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dint_set_minint(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
if (Sgl_isone_sign(src)) {
Dint_setone_sign(resultp1,resultp2);
}
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) {
Dint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) {
Dint_decrement(resultp1,resultp2);
}
break;
case ROUNDNEAREST:
if (Sgl_isone_roundbit(src,src_exponent))
if (Sgl_isone_stickybit(src,src_exponent) ||
(Dint_isone_lowp2(resultp2)))
if (Sgl_iszero_sign(src)) {
Dint_increment(resultp1,resultp2);
}
else {
Dint_decrement(resultp1,resultp2);
}
}
}
}
else {
Dint_setzero(resultp1,resultp2);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) {
Dint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) {
Dint_decrement(resultp1,resultp2);
}
break;
case ROUNDNEAREST:
if (src_exponent == -1)
if (Sgl_isnotzero_mantissa(src))
if (Sgl_iszero_sign(src)) {
Dint_increment(resultp1,resultp2);
}
else {
Dint_decrement(resultp1,resultp2);
}
}
}
}
Dint_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfx(
dbl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int srcp1,srcp2, tempp1,tempp2;
register int src_exponent, result;
register boolean inexact = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
result = -Dbl_allp1(tempp1);
else result = Dbl_allp1(tempp1);
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) result++;
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) result--;
break;
case ROUNDNEAREST:
if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
(Dbl_isone_lowmantissap1(tempp1)))
if (Dbl_iszero_sign(srcp1)) result++;
else result--;
}
/* check for overflow */
if ((Dbl_iszero_sign(srcp1) && result < 0) ||
(Dbl_isone_sign(srcp1) && result > 0)) {
if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
}
else {
result = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) result++;
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) result--;
break;
case ROUNDNEAREST:
if (src_exponent == -1)
if (Dbl_isnotzero_mantissa(srcp1,srcp2))
if (Dbl_iszero_sign(srcp1)) result++;
else result--;
}
}
}
*dstptr = result;
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
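/*
 * Illustrative note (not part of the original kernel file): the "check
 * for overflow" above relies on two's-complement wrap-around.  If the
 * rounding adjustment pushes a value with a known source sign across
 * the int boundary, the result's sign bit flips; a minimal
 * restatement:
 */
static int rounding_overflowed(int src_is_negative, int result)
{
	/* a non-negative source must round to a non-negative int,
	   and a negative source to a non-positive one */
	return (!src_is_negative && result < 0) ||
	       (src_is_negative && result > 0);
}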
/*
* Double Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfx(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
register boolean inexact = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
if (Dbl_iszero_sign(srcp1)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,resultp1,
resultp2);
if (Dbl_isone_sign(srcp1)) {
Dint_setone_sign(resultp1,resultp2);
}
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) {
Dint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) {
Dint_decrement(resultp1,resultp2);
}
break;
case ROUNDNEAREST:
if (Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
if (Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
(Dint_isone_lowp2(resultp2)))
if (Dbl_iszero_sign(srcp1)) {
Dint_increment(resultp1,resultp2);
}
else {
Dint_decrement(resultp1,resultp2);
}
}
}
}
else {
Dint_setzero(resultp1,resultp2);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) {
Dint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) {
Dint_decrement(resultp1,resultp2);
}
break;
case ROUNDNEAREST:
if (src_exponent == -1)
if (Dbl_isnotzero_mantissa(srcp1,srcp2))
if (Dbl_iszero_sign(srcp1)) {
Dint_increment(resultp1,resultp2);
}
else {
Dint_decrement(resultp1,resultp2);
}
}
}
}
Dint_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvfx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfrem.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Remainder
*
* External Interfaces:
* dbl_frem(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	After screening NaNs, infinities and zeros, the remainder is
 *	formed by exponent-aligned repeated subtraction of the divisor's
 *	significand, with a final sign correction so the result matches
 *	the IEEE-754 remainder (implicit quotient rounded to nearest).
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Remainder
*/
int
dbl_frem (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
dbl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int resultp1, resultp2;
register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
register boolean roundup = FALSE;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* check first operand for NaN's or infinity
*/
if ((opnd1_exponent = Dbl_exponent(opnd1p1)) == DBL_INFINITY_EXPONENT) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
/* invalid since first operand is infinity */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if ((opnd2_exponent = Dbl_exponent(opnd2p1)) == DBL_INFINITY_EXPONENT) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* return first operand
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* check second operand for zero
*/
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/* invalid since second operand is zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* get sign of result
*/
resultp1 = opnd1p1;
/*
* check for denormalized operands
*/
if (opnd1_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
/* normalize, then continue */
opnd1_exponent = 1;
Dbl_normalize(opnd1p1,opnd1p2,opnd1_exponent);
}
else {
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
if (opnd2_exponent == 0) {
/* normalize, then continue */
opnd2_exponent = 1;
Dbl_normalize(opnd2p1,opnd2p2,opnd2_exponent);
}
else {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
/* find result exponent and divide step loop count */
dest_exponent = opnd2_exponent - 1;
stepcount = opnd1_exponent - opnd2_exponent;
/*
* check for opnd1/opnd2 < 1
*/
if (stepcount < 0) {
/*
* check for opnd1/opnd2 > 1/2
*
* In this case n will round to 1, so
* r = opnd1 - opnd2
*/
if (stepcount == -1 &&
Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
/* set sign */
Dbl_allp1(resultp1) = ~Dbl_allp1(resultp1);
/* align opnd2 with opnd1 */
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,
opnd2p1,opnd2p2);
/* now normalize */
while (Dbl_iszero_hidden(opnd2p1)) {
Dbl_leftshiftby1(opnd2p1,opnd2p2);
dest_exponent--;
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd2p1,opnd2p2);
goto testforunderflow;
}
/*
* opnd1/opnd2 <= 1/2
*
* In this case n will round to zero, so
* r = opnd1
*/
Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
dest_exponent = opnd1_exponent;
goto testforunderflow;
}
/*
* Generate result
*
* Do iterative subtract until remainder is less than operand 2.
*/
while (stepcount-- > 0 && (Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2))) {
if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
}
Dbl_leftshiftby1(opnd1p1,opnd1p2);
}
/*
* Do last subtract, then determine which way to round if remainder
* is exactly 1/2 of opnd2
*/
if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
roundup = TRUE;
}
if (stepcount > 0 || Dbl_iszero(opnd1p1,opnd1p2)) {
/* division is exact, remainder is zero */
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Check for cases where opnd1/opnd2 < n
*
* In this case the result's sign will be opposite that of
* opnd1. The mantissa also needs some correction.
*/
Dbl_leftshiftby1(opnd1p1,opnd1p2);
if (Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_invert_sign(resultp1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,opnd1p1,opnd1p2);
}
/* check for remainder being exactly 1/2 of opnd2 */
else if (Dbl_isequal(opnd1p1,opnd1p2,opnd2p1,opnd2p2) && roundup) {
Dbl_invert_sign(resultp1);
}
/* normalize result's mantissa */
while (Dbl_iszero_hidden(opnd1p1)) {
dest_exponent--;
Dbl_leftshiftby1(opnd1p1,opnd1p2);
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
/*
* Test for underflow
*/
testforunderflow:
if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
/* frem is always exact */
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(UNDERFLOWEXCEPTION);
}
/*
* denormalize result or set to signed zero
*/
if (dest_exponent >= (1 - DBL_P)) {
Dbl_rightshift_exponentmantissa(resultp1,resultp2,
1-dest_exponent);
}
else {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
}
}
else Dbl_set_exponent(resultp1,dest_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
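/*
 * Illustrative sketch (not part of the original kernel file): the
 * operation above is the IEEE-754 remainder, exposed in hosted C99 as
 * remainder().  Unlike fmod(), the implicit quotient is rounded to the
 * nearest integer (ties to even), so the result can be negative even
 * for positive operands; those are the sign-flip cases handled above.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	printf("%g\n", remainder(5.0, 3.0));	/* -1: quotient rounds to 2 */
	printf("%g\n", fmod(5.0, 3.0));		/*  2: quotient truncates to 1 */
	return 0;
}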
| linux-master | arch/parisc/math-emu/dfrem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfu.c $Revision: 1.1 $
*
* Purpose:
* Floating-point to Unsigned Fixed-point Converts
*
* External Interfaces:
* dbl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfu(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfu(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	Like the signed fcnvfx conversions, but the destination is
 *	unsigned: negative sources that round to a nonzero magnitude
 *	signal an invalid operation instead of producing a result.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/************************************************************************
* Floating-point to Unsigned Fixed-point Converts *
************************************************************************/
/*
* Single Floating-point to Single Unsigned Fixed
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfu(
sgl_floating_point *srcptr,
unsigned int *nullptr,
unsigned int *dstptr,
unsigned int *status)
{
register unsigned int src, result;
register int src_exponent;
register boolean inexact = FALSE;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP + 1) {
if (Sgl_isone_sign(src)) {
result = 0;
} else {
result = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, trap unimplemented.
*/
if (Sgl_isone_sign(src)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
Sgl_clear_signexponent_set_hidden(src);
Suint_from_sgl_mantissa(src,src_exponent,result);
/* check for inexact */
if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
result++;
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
if (Sgl_isone_roundbit(src,src_exponent) &&
(Sgl_isone_stickybit(src,src_exponent) ||
(result & 1))) {
result++;
}
break;
}
}
} else {
result = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) {
result++;
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
break;
case ROUNDNEAREST:
if (src_exponent == -1 &&
Sgl_isnotzero_mantissa(src)) {
if (Sgl_isone_sign(src)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
else result++;
}
break;
}
}
}
*dstptr = result;
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
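/*
 * Illustrative sketch (not part of the original kernel file): a hosted
 * C analogue of the conversion above with traps left disabled.
 * Negative values that round to a nonzero magnitude take the
 * invalid-operation path, mirroring the Set_invalidflag() cases.  (The
 * real routine's NaN result is sign-dependent; this sketch is not.)
 */
#include <math.h>

static unsigned int fcnvfu_sketch(float x, int *invalid)
{
	float r = rintf(x);	/* round to integral, current mode */

	/* 4294967040.0f is the largest float below 2^32; a NaN fails
	   both comparisons and lands in the invalid path too */
	if (!(r >= 0.0f && r <= 4294967040.0f)) {
		*invalid = 1;
		return r < 0.0f ? 0u : 0xffffffffu;
	}
	*invalid = 0;
	return (unsigned int)r;
}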
/*
* Single Floating-point to Double Unsigned Fixed
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfu(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_unsigned *dstptr,
unsigned int *status)
{
register int src_exponent;
register unsigned int src, resultp1, resultp2;
register boolean inexact = FALSE;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP + 1) {
if (Sgl_isone_sign(src)) {
resultp1 = resultp2 = 0;
} else {
resultp1 = resultp2 = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, trap unimplemented.
*/
if (Sgl_isone_sign(src)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Sgl_clear_signexponent_set_hidden(src);
Duint_from_sgl_mantissa(src,src_exponent,resultp1,resultp2);
/* check for inexact */
if (Sgl_isinexact_to_unsigned(src,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
Duint_increment(resultp1,resultp2);
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
if (Sgl_isone_roundbit(src,src_exponent) &&
(Sgl_isone_stickybit(src,src_exponent) ||
Duint_isone_lowp2(resultp2))) {
Duint_increment(resultp1,resultp2);
}
break;
}
}
} else {
Duint_setzero(resultp1,resultp2);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(src)) {
Duint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(src)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
break;
case ROUNDNEAREST:
if (src_exponent == -1 &&
Sgl_isnotzero_mantissa(src)) {
if (Sgl_isone_sign(src)) {
resultp1 = 0;
resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
else Duint_increment(resultp1,resultp2);
}
}
}
}
Duint_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Unsigned Fixed
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
unsigned int *dstptr, unsigned int *status)
{
register unsigned int srcp1, srcp2, result;
register int src_exponent;
register boolean inexact = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP + 1) {
if (Dbl_isone_sign(srcp1)) {
result = 0;
} else {
result = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, trap unimplemented.
*/
if (Dbl_isone_sign(srcp1)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
Dbl_clear_signexponent_set_hidden(srcp1);
Suint_from_dbl_mantissa(srcp1,srcp2,src_exponent,result);
/* check for inexact */
if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
result++;
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent) &&
(Dbl_isone_stickybit(srcp1,srcp2,src_exponent)||
result&1))
result++;
break;
}
/* check for overflow */
if (result == 0) {
result = 0xffffffff;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
} else {
result = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) result++;
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
break;
case ROUNDNEAREST:
if (src_exponent == -1 &&
Dbl_isnotzero_mantissa(srcp1,srcp2))
if (Dbl_isone_sign(srcp1)) {
result = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
else result++;
}
}
}
*dstptr = result;
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Double Unsigned Fixed
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfu (dbl_floating_point * srcptr, unsigned int *nullptr,
dbl_unsigned * dstptr, unsigned int *status)
{
register int src_exponent;
register unsigned int srcp1, srcp2, resultp1, resultp2;
register boolean inexact = FALSE;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP + 1) {
if (Dbl_isone_sign(srcp1)) {
resultp1 = resultp2 = 0;
} else {
resultp1 = resultp2 = 0xffffffff;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
/*
* Check sign.
* If negative, trap unimplemented.
*/
if (Dbl_isone_sign(srcp1)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Duint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dbl_clear_signexponent_set_hidden(srcp1);
Duint_from_dbl_mantissa(srcp1,srcp2,src_exponent,resultp1,
resultp2);
/* check for inexact */
if (Dbl_isinexact_to_unsigned(srcp1,srcp2,src_exponent)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
Duint_increment(resultp1,resultp2);
break;
case ROUNDMINUS: /* never negative */
break;
case ROUNDNEAREST:
if(Dbl_isone_roundbit(srcp1,srcp2,src_exponent))
if(Dbl_isone_stickybit(srcp1,srcp2,src_exponent) ||
Duint_isone_lowp2(resultp2))
Duint_increment(resultp1,resultp2);
}
}
} else {
Duint_setzero(resultp1,resultp2);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
inexact = TRUE;
/* round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(srcp1)) {
Duint_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(srcp1)) {
resultp1 = resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
break;
case ROUNDNEAREST:
if (src_exponent == -1 &&
Dbl_isnotzero_mantissa(srcp1,srcp2))
if (Dbl_iszero_sign(srcp1)) {
Duint_increment(resultp1,resultp2);
} else {
resultp1 = 0;
resultp2 = 0;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
inexact = FALSE;
}
}
}
}
Duint_copytoptr(resultp1,resultp2,dstptr);
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvfu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfdiv.c $Revision: 1.1 $
*
* Purpose:
* Single Precision Floating-point Divide
*
* External Interfaces:
* sgl_fdiv(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	After screening special operands, the quotient significand is
 *	developed one bit per step with a non-restoring divide, then
 *	rounded and checked for overflow and underflow.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single Precision Floating-point Divide
*/
int
sgl_fdiv (sgl_floating_point * srcptr1, sgl_floating_point * srcptr2,
sgl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1, opnd2, opnd3, result;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
opnd1 = *srcptr1;
opnd2 = *srcptr2;
/*
* set sign bit of result
*/
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);
else Sgl_setzero(result);
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2)) {
if (Sgl_isinfinity(opnd2)) {
/*
* invalid since both operands
* are infinity
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = opnd1;
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
/*
* return zero
*/
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* return quiet NaN
*/
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* check for division by zero
*/
if (Sgl_iszero_exponentmantissa(opnd2)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/* invalid since both operands are zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
if (Is_divisionbyzerotrap_enabled())
return(DIVISIONBYZEROEXCEPTION);
Set_divisionbyzeroflag();
Sgl_setinfinity_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Sgl_exponent(opnd1) - Sgl_exponent(opnd2) + SGL_BIAS;
/*
* Generate mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,dest_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
while(Sgl_iszero_hiddenhigh7mantissa(opnd2)) {
Sgl_leftshiftby8(opnd2);
dest_exponent += 8;
}
if(Sgl_iszero_hiddenhigh3mantissa(opnd2)) {
Sgl_leftshiftby4(opnd2);
dest_exponent += 4;
}
while(Sgl_iszero_hidden(opnd2)) {
Sgl_leftshiftby1(opnd2);
dest_exponent += 1;
}
}
/* Divide the source mantissas */
/*
* A non_restoring divide algorithm is used.
*/
Sgl_subtract(opnd1,opnd2,opnd1);
Sgl_setzero(opnd3);
for (count=1;count<=SGL_P && Sgl_all(opnd1);count++) {
Sgl_leftshiftby1(opnd1);
Sgl_leftshiftby1(opnd3);
if (Sgl_iszero_sign(opnd1)) {
Sgl_setone_lowmantissa(opnd3);
Sgl_subtract(opnd1,opnd2,opnd1);
}
else Sgl_addition(opnd1,opnd2,opnd1);
}
if (count <= SGL_P) {
Sgl_leftshiftby1(opnd3);
Sgl_setone_lowmantissa(opnd3);
Sgl_leftshift(opnd3,SGL_P-count);
if (Sgl_iszero_hidden(opnd3)) {
Sgl_leftshiftby1(opnd3);
dest_exponent--;
}
}
else {
if (Sgl_iszero_hidden(opnd3)) {
/* need to get one more bit of result */
Sgl_leftshiftby1(opnd1);
Sgl_leftshiftby1(opnd3);
if (Sgl_iszero_sign(opnd1)) {
Sgl_setone_lowmantissa(opnd3);
Sgl_subtract(opnd1,opnd2,opnd1);
}
else Sgl_addition(opnd1,opnd2,opnd1);
dest_exponent--;
}
if (Sgl_iszero_sign(opnd1)) guardbit = TRUE;
stickybit = Sgl_all(opnd1);
}
inexact = guardbit | stickybit;
/*
* round result
*/
if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
Sgl_clear_signexponent(opnd3);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result))
Sgl_increment_mantissa(opnd3);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result))
Sgl_increment_mantissa(opnd3);
break;
case ROUNDNEAREST:
if (guardbit) {
if (stickybit || Sgl_isone_lowmantissa(opnd3))
Sgl_increment_mantissa(opnd3);
}
}
if (Sgl_isone_hidden(opnd3)) dest_exponent++;
}
Sgl_set_mantissa(result,opnd3);
/*
* Test for overflow
*/
if (dest_exponent >= SGL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
Set_overflowflag();
/* set result to infinity or largest number */
Sgl_setoverflow(result);
inexact = TRUE;
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,unfl);
*dstptr = result;
if (inexact)
if (Is_inexacttrap_enabled())
return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(UNDERFLOWEXCEPTION);
}
/* Determine if should set underflow flag */
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result)) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result)) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Sgl_isone_lowmantissa(opnd3))) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
/* return rounded number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result)) {
Sgl_increment(opnd3);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result)) {
Sgl_increment(opnd3);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Sgl_isone_lowmantissa(opnd3))) {
Sgl_increment(opnd3);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Sgl_set_exponentmantissa(result,opnd3);
}
else Sgl_set_exponent(result,dest_exponent);
*dstptr = result;
/* check for inexact */
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
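/*
 * Illustrative sketch (not part of the original kernel file): the same
 * non-restoring idea on plain 32-bit unsigned integers (divisor != 0
 * assumed).  As in the mantissa loop above, a negative partial
 * remainder is not restored; the next step adds the divisor back in
 * instead, and a single correction runs at the end.
 */
#include <stdint.h>

static uint32_t nonrestoring_udiv(uint32_t dividend, uint32_t divisor,
				  uint32_t *rem)
{
	int64_t r = 0;		/* signed partial remainder */
	uint32_t q = 0;
	int i;

	for (i = 31; i >= 0; i--) {
		int64_t bit = (dividend >> i) & 1;

		if (r >= 0)
			r = (r << 1) + bit - divisor;
		else
			r = (r << 1) + bit + divisor;	/* no restore step */
		q = (q << 1) | (r >= 0);	/* quotient bit from the sign */
	}
	if (r < 0)		/* one final restoring correction */
		r += divisor;
	*rem = (uint32_t)r;
	return q;
}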
| linux-master | arch/parisc/math-emu/sfdiv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfsqrt.c $Revision: 1.1 $
*
* Purpose:
* Single Floating-point Square Root
*
* External Interfaces:
* sgl_fsqrt(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	After screening special operands, the root significand is
 *	developed with a digit-by-digit shift-and-subtract loop and the
 *	unbiased exponent is halved, with parity handled by a one-bit
 *	pre-shift of the significand.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single Floating-point Square Root
*/
/*ARGSUSED*/
unsigned int
sgl_fsqrt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int src, result;
register int src_exponent;
register unsigned int newbit, sum;
register boolean guardbit = FALSE, even_exponent;
src = *srcptr;
/*
* check source operand for NaN or infinity
*/
if ((src_exponent = Sgl_exponent(src)) == SGL_INFINITY_EXPONENT) {
/*
* is signaling NaN?
*/
if (Sgl_isone_signaling(src)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(src);
}
/*
* Return quiet NaN or positive infinity.
* Fall through to negative test if negative infinity.
*/
if (Sgl_iszero_sign(src) || Sgl_isnotzero_mantissa(src)) {
*dstptr = src;
return(NOEXCEPTION);
}
}
/*
* check for zero source operand
*/
if (Sgl_iszero_exponentmantissa(src)) {
*dstptr = src;
return(NOEXCEPTION);
}
/*
* check for negative source operand
*/
if (Sgl_isone_sign(src)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_makequietnan(src);
*dstptr = src;
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent > 0) {
even_exponent = Sgl_hidden(src);
Sgl_clear_signexponent_set_hidden(src);
}
else {
/* normalize operand */
Sgl_clear_signexponent(src);
src_exponent++;
Sgl_normalize(src,src_exponent);
even_exponent = src_exponent & 1;
}
if (even_exponent) {
		/*
		 * Halving the unbiased exponent below is only exact when it
		 * is even: sqrt(m * 2^(2k+1)) must instead be computed as
		 * sqrt(2m) * 2^k, folding the odd factor of two into the
		 * significand.  This one-bit pre-shift, paired with the
		 * "correct exponent for pre-shift" right shift after the
		 * loop, keeps the two parity cases aligned.
		 */
Sgl_leftshiftby1(src);
}
	/*
	 * Digit-by-digit square root on the significand: "result"
	 * accumulates twice the partial root and "newbit" is the trial
	 * bit for the next result digit.  The trial subtrahend
	 * result + newbit corresponds to (2*root + bit) * bit; when it
	 * fits under the radicand, the digit is a one and the radicand
	 * is reduced.  Shifting the radicand left each pass keeps the
	 * comparison aligned as newbit moves down.
	 */
Sgl_setzero(result);
newbit = 1 << SGL_P;
while (newbit && Sgl_isnotzero(src)) {
Sgl_addition(result,newbit,sum);
if(sum <= Sgl_all(src)) {
/* update result */
Sgl_addition(result,(newbit<<1),result);
Sgl_subtract(src,sum,src);
}
Sgl_rightshiftby1(newbit);
Sgl_leftshiftby1(src);
}
/* correct exponent for pre-shift */
if (even_exponent) {
Sgl_rightshiftby1(result);
}
/* check for inexact */
if (Sgl_isnotzero(src)) {
if (!even_exponent && Sgl_islessthan(result,src))
Sgl_increment(result);
guardbit = Sgl_lowmantissa(result);
Sgl_rightshiftby1(result);
/* now round result */
switch (Rounding_mode()) {
case ROUNDPLUS:
Sgl_increment(result);
break;
case ROUNDNEAREST:
/* stickybit is always true, so guardbit
* is enough to determine rounding */
if (guardbit) {
Sgl_increment(result);
}
break;
}
/* increment result exponent by 1 if mantissa overflowed */
if (Sgl_isone_hiddenoverflow(result)) src_exponent+=2;
if (Is_inexacttrap_enabled()) {
Sgl_set_exponent(result,
((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
*dstptr = result;
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
else {
Sgl_rightshiftby1(result);
}
Sgl_set_exponent(result,((src_exponent-SGL_BIAS)>>1)+SGL_BIAS);
*dstptr = result;
return(NOEXCEPTION);
}
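/*
 * Illustrative sketch (not part of the original kernel file): the same
 * digit-by-digit method on a plain 32-bit integer.  'bit' plays the
 * role of newbit above, retiring one result bit per iteration.
 */
#include <stdint.h>

static uint32_t isqrt32(uint32_t x)
{
	uint32_t root = 0, bit = 1u << 30;

	while (bit > x)		/* largest power of four <= x */
		bit >>= 2;
	while (bit) {
		if (x >= root + bit) {	/* trial (2*r + b)*b fits */
			x -= root + bit;
			root = (root >> 1) + bit;
		} else {
			root >>= 1;
		}
		bit >>= 2;
	}
	return root;	/* floor(sqrt(x)); leftover x is the remainder */
}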
| linux-master | arch/parisc/math-emu/sfsqrt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvfxt.c $Revision: 1.1 $
*
* Purpose:
* Single Floating-point to Single Fixed-point /w truncated result
* Single Floating-point to Double Fixed-point /w truncated result
* Double Floating-point to Single Fixed-point /w truncated result
* Double Floating-point to Double Fixed-point /w truncated result
*
* External Interfaces:
* dbl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvfxt(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvfxt(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	Like the fcnvfx conversions, but the result is always truncated
 *	toward zero, so no rounding-mode dispatch is needed.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Convert single floating-point to single fixed-point format
* with truncated result
*/
/*ARGSUSED*/
int
sgl_to_sgl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int src, temp;
register int src_exponent, result;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > SGL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Int_from_sgl_mantissa(temp,src_exponent);
if (Sgl_isone_sign(src)) result = -Sgl_all(temp);
else result = Sgl_all(temp);
*dstptr = result;
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
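/*
 * Illustrative note (not part of the original kernel file): with traps
 * disabled, the routine above behaves like the C cast, which always
 * truncates toward zero regardless of the current rounding mode:
 * (int)1.9f == 1 and (int)-1.9f == -1.  That is why, unlike the
 * fcnvfx family, no Rounding_mode() dispatch appears here.
 */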
/*
* Single Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
sgl_to_dbl_fcnvfxt(
sgl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int src, temp, resultp2;
src = *srcptr;
src_exponent = Sgl_exponent(src) - SGL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Sgl_isnotzero_mantissa(src) || Sgl_iszero_sign(src)) {
if (Sgl_iszero_sign(src)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
Dint_set_minint(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate result
*/
if (src_exponent >= 0) {
temp = src;
Sgl_clear_signexponent_set_hidden(temp);
Dint_from_sgl_mantissa(temp,src_exponent,resultp1,resultp2);
if (Sgl_isone_sign(src)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isinexact_to_fix(src,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Sgl_isnotzero_exponentmantissa(src)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Single Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_sgl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
int *dstptr,
unsigned int *status)
{
register unsigned int srcp1, srcp2, tempp1, tempp2;
register int src_exponent, result;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > SGL_FX_MAX_EXP) {
/* check for MININT */
if (Dbl_isoverflow_to_int(src_exponent,srcp1,srcp2)) {
if (Dbl_iszero_sign(srcp1)) result = 0x7fffffff;
else result = 0x80000000;
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Int_from_dbl_mantissa(tempp1,tempp2,src_exponent);
if (Dbl_isone_sign(srcp1) && (src_exponent <= SGL_FX_MAX_EXP))
result = -Dbl_allp1(tempp1);
else result = Dbl_allp1(tempp1);
*dstptr = result;
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
*dstptr = 0;
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
/*
* Double Floating-point to Double Fixed-point
*/
/*ARGSUSED*/
int
dbl_to_dbl_fcnvfxt(
dbl_floating_point *srcptr,
unsigned int *nullptr,
dbl_integer *dstptr,
unsigned int *status)
{
register int src_exponent, resultp1;
register unsigned int srcp1, srcp2, tempp1, tempp2, resultp2;
Dbl_copyfromptr(srcptr,srcp1,srcp2);
src_exponent = Dbl_exponent(srcp1) - DBL_BIAS;
/*
* Test for overflow
*/
if (src_exponent > DBL_FX_MAX_EXP) {
/* check for MININT */
if ((src_exponent > DBL_FX_MAX_EXP + 1) ||
Dbl_isnotzero_mantissa(srcp1,srcp2) || Dbl_iszero_sign(srcp1)) {
if (Dbl_iszero_sign(srcp1)) {
resultp1 = 0x7fffffff;
resultp2 = 0xffffffff;
}
else {
resultp1 = 0x80000000;
resultp2 = 0;
}
if (Is_invalidtrap_enabled()) {
return(INVALIDEXCEPTION);
}
Set_invalidflag();
Dint_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate result
*/
if (src_exponent >= 0) {
tempp1 = srcp1;
tempp2 = srcp2;
Dbl_clear_signexponent_set_hidden(tempp1);
Dint_from_dbl_mantissa(tempp1,tempp2,src_exponent,
resultp1,resultp2);
if (Dbl_isone_sign(srcp1)) {
Dint_setone_sign(resultp1,resultp2);
}
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isinexact_to_fix(srcp1,srcp2,src_exponent)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
else {
Dint_setzero(resultp1,resultp2);
Dint_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (Dbl_isnotzero_exponentmantissa(srcp1,srcp2)) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvfxt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/fcnvxf.c $Revision: 1.1 $
*
* Purpose:
* Single Fixed-point to Single Floating-point
* Single Fixed-point to Double Floating-point
* Double Fixed-point to Single Floating-point
* Double Fixed-point to Double Floating-point
*
* External Interfaces:
* dbl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
* dbl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
* sgl_to_dbl_fcnvxf(srcptr,nullptr,dstptr,status)
* sgl_to_sgl_fcnvxf(srcptr,nullptr,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	The position of the magnitude's most significant set bit fixes
 *	the exponent; the word is then left-justified into the
 *	significand and, where the integer exceeds the precision of the
 *	destination, rounded in the current rounding mode.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
#include "dbl_float.h"
#include "cnv_float.h"
/*
* Convert single fixed-point to single floating-point format
*/
int
sgl_to_sgl_fcnvxf(
int *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
register int src, dst_exponent;
register unsigned int result = 0;
src = *srcptr;
/*
* set sign bit of result and get magnitude of source
*/
if (src < 0) {
Sgl_setone_sign(result);
Int_negate(src);
}
else {
Sgl_setzero_sign(result);
/* Check for zero */
if (src == 0) {
Sgl_setzero(result);
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(src,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent >= 0) src <<= dst_exponent;
else src = 1 << 30;
Sgl_set_mantissa(result, src >> (SGL_EXP_LENGTH-1));
Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent);
/* check for inexact */
if (Int_isinexact_to_sgl(src)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result))
Sgl_increment(result);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result))
Sgl_increment(result);
break;
case ROUNDNEAREST:
Sgl_roundnearest_from_int(src,result);
}
if (Is_inexacttrap_enabled()) {
*dstptr = result;
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
*dstptr = result;
return(NOEXCEPTION);
}
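/*
 * Illustrative sketch (not part of the original kernel file): the
 * inexact path above fires exactly when the integer needs more than
 * the 24 significand bits of an IEEE single, e.g.:
 */
#include <stdio.h>

int main(void)
{
	int v = (1 << 26) + 1;		/* 27 significant bits */
	float f = (float)v;		/* ulp is 8 here; round-to-nearest
					   drops the low 1 */

	printf("%d -> %.1f\n", v, f);	/* 67108865 -> 67108864.0 */
	return 0;
}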
/*
* Single Fixed-point to Double Floating-point
*/
int
sgl_to_dbl_fcnvxf(
int *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register int src, dst_exponent;
register unsigned int resultp1 = 0, resultp2 = 0;
src = *srcptr;
/*
* set sign bit of result and get magnitude of source
*/
if (src < 0) {
Dbl_setone_sign(resultp1);
Int_negate(src);
}
else {
Dbl_setzero_sign(resultp1);
/* Check for zero */
if (src == 0) {
Dbl_setzero(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(src,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent >= 0) src <<= dst_exponent;
else src = 1 << 30;
	Dbl_set_mantissap1(resultp1, src >> (DBL_EXP_LENGTH - 1));
Dbl_set_mantissap2(resultp2, src << (33-DBL_EXP_LENGTH));
Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Double Fixed-point to Single Floating-point
*/
int
dbl_to_sgl_fcnvxf(
dbl_integer *srcptr,
unsigned int *nullptr,
sgl_floating_point *dstptr,
unsigned int *status)
{
int dst_exponent, srcp1;
unsigned int result = 0, srcp2;
Dint_copyfromptr(srcptr,srcp1,srcp2);
/*
* set sign bit of result and get magnitude of source
*/
if (srcp1 < 0) {
Sgl_setone_sign(result);
Dint_negate(srcp1,srcp2);
}
else {
Sgl_setzero_sign(result);
/* Check for zero */
if (srcp1 == 0 && srcp2 == 0) {
Sgl_setzero(result);
*dstptr = result;
return(NOEXCEPTION);
}
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
if (srcp1 == 0) {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp2,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent >= 0) {
srcp1 = srcp2 << dst_exponent;
srcp2 = 0;
}
else {
srcp1 = srcp2 >> 1;
srcp2 <<= 31;
}
/*
* since msb set is in second word, need to
* adjust bit position count
*/
dst_exponent += 32;
}
else {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*
*/
Find_ms_one_bit(srcp1,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent > 0) {
Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
srcp1);
srcp2 <<= dst_exponent;
}
/*
* If dst_exponent = 0, we don't need to shift anything.
* If dst_exponent = -1, src = - 2**63 so we won't need to
* shift srcp2.
*/
else srcp1 >>= -(dst_exponent);
}
	Sgl_set_mantissa(result, srcp1 >> (SGL_EXP_LENGTH - 1));
Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent);
/* check for inexact */
if (Dint_isinexact_to_sgl(srcp1,srcp2)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result))
Sgl_increment(result);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result))
Sgl_increment(result);
break;
case ROUNDNEAREST:
Sgl_roundnearest_from_dint(srcp1,srcp2,result);
}
if (Is_inexacttrap_enabled()) {
*dstptr = result;
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
*dstptr = result;
return(NOEXCEPTION);
}
/*
* Double Fixed-point to Double Floating-point
*/
int
dbl_to_dbl_fcnvxf(
dbl_integer *srcptr,
unsigned int *nullptr,
dbl_floating_point *dstptr,
unsigned int *status)
{
register int srcp1, dst_exponent;
register unsigned int srcp2, resultp1 = 0, resultp2 = 0;
Dint_copyfromptr(srcptr,srcp1,srcp2);
/*
* set sign bit of result and get magnitude of source
*/
if (srcp1 < 0) {
Dbl_setone_sign(resultp1);
Dint_negate(srcp1,srcp2);
}
else {
Dbl_setzero_sign(resultp1);
/* Check for zero */
if (srcp1 == 0 && srcp2 ==0) {
Dbl_setzero(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
/*
* Generate exponent and normalized mantissa
*/
dst_exponent = 16; /* initialize for normalization */
if (srcp1 == 0) {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp2,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent >= 0) {
srcp1 = srcp2 << dst_exponent;
srcp2 = 0;
}
else {
srcp1 = srcp2 >> 1;
srcp2 <<= 31;
}
/*
* since msb set is in second word, need to
* adjust bit position count
*/
dst_exponent += 32;
}
else {
/*
* Check word for most significant bit set. Returns
* a value in dst_exponent indicating the bit position,
* between -1 and 30.
*/
Find_ms_one_bit(srcp1,dst_exponent);
/* left justify source, with msb at bit position 1 */
if (dst_exponent > 0) {
Variable_shift_double(srcp1,srcp2,(32-dst_exponent),
srcp1);
srcp2 <<= dst_exponent;
}
/*
* If dst_exponent = 0, we don't need to shift anything.
* If dst_exponent = -1, src = - 2**63 so we won't need to
* shift srcp2.
*/
else srcp1 >>= -(dst_exponent);
}
Dbl_set_mantissap1(resultp1, srcp1 >> (DBL_EXP_LENGTH-1));
Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH-1,resultp2);
Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent);
/* check for inexact */
if (Dint_isinexact_to_dbl(srcp2)) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(resultp1,resultp2);
}
break;
case ROUNDNEAREST:
Dbl_roundnearest_from_dint(srcp2,resultp1,
resultp2);
}
if (Is_inexacttrap_enabled()) {
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(INEXACTEXCEPTION);
}
else Set_inexactflag();
}
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/fcnvxf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* linux/arch/math-emu/driver.c.c
*
* decodes and dispatches unimplemented FPU instructions
*
* Copyright (C) 1999, 2000 Philipp Rumpf <[email protected]>
* Copyright (C) 2001 Hewlett-Packard <[email protected]>
*/
#include <linux/sched/signal.h>
#include "float.h"
#include "math-emu.h"
#define fptpos 31
#define fpr1pos 10
#define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1))
#define FPUDEBUG 0
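/*
 * Worked example (illustrative, not part of the original kernel file):
 * extru() mimics the PA-RISC "extract unsigned" instruction, where
 * 'pos' names the rightmost bit of the field counted from the MSB,
 * bit 0, downward.  With r = 0xDEADBEEF:
 *
 *	extru(r, 31, 8) == (r >> 0) & 0xff == 0xEF
 *	extru(r, 23, 8) == (r >> 8) & 0xff == 0xBE
 */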
/* Format of the floating-point exception registers. */
struct exc_reg {
unsigned int exception : 6;
unsigned int ei : 26;
};
/* Macros for grabbing bits of the instruction format from the 'ei'
field above. */
/* Major opcode 0c and 0e */
#define FP0CE_UID(i) (((i) >> 6) & 3)
#define FP0CE_CLASS(i) (((i) >> 9) & 3)
#define FP0CE_SUBOP(i) (((i) >> 13) & 7)
#define FP0CE_SUBOP1(i) (((i) >> 15) & 7) /* Class 1 subopcode */
#define FP0C_FORMAT(i) (((i) >> 11) & 3)
#define FP0E_FORMAT(i) (((i) >> 11) & 1)
/* Major opcode 0c, uid 2 (performance monitoring) */
#define FPPM_SUBOP(i) (((i) >> 9) & 0x1f)
/* Major opcode 2e (fused operations). */
#define FP2E_SUBOP(i) (((i) >> 5) & 1)
#define FP2E_FORMAT(i) (((i) >> 11) & 1)
/* Major opcode 26 (FMPYSUB) */
/* Major opcode 06 (FMPYADD) */
#define FPx6_FORMAT(i) ((i) & 0x1f)
/* Flags and enable bits of the status word. */
#define FPSW_FLAGS(w) ((w) >> 27)
#define FPSW_ENABLE(w) ((w) & 0x1f)
#define FPSW_V (1<<4)
#define FPSW_Z (1<<3)
#define FPSW_O (1<<2)
#define FPSW_U (1<<1)
#define FPSW_I (1<<0)
/* Handle a floating point exception. Return zero if the faulting
instruction can be completed successfully. */
int
handle_fpe(struct pt_regs *regs)
{
extern void printbinary(unsigned long x, int nbits);
unsigned int orig_sw, sw;
int signalcode;
/* need an intermediate copy of float regs because FPU emulation
* code expects an artificial last entry which contains zero
*
* also, the passed in fr registers contain one word that defines
* the fpu type. the fpu type information is constructed
* inside the emulation code
*/
__u64 frcopy[36];
memcpy(frcopy, regs->fr, sizeof regs->fr);
frcopy[32] = 0;
memcpy(&orig_sw, frcopy, sizeof(orig_sw));
if (FPUDEBUG) {
printk(KERN_DEBUG "FP VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI ->\n ");
printbinary(orig_sw, 32);
printk(KERN_DEBUG "\n");
}
signalcode = decode_fpu(frcopy, 0x666);
/* Status word = FR0L. */
memcpy(&sw, frcopy, sizeof(sw));
if (FPUDEBUG) {
printk(KERN_DEBUG "VZOUICxxxxCQCQCQCQCQCRMxxTDVZOUI decode_fpu returns %d|0x%x\n",
signalcode >> 24, signalcode & 0xffffff);
printbinary(sw, 32);
printk(KERN_DEBUG "\n");
}
memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) {
force_sig_fault(signalcode >> 24, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
}
	return 0;
}
| linux-master | arch/parisc/math-emu/driver.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <[email protected]>
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/sfmpy.c $Revision: 1.1 $
*
* Purpose:
* Single Precision Floating-point Multiply
*
* External Interfaces:
* sgl_fmpy(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
 *	After screening special operands, the product significand is
 *	formed with a radix-16 shift-and-add loop, then normalized and
 *	rounded with guard and sticky bits.
*
* END_DESC
*/
#include "float.h"
#include "sgl_float.h"
/*
* Single Precision Floating-point Multiply
*/
int
sgl_fmpy(
sgl_floating_point *srcptr1,
sgl_floating_point *srcptr2,
sgl_floating_point *dstptr,
unsigned int *status)
{
register unsigned int opnd1, opnd2, opnd3, result;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
opnd1 = *srcptr1;
opnd2 = *srcptr2;
/*
* set sign bit of result
*/
if (Sgl_sign(opnd1) ^ Sgl_sign(opnd2)) Sgl_setnegativezero(result);
else Sgl_setzero(result);
/*
* check first operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd1)) {
if (Sgl_iszero_mantissa(opnd1)) {
if (Sgl_isnotnan(opnd2)) {
if (Sgl_iszero_exponentmantissa(opnd2)) {
/*
* invalid since operands are infinity
* and zero
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd1);
}
/*
* is second operand a signaling NaN?
*/
else if (Sgl_is_signalingnan(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
*dstptr = opnd1;
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Sgl_isinfinity_exponent(opnd2)) {
if (Sgl_iszero_mantissa(opnd2)) {
if (Sgl_iszero_exponentmantissa(opnd1)) {
/* invalid since operands are zero & infinity */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Sgl_makequietnan(opnd2);
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* return infinity
*/
Sgl_setinfinity_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Sgl_isone_signaling(opnd2)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Sgl_set_quiet(opnd2);
}
/*
* return quiet NaN
*/
*dstptr = opnd2;
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Sgl_exponent(opnd1) + Sgl_exponent(opnd2) - SGL_BIAS;
/*
* Generate mantissa
*/
if (Sgl_isnotzero_exponent(opnd1)) {
/* set hidden bit */
Sgl_clear_signexponent_set_hidden(opnd1);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd1)) {
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/* is denormalized, adjust exponent */
Sgl_clear_signexponent(opnd1);
Sgl_leftshiftby1(opnd1);
Sgl_normalize(opnd1,dest_exponent);
}
	/* opnd2 needs its hidden bit set (msb in the hidden-bit position) */
if (Sgl_isnotzero_exponent(opnd2)) {
Sgl_clear_signexponent_set_hidden(opnd2);
}
else {
/* check for zero */
if (Sgl_iszero_mantissa(opnd2)) {
Sgl_setzero_exponentmantissa(result);
*dstptr = result;
return(NOEXCEPTION);
}
/* is denormalized; want to normalize */
Sgl_clear_signexponent(opnd2);
Sgl_leftshiftby1(opnd2);
Sgl_normalize(opnd2,dest_exponent);
}
/* Multiply two source mantissas together */
Sgl_leftshiftby4(opnd2); /* make room for guard bits */
Sgl_setzero(opnd3);
/*
* Four bits at a time are inspected in each loop, and a
* simple shift and add multiply algorithm is used.
*/
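	/*
	 * Per pass: bits shifted out of the partial product are folded
	 * into stickybit, the product is shifted right by four, opnd2
	 * scaled by the low four multiplier bits (big-endian bits 28..31,
	 * weights 8/4/2/1) is added in, and the multiplier is shifted down.
	 */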
for (count=1;count<SGL_P;count+=4) {
stickybit |= Slow4(opnd3);
Sgl_rightshiftby4(opnd3);
if (Sbit28(opnd1)) Sall(opnd3) += (Sall(opnd2) << 3);
if (Sbit29(opnd1)) Sall(opnd3) += (Sall(opnd2) << 2);
if (Sbit30(opnd1)) Sall(opnd3) += (Sall(opnd2) << 1);
if (Sbit31(opnd1)) Sall(opnd3) += Sall(opnd2);
Sgl_rightshiftby4(opnd1);
}
/* make sure result is left-justified */
if (Sgl_iszero_sign(opnd3)) {
Sgl_leftshiftby1(opnd3);
}
else {
/* result mantissa >= 2. */
dest_exponent++;
}
/* check for denormalized result */
while (Sgl_iszero_sign(opnd3)) {
Sgl_leftshiftby1(opnd3);
dest_exponent--;
}
/*
* check for guard, sticky and inexact bits
*/
stickybit |= Sgl_all(opnd3) << (SGL_BITLENGTH - SGL_EXP_LENGTH + 1);
guardbit = Sbit24(opnd3);
inexact = guardbit | stickybit;
/* re-align mantissa */
Sgl_rightshiftby8(opnd3);
/*
* round result
*/
if (inexact && (dest_exponent>0 || Is_underflowtrap_enabled())) {
Sgl_clear_signexponent(opnd3);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result))
Sgl_increment(opnd3);
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result))
Sgl_increment(opnd3);
break;
case ROUNDNEAREST:
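			/* ties-to-even: round up only if the guard bit is
			   set and either sticky or the mantissa lsb is set */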
if (guardbit) {
if (stickybit || Sgl_isone_lowmantissa(opnd3))
Sgl_increment(opnd3);
}
}
if (Sgl_isone_hidden(opnd3)) dest_exponent++;
}
Sgl_set_mantissa(result,opnd3);
/*
* Test for overflow
*/
if (dest_exponent >= SGL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,ovfl);
*dstptr = result;
		if (inexact) {
			if (Is_inexacttrap_enabled())
				return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
			else
				Set_inexactflag();
		}
		return(OVERFLOWEXCEPTION);
}
inexact = TRUE;
Set_overflowflag();
/* set result to infinity or largest number */
Sgl_setoverflow(result);
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Sgl_setwrapped_exponent(result,dest_exponent,unfl);
*dstptr = result;
		if (inexact) {
			if (Is_inexacttrap_enabled())
				return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
			else
				Set_inexactflag();
		}
		return(UNDERFLOWEXCEPTION);
}
	/* Determine whether the underflow flag should be set */
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
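		/* The result is not tiny if rounding would carry out into
		   the hidden bit; probe by incrementing the mantissa and
		   undoing it afterwards. */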
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result)) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result)) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Sgl_isone_lowmantissa(opnd3))) {
Sgl_increment(opnd3);
if (Sgl_isone_hiddenoverflow(opnd3))
is_tiny = FALSE;
Sgl_decrement(opnd3);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Sgl_denormalize(opnd3,dest_exponent,guardbit,stickybit,inexact);
/* return zero or smallest number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Sgl_iszero_sign(result)) {
Sgl_increment(opnd3);
}
break;
case ROUNDMINUS:
if (Sgl_isone_sign(result)) {
Sgl_increment(opnd3);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Sgl_isone_lowmantissa(opnd3))) {
Sgl_increment(opnd3);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Sgl_set_exponentmantissa(result,opnd3);
}
else Sgl_set_exponent(result,dest_exponent);
*dstptr = result;
/* check for inexact */
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
| linux-master | arch/parisc/math-emu/sfmpy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* BPF JIT compiler for PA-RISC (64-bit)
*
* Copyright(c) 2023 Helge Deller <[email protected]>
*
* The code is based on the BPF JIT compiler for RV64 by Björn Töpel.
*
* TODO:
* - check if bpf_jit_needs_zext() is needed (currently enabled)
* - implement arch_prepare_bpf_trampoline(), poke(), ...
*/
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/libgcc.h>
#include "bpf_jit.h"
static const int regmap[] = {
[BPF_REG_0] = HPPA_REG_RET0,
[BPF_REG_1] = HPPA_R(5),
[BPF_REG_2] = HPPA_R(6),
[BPF_REG_3] = HPPA_R(7),
[BPF_REG_4] = HPPA_R(8),
[BPF_REG_5] = HPPA_R(9),
[BPF_REG_6] = HPPA_R(10),
[BPF_REG_7] = HPPA_R(11),
[BPF_REG_8] = HPPA_R(12),
[BPF_REG_9] = HPPA_R(13),
[BPF_REG_FP] = HPPA_R(14),
[BPF_REG_AX] = HPPA_R(15),
};
/*
* Stack layout during BPF program execution (note: stack grows up):
*
* high
* HPPA64 sp => +----------+ <= HPPA64 fp
* | saved sp |
* | saved rp |
* | ... | HPPA64 callee-saved registers
* | curr args|
* | local var|
* +----------+ <= (BPF FP)
* | |
* | ... | BPF program stack
* | |
* | ... | Function call stack
* | |
* +----------+
* low
*/
/* Alignment of the BPF program stack. */
#define STACK_ALIGN FRAME_SIZE
#define EXIT_PTR_LOAD(reg) hppa64_ldd_im16(-FRAME_SIZE, HPPA_REG_SP, reg)
#define EXIT_PTR_STORE(reg) hppa64_std_im16(reg, -FRAME_SIZE, HPPA_REG_SP)
#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop)
static u8 bpf_to_hppa_reg(int bpf_reg, struct hppa_jit_context *ctx)
{
u8 reg = regmap[bpf_reg];
REG_SET_SEEN(ctx, reg);
return reg;
}
static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
{
REG_SET_SEEN(ctx, rd);
if (OPTIMIZE_HPPA && (rs == rd))
return;
REG_SET_SEEN(ctx, rs);
emit(hppa_copy(rs, rd), ctx);
}
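/*
 * Emit a 64-bit deposit (depd): insert the low 'len' bits of 'src' into
 * 'target' at big-endian bit position 'pos'.  pos and len are converted
 * to the complemented form the hardware expects; the c bits extend the
 * 5-bit pos/len fields and carry the no_zero (merge) flag.
 */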
static void emit_hppa64_depd(u8 src, u8 pos, u8 len, u8 target, bool no_zero, struct hppa_jit_context *ctx)
{
int c;
pos &= (BITS_PER_LONG - 1);
pos = 63 - pos;
len = 64 - len;
c = (len < 32) ? 0x4 : 0;
c |= (pos >= 32) ? 0x2 : 0;
c |= (no_zero) ? 0x1 : 0;
emit(hppa_t10_insn(0x3c, target, src, 0, c, pos & 0x1f, len & 0x1f), ctx);
}
static void emit_hppa64_shld(u8 src, int num, u8 target, struct hppa_jit_context *ctx)
{
emit_hppa64_depd(src, 63-num, 64-num, target, 0, ctx);
}
static void emit_hppa64_extrd(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx)
{
int c;
pos &= (BITS_PER_LONG - 1);
len = 64 - len;
c = (len < 32) ? 0x4 : 0;
c |= (pos >= 32) ? 0x2 : 0;
c |= signed_op ? 0x1 : 0;
emit(hppa_t10_insn(0x36, src, target, 0, c, pos & 0x1f, len & 0x1f), ctx);
}
static void emit_hppa64_extrw(u8 src, u8 pos, u8 len, u8 target, bool signed_op, struct hppa_jit_context *ctx)
{
int c;
pos &= (32 - 1);
len = 32 - len;
c = 0x06 | (signed_op ? 1 : 0);
emit(hppa_t10_insn(0x34, src, target, 0, c, pos, len), ctx);
}
#define emit_hppa64_zext32(r, target, ctx) \
emit_hppa64_extrd(r, 63, 32, target, false, ctx)
#define emit_hppa64_sext32(r, target, ctx) \
emit_hppa64_extrd(r, 63, 32, target, true, ctx)
static void emit_hppa64_shrd(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx)
{
emit_hppa64_extrd(src, 63-num, 64-num, target, signed_op, ctx);
}
static void emit_hppa64_shrw(u8 src, int num, u8 target, bool signed_op, struct hppa_jit_context *ctx)
{
emit_hppa64_extrw(src, 31-num, 32-num, target, signed_op, ctx);
}
/* Emit variable-length instructions for 32-bit imm */
static void emit_imm32(u8 rd, s32 imm, struct hppa_jit_context *ctx)
{
u32 lower = im11(imm);
REG_SET_SEEN(ctx, rd);
if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) {
emit(hppa_ldi(imm, rd), ctx);
return;
}
if (OPTIMIZE_HPPA && lower == imm) {
emit(hppa_ldo(lower, HPPA_REG_ZERO, rd), ctx);
return;
}
emit(hppa_ldil(imm, rd), ctx);
if (OPTIMIZE_HPPA && (lower == 0))
return;
emit(hppa_ldo(lower, rd, rd), ctx);
}
static bool is_32b_int(s64 val)
{
return val == (s32) val;
}
/* Emit variable-length instructions for 64-bit imm */
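/*
 * Worst-case sequence (upper 32 bits non-trivial), as emitted below:
 *   emit_imm32(rd, lo32)       - ldil/ldo, sign-extended into rd
 *   emit_imm32(tmpreg, hi32)   - upper half into tmpreg
 *   depd tmpreg,31,32,rd       - deposit tmpreg into bits 63..32 of rd
 */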
static void emit_imm(u8 rd, s64 imm, u8 tmpreg, struct hppa_jit_context *ctx)
{
u32 upper32;
/* get lower 32-bits into rd, sign extended */
emit_imm32(rd, imm, ctx);
/* do we have upper 32-bits too ? */
if (OPTIMIZE_HPPA && is_32b_int(imm))
return;
/* load upper 32-bits into lower tmpreg and deposit into rd */
upper32 = imm >> 32;
if (upper32 || !OPTIMIZE_HPPA) {
emit_imm32(tmpreg, upper32, ctx);
emit_hppa64_depd(tmpreg, 31, 32, rd, 1, ctx);
} else
emit_hppa64_depd(HPPA_REG_ZERO, 31, 32, rd, 1, ctx);
}
static int emit_jump(signed long paoff, bool force_far,
struct hppa_jit_context *ctx)
{
unsigned long pc, addr;
/* Note: Use 2 instructions for jumps if force_far is set. */
if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 22)) {
/* use BL,long branch followed by nop() */
emit(hppa64_bl_long(paoff - HPPA_BRANCH_DISPLACEMENT), ctx);
if (force_far)
emit(hppa_nop(), ctx);
return 0;
}
pc = (uintptr_t) &ctx->insns[ctx->ninsns];
addr = pc + (paoff * HPPA_INSN_SIZE);
/* even the 64-bit kernel runs in memory below 4GB */
if (WARN_ON_ONCE(addr >> 32))
return -E2BIG;
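	/* far jump: ldil loads the left (high) part of the absolute address
	 * into %r31; be,l supplies the low bits and branches */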
emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx);
return 0;
}
static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx)
{
int i;
if (is_tail_call) {
/*
* goto *(t0 + 4);
* Skips first instruction of prologue which initializes tail
* call counter. Assumes t0 contains address of target program,
* see emit_bpf_tail_call.
*/
emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx);
emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx);
/* in delay slot: */
emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx);
return;
}
/* load epilogue function pointer and jump to it. */
	/* exit point is either at next instruction, or the outermost TCC exit function */
emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/* NOTE: we are 64-bit and big-endian, so return lower sign-extended 32-bit value */
emit_hppa64_sext32(regmap[BPF_REG_0], HPPA_REG_RET0, ctx);
/* Restore callee-saved registers. */
for (i = 3; i <= 15; i++) {
if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
continue;
emit(hppa64_ldd_im16(-REG_SIZE * i, HPPA_REG_SP, HPPA_R(i)), ctx);
}
	/* load original return pointer (stored by outermost TCC function) */
emit(hppa64_ldd_im16(-2*REG_SIZE, HPPA_REG_SP, HPPA_REG_RP), ctx);
emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx);
/* in delay slot: */
emit(hppa64_ldd_im5(-REG_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
	emit(hppa_nop(), ctx); // XXX why one too few ??
}
static int emit_branch(u8 op, u8 rd, u8 rs, signed long paoff,
struct hppa_jit_context *ctx)
{
int e, s;
bool far = false;
int off;
if (op == BPF_JSET) {
/*
* BPF_JSET is a special case: it has no inverse so translate
* to and() function and compare against zero
*/
emit(hppa_and(rd, rs, HPPA_REG_T0), ctx);
paoff -= 1; /* reduce offset due to hppa_and() above */
rd = HPPA_REG_T0;
rs = HPPA_REG_ZERO;
op = BPF_JNE;
}
/* set start after BPF_JSET */
s = ctx->ninsns;
if (!relative_branch_ok(paoff - HPPA_BRANCH_DISPLACEMENT + 1, 12)) {
op = invert_bpf_cond(op);
far = true;
}
/*
* For a far branch, the condition is negated and we jump over the
* branch itself, and the two instructions from emit_jump.
* For a near branch, just use paoff.
*/
off = far ? (2 - HPPA_BRANCH_DISPLACEMENT) : paoff - HPPA_BRANCH_DISPLACEMENT;
switch (op) {
/* IF (dst COND src) JUMP off */
case BPF_JEQ:
emit(hppa_beq(rd, rs, off), ctx);
break;
case BPF_JGT:
emit(hppa_bgtu(rd, rs, off), ctx);
break;
case BPF_JLT:
emit(hppa_bltu(rd, rs, off), ctx);
break;
case BPF_JGE:
emit(hppa_bgeu(rd, rs, off), ctx);
break;
case BPF_JLE:
emit(hppa_bleu(rd, rs, off), ctx);
break;
case BPF_JNE:
emit(hppa_bne(rd, rs, off), ctx);
break;
case BPF_JSGT:
emit(hppa_bgt(rd, rs, off), ctx);
break;
case BPF_JSLT:
emit(hppa_blt(rd, rs, off), ctx);
break;
case BPF_JSGE:
emit(hppa_bge(rd, rs, off), ctx);
break;
case BPF_JSLE:
emit(hppa_ble(rd, rs, off), ctx);
break;
default:
WARN_ON(1);
}
if (far) {
int ret;
e = ctx->ninsns;
/* Adjust for extra insns. */
paoff -= (e - s);
ret = emit_jump(paoff, true, ctx);
if (ret)
return ret;
} else {
/*
* always allocate 2 nops instead of the far branch to
* reduce translation loops
*/
emit(hppa_nop(), ctx);
emit(hppa_nop(), ctx);
}
return 0;
}
static void emit_zext_32(u8 reg, struct hppa_jit_context *ctx)
{
emit_hppa64_zext32(reg, reg, ctx);
}
static void emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx)
{
/*
* R1 -> &ctx
* R2 -> &array
* R3 -> index
*/
int off;
const s8 arr_reg = regmap[BPF_REG_2];
const s8 idx_reg = regmap[BPF_REG_3];
struct bpf_array bpfa;
struct bpf_prog bpfp;
/* if there is any tail call, we need to save & restore all registers */
REG_SET_SEEN_ALL(ctx);
/* get address of TCC main exit function for error case into rp */
emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
/* max_entries = array->map.max_entries; */
off = offsetof(struct bpf_array, map.max_entries);
BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
emit(hppa_ldw(off, arr_reg, HPPA_REG_T1), ctx);
/*
* if (index >= max_entries)
* goto out;
*/
emit(hppa_bltu(idx_reg, HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* if (--tcc < 0)
* goto out;
*/
REG_FORCE_SEEN(ctx, HPPA_REG_TCC);
emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx);
emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 8);
emit(hppa64_shladd(idx_reg, 3, arr_reg, HPPA_REG_T0), ctx);
off = offsetof(struct bpf_array, ptrs);
BUILD_BUG_ON(off < 16);
emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* tcc = temp_tcc;
* goto *(prog->bpf_func + 4);
*/
off = offsetof(struct bpf_prog, bpf_func);
BUILD_BUG_ON(off < 16);
BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 8);
emit(hppa64_ldd_im16(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
/* Epilogue jumps to *(t0 + 4). */
__build_epilogue(true, ctx);
}
static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
struct hppa_jit_context *ctx)
{
u8 code = insn->code;
switch (code) {
case BPF_JMP | BPF_JA:
case BPF_JMP | BPF_CALL:
case BPF_JMP | BPF_EXIT:
case BPF_JMP | BPF_TAIL_CALL:
break;
default:
*rd = bpf_to_hppa_reg(insn->dst_reg, ctx);
}
if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
code & BPF_LDX || code & BPF_STX)
*rs = bpf_to_hppa_reg(insn->src_reg, ctx);
}
static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
{
emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
*rd = HPPA_REG_T2;
emit_hppa64_zext32(*rs, HPPA_REG_T1, ctx);
*rs = HPPA_REG_T1;
}
static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct hppa_jit_context *ctx)
{
emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
*rd = HPPA_REG_T2;
emit_hppa64_sext32(*rs, HPPA_REG_T1, ctx);
*rs = HPPA_REG_T1;
}
static void emit_zext_32_rd_t1(u8 *rd, struct hppa_jit_context *ctx)
{
emit_hppa64_zext32(*rd, HPPA_REG_T2, ctx);
*rd = HPPA_REG_T2;
emit_zext_32(HPPA_REG_T1, ctx);
}
static void emit_sext_32_rd(u8 *rd, struct hppa_jit_context *ctx)
{
emit_hppa64_sext32(*rd, HPPA_REG_T2, ctx);
*rd = HPPA_REG_T2;
}
static bool is_signed_bpf_cond(u8 cond)
{
return cond == BPF_JSGT || cond == BPF_JSLT ||
cond == BPF_JSGE || cond == BPF_JSLE;
}
static void emit_call(u64 addr, bool fixed, struct hppa_jit_context *ctx)
{
const int offset_sp = 2*FRAME_SIZE;
emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
emit_hppa_copy(regmap[BPF_REG_1], HPPA_REG_ARG0, ctx);
emit_hppa_copy(regmap[BPF_REG_2], HPPA_REG_ARG1, ctx);
emit_hppa_copy(regmap[BPF_REG_3], HPPA_REG_ARG2, ctx);
emit_hppa_copy(regmap[BPF_REG_4], HPPA_REG_ARG3, ctx);
emit_hppa_copy(regmap[BPF_REG_5], HPPA_REG_ARG4, ctx);
/* Backup TCC. */
REG_FORCE_SEEN(ctx, HPPA_REG_TCC_SAVED);
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx);
/*
* Use ldil() to load absolute address. Don't use emit_imm as the
* number of emitted instructions should not depend on the value of
* addr.
*/
WARN_ON(addr >> 32);
/* load function address and gp from Elf64_Fdesc descriptor */
emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
emit(hppa_ldo(im11(addr), HPPA_REG_R31, HPPA_REG_R31), ctx);
emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
HPPA_REG_R31, HPPA_REG_RP), ctx);
emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
HPPA_REG_R31, HPPA_REG_GP), ctx);
/* Restore TCC. */
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx);
emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
/* Set return value. */
emit_hppa_copy(HPPA_REG_RET0, regmap[BPF_REG_0], ctx);
}
static void emit_call_libgcc_ll(void *func, const s8 arg0,
const s8 arg1, u8 opcode, struct hppa_jit_context *ctx)
{
u64 func_addr;
if (BPF_CLASS(opcode) == BPF_ALU) {
emit_hppa64_zext32(arg0, HPPA_REG_ARG0, ctx);
emit_hppa64_zext32(arg1, HPPA_REG_ARG1, ctx);
} else {
emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx);
emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx);
}
	/* libgcc overwrites HPPA_REG_RET0, so keep a copy in HPPA_REG_TCC_SAVED */
if (arg0 != HPPA_REG_RET0) {
REG_SET_SEEN(ctx, HPPA_REG_TCC_SAVED);
emit(hppa_copy(HPPA_REG_RET0, HPPA_REG_TCC_SAVED), ctx);
}
/* set up stack */
emit(hppa_ldo(FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
func_addr = (uintptr_t) func;
	/* load function address and gp from the Elf64_Fdesc descriptor */
emit_imm(HPPA_REG_R31, func_addr, arg0, ctx);
emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, addr),
HPPA_REG_R31, HPPA_REG_RP), ctx);
/* skip the following bve_l instruction if divisor is 0. */
if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
if (BPF_OP(opcode) == BPF_DIV)
emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx);
else {
emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET0, ctx);
}
emit(hppa_beq(HPPA_REG_ARG1, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
}
emit(hppa64_bve_l_rp(HPPA_REG_RP), ctx);
emit(hppa64_ldd_im16(offsetof(struct elf64_fdesc, gp),
HPPA_REG_R31, HPPA_REG_GP), ctx);
emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
emit_hppa_copy(HPPA_REG_RET0, arg0, ctx);
/* restore HPPA_REG_RET0 */
if (arg0 != HPPA_REG_RET0)
emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_RET0), ctx);
}
static void emit_store(const s8 rd, const s8 rs, s16 off,
struct hppa_jit_context *ctx, const u8 size,
const u8 mode)
{
s8 dstreg;
	/* do we need to calculate the address because the offset does not fit in 14 bits? */
if (relative_bits_ok(off, 14))
dstreg = rd;
else {
/* need to use R1 here, since addil puts result into R1 */
dstreg = HPPA_REG_R1;
emit(hppa_addil(off, rd), ctx);
off = im11(off);
}
switch (size) {
case BPF_B:
emit(hppa_stb(rs, off, dstreg), ctx);
break;
case BPF_H:
emit(hppa_sth(rs, off, dstreg), ctx);
break;
case BPF_W:
emit(hppa_stw(rs, off, dstreg), ctx);
break;
case BPF_DW:
if (off & 7) {
emit(hppa_ldo(off, dstreg, HPPA_REG_R1), ctx);
emit(hppa64_std_im5(rs, 0, HPPA_REG_R1), ctx);
} else if (off >= -16 && off <= 15)
emit(hppa64_std_im5(rs, off, dstreg), ctx);
else
emit(hppa64_std_im16(rs, off, dstreg), ctx);
break;
}
}
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
bool extra_pass)
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
int s, e, ret, i = insn - ctx->prog->insnsi;
s64 paoff;
struct bpf_prog_aux *aux = ctx->prog->aux;
u8 rd = -1, rs = -1, code = insn->code;
s16 off = insn->off;
s32 imm = insn->imm;
init_regs(&rd, &rs, insn, ctx);
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext */
emit_zext_32(rd, ctx);
break;
}
if (!is64 && !aux->verifier_zext)
emit_hppa64_zext32(rs, rd, ctx);
else
emit_hppa_copy(rs, rd, ctx);
break;
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
emit(hppa_add(rd, rs, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_X:
emit(hppa_sub(rd, rs, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_AND | BPF_X:
emit(hppa_and(rd, rs, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
emit(hppa_or(rd, rs, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
emit(hppa_xor(rd, rs, rd), ctx);
if (!is64 && !aux->verifier_zext && rs != rd)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_MUL | BPF_K:
emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
rs = HPPA_REG_T1;
fallthrough;
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
emit_call_libgcc_ll(__muldi3, rd, rs, code, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_K:
emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
rs = HPPA_REG_T1;
fallthrough;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
emit_call_libgcc_ll(&hppa_div64, rd, rs, code, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_K:
emit_imm(HPPA_REG_T1, is64 ? (s64)(s32)imm : (u32)imm, HPPA_REG_T2, ctx);
rs = HPPA_REG_T1;
fallthrough;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, code, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
if (is64)
emit(hppa64_depdz_sar(rd, rd), ctx);
else
emit(hppa_depwz_sar(rd, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
emit(hppa_mtsar(rs), ctx);
if (is64)
emit(hppa64_shrpd_sar(rd, rd), ctx);
else
emit(hppa_shrpw_sar(rd, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
emit_hppa64_sext32(rs, HPPA_REG_T0, ctx);
emit(hppa64_mtsarcm(HPPA_REG_T0), ctx);
if (is64)
emit(hppa_extrd_sar(rd, rd, 1), ctx);
else
emit(hppa_extrws_sar(rd, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = -dst */
case BPF_ALU | BPF_NEG:
case BPF_ALU64 | BPF_NEG:
emit(hppa_sub(HPPA_REG_ZERO, rd, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
emit_hppa64_depd(HPPA_REG_ZERO, 63-16, 64-16, rd, 1, ctx);
break;
case 32:
if (!aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case 64:
/* Do nothing */
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
emit(hppa_extru(rd, 31 - 8, 8, HPPA_REG_T1), ctx);
emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx);
emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx);
emit_hppa64_extrd(HPPA_REG_T1, 63, 16, rd, 0, ctx);
break;
case 32:
emit(hppa_shrpw(rd, rd, 16, HPPA_REG_T1), ctx);
emit_hppa64_depd(HPPA_REG_T1, 63-16, 8, HPPA_REG_T1, 1, ctx);
emit(hppa_shrpw(rd, HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
emit_hppa64_extrd(HPPA_REG_T1, 63, 32, rd, 0, ctx);
break;
case 64:
emit(hppa64_permh_3210(rd, HPPA_REG_T1), ctx);
emit(hppa64_hshl(HPPA_REG_T1, 8, HPPA_REG_T2), ctx);
emit(hppa64_hshr_u(HPPA_REG_T1, 8, HPPA_REG_T1), ctx);
emit(hppa_or(HPPA_REG_T2, HPPA_REG_T1, rd), ctx);
break;
default:
pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
return -1;
}
break;
/* dst = imm */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_MOV | BPF_K:
emit_imm(rd, imm, HPPA_REG_T2, ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* dst = dst OP imm */
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_ADD | BPF_K:
if (relative_bits_ok(imm, 14)) {
emit(hppa_ldo(imm, rd, rd), ctx);
} else {
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_add(rd, HPPA_REG_T1, rd), ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_K:
if (relative_bits_ok(-imm, 14)) {
emit(hppa_ldo(-imm, rd, rd), ctx);
} else {
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_sub(rd, HPPA_REG_T1, rd), ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_and(rd, HPPA_REG_T1, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_or(rd, HPPA_REG_T1, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_xor(rd, HPPA_REG_T1, rd), ctx);
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
if (imm != 0) {
emit_hppa64_shld(rd, imm, rd, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
if (imm != 0) {
if (is64)
emit_hppa64_shrd(rd, imm, rd, false, ctx);
else
emit_hppa64_shrw(rd, imm, rd, false, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
case BPF_ALU | BPF_ARSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
if (imm != 0) {
if (is64)
emit_hppa64_shrd(rd, imm, rd, true, ctx);
else
emit_hppa64_shrw(rd, imm, rd, true, ctx);
}
if (!is64 && !aux->verifier_zext)
emit_zext_32(rd, ctx);
break;
/* JUMP off */
case BPF_JMP | BPF_JA:
paoff = hppa_offset(i, off, ctx);
ret = emit_jump(paoff, false, ctx);
if (ret)
return ret;
break;
/* IF (dst COND src) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
paoff = hppa_offset(i, off, ctx);
if (!is64) {
s = ctx->ninsns;
if (is_signed_bpf_cond(BPF_OP(code)))
emit_sext_32_rd_rs(&rd, &rs, ctx);
else
emit_zext_32_rd_rs(&rd, &rs, ctx);
e = ctx->ninsns;
/* Adjust for extra insns */
paoff -= (e - s);
}
if (BPF_OP(code) == BPF_JSET) {
/* Adjust for and */
paoff -= 1;
emit(hppa_and(rs, rd, HPPA_REG_T1), ctx);
emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff,
ctx);
} else {
emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
}
break;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
paoff = hppa_offset(i, off, ctx);
s = ctx->ninsns;
if (imm) {
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
rs = HPPA_REG_T1;
} else {
rs = HPPA_REG_ZERO;
}
if (!is64) {
if (is_signed_bpf_cond(BPF_OP(code)))
emit_sext_32_rd(&rd, ctx);
else
emit_zext_32_rd_t1(&rd, ctx);
}
e = ctx->ninsns;
/* Adjust for extra insns */
paoff -= (e - s);
emit_branch(BPF_OP(code), rd, rs, paoff, ctx);
break;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
paoff = hppa_offset(i, off, ctx);
s = ctx->ninsns;
emit_imm(HPPA_REG_T1, imm, HPPA_REG_T2, ctx);
emit(hppa_and(HPPA_REG_T1, rd, HPPA_REG_T1), ctx);
/* For jset32, we should clear the upper 32 bits of t1, but
* sign-extension is sufficient here and saves one instruction,
* as t1 is used only in comparison against zero.
*/
if (!is64 && imm < 0)
emit_hppa64_sext32(HPPA_REG_T1, HPPA_REG_T1, ctx);
e = ctx->ninsns;
paoff -= (e - s);
emit_branch(BPF_JNE, HPPA_REG_T1, HPPA_REG_ZERO, paoff, ctx);
break;
/* function call */
case BPF_JMP | BPF_CALL:
{
bool fixed_addr;
u64 addr;
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&addr, &fixed_addr);
if (ret < 0)
return ret;
REG_SET_SEEN_ALL(ctx);
emit_call(addr, fixed_addr, ctx);
break;
}
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
emit_bpf_tail_call(i, ctx);
break;
/* function return */
case BPF_JMP | BPF_EXIT:
if (i == ctx->prog->len - 1)
break;
paoff = epilogue_offset(ctx);
ret = emit_jump(paoff, false, ctx);
if (ret)
return ret;
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
struct bpf_insn insn1 = insn[1];
u64 imm64 = (u64)insn1.imm << 32 | (u32)imm;
if (bpf_pseudo_func(insn))
imm64 = (uintptr_t)dereference_function_descriptor((void*)imm64);
emit_imm(rd, imm64, HPPA_REG_T2, ctx);
return 1;
}
/* LDX: dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
{
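		/* note: BPF_PROBE_MEM loads are emitted exactly like plain
		 * BPF_MEM loads here */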
u8 srcreg;
		/* do we need to calculate the address because the offset does not fit in 14 bits? */
if (relative_bits_ok(off, 14))
srcreg = rs;
else {
/* need to use R1 here, since addil puts result into R1 */
srcreg = HPPA_REG_R1;
BUG_ON(rs == HPPA_REG_R1);
BUG_ON(rd == HPPA_REG_R1);
emit(hppa_addil(off, rs), ctx);
off = im11(off);
}
switch (BPF_SIZE(code)) {
case BPF_B:
emit(hppa_ldb(off, srcreg, rd), ctx);
if (insn_is_zext(&insn[1]))
return 1;
break;
case BPF_H:
emit(hppa_ldh(off, srcreg, rd), ctx);
if (insn_is_zext(&insn[1]))
return 1;
break;
case BPF_W:
emit(hppa_ldw(off, srcreg, rd), ctx);
if (insn_is_zext(&insn[1]))
return 1;
break;
case BPF_DW:
if (off & 7) {
emit(hppa_ldo(off, srcreg, HPPA_REG_R1), ctx);
emit(hppa64_ldd_reg(HPPA_REG_ZERO, HPPA_REG_R1, rd), ctx);
} else if (off >= -16 && off <= 15)
emit(hppa64_ldd_im5(off, srcreg, rd), ctx);
else
emit(hppa64_ldd_im16(off, srcreg, rd), ctx);
break;
}
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
/* STX: *(size *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
if (BPF_CLASS(code) == BPF_ST) {
emit_imm(HPPA_REG_T2, imm, HPPA_REG_T1, ctx);
rs = HPPA_REG_T2;
}
emit_store(rd, rs, off, ctx, BPF_SIZE(code), BPF_MODE(code));
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
pr_info_once(
"bpf-jit: not supported: atomic operation %02x ***\n",
insn->imm);
return -EFAULT;
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
}
void bpf_jit_build_prologue(struct hppa_jit_context *ctx)
{
int bpf_stack_adjust, stack_adjust, i;
unsigned long addr;
s8 reg;
/*
* stack on hppa grows up, so if tail calls are used we need to
* allocate the maximum stack size
*/
if (REG_ALL_SEEN(ctx))
bpf_stack_adjust = MAX_BPF_STACK;
else
bpf_stack_adjust = ctx->prog->aux->stack_depth;
bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN);
stack_adjust = FRAME_SIZE + bpf_stack_adjust;
stack_adjust = round_up(stack_adjust, STACK_ALIGN);
/*
* NOTE: We construct an Elf64_Fdesc descriptor here.
	 * The first 4 words initialize and compare the TCC.
* Then follows the virtual address of the eBPF function,
* and the gp for this function.
*
* The first instruction sets the tail-call-counter (TCC) register.
* This instruction is skipped by tail calls.
* Use a temporary register instead of a caller-saved register initially.
*/
REG_FORCE_SEEN(ctx, HPPA_REG_TCC_IN_INIT);
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx);
/*
* Skip all initializations when called as BPF TAIL call.
*/
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx);
emit(hppa_beq(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, 6 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(hppa64_bl_long(ctx->prologue_len - 3 - HPPA_BRANCH_DISPLACEMENT), ctx);
/* store entry address of this eBPF function */
addr = (uintptr_t) &ctx->insns[0];
emit(addr >> 32, ctx);
emit(addr & 0xffffffff, ctx);
/* store gp of this eBPF function */
asm("copy %%r27,%0" : "=r" (addr) );
emit(addr >> 32, ctx);
emit(addr & 0xffffffff, ctx);
/* Set up hppa stack frame. */
emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx);
emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx);
emit(hppa64_std_im5 (HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx);
emit(hppa64_std_im16(HPPA_REG_RP, -2*REG_SIZE, HPPA_REG_SP), ctx);
/* Save callee-save registers. */
for (i = 3; i <= 15; i++) {
if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
continue;
emit(hppa64_std_im16(HPPA_R(i), -REG_SIZE * i, HPPA_REG_SP), ctx);
}
	/* load function parameters; load all of them if tail calls are used */
#define LOAD_PARAM(arg, dst) \
if (REG_WAS_SEEN(ctx, regmap[dst]) || \
REG_WAS_SEEN(ctx, HPPA_REG_TCC)) \
emit_hppa_copy(arg, regmap[dst], ctx)
LOAD_PARAM(HPPA_REG_ARG0, BPF_REG_1);
LOAD_PARAM(HPPA_REG_ARG1, BPF_REG_2);
LOAD_PARAM(HPPA_REG_ARG2, BPF_REG_3);
LOAD_PARAM(HPPA_REG_ARG3, BPF_REG_4);
LOAD_PARAM(HPPA_REG_ARG4, BPF_REG_5);
#undef LOAD_PARAM
REG_FORCE_SEEN(ctx, HPPA_REG_T0);
REG_FORCE_SEEN(ctx, HPPA_REG_T1);
REG_FORCE_SEEN(ctx, HPPA_REG_T2);
/*
* Now really set the tail call counter (TCC) register.
*/
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx);
/*
* Save epilogue function pointer for outer TCC call chain.
* The main TCC call stores the final RP on stack.
*/
addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset];
/* skip first two instructions which jump to exit */
addr += 2 * HPPA_INSN_SIZE;
emit_imm(HPPA_REG_T2, addr, HPPA_REG_T1, ctx);
emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx);
/* Set up BPF frame pointer. */
reg = regmap[BPF_REG_FP]; /* -> HPPA_REG_FP */
if (REG_WAS_SEEN(ctx, reg)) {
emit(hppa_ldo(-FRAME_SIZE, HPPA_REG_SP, reg), ctx);
}
}
void bpf_jit_build_epilogue(struct hppa_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
bool bpf_jit_supports_kfunc_call(void)
{
return true;
}
| linux-master | arch/parisc/net/bpf_jit_comp64.c |
// SPDX-License-Identifier: GPL-2.0
/*
* BPF JIT compiler for PA-RISC (32-bit)
*
* Copyright (c) 2023 Helge Deller <[email protected]>
*
* The code is based on the BPF JIT compiler for RV64 by Björn Töpel and
* the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
*/
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/libgcc.h>
#include "bpf_jit.h"
/*
* Stack layout during BPF program execution (note: stack grows up):
*
* high
* HPPA32 sp => +----------+ <= HPPA32 fp
* | saved sp |
* | saved rp |
* | ... | HPPA32 callee-saved registers
* | curr args|
* | local var|
* +----------+ <= (sp - 4 * NR_SAVED_REGISTERS)
* | lo(R9) |
* | hi(R9) |
* | lo(FP) | JIT scratch space for BPF registers
* | hi(FP) |
* | ... |
* +----------+ <= (sp - 4 * NR_SAVED_REGISTERS
* | | - 4 * BPF_JIT_SCRATCH_REGS)
* | |
* | ... | BPF program stack
* | |
* | ... | Function call stack
* | |
* +----------+
* low
*/
enum {
/* Stack layout - these are offsets from top of JIT scratch space. */
BPF_R8_HI,
BPF_R8_LO,
BPF_R9_HI,
BPF_R9_LO,
BPF_FP_HI,
BPF_FP_LO,
BPF_AX_HI,
BPF_AX_LO,
BPF_R0_TEMP_HI,
BPF_R0_TEMP_LO,
BPF_JIT_SCRATCH_REGS,
};
/*
 * Number of stack slots reserved: r3-r18 plus the 8-word frame marker
 * (which holds rp at sp-0x14).
 */
#define NR_SAVED_REGISTERS (18 - 3 + 1 + 8)
/* Offset from fp for BPF registers stored on stack. */
#define STACK_OFFSET(k) (- (NR_SAVED_REGISTERS + k + 1))
#define STACK_ALIGN FRAME_SIZE
#define EXIT_PTR_LOAD(reg) hppa_ldw(-0x08, HPPA_REG_SP, reg)
#define EXIT_PTR_STORE(reg) hppa_stw(reg, -0x08, HPPA_REG_SP)
#define EXIT_PTR_JUMP(reg, nop) hppa_bv(HPPA_REG_ZERO, reg, nop)
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TMP_REG_R0 (MAX_BPF_JIT_REG + 2)
static const s8 regmap[][2] = {
/* Return value from in-kernel function, and exit value from eBPF. */
[BPF_REG_0] = {HPPA_REG_RET0, HPPA_REG_RET1}, /* HI/LOW */
/* Arguments from eBPF program to in-kernel function. */
[BPF_REG_1] = {HPPA_R(3), HPPA_R(4)},
[BPF_REG_2] = {HPPA_R(5), HPPA_R(6)},
[BPF_REG_3] = {HPPA_R(7), HPPA_R(8)},
[BPF_REG_4] = {HPPA_R(9), HPPA_R(10)},
[BPF_REG_5] = {HPPA_R(11), HPPA_R(12)},
[BPF_REG_6] = {HPPA_R(13), HPPA_R(14)},
[BPF_REG_7] = {HPPA_R(15), HPPA_R(16)},
/*
* Callee-saved registers that in-kernel function will preserve.
* Stored on the stack.
*/
[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
/* Read-only frame pointer to access BPF stack. Not needed. */
[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
/* Temporary register for blinding constants. Stored on the stack. */
[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
/*
* Temporary registers used by the JIT to operate on registers stored
* on the stack. Save t0 and t1 to be used as temporaries in generated
* code.
*/
[TMP_REG_1] = {HPPA_REG_T3, HPPA_REG_T2},
[TMP_REG_2] = {HPPA_REG_T5, HPPA_REG_T4},
/* temporary space for BPF_R0 during libgcc and millicode calls */
[TMP_REG_R0] = {STACK_OFFSET(BPF_R0_TEMP_HI), STACK_OFFSET(BPF_R0_TEMP_LO)},
};
static s8 hi(const s8 *r)
{
return r[0];
}
static s8 lo(const s8 *r)
{
return r[1];
}
static void emit_hppa_copy(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
{
REG_SET_SEEN(ctx, rd);
if (OPTIMIZE_HPPA && (rs == rd))
return;
REG_SET_SEEN(ctx, rs);
emit(hppa_copy(rs, rd), ctx);
}
static void emit_hppa_xor(const s8 r1, const s8 r2, const s8 r3, struct hppa_jit_context *ctx)
{
REG_SET_SEEN(ctx, r1);
REG_SET_SEEN(ctx, r2);
REG_SET_SEEN(ctx, r3);
if (OPTIMIZE_HPPA && (r1 == r2)) {
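		/* x ^ x == 0, so materialize zero with a copy from %r0 */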
emit(hppa_copy(HPPA_REG_ZERO, r3), ctx);
} else {
emit(hppa_xor(r1, r2, r3), ctx);
}
}
static void emit_imm(const s8 rd, s32 imm, struct hppa_jit_context *ctx)
{
u32 lower = im11(imm);
REG_SET_SEEN(ctx, rd);
if (OPTIMIZE_HPPA && relative_bits_ok(imm, 14)) {
emit(hppa_ldi(imm, rd), ctx);
return;
}
emit(hppa_ldil(imm, rd), ctx);
if (OPTIMIZE_HPPA && (lower == 0))
return;
emit(hppa_ldo(lower, rd, rd), ctx);
}
static void emit_imm32(const s8 *rd, s32 imm, struct hppa_jit_context *ctx)
{
/* Emit immediate into lower bits. */
REG_SET_SEEN(ctx, lo(rd));
emit_imm(lo(rd), imm, ctx);
/* Sign-extend into upper bits. */
REG_SET_SEEN(ctx, hi(rd));
if (imm >= 0)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
else
emit(hppa_ldi(-1, hi(rd)), ctx);
}
static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
struct hppa_jit_context *ctx)
{
emit_imm(hi(rd), imm_hi, ctx);
emit_imm(lo(rd), imm_lo, ctx);
}
static void __build_epilogue(bool is_tail_call, struct hppa_jit_context *ctx)
{
const s8 *r0 = regmap[BPF_REG_0];
int i;
if (is_tail_call) {
/*
* goto *(t0 + 4);
* Skips first instruction of prologue which initializes tail
* call counter. Assumes t0 contains address of target program,
* see emit_bpf_tail_call.
*/
emit(hppa_ldo(1 * HPPA_INSN_SIZE, HPPA_REG_T0, HPPA_REG_T0), ctx);
emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_T0, EXEC_NEXT_INSTR), ctx);
/* in delay slot: */
emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_IN_INIT), ctx);
return;
}
/* load epilogue function pointer and jump to it. */
	/* exit point is either directly below, or the outermost TCC exit function */
emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/* NOTE: we are 32-bit and big-endian, so return lower 32-bit value */
emit_hppa_copy(lo(r0), HPPA_REG_RET0, ctx);
/* Restore callee-saved registers. */
for (i = 3; i <= 18; i++) {
if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
continue;
emit(hppa_ldw(-REG_SIZE * (8 + (i-3)), HPPA_REG_SP, HPPA_R(i)), ctx);
}
	/* load original return pointer (stored by outermost TCC function) */
emit(hppa_ldw(-0x14, HPPA_REG_SP, HPPA_REG_RP), ctx);
emit(hppa_bv(HPPA_REG_ZERO, HPPA_REG_RP, EXEC_NEXT_INSTR), ctx);
/* in delay slot: */
emit(hppa_ldw(-0x04, HPPA_REG_SP, HPPA_REG_SP), ctx);
}
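/* Negative "register" numbers from regmap[] denote stack slots, see STACK_OFFSET(). */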
static bool is_stacked(s8 reg)
{
return reg < 0;
}
static const s8 *bpf_get_reg64_offset(const s8 *reg, const s8 *tmp,
u16 offset_sp, struct hppa_jit_context *ctx)
{
if (is_stacked(hi(reg))) {
emit(hppa_ldw(REG_SIZE * hi(reg) - offset_sp, HPPA_REG_SP, hi(tmp)), ctx);
emit(hppa_ldw(REG_SIZE * lo(reg) - offset_sp, HPPA_REG_SP, lo(tmp)), ctx);
reg = tmp;
}
REG_SET_SEEN(ctx, hi(reg));
REG_SET_SEEN(ctx, lo(reg));
return reg;
}
static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
struct hppa_jit_context *ctx)
{
return bpf_get_reg64_offset(reg, tmp, 0, ctx);
}
static const s8 *bpf_get_reg64_ref(const s8 *reg, const s8 *tmp,
bool must_load, struct hppa_jit_context *ctx)
{
if (!OPTIMIZE_HPPA)
return bpf_get_reg64(reg, tmp, ctx);
if (is_stacked(hi(reg))) {
if (must_load)
emit(hppa_ldw(REG_SIZE * hi(reg), HPPA_REG_SP, hi(tmp)), ctx);
reg = tmp;
}
REG_SET_SEEN(ctx, hi(reg));
REG_SET_SEEN(ctx, lo(reg));
return reg;
}
static void bpf_put_reg64(const s8 *reg, const s8 *src,
struct hppa_jit_context *ctx)
{
if (is_stacked(hi(reg))) {
emit(hppa_stw(hi(src), REG_SIZE * hi(reg), HPPA_REG_SP), ctx);
emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx);
}
}
static void bpf_save_R0(struct hppa_jit_context *ctx)
{
bpf_put_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx);
}
static void bpf_restore_R0(struct hppa_jit_context *ctx)
{
bpf_get_reg64(regmap[TMP_REG_R0], regmap[BPF_REG_0], ctx);
}
static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
struct hppa_jit_context *ctx)
{
if (is_stacked(lo(reg))) {
emit(hppa_ldw(REG_SIZE * lo(reg), HPPA_REG_SP, lo(tmp)), ctx);
reg = tmp;
}
REG_SET_SEEN(ctx, lo(reg));
return reg;
}
static const s8 *bpf_get_reg32_ref(const s8 *reg, const s8 *tmp,
struct hppa_jit_context *ctx)
{
if (!OPTIMIZE_HPPA)
return bpf_get_reg32(reg, tmp, ctx);
if (is_stacked(hi(reg))) {
reg = tmp;
}
REG_SET_SEEN(ctx, lo(reg));
return reg;
}
static void bpf_put_reg32(const s8 *reg, const s8 *src,
struct hppa_jit_context *ctx)
{
if (is_stacked(lo(reg))) {
REG_SET_SEEN(ctx, lo(src));
emit(hppa_stw(lo(src), REG_SIZE * lo(reg), HPPA_REG_SP), ctx);
if (1 && !ctx->prog->aux->verifier_zext) {
REG_SET_SEEN(ctx, hi(reg));
emit(hppa_stw(HPPA_REG_ZERO, REG_SIZE * hi(reg), HPPA_REG_SP), ctx);
}
} else if (1 && !ctx->prog->aux->verifier_zext) {
REG_SET_SEEN(ctx, hi(reg));
emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx);
}
}
/* extern hppa millicode functions */
extern void $$mulI(void);
extern void $$divU(void);
extern void $$remU(void);
static void emit_call_millicode(void *func, const s8 arg0,
const s8 arg1, u8 opcode, struct hppa_jit_context *ctx)
{
u32 func_addr;
emit_hppa_copy(arg0, HPPA_REG_ARG0, ctx);
emit_hppa_copy(arg1, HPPA_REG_ARG1, ctx);
	/* millicode overwrites HPPA_REG_RET0/1, so temporarily save BPF R0 to its stack slot */
if (arg0 != HPPA_REG_RET1)
bpf_save_R0(ctx);
func_addr = (uintptr_t) dereference_function_descriptor(func);
emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx);
/* skip the following be_l instruction if divisor is zero. */
if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
if (BPF_OP(opcode) == BPF_DIV)
emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx);
else
emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx);
emit(hppa_or_cond(HPPA_REG_ARG1, HPPA_REG_ZERO, 1, 0, HPPA_REG_ZERO), ctx);
}
/* Note: millicode functions use r31 as return pointer instead of rp */
emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx);
emit(hppa_nop(), ctx); /* this nop is needed here for delay slot */
/* Note: millicode functions return result in RET1, not RET0 */
emit_hppa_copy(HPPA_REG_RET1, arg0, ctx);
/* restore HPPA_REG_RET0/1, temp. save in dest. */
if (arg0 != HPPA_REG_RET1)
bpf_restore_R0(ctx);
}
static void emit_call_libgcc_ll(void *func, const s8 *arg0,
const s8 *arg1, u8 opcode, struct hppa_jit_context *ctx)
{
u32 func_addr;
emit_hppa_copy(lo(arg0), HPPA_REG_ARG0, ctx);
emit_hppa_copy(hi(arg0), HPPA_REG_ARG1, ctx);
emit_hppa_copy(lo(arg1), HPPA_REG_ARG2, ctx);
emit_hppa_copy(hi(arg1), HPPA_REG_ARG3, ctx);
	/* libgcc overwrites HPPA_REG_RET0/_RET1, so keep a copy of R0 on the stack */
if (hi(arg0) != HPPA_REG_RET0)
bpf_save_R0(ctx);
/* prepare stack */
emit(hppa_ldo(2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
func_addr = (uintptr_t) dereference_function_descriptor(func);
emit(hppa_ldil(func_addr, HPPA_REG_R31), ctx);
	/* set default result values and skip the following be_l if the divisor is 0 */
if (BPF_OP(opcode) == BPF_DIV || BPF_OP(opcode) == BPF_MOD) {
emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET0, ctx);
if (BPF_OP(opcode) == BPF_DIV)
emit_hppa_copy(HPPA_REG_ZERO, HPPA_REG_RET1, ctx);
else
emit_hppa_copy(HPPA_REG_ARG0, HPPA_REG_RET1, ctx);
emit(hppa_or_cond(HPPA_REG_ARG2, HPPA_REG_ARG3, 1, 0, HPPA_REG_ZERO), ctx);
}
emit(hppa_be_l(im11(func_addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx);
emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx);
/* restore stack */
emit(hppa_ldo(-2 * FRAME_SIZE, HPPA_REG_SP, HPPA_REG_SP), ctx);
emit_hppa_copy(HPPA_REG_RET0, hi(arg0), ctx);
emit_hppa_copy(HPPA_REG_RET1, lo(arg0), ctx);
/* restore HPPA_REG_RET0/_RET1 */
if (hi(arg0) != HPPA_REG_RET0)
bpf_restore_R0(ctx);
}
static void emit_jump(s32 paoff, bool force_far,
struct hppa_jit_context *ctx)
{
unsigned long pc, addr;
/* Note: allocate 2 instructions for jumps if force_far is set. */
if (relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 17)) {
/* use BL,short branch followed by nop() */
emit(hppa_bl(paoff - HPPA_BRANCH_DISPLACEMENT, HPPA_REG_ZERO), ctx);
if (force_far)
emit(hppa_nop(), ctx);
return;
}
pc = (uintptr_t) &ctx->insns[ctx->ninsns];
addr = pc + (paoff * HPPA_INSN_SIZE);
emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, NOP_NEXT_INSTR), ctx); // be,l,n addr(sr4,r31), %sr0, %r31
}
static void emit_alu_i64(const s8 *dst, s32 imm,
struct hppa_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *rd;
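	/* the no-load fast path for BPF_MOV is currently compiled out ("0 &&") */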
if (0 && op == BPF_MOV)
rd = bpf_get_reg64_ref(dst, tmp1, false, ctx);
else
rd = bpf_get_reg64(dst, tmp1, ctx);
/* dst = dst OP imm */
switch (op) {
case BPF_MOV:
emit_imm32(rd, imm, ctx);
break;
case BPF_AND:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
if (imm >= 0)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case BPF_OR:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
if (imm < 0)
emit_imm(hi(rd), -1, ctx);
break;
case BPF_XOR:
emit_imm(HPPA_REG_T0, imm, ctx);
emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx);
if (imm < 0) {
emit_imm(HPPA_REG_T0, -1, ctx);
emit_hppa_xor(hi(rd), HPPA_REG_T0, hi(rd), ctx);
}
break;
case BPF_LSH:
if (imm == 0)
break;
if (imm > 32) {
imm -= 32;
emit(hppa_zdep(lo(rd), imm, imm, hi(rd)), ctx);
emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx);
} else if (imm == 32) {
emit_hppa_copy(lo(rd), hi(rd), ctx);
emit_hppa_copy(HPPA_REG_ZERO, lo(rd), ctx);
} else {
emit(hppa_shd(hi(rd), lo(rd), 32 - imm, hi(rd)), ctx);
emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx);
}
break;
case BPF_RSH:
if (imm == 0)
break;
if (imm > 32) {
imm -= 32;
emit(hppa_shr(hi(rd), imm, lo(rd)), ctx);
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
} else if (imm == 32) {
emit_hppa_copy(hi(rd), lo(rd), ctx);
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
} else {
emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx);
emit(hppa_shr(hi(rd), imm, hi(rd)), ctx);
}
break;
case BPF_ARSH:
if (imm == 0)
break;
if (imm > 32) {
imm -= 32;
emit(hppa_extrws(hi(rd), 31 - imm, imm, lo(rd)), ctx);
emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx);
} else if (imm == 32) {
emit_hppa_copy(hi(rd), lo(rd), ctx);
emit(hppa_extrws(hi(rd), 0, 31, hi(rd)), ctx);
} else {
emit(hppa_shrpw(hi(rd), lo(rd), imm, lo(rd)), ctx);
emit(hppa_extrws(hi(rd), 31 - imm, imm, hi(rd)), ctx);
}
break;
default:
WARN_ON(1);
}
bpf_put_reg64(dst, rd, ctx);
}
static void emit_alu_i32(const s8 *dst, s32 imm,
struct hppa_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = regmap[TMP_REG_1];
	const s8 *rd;
if (op == BPF_MOV)
rd = bpf_get_reg32_ref(dst, tmp1, ctx);
else
rd = bpf_get_reg32(dst, tmp1, ctx);
/* dst = dst OP imm */
switch (op) {
case BPF_MOV:
emit_imm(lo(rd), imm, ctx);
break;
case BPF_ADD:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_add(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
break;
case BPF_SUB:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_sub(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
break;
case BPF_AND:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_and(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
break;
case BPF_OR:
emit_imm(HPPA_REG_T0, imm, ctx);
emit(hppa_or(lo(rd), HPPA_REG_T0, lo(rd)), ctx);
break;
case BPF_XOR:
emit_imm(HPPA_REG_T0, imm, ctx);
emit_hppa_xor(lo(rd), HPPA_REG_T0, lo(rd), ctx);
break;
case BPF_LSH:
if (imm != 0)
emit(hppa_zdep(lo(rd), imm, imm, lo(rd)), ctx);
break;
case BPF_RSH:
if (imm != 0)
emit(hppa_shr(lo(rd), imm, lo(rd)), ctx);
break;
case BPF_ARSH:
if (imm != 0)
emit(hppa_extrws(lo(rd), 31 - imm, imm, lo(rd)), ctx);
break;
default:
WARN_ON(1);
}
bpf_put_reg32(dst, rd, ctx);
}
static void emit_alu_r64(const s8 *dst, const s8 *src,
struct hppa_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rd;
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
if (op == BPF_MOV)
rd = bpf_get_reg64_ref(dst, tmp1, false, ctx);
else
rd = bpf_get_reg64(dst, tmp1, ctx);
/* dst = dst OP src */
switch (op) {
case BPF_MOV:
emit_hppa_copy(lo(rs), lo(rd), ctx);
emit_hppa_copy(hi(rs), hi(rd), ctx);
break;
case BPF_ADD:
emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx);
emit(hppa_addc(hi(rd), hi(rs), hi(rd)), ctx);
break;
case BPF_SUB:
emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx);
emit(hppa_subb(hi(rd), hi(rs), hi(rd)), ctx);
break;
case BPF_AND:
emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx);
emit(hppa_and(hi(rd), hi(rs), hi(rd)), ctx);
break;
case BPF_OR:
emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx);
emit(hppa_or(hi(rd), hi(rs), hi(rd)), ctx);
break;
case BPF_XOR:
emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx);
emit_hppa_xor(hi(rd), hi(rs), hi(rd), ctx);
break;
case BPF_MUL:
emit_call_libgcc_ll(__muldi3, rd, rs, op, ctx);
break;
case BPF_DIV:
emit_call_libgcc_ll(&hppa_div64, rd, rs, op, ctx);
break;
case BPF_MOD:
emit_call_libgcc_ll(&hppa_div64_rem, rd, rs, op, ctx);
break;
case BPF_LSH:
emit_call_libgcc_ll(__ashldi3, rd, rs, op, ctx);
break;
case BPF_RSH:
emit_call_libgcc_ll(__lshrdi3, rd, rs, op, ctx);
break;
case BPF_ARSH:
emit_call_libgcc_ll(__ashrdi3, rd, rs, op, ctx);
break;
case BPF_NEG:
emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx);
emit(hppa_subb(HPPA_REG_ZERO, hi(rd), hi(rd)), ctx);
break;
default:
WARN_ON(1);
}
bpf_put_reg64(dst, rd, ctx);
}
static void emit_alu_r32(const s8 *dst, const s8 *src,
struct hppa_jit_context *ctx, const u8 op)
{
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rd;
const s8 *rs = bpf_get_reg32(src, tmp2, ctx);
if (op == BPF_MOV)
rd = bpf_get_reg32_ref(dst, tmp1, ctx);
else
rd = bpf_get_reg32(dst, tmp1, ctx);
/* dst = dst OP src */
switch (op) {
case BPF_MOV:
emit_hppa_copy(lo(rs), lo(rd), ctx);
break;
case BPF_ADD:
emit(hppa_add(lo(rd), lo(rs), lo(rd)), ctx);
break;
case BPF_SUB:
emit(hppa_sub(lo(rd), lo(rs), lo(rd)), ctx);
break;
case BPF_AND:
emit(hppa_and(lo(rd), lo(rs), lo(rd)), ctx);
break;
case BPF_OR:
emit(hppa_or(lo(rd), lo(rs), lo(rd)), ctx);
break;
case BPF_XOR:
emit_hppa_xor(lo(rd), lo(rs), lo(rd), ctx);
break;
case BPF_MUL:
emit_call_millicode($$mulI, lo(rd), lo(rs), op, ctx);
break;
case BPF_DIV:
emit_call_millicode($$divU, lo(rd), lo(rs), op, ctx);
break;
case BPF_MOD:
emit_call_millicode($$remU, lo(rd), lo(rs), op, ctx);
break;
case BPF_LSH:
emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx);
emit(hppa_mtsar(HPPA_REG_T0), ctx);
emit(hppa_depwz_sar(lo(rd), lo(rd)), ctx);
break;
case BPF_RSH:
emit(hppa_mtsar(lo(rs)), ctx);
emit(hppa_shrpw_sar(lo(rd), lo(rd)), ctx);
break;
case BPF_ARSH: /* sign extending arithmetic shift right */
// emit(hppa_beq(lo(rs), HPPA_REG_ZERO, 2), ctx);
emit(hppa_subi(0x1f, lo(rs), HPPA_REG_T0), ctx);
emit(hppa_mtsar(HPPA_REG_T0), ctx);
emit(hppa_extrws_sar(lo(rd), lo(rd)), ctx);
break;
case BPF_NEG:
emit(hppa_sub(HPPA_REG_ZERO, lo(rd), lo(rd)), ctx); // sub r0,rd,rd
break;
default:
WARN_ON(1);
}
bpf_put_reg32(dst, rd, ctx);
}
static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 paoff,
struct hppa_jit_context *ctx, const u8 op)
{
int e, s = ctx->ninsns;
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);
/*
* NO_JUMP skips over the rest of the instructions and the
* emit_jump, meaning the BPF branch is not taken.
* JUMP skips directly to the emit_jump, meaning
* the BPF branch is taken.
*
* The fallthrough case results in the BPF branch being taken.
*/
#define NO_JUMP(idx) (2 + (idx) - 1)
#define JUMP(idx) (0 + (idx) - 1)
switch (op) {
case BPF_JEQ:
emit(hppa_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JGT:
emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JLT:
emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JGE:
emit(hppa_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JLE:
emit(hppa_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JNE:
emit(hppa_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
emit(hppa_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSGT:
emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSLT:
emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSGE:
emit(hppa_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSLE:
emit(hppa_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
emit(hppa_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
emit(hppa_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
break;
case BPF_JSET:
emit(hppa_and(hi(rs1), hi(rs2), HPPA_REG_T0), ctx);
emit(hppa_and(lo(rs1), lo(rs2), HPPA_REG_T1), ctx);
emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, JUMP(1)), ctx);
emit(hppa_beq(HPPA_REG_T1, HPPA_REG_ZERO, NO_JUMP(0)), ctx);
break;
default:
WARN_ON(1);
}
#undef NO_JUMP
#undef JUMP
e = ctx->ninsns;
/* Adjust for extra insns. */
paoff -= (e - s);
emit_jump(paoff, true, ctx);
return 0;
}
static int emit_bcc(u8 op, u8 rd, u8 rs, int paoff, struct hppa_jit_context *ctx)
{
int e, s;
bool far = false;
int off;
if (op == BPF_JSET) {
/*
* BPF_JSET is a special case: it has no inverse so we always
* treat it as a far branch.
*/
emit(hppa_and(rd, rs, HPPA_REG_T0), ctx);
paoff -= 1; /* reduce offset due to hppa_and() above */
rd = HPPA_REG_T0;
rs = HPPA_REG_ZERO;
op = BPF_JNE;
}
s = ctx->ninsns;
if (!relative_bits_ok(paoff - HPPA_BRANCH_DISPLACEMENT, 12)) {
op = invert_bpf_cond(op);
far = true;
}
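/*
 * Added worked example: when paoff is out of 12-bit range the
 * condition is inverted (e.g. BPF_JGT becomes BPF_JLE, BPF_JEQ
 * becomes BPF_JNE), so the short conditional branch only has to hop
 * over the emit_jump() sequence instead of covering the distance.
 */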
/*
* For a far branch, the condition is negated and we jump over the
* branch itself, and the three instructions from emit_jump.
* For a near branch, just use paoff.
*/
off = far ? (HPPA_BRANCH_DISPLACEMENT - 1) : paoff - HPPA_BRANCH_DISPLACEMENT;
switch (op) {
/* IF (dst COND src) JUMP off */
case BPF_JEQ:
emit(hppa_beq(rd, rs, off), ctx);
break;
case BPF_JGT:
emit(hppa_bgtu(rd, rs, off), ctx);
break;
case BPF_JLT:
emit(hppa_bltu(rd, rs, off), ctx);
break;
case BPF_JGE:
emit(hppa_bgeu(rd, rs, off), ctx);
break;
case BPF_JLE:
emit(hppa_bleu(rd, rs, off), ctx);
break;
case BPF_JNE:
emit(hppa_bne(rd, rs, off), ctx);
break;
case BPF_JSGT:
emit(hppa_bgt(rd, rs, off), ctx);
break;
case BPF_JSLT:
emit(hppa_blt(rd, rs, off), ctx);
break;
case BPF_JSGE:
emit(hppa_bge(rd, rs, off), ctx);
break;
case BPF_JSLE:
emit(hppa_ble(rd, rs, off), ctx);
break;
default:
WARN_ON(1);
}
if (far) {
e = ctx->ninsns;
/* Adjust for extra insns. */
paoff -= (e - s);
emit_jump(paoff, true, ctx);
}
return 0;
}
static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 paoff,
struct hppa_jit_context *ctx, const u8 op)
{
int e, s = ctx->ninsns;
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);
e = ctx->ninsns;
/* Adjust for extra insns. */
paoff -= (e - s);
if (emit_bcc(op, lo(rs1), lo(rs2), paoff, ctx))
return -1;
return 0;
}
static void emit_call(bool fixed, u64 addr, struct hppa_jit_context *ctx)
{
const s8 *tmp = regmap[TMP_REG_1];
const s8 *r0 = regmap[BPF_REG_0];
const s8 *reg;
const int offset_sp = 2 * STACK_ALIGN;
/* prepare stack */
emit(hppa_ldo(offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
/* load R1 & R2 in registers, R3-R5 to stack. */
reg = bpf_get_reg64_offset(regmap[BPF_REG_5], tmp, offset_sp, ctx);
emit(hppa_stw(hi(reg), -0x48, HPPA_REG_SP), ctx);
emit(hppa_stw(lo(reg), -0x44, HPPA_REG_SP), ctx);
reg = bpf_get_reg64_offset(regmap[BPF_REG_4], tmp, offset_sp, ctx);
emit(hppa_stw(hi(reg), -0x40, HPPA_REG_SP), ctx);
emit(hppa_stw(lo(reg), -0x3c, HPPA_REG_SP), ctx);
reg = bpf_get_reg64_offset(regmap[BPF_REG_3], tmp, offset_sp, ctx);
emit(hppa_stw(hi(reg), -0x38, HPPA_REG_SP), ctx);
emit(hppa_stw(lo(reg), -0x34, HPPA_REG_SP), ctx);
reg = bpf_get_reg64_offset(regmap[BPF_REG_2], tmp, offset_sp, ctx);
emit_hppa_copy(hi(reg), HPPA_REG_ARG3, ctx);
emit_hppa_copy(lo(reg), HPPA_REG_ARG2, ctx);
reg = bpf_get_reg64_offset(regmap[BPF_REG_1], tmp, offset_sp, ctx);
emit_hppa_copy(hi(reg), HPPA_REG_ARG1, ctx);
emit_hppa_copy(lo(reg), HPPA_REG_ARG0, ctx);
/* backup TCC */
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_copy(HPPA_REG_TCC, HPPA_REG_TCC_SAVED), ctx);
/*
* Use ldil() to load absolute address. Don't use emit_imm as the
* number of emitted instructions should not depend on the value of
* addr.
*/
emit(hppa_ldil(addr, HPPA_REG_R31), ctx);
emit(hppa_be_l(im11(addr) >> 2, HPPA_REG_R31, EXEC_NEXT_INSTR), ctx);
/* set return address in delay slot */
emit_hppa_copy(HPPA_REG_R31, HPPA_REG_RP, ctx);
/* restore TCC */
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_copy(HPPA_REG_TCC_SAVED, HPPA_REG_TCC), ctx);
/* restore stack */
emit(hppa_ldo(-offset_sp, HPPA_REG_SP, HPPA_REG_SP), ctx);
/* set return value. */
emit_hppa_copy(HPPA_REG_RET0, hi(r0), ctx);
emit_hppa_copy(HPPA_REG_RET1, lo(r0), ctx);
}
static int emit_bpf_tail_call(int insn, struct hppa_jit_context *ctx)
{
/*
* R1 -> &ctx
* R2 -> &array
* R3 -> index
*/
int off;
const s8 *arr_reg = regmap[BPF_REG_2];
const s8 *idx_reg = regmap[BPF_REG_3];
struct bpf_array bpfa;
struct bpf_prog bpfp;
/* get address of TCC main exit function for error case into rp */
emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
/* max_entries = array->map.max_entries; */
off = offsetof(struct bpf_array, map.max_entries);
BUILD_BUG_ON(sizeof(bpfa.map.max_entries) != 4);
emit(hppa_ldw(off, lo(arr_reg), HPPA_REG_T1), ctx);
/*
* if (index >= max_entries)
* goto out;
*/
emit(hppa_bltu(lo(idx_reg), HPPA_REG_T1, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* if (--tcc < 0)
* goto out;
*/
REG_FORCE_SEEN(ctx, HPPA_REG_TCC);
emit(hppa_ldo(-1, HPPA_REG_TCC, HPPA_REG_TCC), ctx);
emit(hppa_bge(HPPA_REG_TCC, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* prog = array->ptrs[index];
* if (!prog)
* goto out;
*/
BUILD_BUG_ON(sizeof(bpfa.ptrs[0]) != 4);
emit(hppa_sh2add(lo(idx_reg), lo(arr_reg), HPPA_REG_T0), ctx);
off = offsetof(struct bpf_array, ptrs);
BUILD_BUG_ON(!relative_bits_ok(off, 11));
emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
emit(hppa_bne(HPPA_REG_T0, HPPA_REG_ZERO, 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
/*
* tcc = temp_tcc;
* goto *(prog->bpf_func + 4);
*/
off = offsetof(struct bpf_prog, bpf_func);
BUILD_BUG_ON(!relative_bits_ok(off, 11));
BUILD_BUG_ON(sizeof(bpfp.bpf_func) != 4);
emit(hppa_ldw(off, HPPA_REG_T0, HPPA_REG_T0), ctx);
/* Epilogue jumps to *(t0 + 4). */
__build_epilogue(true, ctx);
return 0;
}
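/*
 * For reference, a plain-C sketch (added annotation) of the control
 * flow the JITed tail-call sequence above implements:
 *
 *	if (index >= array->map.max_entries)
 *		goto out;
 *	if (--tcc < 0)
 *		goto out;
 *	prog = array->ptrs[index];
 *	if (!prog)
 *		goto out;
 *	goto *(prog->bpf_func + 4);	// +4 skips the TCC init insn
 */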
static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
struct hppa_jit_context *ctx, const u8 size)
{
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rd = bpf_get_reg64_ref(dst, tmp1, ctx->prog->aux->verifier_zext, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
s8 srcreg;
/* need to calculate address since offset does not fit in 14 bits? */
if (relative_bits_ok(off, 14))
srcreg = lo(rs);
else {
/* need to use R1 here, since addil puts result into R1 */
srcreg = HPPA_REG_R1;
emit(hppa_addil(off, lo(rs)), ctx);
off = im11(off);
}
/* LDX: dst = *(size *)(src + off) */
switch (size) {
case BPF_B:
emit(hppa_ldb(off + 0, srcreg, lo(rd)), ctx);
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case BPF_H:
emit(hppa_ldh(off + 0, srcreg, lo(rd)), ctx);
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case BPF_W:
emit(hppa_ldw(off + 0, srcreg, lo(rd)), ctx);
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case BPF_DW:
emit(hppa_ldw(off + 0, srcreg, hi(rd)), ctx);
emit(hppa_ldw(off + 4, srcreg, lo(rd)), ctx);
break;
}
bpf_put_reg64(dst, rd, ctx);
return 0;
}
static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
struct hppa_jit_context *ctx, const u8 size,
const u8 mode)
{
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
s8 dstreg;
/* need to calculate address since offset does not fit in 14 bits? */
if (relative_bits_ok(off, 14))
dstreg = lo(rd);
else {
/* need to use R1 here, since addil puts result into R1 */
dstreg = HPPA_REG_R1;
emit(hppa_addil(off, lo(rd)), ctx);
off = im11(off);
}
/* ST/STX: *(size *)(dst + off) = src (imm is materialized into src for BPF_ST) */
switch (size) {
case BPF_B:
emit(hppa_stb(lo(rs), off + 0, dstreg), ctx);
break;
case BPF_H:
emit(hppa_sth(lo(rs), off + 0, dstreg), ctx);
break;
case BPF_W:
emit(hppa_stw(lo(rs), off + 0, dstreg), ctx);
break;
case BPF_DW:
emit(hppa_stw(hi(rs), off + 0, dstreg), ctx);
emit(hppa_stw(lo(rs), off + 4, dstreg), ctx);
break;
}
return 0;
}
static void emit_rev16(const s8 rd, struct hppa_jit_context *ctx)
{
emit(hppa_extru(rd, 23, 8, HPPA_REG_T1), ctx);
emit(hppa_depwz(rd, 23, 8, HPPA_REG_T1), ctx);
emit(hppa_extru(HPPA_REG_T1, 31, 16, rd), ctx);
}
static void emit_rev32(const s8 rs, const s8 rd, struct hppa_jit_context *ctx)
{
emit(hppa_shrpw(rs, rs, 16, HPPA_REG_T1), ctx);
emit(hppa_depwz(HPPA_REG_T1, 15, 8, HPPA_REG_T1), ctx);
emit(hppa_shrpw(rs, HPPA_REG_T1, 8, rd), ctx);
}
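/*
 * Added note: the shrpw/depwz sequence above performs a full 32-bit
 * byte reversal.  A portable C equivalent of the net effect (sketch):
 *
 *	static inline u32 rev32(u32 x)
 *	{
 *		return ((x & 0x000000ffu) << 24) |
 *		       ((x & 0x0000ff00u) <<  8) |
 *		       ((x & 0x00ff0000u) >>  8) |
 *		       ((x & 0xff000000u) >> 24);
 *	}
 */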
static void emit_zext64(const s8 *dst, struct hppa_jit_context *ctx)
{
const s8 *rd;
const s8 *tmp1 = regmap[TMP_REG_1];
rd = bpf_get_reg64(dst, tmp1, ctx);
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
bpf_put_reg64(dst, rd, ctx);
}
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct hppa_jit_context *ctx,
bool extra_pass)
{
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
BPF_CLASS(insn->code) == BPF_JMP;
int s, e, paoff, i = insn - ctx->prog->insnsi;
u8 code = insn->code;
s16 off = insn->off;
s32 imm = insn->imm;
const s8 *dst = regmap[insn->dst_reg];
const s8 *src = regmap[insn->src_reg];
const s8 *tmp1 = regmap[TMP_REG_1];
const s8 *tmp2 = regmap[TMP_REG_2];
if (0) printk("CLASS %03d CODE %#02x ALU64:%d BPF_SIZE %#02x "
"BPF_CODE %#02x src_reg %d dst_reg %d\n",
BPF_CLASS(code), code, (code & BPF_ALU64) ? 1:0, BPF_SIZE(code),
BPF_OP(code), insn->src_reg, insn->dst_reg);
switch (code) {
/* dst = src */
case BPF_ALU64 | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_X:
case BPF_ALU64 | BPF_ADD | BPF_K:
case BPF_ALU64 | BPF_SUB | BPF_X:
case BPF_ALU64 | BPF_SUB | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_X:
case BPF_ALU64 | BPF_OR | BPF_X:
case BPF_ALU64 | BPF_XOR | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_X:
case BPF_ALU64 | BPF_MUL | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
if (BPF_SRC(code) == BPF_K) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
emit_alu_r64(dst, src, ctx, BPF_OP(code));
break;
/* dst = -dst */
case BPF_ALU64 | BPF_NEG:
emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
break;
case BPF_ALU64 | BPF_MOV | BPF_K:
case BPF_ALU64 | BPF_AND | BPF_K:
case BPF_ALU64 | BPF_OR | BPF_K:
case BPF_ALU64 | BPF_XOR | BPF_K:
case BPF_ALU64 | BPF_LSH | BPF_K:
case BPF_ALU64 | BPF_RSH | BPF_K:
case BPF_ALU64 | BPF_ARSH | BPF_K:
emit_alu_i64(dst, imm, ctx, BPF_OP(code));
break;
case BPF_ALU | BPF_MOV | BPF_X:
if (imm == 1) {
/* Special mov32 for zext. */
emit_zext64(dst, ctx);
break;
}
fallthrough;
/* dst = dst OP src */
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_ARSH | BPF_X:
if (BPF_SRC(code) == BPF_K) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
emit_alu_r32(dst, src, ctx, BPF_OP(code));
break;
/* dst = dst OP imm */
case BPF_ALU | BPF_MOV | BPF_K:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_ARSH | BPF_K:
/*
* mul,div,mod are handled in the BPF_X case.
*/
emit_alu_i32(dst, imm, ctx, BPF_OP(code));
break;
/* dst = -dst */
case BPF_ALU | BPF_NEG:
/*
* src is ignored---choose tmp2 as a dummy register since it
* is not on the stack.
*/
emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
break;
/* dst = BSWAP##imm(dst) */
case BPF_ALU | BPF_END | BPF_FROM_BE:
{
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
emit(hppa_extru(lo(rd), 31, 16, lo(rd)), ctx);
fallthrough;
case 32:
/* zero-extend 32 bits into 64 bits */
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case 64:
/* Do nothing. */
break;
default:
pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
return -1;
}
bpf_put_reg64(dst, rd, ctx);
break;
}
case BPF_ALU | BPF_END | BPF_FROM_LE:
{
const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
switch (imm) {
case 16:
emit_rev16(lo(rd), ctx);
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case 32:
emit_rev32(lo(rd), lo(rd), ctx);
if (!ctx->prog->aux->verifier_zext)
emit_hppa_copy(HPPA_REG_ZERO, hi(rd), ctx);
break;
case 64:
/* Swap upper and lower halves, then each half. */
emit_hppa_copy(hi(rd), HPPA_REG_T0, ctx);
emit_rev32(lo(rd), hi(rd), ctx);
emit_rev32(HPPA_REG_T0, lo(rd), ctx);
break;
default:
pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
return -1;
}
bpf_put_reg64(dst, rd, ctx);
break;
}
/* JUMP off */
case BPF_JMP | BPF_JA:
paoff = hppa_offset(i, off, ctx);
emit_jump(paoff, false, ctx);
break;
/* function call */
case BPF_JMP | BPF_CALL:
{
bool fixed;
int ret;
u64 addr;
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
&fixed);
if (ret < 0)
return ret;
emit_call(fixed, addr, ctx);
break;
}
/* tail call */
case BPF_JMP | BPF_TAIL_CALL:
REG_SET_SEEN_ALL(ctx);
if (emit_bpf_tail_call(i, ctx))
return -1;
break;
/* IF (dst COND imm) JUMP off */
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_K:
paoff = hppa_offset(i, off, ctx);
if (BPF_SRC(code) == BPF_K) {
s = ctx->ninsns;
emit_imm32(tmp2, imm, ctx);
src = tmp2;
e = ctx->ninsns;
paoff -= (e - s);
}
if (is64)
emit_branch_r64(dst, src, paoff, ctx, BPF_OP(code));
else
emit_branch_r32(dst, src, paoff, ctx, BPF_OP(code));
break;
/* function return */
case BPF_JMP | BPF_EXIT:
if (i == ctx->prog->len - 1)
break;
/* load epilogue function pointer and jump to it. */
emit(EXIT_PTR_LOAD(HPPA_REG_RP), ctx);
emit(EXIT_PTR_JUMP(HPPA_REG_RP, NOP_NEXT_INSTR), ctx);
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW:
{
struct bpf_insn insn1 = insn[1];
u32 upper = insn1.imm;
u32 lower = imm;
const s8 *rd = bpf_get_reg64_ref(dst, tmp1, false, ctx);
if (0 && bpf_pseudo_func(insn)) {
WARN_ON(upper); /* we are 32-bit! */
upper = 0;
lower = (uintptr_t) dereference_function_descriptor(lower);
}
emit_imm64(rd, upper, lower, ctx);
bpf_put_reg64(dst, rd, ctx);
return 1;
}
/* LDX: dst = *(size *)(src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW:
if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
return -1;
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_DW:
case BPF_STX | BPF_MEM | BPF_B:
case BPF_STX | BPF_MEM | BPF_H:
case BPF_STX | BPF_MEM | BPF_W:
case BPF_STX | BPF_MEM | BPF_DW:
if (BPF_CLASS(code) == BPF_ST) {
emit_imm32(tmp2, imm, ctx);
src = tmp2;
}
if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
BPF_MODE(code)))
return -1;
break;
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
pr_info_once(
"bpf-jit: not supported: atomic operation %02x ***\n",
insn->imm);
return -EFAULT;
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
}
return 0;
}
void bpf_jit_build_prologue(struct hppa_jit_context *ctx)
{
const s8 *tmp = regmap[TMP_REG_1];
const s8 *dst, *reg;
int stack_adjust = 0;
int i;
unsigned long addr;
int bpf_stack_adjust;
/*
* stack on hppa grows up, so if tail calls are used we need to
* allocate the maximum stack size
*/
if (REG_ALL_SEEN(ctx))
bpf_stack_adjust = MAX_BPF_STACK;
else
bpf_stack_adjust = ctx->prog->aux->stack_depth;
bpf_stack_adjust = round_up(bpf_stack_adjust, STACK_ALIGN);
/* make space for callee-saved registers. */
stack_adjust += NR_SAVED_REGISTERS * REG_SIZE;
/* make space for BPF registers on stack. */
stack_adjust += BPF_JIT_SCRATCH_REGS * REG_SIZE;
/* make space for BPF stack. */
stack_adjust += bpf_stack_adjust;
/* round up for stack alignment. */
stack_adjust = round_up(stack_adjust, STACK_ALIGN);
/*
* The first instruction sets the tail-call-counter (TCC) register.
* This instruction is skipped by tail calls.
* Use a temporary register instead of a caller-saved register initially.
*/
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC_IN_INIT), ctx);
/*
* skip all initializations when called as BPF TAIL call.
*/
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_R1), ctx);
emit(hppa_bne(HPPA_REG_TCC_IN_INIT, HPPA_REG_R1, ctx->prologue_len - 2 - HPPA_BRANCH_DISPLACEMENT), ctx);
/* set up hppa stack frame. */
emit_hppa_copy(HPPA_REG_SP, HPPA_REG_R1, ctx); // copy sp,r1 (=prev_sp)
emit(hppa_ldo(stack_adjust, HPPA_REG_SP, HPPA_REG_SP), ctx); // ldo stack_adjust(sp),sp (increase stack)
emit(hppa_stw(HPPA_REG_R1, -REG_SIZE, HPPA_REG_SP), ctx); // stw prev_sp,-0x04(sp)
emit(hppa_stw(HPPA_REG_RP, -0x14, HPPA_REG_SP), ctx); // stw rp,-0x14(sp)
REG_FORCE_SEEN(ctx, HPPA_REG_T0);
REG_FORCE_SEEN(ctx, HPPA_REG_T1);
REG_FORCE_SEEN(ctx, HPPA_REG_T2);
REG_FORCE_SEEN(ctx, HPPA_REG_T3);
REG_FORCE_SEEN(ctx, HPPA_REG_T4);
REG_FORCE_SEEN(ctx, HPPA_REG_T5);
/* save callee-save registers. */
for (i = 3; i <= 18; i++) {
if (OPTIMIZE_HPPA && !REG_WAS_SEEN(ctx, HPPA_R(i)))
continue;
emit(hppa_stw(HPPA_R(i), -REG_SIZE * (8 + (i-3)), HPPA_REG_SP), ctx); // stw ri,-save_area(sp)
}
/*
* now really set the tail call counter (TCC) register.
*/
if (REG_WAS_SEEN(ctx, HPPA_REG_TCC))
emit(hppa_ldi(MAX_TAIL_CALL_CNT, HPPA_REG_TCC), ctx);
/*
* save epilogue function pointer for outer TCC call chain.
* The main TCC call stores the final RP on stack.
*/
addr = (uintptr_t) &ctx->insns[ctx->epilogue_offset];
/* skip first two instructions of exit function, which jump to exit */
addr += 2 * HPPA_INSN_SIZE;
emit(hppa_ldil(addr, HPPA_REG_T2), ctx);
emit(hppa_ldo(im11(addr), HPPA_REG_T2, HPPA_REG_T2), ctx);
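/*
 * Added note: ldil/ldo is the usual PA-RISC two-insn constant load,
 * upper 21 bits via ldil, low 11 bits via the ldo displacement.
 * Roughly (ignoring the sign adjustment that the L%/R% field
 * selectors normally perform, which hppa_ldil()/im11() are assumed
 * to handle):
 *
 *	t2 = addr & ~0x7ffUL;	// ldil part
 *	t2 += addr & 0x7ffUL;	// ldo/im11 part
 */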
emit(EXIT_PTR_STORE(HPPA_REG_T2), ctx);
/* load R1 & R2 from registers, R3-R5 from stack. */
/* use HPPA_REG_R1 which holds the old stack value */
dst = regmap[BPF_REG_5];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, hi(reg)))
emit(hppa_ldw(-0x48, HPPA_REG_R1, hi(reg)), ctx);
if (REG_WAS_SEEN(ctx, lo(reg)))
emit(hppa_ldw(-0x44, HPPA_REG_R1, lo(reg)), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
dst = regmap[BPF_REG_4];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, hi(reg)))
emit(hppa_ldw(-0x40, HPPA_REG_R1, hi(reg)), ctx);
if (REG_WAS_SEEN(ctx, lo(reg)))
emit(hppa_ldw(-0x3c, HPPA_REG_R1, lo(reg)), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
dst = regmap[BPF_REG_3];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, hi(reg)))
emit(hppa_ldw(-0x38, HPPA_REG_R1, hi(reg)), ctx);
if (REG_WAS_SEEN(ctx, lo(reg)))
emit(hppa_ldw(-0x34, HPPA_REG_R1, lo(reg)), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
dst = regmap[BPF_REG_2];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, hi(reg)))
emit_hppa_copy(HPPA_REG_ARG3, hi(reg), ctx);
if (REG_WAS_SEEN(ctx, lo(reg)))
emit_hppa_copy(HPPA_REG_ARG2, lo(reg), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
dst = regmap[BPF_REG_1];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, hi(reg)))
emit_hppa_copy(HPPA_REG_ARG1, hi(reg), ctx);
if (REG_WAS_SEEN(ctx, lo(reg)))
emit_hppa_copy(HPPA_REG_ARG0, lo(reg), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
/* Set up BPF frame pointer. */
dst = regmap[BPF_REG_FP];
reg = bpf_get_reg64_ref(dst, tmp, false, ctx);
if (REG_WAS_SEEN(ctx, lo(reg)) | REG_WAS_SEEN(ctx, hi(reg))) {
if (REG_WAS_SEEN(ctx, lo(reg)))
emit(hppa_ldo(-REG_SIZE * (NR_SAVED_REGISTERS + BPF_JIT_SCRATCH_REGS),
HPPA_REG_SP, lo(reg)), ctx);
if (REG_WAS_SEEN(ctx, hi(reg)))
emit_hppa_copy(HPPA_REG_ZERO, hi(reg), ctx);
bpf_put_reg64(dst, tmp, ctx);
}
emit(hppa_nop(), ctx);
}
void bpf_jit_build_epilogue(struct hppa_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
| linux-master | arch/parisc/net/bpf_jit_comp32.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common functionality for HPPA32 and HPPA64 BPF JIT compilers
*
* Copyright (c) 2023 Helge Deller <[email protected]>
*
*/
#include <linux/bpf.h>
#include <linux/filter.h>
#include "bpf_jit.h"
/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS 35
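/*
 * Added note: the compile loop below sizes the image by fixed-point
 * iteration.  Offsets start from a generous 20-insn-per-BPF-insn
 * estimate and the whole body is re-emitted until the instruction
 * count stops changing.  In outline (names illustrative):
 *
 *	prev = -1;
 *	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
 *		n = emit_all(ctx);	// refreshes ctx->offset[]
 *		if (n == prev)
 *			break;		// converged
 *		prev = n;
 *	}
 */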
static int build_body(struct hppa_jit_context *ctx, bool extra_pass, int *offset)
{
const struct bpf_prog *prog = ctx->prog;
int i;
ctx->reg_seen_collect = true;
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
if (ret > 0)
i++;
if (offset)
offset[i] = ctx->ninsns;
if (ret < 0)
return ret;
}
ctx->reg_seen_collect = false;
return 0;
}
bool bpf_jit_needs_zext(void)
{
return true;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
unsigned int prog_size = 0, extable_size = 0;
bool tmp_blinded = false, extra_pass = false;
struct bpf_prog *tmp, *orig_prog = prog;
int pass = 0, prev_ninsns = 0, prologue_len, i;
struct hppa_jit_data *jit_data;
struct hppa_jit_context *ctx;
if (!prog->jit_requested)
return orig_prog;
tmp = bpf_jit_blind_constants(prog);
if (IS_ERR(tmp))
return orig_prog;
if (tmp != prog) {
tmp_blinded = true;
prog = tmp;
}
jit_data = prog->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
prog = orig_prog;
goto out;
}
prog->aux->jit_data = jit_data;
}
ctx = &jit_data->ctx;
if (ctx->offset) {
extra_pass = true;
prog_size = sizeof(*ctx->insns) * ctx->ninsns;
goto skip_init_ctx;
}
ctx->prog = prog;
ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (!ctx->offset) {
prog = orig_prog;
goto out_offset;
}
for (i = 0; i < prog->len; i++) {
prev_ninsns += 20;
ctx->offset[i] = prev_ninsns;
}
for (i = 0; i < NR_JIT_ITERATIONS; i++) {
pass++;
ctx->ninsns = 0;
if (build_body(ctx, extra_pass, ctx->offset)) {
prog = orig_prog;
goto out_offset;
}
ctx->body_len = ctx->ninsns;
bpf_jit_build_prologue(ctx);
ctx->prologue_len = ctx->ninsns - ctx->body_len;
ctx->epilogue_offset = ctx->ninsns;
bpf_jit_build_epilogue(ctx);
if (ctx->ninsns == prev_ninsns) {
if (jit_data->header)
break;
/* obtain the actual image size */
extable_size = prog->aux->num_exentries *
sizeof(struct exception_table_entry);
prog_size = sizeof(*ctx->insns) * ctx->ninsns;
jit_data->header =
bpf_jit_binary_alloc(prog_size + extable_size,
&jit_data->image,
sizeof(u32),
bpf_fill_ill_insns);
if (!jit_data->header) {
prog = orig_prog;
goto out_offset;
}
ctx->insns = (u32 *)jit_data->image;
/*
* Now that the image is allocated, the code can
* potentially shrink further (note inherited from the
* RISC-V JIT: auipc/jalr -> jal).
*/
}
prev_ninsns = ctx->ninsns;
}
if (i == NR_JIT_ITERATIONS) {
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
if (jit_data->header)
bpf_jit_binary_free(jit_data->header);
prog = orig_prog;
goto out_offset;
}
if (extable_size)
prog->aux->extable = (void *)ctx->insns + prog_size;
skip_init_ctx:
pass++;
ctx->ninsns = 0;
bpf_jit_build_prologue(ctx);
if (build_body(ctx, extra_pass, NULL)) {
bpf_jit_binary_free(jit_data->header);
prog = orig_prog;
goto out_offset;
}
bpf_jit_build_epilogue(ctx);
if (HPPA_JIT_DEBUG || bpf_jit_enable > 1) {
if (HPPA_JIT_DUMP)
bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
if (HPPA_JIT_REBOOT)
{ extern int machine_restart(char *); machine_restart(""); }
}
prog->bpf_func = (void *)ctx->insns;
prog->jited = 1;
prog->jited_len = prog_size;
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
bpf_jit_binary_lock_ro(jit_data->header);
prologue_len = ctx->epilogue_offset - ctx->body_len;
for (i = 0; i < prog->len; i++)
ctx->offset[i] += prologue_len;
bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
kfree(ctx->offset);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
out:
if (HPPA_JIT_REBOOT)
{ extern int machine_restart(char *); machine_restart(""); }
if (tmp_blinded)
bpf_jit_prog_release_other(prog, prog == orig_prog ?
tmp : orig_prog);
return prog;
}
u64 hppa_div64(u64 div, u64 divisor)
{
div = div64_u64(div, divisor);
return div;
}
u64 hppa_div64_rem(u64 div, u64 divisor)
{
u64 rem;
div64_u64_rem(div, divisor, &rem);
return rem;
}
| linux-master | arch/parisc/net/bpf_jit_core.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "../../kernel/firmware.c"
| linux-master | arch/parisc/boot/compressed/firmware.c |
/*
* Definitions and wrapper functions for kernel decompressor
*
* (C) 2017 Helge Deller <[email protected]>
*/
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <asm/unaligned.h>
#include <asm/page.h>
#include "sizes.h"
/*
* gzip declarations
*/
#define STATIC static
#undef memmove
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
#define malloc malloc_gzip
#define free free_gzip
/* Symbols defined by linker scripts */
extern char input_data[];
extern int input_len;
/* output_len is inserted by the linker possibly at an unaligned address */
extern char output_len;
extern char _text, _end;
extern char _bss, _ebss;
extern char _startcode_end;
extern void startup_continue(void *entry, unsigned long cmdline,
unsigned long rd_start, unsigned long rd_end) __noreturn;
void error(char *m) __noreturn;
static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif
#ifdef CONFIG_KERNEL_BZIP2
#include "../../../../lib/decompress_bunzip2.c"
#endif
#ifdef CONFIG_KERNEL_LZ4
#include "../../../../lib/decompress_unlz4.c"
#endif
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif
#ifdef CONFIG_KERNEL_XZ
#include "../../../../lib/decompress_unxz.c"
#endif
void *memmove(void *dest, const void *src, size_t n)
{
const char *s = src;
char *d = dest;
if (d <= s) {
while (n--)
*d++ = *s++;
} else {
d += n;
s += n;
while (n--)
*--d = *--s;
}
return dest;
}
void *memset(void *s, int c, size_t count)
{
char *xs = (char *)s;
while (count--)
*xs++ = c;
return s;
}
void *memcpy(void *d, const void *s, size_t len)
{
char *dest = (char *)d;
const char *source = (const char *)s;
while (len--)
*dest++ = *source++;
return d;
}
size_t strlen(const char *s)
{
const char *sc;
for (sc = s; *sc != '\0'; ++sc)
;
return sc - s;
}
char *strchr(const char *s, int c)
{
while (*s) {
if (*s == (char)c)
return (char *)s;
++s;
}
return NULL;
}
static int puts(const char *s)
{
const char *nuline = s;
while ((nuline = strchr(s, '\n')) != NULL) {
if (nuline != s)
pdc_iodc_print(s, nuline - s);
pdc_iodc_print("\r\n", 2);
s = nuline + 1;
}
if (*s != '\0')
pdc_iodc_print(s, strlen(s));
return 0;
}
static int putchar(int c)
{
char buf[2];
buf[0] = c;
buf[1] = '\0';
puts(buf);
return c;
}
void __noreturn error(char *x)
{
if (x) puts(x);
puts("\n -- System halted\n");
while (1) /* wait forever */
;
}
static int print_num(unsigned long num, int base)
{
const char hex[] = "0123456789abcdef";
char str[40];
int i = sizeof(str)-1;
str[i--] = '\0';
do {
str[i--] = hex[num % base];
num = num / base;
} while (num);
if (base == 16) {
str[i--] = 'x';
str[i] = '0';
} else i++;
puts(&str[i]);
return 0;
}
static int printf(const char *fmt, ...)
{
va_list args;
int i = 0;
va_start(args, fmt);
while (fmt[i]) {
if (fmt[i] != '%') {
put:
putchar(fmt[i++]);
continue;
}
if (fmt[++i] == '%')
goto put;
print_num(va_arg(args, unsigned long),
fmt[i] == 'x' ? 16:10);
++i;
}
va_end(args);
return 0;
}
/* helper functions for libgcc */
void abort(void)
{
error("aborted.");
}
#undef malloc
static void *malloc(size_t size)
{
return malloc_gzip(size);
}
#undef free
static void free(void *ptr)
{
return free_gzip(ptr);
}
static void flush_data_cache(char *start, unsigned long length)
{
char *end = start + length;
do {
asm volatile("fdc 0(%0)" : : "r" (start));
asm volatile("fic 0(%%sr0,%0)" : : "r" (start));
start += 16;
} while (start < end);
asm volatile("fdc 0(%0)" : : "r" (end));
asm ("sync");
}
static void parse_elf(void *output)
{
#ifdef CONFIG_64BIT
Elf64_Ehdr ehdr;
Elf64_Phdr *phdrs, *phdr;
#else
Elf32_Ehdr ehdr;
Elf32_Phdr *phdrs, *phdr;
#endif
void *dest;
int i;
memcpy(&ehdr, output, sizeof(ehdr));
if (ehdr.e_ident[EI_MAG0] != ELFMAG0 ||
ehdr.e_ident[EI_MAG1] != ELFMAG1 ||
ehdr.e_ident[EI_MAG2] != ELFMAG2 ||
ehdr.e_ident[EI_MAG3] != ELFMAG3) {
error("Kernel is not a valid ELF file");
return;
}
#ifdef DEBUG
printf("Parsing ELF... ");
#endif
phdrs = malloc(sizeof(*phdrs) * ehdr.e_phnum);
if (!phdrs)
error("Failed to allocate space for phdrs");
memcpy(phdrs, output + ehdr.e_phoff, sizeof(*phdrs) * ehdr.e_phnum);
for (i = 0; i < ehdr.e_phnum; i++) {
phdr = &phdrs[i];
switch (phdr->p_type) {
case PT_LOAD:
dest = (void *)((unsigned long) phdr->p_paddr &
(__PAGE_OFFSET_DEFAULT-1));
memmove(dest, output + phdr->p_offset, phdr->p_filesz);
break;
default:
break;
}
}
free(phdrs);
}
asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide,
unsigned int command_line,
const unsigned int rd_start,
const unsigned int rd_end)
{
char *output;
unsigned long vmlinux_addr, vmlinux_len;
unsigned long kernel_addr, kernel_len;
#ifdef CONFIG_64BIT
parisc_narrow_firmware = 0;
#endif
set_firmware_width_unlocked();
putchar('D'); /* if you get this D and no more, string storage */
/* in $GLOBAL$ is wrong or %dp is wrong */
puts("ecompressing Linux... ");
/* where the final bits are stored */
kernel_addr = KERNEL_BINARY_TEXT_START;
kernel_len = __pa(SZ_end) - __pa(SZparisc_kernel_start);
if ((unsigned long) &_startcode_end > kernel_addr)
error("Bootcode overlaps kernel code");
/*
* Calculate addr to where the vmlinux ELF file shall be decompressed.
* Assembly code in head.S positioned the stack directly behind bss, so
* leave 2 MB for the stack.
*/
vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024;
vmlinux_len = get_unaligned_le32(&output_len);
output = (char *) vmlinux_addr;
/*
* Initialize free_mem_ptr and free_mem_end_ptr.
*/
free_mem_ptr = vmlinux_addr + vmlinux_len;
/* Limit memory for bootloader to 1GB */
#define ARTIFICIAL_LIMIT (1*1024*1024*1024)
free_mem_end_ptr = PAGE0->imm_max_mem;
if (free_mem_end_ptr > ARTIFICIAL_LIMIT)
free_mem_end_ptr = ARTIFICIAL_LIMIT;
#ifdef CONFIG_BLK_DEV_INITRD
/* if we have ramdisk this is at end of memory */
if (rd_start && rd_start < free_mem_end_ptr)
free_mem_end_ptr = rd_start;
#endif
if (free_mem_ptr >= free_mem_end_ptr) {
int free_ram;
free_ram = (free_mem_ptr >> 20) + 1;
if (free_ram < 32)
free_ram = 32;
printf("\nKernel requires at least %d MB RAM.\n",
free_ram);
error(NULL);
}
#ifdef DEBUG
printf("\n");
printf("startcode_end = %x\n", &_startcode_end);
printf("commandline = %x\n", command_line);
printf("rd_start = %x\n", rd_start);
printf("rd_end = %x\n", rd_end);
printf("free_ptr = %x\n", free_mem_ptr);
printf("free_ptr_end = %x\n", free_mem_end_ptr);
printf("input_data = %x\n", input_data);
printf("input_len = %x\n", input_len);
printf("output = %x\n", output);
printf("output_len = %x\n", vmlinux_len);
printf("kernel_addr = %x\n", kernel_addr);
printf("kernel_len = %x\n", kernel_len);
#endif
__decompress(input_data, input_len, NULL, NULL,
output, 0, NULL, error);
parse_elf(output);
output = (char *) kernel_addr;
flush_data_cache(output, kernel_len);
printf("done.\nBooting the kernel.\n");
return (unsigned long) output;
}
| linux-master | arch/parisc/boot/compressed/misc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <[email protected]>
* Copyright (C) 2013-2017 Helge Deller <[email protected]>
*
* Portions derived from the GNU C Library
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
*/
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#define get_user_space() mfsp(SR_USER)
#define get_kernel_space() SR_KERNEL
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
extern unsigned long pa_memcpy(void *dst, const void *src,
unsigned long len);
unsigned long raw_copy_to_user(void __user *dst, const void *src,
unsigned long len)
{
mtsp(get_kernel_space(), SR_TEMP1);
mtsp(get_user_space(), SR_TEMP2);
return pa_memcpy((void __force *)dst, src, len);
}
EXPORT_SYMBOL(raw_copy_to_user);
unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len)
{
mtsp(get_user_space(), SR_TEMP1);
mtsp(get_kernel_space(), SR_TEMP2);
return pa_memcpy(dst, (void __force *)src, len);
}
EXPORT_SYMBOL(raw_copy_from_user);
void * memcpy(void * dst,const void *src, size_t count)
{
mtsp(get_kernel_space(), SR_TEMP1);
mtsp(get_kernel_space(), SR_TEMP2);
pa_memcpy(dst, src, count);
return dst;
}
EXPORT_SYMBOL(memcpy);
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
if ((unsigned long)unsafe_src < PAGE_SIZE)
return false;
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
return true;
}
| linux-master | arch/parisc/lib/memcpy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/parisc/lib/io.c
*
* Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
* Copyright (c) Randolph Chung 2001 <[email protected]>
*
* IO accessing functions which shouldn't be inlined because they're too big
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/io.h>
/* Copies a block of memory to a device in an efficient manner.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
goto bytecopy;
while ((unsigned long)dst & 3) {
writeb(*(char *)src, dst++);
src++;
count--;
}
while (count > 3) {
__raw_writel(*(u32 *)src, dst);
src += 4;
dst += 4;
count -= 4;
}
bytecopy:
while (count--) {
writeb(*(char *)src, dst++);
src++;
}
}
/*
** Copies a block of memory from a device in an efficient manner.
** Assumes the device can cope with 32-bit transfers. If it can't,
** don't use this function.
**
** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
** 27341/64 = 427 cyc per int
** 61311/128 = 478 cyc per short
** 122637/256 = 479 cyc per byte
** Ergo: bus latency is dominant (not transfer size).
** Minimize total number of transfers at cost of CPU cycles.
** TODO: only look at src alignment and adjust the stores to dest.
*/
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
/* first compare alignment of src/dst */
if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
goto bytecopy;
if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
goto shortcopy;
/* Then check for misaligned start address */
if ((unsigned long)src & 1) {
*(u8 *)dst = readb(src);
src++;
dst++;
count--;
if (count < 2) goto bytecopy;
}
if ((unsigned long)src & 2) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
while (count > 3) {
*(u32 *)dst = __raw_readl(src);
dst += 4;
src += 4;
count -= 4;
}
shortcopy:
while (count > 1) {
*(u16 *)dst = __raw_readw(src);
src += 2;
dst += 2;
count -= 2;
}
bytecopy:
while (count--) {
*(char *)dst = readb(src);
src++;
dst++;
}
}
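/*
 * Added note: the XOR tests at the top of memcpy_fromio() rely on the
 * fact that n-byte transfers are possible only when src and dst share
 * the same misalignment modulo n.  Standalone illustration:
 *
 *	static int can_copy_words(const void *d, const void *s)
 *	{
 *		// low two address bits must match for 32-bit loads
 *		return (((unsigned long)d ^ (unsigned long)s) & 3) == 0;
 *	}
 */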
/* Sets a block of memory on a device to a given value.
* Assumes the device can cope with 32-bit transfers. If it can't,
* don't use this function.
*/
void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
while ((unsigned long)addr & 3) {
writeb(val, addr++);
count--;
}
while (count > 3) {
__raw_writel(val32, addr);
addr += 4;
count -= 4;
}
while (count--) {
writeb(val, addr++);
}
}
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at
* SRC.
*/
void insb (unsigned long port, void *dst, unsigned long count)
{
unsigned char *p;
p = (unsigned char *)dst;
while (((unsigned long)p) & 0x3) {
if (!count)
return;
count--;
*p = inb(port);
p++;
}
while (count >= 4) {
unsigned int w;
count -= 4;
w = inb(port) << 24;
w |= inb(port) << 16;
w |= inb(port) << 8;
w |= inb(port);
*(unsigned int *) p = w;
p += 4;
}
while (count) {
--count;
*p = inb(port);
p++;
}
}
/*
* Read COUNT 16-bit words from port PORT into memory starting at
* SRC. SRC must be at least short aligned. This is used by the
* IDE driver to read disk sectors. Performance is important, but
* the interface seems to be slow: just using the inlined version
* of the inw() breaks things.
*/
void insw (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x02: /* Buffer 16-bit aligned */
*(unsigned short *)p = cpu_to_le16(inw(port));
p += 2;
count--;
while (count>=2) {
count -= 2;
l = cpu_to_le16(inw(port)) << 16;
l |= cpu_to_le16(inw(port));
*(unsigned int *)p = l;
p += 4;
}
if (count) {
*(unsigned short *)p = cpu_to_le16(inw(port));
}
break;
case 0x01: /* Buffer 8-bit aligned */
case 0x03:
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
--count;
l = cpu_to_le16(inw(port));
*p = l >> 8;
p++;
while (count--)
{
l2 = cpu_to_le16(inw(port));
*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
p += 2;
l = l2;
}
*p = l & 0xff;
break;
}
}
/*
* Read COUNT 32-bit words from port PORT into memory starting at
* SRC. Now works with any alignment in SRC. Performance is important,
* but the interface seems to be slow: just using the inlined version
* of the inl() breaks things.
*/
void insl (unsigned long port, void *dst, unsigned long count)
{
unsigned int l = 0, l2;
unsigned char *p;
p = (unsigned char *)dst;
if (!count)
return;
switch (((unsigned long) dst) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
*(unsigned int *)p = cpu_to_le32(inl(port));
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned short *)p = l >> 16;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
p += 4;
l = l2;
}
*(unsigned short *)p = l & 0xffff;
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*(unsigned char *)p = l >> 24;
p++;
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
p += 4;
l = l2;
}
*p = l & 0xff;
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = cpu_to_le32(inl(port));
*p = l >> 24;
p++;
while (count--)
{
l2 = cpu_to_le32(inl(port));
*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
p += 4;
l = l2;
}
*(unsigned short *)p = (l >> 8) & 0xffff;
p += 2;
*p = l & 0xff;
break;
}
}
/*
* Like insb but in the opposite direction.
* Don't worry as much about doing aligned memory transfers:
* doing byte reads the "slow" way isn't nearly as slow as
* doing byte writes the slow way (no r-m-w cycle).
*/
void outsb(unsigned long port, const void * src, unsigned long count)
{
const unsigned char *p;
p = (const unsigned char *)src;
while (count) {
count--;
outb(*p, port);
p++;
}
}
/*
* Like insw but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Performance is important, but the
* interface seems to be slow: just using the inlined version of the
* outw() breaks things.
*/
void outsw (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short*)p), port);
}
break;
case 0x02: /* Buffer 16-bit aligned */
outw(le16_to_cpu(*(unsigned short*)p), port);
p += 2;
count--;
while (count>=2) {
count -= 2;
l = *(unsigned int *)p;
p += 4;
outw(le16_to_cpu(l >> 16), port);
outw(le16_to_cpu(l & 0xffff), port);
}
if (count) {
outw(le16_to_cpu(*(unsigned short *)p), port);
}
break;
case 0x01: /* Buffer 8-bit aligned */
/* I don't bother with 32bit transfers
* in this case, 16bit will have to do -- DE */
l = *p << 8;
p++;
count--;
while (count)
{
count--;
l2 = *(unsigned short *)p;
p += 2;
outw(le16_to_cpu(l | l2 >> 8), port);
l = l2 << 8;
}
l2 = *(unsigned char *)p;
outw (le16_to_cpu(l | l2>>8), port);
break;
}
}
/*
* Like insl but in the opposite direction. This is used by the IDE
* driver to write disk sectors. Works with any alignment in SRC.
* Performance is important, but the interface seems to be slow:
* just using the inlined version of the outl() breaks things.
*/
void outsl (unsigned long port, const void *src, unsigned long count)
{
unsigned int l = 0, l2;
const unsigned char *p;
p = (const unsigned char *)src;
if (!count)
return;
switch (((unsigned long)p) & 0x3)
{
case 0x00: /* Buffer 32-bit aligned */
while (count--)
{
outl(le32_to_cpu(*(unsigned int *)p), port);
p += 4;
}
break;
case 0x02: /* Buffer 16-bit aligned */
--count;
l = *(unsigned short *)p;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l << 16 | l2 >> 16), port);
l = l2;
}
l2 = *(unsigned short *)p;
outl (le32_to_cpu(l << 16 | l2), port);
break;
case 0x01: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
l |= *(unsigned short *)p << 8;
p += 2;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 24), port);
l = l2 << 8;
}
l2 = *p;
outl (le32_to_cpu(l | l2), port);
break;
case 0x03: /* Buffer 8-bit aligned */
--count;
l = *p << 24;
p++;
while (count--)
{
l2 = *(unsigned int *)p;
p += 4;
outl (le32_to_cpu(l | l2 >> 8), port);
l = l2 << 24;
}
l2 = *(unsigned short *)p << 16;
p += 2;
l2 |= *p;
outl (le32_to_cpu(l | l2), port);
break;
}
}
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
| linux-master | arch/parisc/lib/io.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Precise Delay Loops for parisc
*
* based on code by:
* Copyright (C) 1993 Linus Torvalds
* Copyright (C) 1997 Martin Mares <[email protected]>
* Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
*
* parisc implementation:
* Copyright (C) 2013 Helge Deller <[email protected]>
*/
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/init.h>
#include <asm/delay.h>
#include <asm/special_insns.h> /* for mfctl() */
#include <asm/processor.h> /* for boot_cpu_data */
/* CR16 based delay: */
static void __cr16_delay(unsigned long __loops)
{
/*
* Note: Due to unsigned math, cr16 rollovers shouldn't be
* a problem here. However, on 32 bit, we need to make sure
* we don't pass in too big a value. The current default
* value of MAX_UDELAY_MS should help prevent this.
*/
u32 bclock, now, loops = __loops;
int cpu;
preempt_disable();
cpu = smp_processor_id();
bclock = mfctl(16);
for (;;) {
now = mfctl(16);
if ((now - bclock) >= loops)
break;
/* Allow RT tasks to run */
preempt_enable();
asm volatile(" nop\n");
barrier();
preempt_disable();
/*
* It is possible that we moved to another CPU, and
* since CR16's are per-cpu we need to calculate
* that. The delay must guarantee that we wait "at
* least" the amount of time. Being moved to another
* CPU could make the wait longer but we just need to
* make sure we waited long enough. Rebalance the
* counter for this CPU.
*/
if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock);
cpu = smp_processor_id();
bclock = mfctl(16);
}
}
preempt_enable();
}
void __udelay(unsigned long usecs)
{
__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
}
EXPORT_SYMBOL(__udelay);
| linux-master | arch/parisc/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* PA-RISC IP/TCP/UDP checksumming routines (derived from the MIPS code)
*
* Authors: Ralf Baechle, <[email protected]>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <net/checksum.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <linux/uaccess.h>
#define addc(_t,_r) \
__asm__ __volatile__ ( \
" add %0, %1, %0\n" \
" addc %0, %%r0, %0\n" \
: "=r"(_t) \
: "r"(_r), "0"(_t));
static inline unsigned short from32to16(unsigned int x)
{
/* 32 bits --> 16 bits + carry */
x = (x & 0xffff) + (x >> 16);
/* 16 bits + carry --> 16 bits including carry */
x = (x & 0xffff) + (x >> 16);
return (unsigned short)x;
}
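/*
 * Added worked example: from32to16(0x0001ffff)
 *	pass 1: 0xffff + 0x0001 = 0x00010000
 *	pass 2: 0x0000 + 0x0001 = 0x0001
 * The second pass is needed because the first fold can itself carry
 * out of bit 15, as it does here.
 */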
static inline unsigned int do_csum(const unsigned char * buff, int len)
{
int odd, count;
unsigned int result = 0;
if (len <= 0)
goto out;
odd = 1 & (unsigned long) buff;
if (odd) {
result = be16_to_cpu(*buff);
len--;
buff++;
}
count = len >> 1; /* nr of 16-bit words.. */
if (count) {
if (2 & (unsigned long) buff) {
result += *(unsigned short *) buff;
count--;
len -= 2;
buff += 2;
}
count >>= 1; /* nr of 32-bit words.. */
if (count) {
while (count >= 4) {
unsigned int r1, r2, r3, r4;
r1 = *(unsigned int *)(buff + 0);
r2 = *(unsigned int *)(buff + 4);
r3 = *(unsigned int *)(buff + 8);
r4 = *(unsigned int *)(buff + 12);
addc(result, r1);
addc(result, r2);
addc(result, r3);
addc(result, r4);
count -= 4;
buff += 16;
}
while (count) {
unsigned int w = *(unsigned int *) buff;
count--;
buff += 4;
addc(result, w);
}
result = (result & 0xffff) + (result >> 16);
}
if (len & 2) {
result += *(unsigned short *) buff;
buff += 2;
}
}
if (len & 1)
result += le16_to_cpu(*buff);
result = from32to16(result);
if (odd)
result = swab16(result);
out:
return result;
}
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
/*
* why bother folding?
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
addc(result, sum);
return (__force __wsum)from32to16(result);
}
EXPORT_SYMBOL(csum_partial);
| linux-master | arch/parisc/lib/checksum.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bitops.c: atomic operations which got too long to be inlined all over
* the place.
*
* Copyright 1999 Philipp Rumpf ([email protected])
* Copyright 2000 Grant Grundler ([email protected])
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
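/*
 * Added note: PA-RISC lacks atomic read-modify-write instructions, so
 * each operation below takes a spinlock selected by hashing the target
 * address; independent variables usually map to different locks.
 * Roughly (the real macro lives in <asm/atomic.h>):
 *
 *	lock = &__atomic_hash[((unsigned long)ptr / L1_CACHE_BYTES)
 *				& (ATOMIC_HASH_SIZE - 1)];
 */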
#ifdef CONFIG_64BIT
unsigned long notrace __xchg64(unsigned long x, volatile unsigned long *ptr)
{
unsigned long temp, flags;
_atomic_spin_lock_irqsave(ptr, flags);
temp = *ptr;
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return temp;
}
#endif
unsigned long notrace __xchg32(int x, volatile int *ptr)
{
unsigned long flags;
long temp;
_atomic_spin_lock_irqsave(ptr, flags);
temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)temp;
}
unsigned long notrace __xchg8(char x, volatile char *ptr)
{
unsigned long flags;
long temp;
_atomic_spin_lock_irqsave(ptr, flags);
temp = (long) *ptr; /* XXX - sign extension wanted? */
*ptr = x;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)temp;
}
u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
unsigned long flags;
u64 prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
unsigned long flags;
unsigned int prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
unsigned long flags;
u8 prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
| linux-master | arch/parisc/lib/bitops.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/types.h>
#include <asm/string.h>
#define OPSIZ (BITS_PER_LONG/8)
typedef unsigned long op_t;
void *
memset (void *dstpp, int sc, size_t len)
{
unsigned int c = sc;
long int dstp = (long int) dstpp;
if (len >= 8)
{
size_t xlen;
op_t cccc;
cccc = (unsigned char) c;
cccc |= cccc << 8;
cccc |= cccc << 16;
if (OPSIZ > 4)
/* Do the shift in two steps to avoid warning if long has 32 bits. */
cccc |= (cccc << 16) << 16;
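/* Added worked example: for c == 0xab the replication above yields
   cccc == 0xabababab (0xabababababababab when OPSIZ == 8), so each
   word store below paints OPSIZ bytes at once. */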
/* There are at least some bytes to set.
No need to test for LEN == 0 in this alignment loop. */
while (dstp % OPSIZ != 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
/* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
xlen = len / (OPSIZ * 8);
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
((op_t *) dstp)[1] = cccc;
((op_t *) dstp)[2] = cccc;
((op_t *) dstp)[3] = cccc;
((op_t *) dstp)[4] = cccc;
((op_t *) dstp)[5] = cccc;
((op_t *) dstp)[6] = cccc;
((op_t *) dstp)[7] = cccc;
dstp += 8 * OPSIZ;
xlen -= 1;
}
len %= OPSIZ * 8;
/* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
xlen = len / OPSIZ;
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
dstp += OPSIZ;
xlen -= 1;
}
len %= OPSIZ;
}
/* Write the last few bytes. */
while (len > 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
return dstpp;
}
| linux-master | arch/parisc/lib/memset.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/libgcc.h>
union ull_union {
unsigned long long ull;
struct {
unsigned int high;
unsigned int low;
} ui;
};
word_type __ucmpdi2(unsigned long long a, unsigned long long b)
{
union ull_union au = {.ull = a};
union ull_union bu = {.ull = b};
if (au.ui.high < bu.ui.high)
return 0;
else if (au.ui.high > bu.ui.high)
return 2;
if (au.ui.low < bu.ui.low)
return 0;
else if (au.ui.low > bu.ui.low)
return 2;
return 1;
}
| linux-master | arch/parisc/lib/ucmpdi2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* iomap.c - Implement iomap interface for PA-RISC
* Copyright (c) 2004 Matthew Wilcox
*/
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/io.h>
/*
* The iomap space on 32-bit PA-RISC is intended to look like this:
* 00000000-7fffffff virtual mapped IO
* 80000000-8fffffff ISA/EISA port space that can't be virtually mapped
* 90000000-9fffffff Dino port space
* a0000000-afffffff Astro port space
* b0000000-bfffffff PAT port space
* c0000000-cfffffff non-swapped memory IO
* f0000000-ffffffff legacy IO memory pointers
*
* For the moment, here's what it looks like:
* 80000000-8fffffff All ISA/EISA port space
* f0000000-ffffffff legacy IO memory pointers
*
* On 64-bit, everything is extended, so:
* 8000000000000000-8fffffffffffffff All ISA/EISA port space
* f000000000000000-ffffffffffffffff legacy IO memory pointers
*/
/*
* Technically, this should be 'if (VMALLOC_START < addr < VMALLOC_END)',
* but that's slow and we know it'll be within the first 2GB.
*/
#ifdef CONFIG_64BIT
#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<63) != 0)
#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 60) & 7)
#define IOPORT_MAP_BASE (8UL << 60)
#else
#define INDIRECT_ADDR(addr) (((unsigned long)(addr) & 1UL<<31) != 0)
#define ADDR_TO_REGION(addr) (((unsigned long)addr >> 28) & 7)
#define IOPORT_MAP_BASE (8UL << 28)
#endif
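/*
 * Added illustration: a 64-bit cookie such as 0x8000000000000123 has
 * the indirect bit set and ADDR_TO_REGION() == 0, so ioread8() below
 * dispatches through ioport_ops and ends up in inb(0x123); a plain
 * kernel virtual address leaves the top bit clear and is dereferenced
 * directly.
 */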
struct iomap_ops {
unsigned int (*read8)(const void __iomem *);
unsigned int (*read16)(const void __iomem *);
unsigned int (*read16be)(const void __iomem *);
unsigned int (*read32)(const void __iomem *);
unsigned int (*read32be)(const void __iomem *);
#ifdef CONFIG_64BIT
u64 (*read64)(const void __iomem *);
u64 (*read64be)(const void __iomem *);
#endif
void (*write8)(u8, void __iomem *);
void (*write16)(u16, void __iomem *);
void (*write16be)(u16, void __iomem *);
void (*write32)(u32, void __iomem *);
void (*write32be)(u32, void __iomem *);
#ifdef CONFIG_64BIT
void (*write64)(u64, void __iomem *);
void (*write64be)(u64, void __iomem *);
#endif
void (*read8r)(const void __iomem *, void *, unsigned long);
void (*read16r)(const void __iomem *, void *, unsigned long);
void (*read32r)(const void __iomem *, void *, unsigned long);
void (*write8r)(void __iomem *, const void *, unsigned long);
void (*write16r)(void __iomem *, const void *, unsigned long);
void (*write32r)(void __iomem *, const void *, unsigned long);
};
/* Generic ioport ops. To be replaced later by specific dino/elroy/wax code */
#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
static unsigned int ioport_read8(const void __iomem *addr)
{
return inb(ADDR2PORT(addr));
}
static unsigned int ioport_read16(const void __iomem *addr)
{
return inw(ADDR2PORT(addr));
}
static unsigned int ioport_read32(const void __iomem *addr)
{
return inl(ADDR2PORT(addr));
}
static void ioport_write8(u8 datum, void __iomem *addr)
{
outb(datum, ADDR2PORT(addr));
}
static void ioport_write16(u16 datum, void __iomem *addr)
{
outw(datum, ADDR2PORT(addr));
}
static void ioport_write32(u32 datum, void __iomem *addr)
{
outl(datum, ADDR2PORT(addr));
}
static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count)
{
insb(ADDR2PORT(addr), dst, count);
}
static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count)
{
insw(ADDR2PORT(addr), dst, count);
}
static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count)
{
insl(ADDR2PORT(addr), dst, count);
}
static void ioport_write8r(void __iomem *addr, const void *s, unsigned long n)
{
outsb(ADDR2PORT(addr), s, n);
}
static void ioport_write16r(void __iomem *addr, const void *s, unsigned long n)
{
outsw(ADDR2PORT(addr), s, n);
}
static void ioport_write32r(void __iomem *addr, const void *s, unsigned long n)
{
outsl(ADDR2PORT(addr), s, n);
}
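/*
 * Note that the big-endian slots in the table below reuse the plain port
 * accessors: ISA/EISA port space has no byte-swapped variant, so on a
 * port cookie ioread16be()/ioread32be() behave exactly like
 * ioread16()/ioread32().
 */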
static const struct iomap_ops ioport_ops = {
.read8 = ioport_read8,
.read16 = ioport_read16,
.read16be = ioport_read16,
.read32 = ioport_read32,
.read32be = ioport_read32,
.write8 = ioport_write8,
.write16 = ioport_write16,
.write16be = ioport_write16,
.write32 = ioport_write32,
.write32be = ioport_write32,
.read8r = ioport_read8r,
.read16r = ioport_read16r,
.read32r = ioport_read32r,
.write8r = ioport_write8r,
.write16r = ioport_write16r,
.write32r = ioport_write32r,
};
/* Legacy I/O memory ops */
static unsigned int iomem_read8(const void __iomem *addr)
{
return readb(addr);
}
static unsigned int iomem_read16(const void __iomem *addr)
{
return readw(addr);
}
static unsigned int iomem_read16be(const void __iomem *addr)
{
return __raw_readw(addr);
}
static unsigned int iomem_read32(const void __iomem *addr)
{
return readl(addr);
}
static unsigned int iomem_read32be(const void __iomem *addr)
{
return __raw_readl(addr);
}
#ifdef CONFIG_64BIT
static u64 iomem_read64(const void __iomem *addr)
{
return readq(addr);
}
static u64 iomem_read64be(const void __iomem *addr)
{
return __raw_readq(addr);
}
#endif
static void iomem_write8(u8 datum, void __iomem *addr)
{
writeb(datum, addr);
}
static void iomem_write16(u16 datum, void __iomem *addr)
{
writew(datum, addr);
}
static void iomem_write16be(u16 datum, void __iomem *addr)
{
__raw_writew(datum, addr);
}
static void iomem_write32(u32 datum, void __iomem *addr)
{
writel(datum, addr);
}
static void iomem_write32be(u32 datum, void __iomem *addr)
{
__raw_writel(datum, addr);
}
#ifdef CONFIG_64BIT
static void iomem_write64(u64 datum, void __iomem *addr)
{
writeq(datum, addr);
}
static void iomem_write64be(u64 datum, void __iomem *addr)
{
__raw_writeq(datum, addr);
}
#endif
static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count)
{
while (count--) {
*(u8 *)dst = __raw_readb(addr);
dst++;
}
}
static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count)
{
while (count--) {
*(u16 *)dst = __raw_readw(addr);
dst += 2;
}
}
static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count)
{
while (count--) {
*(u32 *)dst = __raw_readl(addr);
dst += 4;
}
}
static void iomem_write8r(void __iomem *addr, const void *s, unsigned long n)
{
while (n--) {
__raw_writeb(*(u8 *)s, addr);
s++;
}
}
static void iomem_write16r(void __iomem *addr, const void *s, unsigned long n)
{
while (n--) {
__raw_writew(*(u16 *)s, addr);
s += 2;
}
}
static void iomem_write32r(void __iomem *addr, const void *s, unsigned long n)
{
while (n--) {
__raw_writel(*(u32 *)s, addr);
s += 4;
}
}
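/*
 * The repeating helpers above use the __raw_* accessors on purpose: the
 * same device register is re-read or re-written once per element (FIFO
 * semantics) and the data moves unswapped, mirroring what insw()/outsw()
 * do on the port side.
 */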
static const struct iomap_ops iomem_ops = {
.read8 = iomem_read8,
.read16 = iomem_read16,
.read16be = iomem_read16be,
.read32 = iomem_read32,
.read32be = iomem_read32be,
#ifdef CONFIG_64BIT
.read64 = iomem_read64,
.read64be = iomem_read64be,
#endif
.write8 = iomem_write8,
.write16 = iomem_write16,
.write16be = iomem_write16be,
.write32 = iomem_write32,
.write32be = iomem_write32be,
#ifdef CONFIG_64BIT
.write64 = iomem_write64,
.write64be = iomem_write64be,
#endif
.read8r = iomem_read8r,
.read16r = iomem_read16r,
.read32r = iomem_read32r,
.write8r = iomem_write8r,
.write16r = iomem_write16r,
.write32r = iomem_write32r,
};
static const struct iomap_ops *iomap_ops[8] = {
[0] = &ioport_ops,
[7] = &iomem_ops
};
unsigned int ioread8(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
return *((u8 *)addr);
}
unsigned int ioread16(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
return le16_to_cpup((u16 *)addr);
}
unsigned int ioread16be(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
return *((u16 *)addr);
}
unsigned int ioread32(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
return le32_to_cpup((u32 *)addr);
}
unsigned int ioread32be(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
return *((u32 *)addr);
}
#ifdef CONFIG_64BIT
u64 ioread64(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
return le64_to_cpup((u64 *)addr);
}
u64 ioread64be(const void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr)))
return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
return *((u64 *)addr);
}
#endif
void iowrite8(u8 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write8(datum, addr);
} else {
*((u8 *)addr) = datum;
}
}
void iowrite16(u16 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write16(datum, addr);
} else {
*((u16 *)addr) = cpu_to_le16(datum);
}
}
void iowrite16be(u16 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write16be(datum, addr);
} else {
*((u16 *)addr) = datum;
}
}
void iowrite32(u32 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write32(datum, addr);
} else {
*((u32 *)addr) = cpu_to_le32(datum);
}
}
void iowrite32be(u32 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write32be(datum, addr);
} else {
*((u32 *)addr) = datum;
}
}
#ifdef CONFIG_64BIT
void iowrite64(u64 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr);
} else {
*((u64 *)addr) = cpu_to_le64(datum);
}
}
void iowrite64be(u64 datum, void __iomem *addr)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr);
} else {
*((u64 *)addr) = datum;
}
}
#endif
/* Repeating interfaces */
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
} else {
while (count--) {
*(u8 *)dst = *(u8 *)addr;
dst++;
}
}
}
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
} else {
while (count--) {
*(u16 *)dst = *(u16 *)addr;
dst += 2;
}
}
}
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
} else {
while (count--) {
*(u32 *)dst = *(u32 *)addr;
dst += 4;
}
}
}
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write8r(addr, src, count);
} else {
while (count--) {
*(u8 *)addr = *(u8 *)src;
src++;
}
}
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write16r(addr, src, count);
} else {
while (count--) {
*(u16 *)addr = *(u16 *)src;
src += 2;
}
}
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
if (unlikely(INDIRECT_ADDR(addr))) {
iomap_ops[ADDR_TO_REGION(addr)]->write32r(addr, src, count);
} else {
while (count--) {
*(u32 *)addr = *(u32 *)src;
src += 4;
}
}
}
/* Mapping interfaces */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return (void __iomem *)(IOPORT_MAP_BASE | port);
}
void ioport_unmap(void __iomem *addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
EXPORT_SYMBOL(pci_iounmap);
#endif
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(ioread64);
EXPORT_SYMBOL(ioread64be);
#endif
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(iowrite64);
EXPORT_SYMBOL(iowrite64be);
#endif
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
| linux-master | arch/parisc/lib/iomap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PARISC64 Huge TLB page support.
*
* This parisc implementation is heavily based on the SPARC and x86 code.
*
* Copyright (C) 2015 Helge Deller <[email protected]>
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED)
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
if (addr)
addr = ALIGN(addr, huge_page_size(h));
/* we need to make sure the colouring is OK */
return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
/* We must align the address, because our caller will run
* set_huge_pte_at() on whatever we return, which writes out
* all of the sub-ptes for the hugepage range. So we have
* to give it the first such sub-pte.
*/
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
p4d = p4d_offset(pgd, addr);
pud = pud_alloc(mm, p4d, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
pte = pte_alloc_huge(mm, pmd, addr);
}
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_huge(pmd, addr);
}
}
}
return pte;
}
/* Purge data and instruction TLB entries. Must be called holding
* the pa_tlb_lock. The TLB purge instructions are slow on SMP
* machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
int i;
/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
* Linux standard huge pages (e.g. 2 MB) */
BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
addr &= HPAGE_MASK;
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
purge_tlb_entries(mm, addr);
addr += (1UL << REAL_HPAGE_SHIFT);
}
}
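/*
 * Worked example with the sizes from the comment above: for a 2 MB Linux
 * huge page emulated by 2 x 1 MB physical pages, HPAGE_SHIFT -
 * REAL_HPAGE_SHIFT is 1, so the loop runs twice and purges the entries
 * at addr and at addr + 1 MB.
 */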
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned long addr_start;
int i;
addr &= HPAGE_MASK;
addr_start = addr;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
set_pte(ptep, entry);
ptep++;
addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
purge_tlb_entries_huge(mm, addr_start);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
__set_huge_pte_at(mm, addr, ptep, entry);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t entry;
entry = *ptep;
__set_huge_pte_at(mm, addr, ptep, __pte(0));
return entry;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
old_pte = *ptep;
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
int changed;
struct mm_struct *mm = vma->vm_mm;
changed = !pte_same(*ptep, pte);
if (changed) {
__set_huge_pte_at(mm, addr, ptep, pte);
}
return changed;
}
int pmd_huge(pmd_t pmd)
{
return 0;
}
int pud_huge(pud_t pud)
{
return 0;
}
| linux-master | arch/parisc/mm/hugetlbpage.c |
// SPDX-License-Identifier: GPL-2.0
/*
* fixmaps for parisc
*
* Copyright (c) 2019 Sven Schnelle <[email protected]>
*/
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
{
unsigned long vaddr = __fix_to_virt(idx);
pgd_t *pgd = pgd_offset_k(vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
pmd_t *pmd = pmd_offset(pud, vaddr);
pte_t *pte;
pte = pte_offset_kernel(pmd, vaddr);
set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
void notrace clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
pte_t *pte = virt_to_kpte(vaddr);
if (WARN_ON(pte_none(*pte)))
return;
pte_clear(&init_mm, vaddr, pte);
flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
}
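/*
 * A minimal usage sketch (kept out of the build; assumes the
 * FIX_TEXT_POKE0 slot from asm/fixmap.h): map a physical page into the
 * fixmap, write one word through the alias, then tear the mapping down.
 * Real callers, such as the kernel text patching code, must also flush
 * caches and serialize against concurrent pokes.
 */
#if 0
static void fixmap_poke_example(phys_addr_t phys, unsigned long offset, u32 insn)
{
u32 *p;
set_fixmap(FIX_TEXT_POKE0, phys);
p = (u32 *)(__fix_to_virt(FIX_TEXT_POKE0) + offset);
*p = insn;
clear_fixmap(FIX_TEXT_POKE0);
}
#endif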
| linux-master | arch/parisc/mm/fixmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/parisc/mm/init.c
*
* Copyright (C) 1995 Linus Torvalds
* Copyright 1999 SuSE GmbH
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf ([email protected])
* Copyright 2004 Randolph Chung ([email protected])
* Copyright 2006-2007 Helge Deller ([email protected])
*
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
#include <asm/asm-offsets.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
#if CONFIG_PGTABLE_LEVELS == 3
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};
static struct resource pdcdata_resource = {
.name = "PDC data (Page Zero)",
.start = 0,
.end = 0x9ff,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
/* The following array is initialized from the firmware specific
* information retrieved in kernel/inventory.c.
*/
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;
#ifdef CONFIG_64BIT
#define MAX_MEM (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
static unsigned long mem_limit __read_mostly = MAX_MEM;
static void __init mem_limit_func(void)
{
char *cp, *end;
unsigned long limit;
/* We need this before __setup() functions are called */
limit = MAX_MEM;
for (cp = boot_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) {
cp += 4;
limit = memparse(cp, &end);
if (end != cp)
break;
cp = end;
} else {
while (*cp != ' ' && *cp)
++cp;
while (*cp == ' ')
++cp;
}
}
if (limit < mem_limit)
mem_limit = limit;
}
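/* For example, booting with "mem=512M" makes memparse() return
 * 0x20000000; setup_bootmem() below then truncates the physical memory
 * ranges to that limit. */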
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
static void __init setup_bootmem(void)
{
unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
int npmem_holes;
#endif
int i, sysram_resource_count;
disable_sr_hashing(); /* Turn off space register hashing */
/*
* Sort the ranges. Since the number of ranges is typically
* small, and performance is not an issue here, just do
* a simple insertion sort.
*/
for (i = 1; i < npmem_ranges; i++) {
int j;
for (j = i; j > 0; j--) {
if (pmem_ranges[j-1].start_pfn <
pmem_ranges[j].start_pfn) {
break;
}
swap(pmem_ranges[j-1], pmem_ranges[j]);
}
}
#ifndef CONFIG_SPARSEMEM
/*
* Throw out ranges that are too far apart (controlled by
* MAX_GAP).
*/
for (i = 1; i < npmem_ranges; i++) {
if (pmem_ranges[i].start_pfn -
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages) > MAX_GAP) {
npmem_ranges = i;
printk("Large gap in memory detected (%ld pages). "
"Consider turning on CONFIG_SPARSEMEM\n",
pmem_ranges[i].start_pfn -
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages));
break;
}
}
#endif
/* Print the memory ranges */
pr_info("Memory Ranges:\n");
for (i = 0; i < npmem_ranges; i++) {
struct resource *res = &sysram_resources[i];
unsigned long start;
unsigned long size;
size = (pmem_ranges[i].pages << PAGE_SHIFT);
start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
i, start, start + (size - 1), size >> 20);
/* request memory resource */
res->name = "System RAM";
res->start = start;
res->end = start + size - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
sysram_resource_count = npmem_ranges;
/*
* For 32 bit kernels we limit the amount of memory we can
* support, in order to preserve enough kernel address space
* for other purposes. For 64 bit kernels we don't normally
* limit the memory, but this mechanism can be used to
* artificially limit the amount of memory (and it is written
* to work with multiple memory ranges).
*/
mem_limit_func(); /* check for "mem=" argument */
mem_max = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;
rsize = pmem_ranges[i].pages << PAGE_SHIFT;
if ((mem_max + rsize) > mem_limit) {
printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
if (mem_max == mem_limit)
npmem_ranges = i;
else {
pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
- (mem_max >> PAGE_SHIFT);
npmem_ranges = i + 1;
mem_max = mem_limit;
}
break;
}
mem_max += rsize;
}
printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
#ifndef CONFIG_SPARSEMEM
/* Merge the ranges, keeping track of the holes */
{
unsigned long end_pfn;
unsigned long hole_pages;
npmem_holes = 0;
end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
for (i = 1; i < npmem_ranges; i++) {
hole_pages = pmem_ranges[i].start_pfn - end_pfn;
if (hole_pages) {
pmem_holes[npmem_holes].start_pfn = end_pfn;
pmem_holes[npmem_holes++].pages = hole_pages;
end_pfn += hole_pages;
}
end_pfn += pmem_ranges[i].pages;
}
pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
npmem_ranges = 1;
}
#endif
/*
* Initialize and free the full range of memory in each range.
*/
max_pfn = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long start_pfn;
unsigned long npages;
unsigned long start;
unsigned long size;
start_pfn = pmem_ranges[i].start_pfn;
npages = pmem_ranges[i].pages;
start = start_pfn << PAGE_SHIFT;
size = npages << PAGE_SHIFT;
/* add system RAM memblock */
memblock_add(start, size);
if ((start_pfn + npages) > max_pfn)
max_pfn = start_pfn + npages;
}
/*
* We can't use memblock top-down allocations because we only
* created the initial mapping up to KERNEL_INITIAL_SIZE in
* the assembly bootup code.
*/
memblock_set_bottom_up(true);
/* IOMMU is always used to access "high mem" on those boxes
* that can hold enough memory that a PCI device couldn't
* directly DMA to all physical addresses.
* ISA DMA support will need to revisit this.
*/
max_low_pfn = max_pfn;
/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
#define PDC_CONSOLE_IO_IODC_SIZE 32768
memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
PDC_CONSOLE_IO_IODC_SIZE));
memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
#ifndef CONFIG_SPARSEMEM
/* reserve the holes */
for (i = 0; i < npmem_holes; i++) {
memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
(pmem_holes[i].pages << PAGE_SHIFT));
}
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
if (__pa(initrd_start) < mem_max) {
unsigned long initrd_reserve;
if (__pa(initrd_end) > mem_max) {
initrd_reserve = mem_max - __pa(initrd_start);
} else {
initrd_reserve = initrd_end - initrd_start;
}
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
memblock_reserve(__pa(initrd_start), initrd_reserve);
}
}
#endif
data_resource.start = virt_to_phys(&data_start);
data_resource.end = virt_to_phys(_end) - 1;
code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(&data_start)-1;
/* We don't know which region the kernel will be in, so try
* all of them.
*/
for (i = 0; i < sysram_resource_count; i++) {
struct resource *res = &sysram_resources[i];
request_resource(res, &code_resource);
request_resource(res, &data_resource);
}
request_resource(&sysram_resources[0], &pdcdata_resource);
/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
pdc_pdt_init();
memblock_allow_resize();
memblock_dump_all();
}
static bool kernel_set_to_readonly;
static void __ref map_pages(unsigned long start_vaddr,
unsigned long start_paddr, unsigned long size,
pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
unsigned long end_paddr;
unsigned long start_pmd;
unsigned long start_pte;
unsigned long tmp1;
unsigned long tmp2;
unsigned long address;
unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
unsigned long kernel_start, kernel_end;
ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
kernel_start = __pa((unsigned long)&__init_begin);
kernel_end = __pa((unsigned long)&_end);
end_paddr = start_paddr + size;
/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
address = start_paddr;
vaddr = start_vaddr;
while (address < end_paddr) {
pgd_t *pgd = pgd_offset_k(vaddr);
p4d_t *p4d = p4d_offset(pgd, vaddr);
pud_t *pud = pud_offset(p4d, vaddr);
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
PAGE_SIZE << PMD_TABLE_ORDER);
if (!pmd)
panic("pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
}
#endif
pmd = pmd_offset(pud, vaddr);
for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
if (pmd_none(*pmd)) {
pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pg_table)
panic("page table allocation failed\n");
pmd_populate_kernel(NULL, pmd, pg_table);
}
pg_table = pte_offset_kernel(pmd, vaddr);
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;
pgprot_t prot;
bool huge = false;
if (force) {
prot = pgprot;
} else if (address < kernel_start || address >= kernel_end) {
/* outside kernel memory */
prot = PAGE_KERNEL;
} else if (!kernel_set_to_readonly) {
/* still initializing, allow writing to RO memory */
prot = PAGE_KERNEL_RWX;
huge = true;
} else if (address >= ro_start) {
/* Code (ro) and Data areas */
prot = (address < ro_end) ?
PAGE_KERNEL_EXEC : PAGE_KERNEL;
huge = true;
} else {
prot = PAGE_KERNEL;
}
pte = __mk_pte(address, prot);
if (huge)
pte = pte_mkhuge(pte);
if (address >= end_paddr)
break;
set_pte(pg_table, pte);
address += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
start_pte = 0;
if (address >= end_paddr)
break;
}
start_pmd = 0;
}
}
void __init set_kernel_text_rw(int enable_read_write)
{
unsigned long start = (unsigned long) __init_begin;
unsigned long end = (unsigned long) &data_start;
map_pages(start, __pa(start), end-start,
PAGE_KERNEL_RWX, enable_read_write ? 1:0);
/* force the kernel to see the new page table entries */
flush_cache_all();
flush_tlb_all();
}
void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
unsigned long kernel_end = (unsigned long)&_end;
/* Remap kernel text and data, but do not touch init section yet. */
kernel_set_to_readonly = true;
map_pages(init_end, __pa(init_end), kernel_end - init_end,
PAGE_KERNEL, 0);
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_pages */
map_pages(init_begin, __pa(init_begin), init_end - init_begin,
PAGE_KERNEL_RWX, 1);
/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
* map_pages */
map_pages(init_begin, __pa(init_begin), init_end - init_begin,
PAGE_KERNEL, 1);
/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, kernel_end);
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new LED state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
/* rodata memory was already mapped with KERNEL_RO access rights by
pagetable_init() and map_pages(). No need to do additional stuff here */
unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif
/*
* Just an arbitrary offset to serve as a "hole" between mapping areas
* (between top of physical memory and a potential pcxl dma mapping
* area, and below the vmalloc mapping area).
*
* The current 32K value just means that there will be a 32K "hole"
* between mapping areas. That means that any out-of-bounds memory
* accesses will hopefully be caught. The vmalloc() routines leave
* a hole of 4 kB between vmalloced areas for the same reason.
*/
/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)
#define VM_MAP_OFFSET (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
& ~(VM_MAP_OFFSET-1)))
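/* e.g. SET_MAP_OFFSET(0x10000) and SET_MAP_OFFSET(0x10001) both yield
 * 0x18000: the result is the next 32K boundary strictly above x, which
 * guarantees a hole of between 1 byte and 32K. */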
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);
void __init mem_init(void)
{
/* Do sanity checks on IPC (compat) structures */
BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif
/* Do sanity checks on page table constants */
BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
> BITS_PER_LONG);
#if CONFIG_PGTABLE_LEVELS == 3
BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
#else
BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif
#ifdef CONFIG_64BIT
/* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif
high_memory = __va((max_pfn << PAGE_SHIFT));
set_max_mapnr(max_low_pfn);
memblock_free_all();
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ PCXL_DMA_MAP_SIZE);
} else
#endif
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
#if 0
/*
* Do not expose the virtual kernel memory layout to userspace.
* But keep code for debugging purposes.
*/
printk("virtual kernel memory layout:\n"
" vmalloc : 0x%px - 0x%px (%4ld MB)\n"
" fixmap : 0x%px - 0x%px (%4ld kB)\n"
" memory : 0x%px - 0x%px (%4ld MB)\n"
" .init : 0x%px - 0x%px (%4ld kB)\n"
" .data : 0x%px - 0x%px (%4ld kB)\n"
" .text : 0x%px - 0x%px (%4ld kB)\n",
(void*)VMALLOC_START, (void*)VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
(void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
(unsigned long)(FIXMAP_SIZE / 1024),
__va(0), high_memory,
((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
__init_begin, __init_end,
((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
_etext, _edata,
((unsigned long)_edata - (unsigned long)_etext) >> 10,
_text, _etext,
((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}
unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);
/*
* pagetable_init() sets up the page tables
*
* Note that gateway_init() places the Linux gateway page at page 0.
* Since gateway pages cannot be dereferenced this has the desirable
* side effect of trapping those pesky NULL-reference errors in the
* kernel.
*/
static void __init pagetable_init(void)
{
int range;
/* Map each physical memory range to its kernel vaddr */
for (range = 0; range < npmem_ranges; range++) {
unsigned long start_paddr;
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
size = pmem_ranges[range].pages << PAGE_SHIFT;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
}
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_end && initrd_end > mem_limit) {
printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
map_pages(initrd_start, __pa(initrd_start),
initrd_end - initrd_start, PAGE_KERNEL, 0);
}
#endif
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!empty_zero_page)
panic("zero page allocation failed.\n");
}
static void __init gateway_init(void)
{
unsigned long linux_gateway_page_addr;
/* FIXME: This is 'const' in order to trick the compiler
into not treating it as DP-relative data. */
extern void * const linux_gateway_page;
linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;
/*
* Setup Linux Gateway page.
*
* The Linux gateway page will reside in kernel space (on virtual
* page 0), so it doesn't need to be aliased into user space.
*/
map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
PAGE_SIZE, PAGE_GATEWAY, 1);
}
static void __init fixmap_init(void)
{
unsigned long addr = FIXMAP_START;
unsigned long end = FIXMAP_START + FIXMAP_SIZE;
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
pmd_t *pmd;
BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
PAGE_SIZE << PMD_TABLE_ORDER);
if (!pmd)
panic("fixmap: pmd allocation failed.\n");
pud_populate(NULL, pud, pmd);
}
#endif
pmd = pmd_offset(pud, addr);
do {
pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("fixmap: pte allocation failed.\n");
pmd_populate_kernel(&init_mm, pmd, pte);
addr += PAGE_SIZE;
} while (addr < end);
}
static void __init parisc_bootmem_free(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
max_zone_pfn[0] = memblock_end_of_DRAM();
free_area_init(max_zone_pfn);
}
void __init paging_init(void)
{
setup_bootmem();
pagetable_init();
gateway_init();
fixmap_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
sparse_init();
parisc_bootmem_free();
}
static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
unsigned long entry_info)
{
const int slot_max = btlb_info.fixed_range_info.num_comb;
int min_num_pages = btlb_info.min_size;
unsigned long size;
/* map at minimum 4 pages */
if (min_num_pages < 4)
min_num_pages = 4;
size = HUGEPAGE_SIZE;
while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
/* starting address must have same alignment as size! */
/* if correctly aligned and fits in double size, increase */
if (((start & (2 * size - 1)) == 0) &&
(end - start) >= (2 * size)) {
size <<= 1;
continue;
}
/* if current size alignment is too big, try smaller size */
if ((start & (size - 1)) != 0) {
size >>= 1;
continue;
}
if ((end - start) >= size) {
if ((size >> PAGE_SHIFT) >= min_num_pages)
pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
size >> PAGE_SHIFT, entry_info, *slot);
(*slot)++;
start += size;
continue;
}
size /= 2;
continue;
}
}
void btlb_init_per_cpu(void)
{
unsigned long s, t, e;
int slot;
/* BTLBs are not available on 64-bit CPUs */
if (IS_ENABLED(CONFIG_PA20))
return;
else if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
/* insert BTLBs for code and data segments */
s = (uintptr_t) dereference_function_descriptor(&_stext);
e = (uintptr_t) dereference_function_descriptor(&_etext);
t = (uintptr_t) dereference_function_descriptor(&_sdata);
BUG_ON(t != e);
/* code segments */
slot = 0;
alloc_btlb(s, e, &slot, 0x13800000);
/* sanity check */
t = (uintptr_t) dereference_function_descriptor(&_edata);
e = (uintptr_t) dereference_function_descriptor(&__bss_start);
BUG_ON(t != e);
/* data segments */
s = (uintptr_t) dereference_function_descriptor(&_sdata);
e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
alloc_btlb(s, e, &slot, 0x11800000);
}
#ifdef CONFIG_PA20
/*
* Currently, all PA20 chips have 18 bit protection IDs, which is the
* limiting factor (space ids are 32 bits).
*/
#define NR_SPACE_IDS 262144
#else
/*
* Currently we have a one-to-one relationship between space IDs and
* protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
* support 15 bit protection IDs, so that is the limiting factor.
* PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
* probably not worth the effort for a special case here.
*/
#define NR_SPACE_IDS 32768
#endif /* !CONFIG_PA20 */
#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
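/* e.g. 32768 space IDs with 32-bit longs gives a bitmap of 1024 words;
 * 262144 IDs with 64-bit longs gives 4096. */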
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids;
static DEFINE_SPINLOCK(sid_lock);
unsigned long alloc_sid(void)
{
unsigned long index;
spin_lock(&sid_lock);
if (free_space_ids == 0) {
if (dirty_space_ids != 0) {
spin_unlock(&sid_lock);
flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
spin_lock(&sid_lock);
}
BUG_ON(free_space_ids == 0);
}
free_space_ids--;
index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
space_id[BIT_WORD(index)] |= BIT_MASK(index);
space_id_index = index;
spin_unlock(&sid_lock);
return index << SPACEID_SHIFT;
}
void free_sid(unsigned long spaceid)
{
unsigned long index = spaceid >> SPACEID_SHIFT;
unsigned long *dirty_space_offset, mask;
dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
mask = BIT_MASK(index);
spin_lock(&sid_lock);
BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
*dirty_space_offset |= mask;
dirty_space_ids++;
spin_unlock(&sid_lock);
}
#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
int i;
/* NOTE: sid_lock must be held upon entry */
*ndirtyptr = dirty_space_ids;
if (dirty_space_ids != 0) {
for (i = 0; i < SID_ARRAY_SIZE; i++) {
dirty_array[i] = dirty_space_id[i];
dirty_space_id[i] = 0;
}
dirty_space_ids = 0;
}
return;
}
static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
int i;
/* NOTE: sid_lock must be held upon entry */
if (ndirty != 0) {
for (i = 0; i < SID_ARRAY_SIZE; i++) {
space_id[i] ^= dirty_array[i];
}
free_space_ids += ndirty;
space_id_index = 0;
}
}
#else /* CONFIG_SMP */
static void recycle_sids(void)
{
int i;
/* NOTE: sid_lock must be held upon entry */
if (dirty_space_ids != 0) {
for (i = 0; i < SID_ARRAY_SIZE; i++) {
space_id[i] ^= dirty_space_id[i];
dirty_space_id[i] = 0;
}
free_space_ids += dirty_space_ids;
dirty_space_ids = 0;
space_id_index = 0;
}
}
#endif
/*
* flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
* purged, we can safely reuse the space ids that were released but
* not flushed from the tlb.
*/
#ifdef CONFIG_SMP
static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;
void flush_tlb_all(void)
{
int do_recycle;
do_recycle = 0;
spin_lock(&sid_lock);
__inc_irq_stat(irq_tlb_count);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
recycle_inuse++;
do_recycle++;
}
spin_unlock(&sid_lock);
on_each_cpu(flush_tlb_all_local, NULL, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);
recycle_inuse = 0;
spin_unlock(&sid_lock);
}
}
#else
void flush_tlb_all(void)
{
spin_lock(&sid_lock);
__inc_irq_stat(irq_tlb_count);
flush_tlb_all_local(NULL);
recycle_sids();
spin_unlock(&sid_lock);
}
#endif
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY,
[VM_WRITE] = PAGE_NONE,
[VM_WRITE | VM_READ] = PAGE_READONLY,
[VM_EXEC] = PAGE_EXECREAD,
[VM_EXEC | VM_READ] = PAGE_EXECREAD,
[VM_EXEC | VM_WRITE] = PAGE_EXECREAD,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READONLY,
[VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
[VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/parisc/mm/init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/parisc/mm/ioremap.c
*
* (C) Copyright 1995 1996 Linus Torvalds
* (C) Copyright 2001-2019 Helge Deller <[email protected]>
* (C) Copyright 2005 Kyle McMartin <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot)
{
#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
/* Support EISA addresses */
if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
(phys_addr >= 0x00500000 && end < 0x03bfffff))
phys_addr |= F_EXTEND(0xfc000000);
#endif
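/* For example, a request for the EISA BIOS window at 0x000c0000 gets
 * F_EXTEND(0xfc000000) ORed in and ends up at bus address 0xfc0c0000
 * (with the upper 32 bits also set on 64-bit kernels). */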
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
if (phys_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
struct page *page;
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
for (page = virt_to_page(t_addr);
page <= virt_to_page(t_end); page++) {
if(!PageReserved(page))
return NULL;
}
}
return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);
| linux-master | arch/parisc/mm/ioremap.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
* Copyright 1999 SuSE GmbH (Philipp Rumpf, [email protected])
* Copyright 1999 Hewlett Packard Co.
*
*/
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/perf_event.h>
#include <asm/traps.h>
#define DEBUG_NATLB 0
/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
#define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
/* extended opcode is 0x6a */
#define BITSSET 0x1c0 /* for identifying LDCW */
int show_unhandled_signals = 1;
/*
* parisc_acctyp(unsigned long code, unsigned int inst) --
* Given a PA-RISC memory access instruction, determine if the
* instruction would perform a memory read or memory write
* operation.
*
* This function assumes that the given instruction is a memory access
* instruction (i.e. you should really only call it if you know that
* the instruction has generated some sort of a memory access fault).
*
* Returns:
* VM_READ if read operation
* VM_WRITE if write operation
* VM_EXEC if execute operation
*/
unsigned long
parisc_acctyp(unsigned long code, unsigned int inst)
{
if (code == 6 || code == 16)
return VM_EXEC;
switch (inst & 0xf0000000) {
case 0x40000000: /* load */
case 0x50000000: /* new load */
return VM_READ;
case 0x60000000: /* store */
case 0x70000000: /* new store */
return VM_WRITE;
case 0x20000000: /* coproc */
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
fallthrough;
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
/*
* Check for the 'Graphics Flush Read' instruction.
* It resembles an FDC instruction, except for bits
* 20 and 21. Any combination other than zero will
* utilize the block mover functionality on some
* older PA-RISC platforms. The case where a block
* move is performed from VM to graphics IO space
* should be treated as a READ.
*
* The significance of bits 20,21 in the FDC
* instruction is:
*
* 00 Flush data cache (normal instruction behavior)
* 01 Graphics flush write (IO space -> VM)
* 10 Graphics flush read (VM -> IO space)
* 11 Graphics flush read/write (VM <-> IO space)
*/
if (isGraphicsFlushRead(inst))
return VM_READ;
return VM_WRITE;
} else {
/*
* Check for LDCWX and LDCWS (semaphore instructions).
* If bits 23 through 25 are all 1's it is one of
* the above two instructions and is a write.
*
* Note: With the limited bits we are looking at,
* this will also catch PROBEW and PROBEWI. However,
* these should never get in here because they don't
* generate exceptions of the type:
* Data TLB miss fault/data page fault
* Data memory protection trap
*/
if (bits23_25set(inst) == BITSSET)
return VM_WRITE;
}
return VM_READ; /* Default */
}
return VM_READ; /* Default */
}
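/*
 * For example, the load 0x48213fd1 (ldw -18(r1),r1) matches the
 * 0x40000000 case above and is classified VM_READ, while the store
 * 0x6fc10080 (stw,ma r1,40(sp)) matches 0x60000000 and yields VM_WRITE.
 */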
#undef bit22set
#undef bits23_25set
#undef isGraphicsFlushRead
#undef BITSSET
#if 0
/* This is the treewalk to find a vma which is the highest that has
* a start < addr. We're using find_vma_prev instead right now, but
* we might want to use this at some point in the future. Probably
* not, but I want it committed to CVS so I don't lose it :-)
*/
while (tree != vm_avl_empty) {
if (tree->vm_start > addr) {
tree = tree->vm_avl_left;
} else {
prev = tree;
if (prev->vm_next == NULL)
break;
if (prev->vm_next->vm_start > addr)
break;
tree = tree->vm_avl_right;
}
}
#endif
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
fix = search_exception_tables(regs->iaoq[0]);
if (fix) {
/*
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
* that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
* -EFAULT to report a userspace access error.
*/
if (fix->fixup & 1) {
regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
int treg = regs->iir & 0x1f;
BUG_ON(treg == 0);
regs->gr[treg] = 0;
}
}
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
/*
* NOTE: In some cases the faulting instruction
* may be in the delay slot of a branch. We
* don't want to take the branch, so we don't
* increment iaoq[1], instead we set it to be
* iaoq[0]+4, and clear the B bit in the PSW
*/
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
return 1;
}
return 0;
}
/*
* parisc hardware trap list
*
* Documented in section 3 "Addressing and Access Control" of the
* "PA-RISC 1.1 Architecture and Instruction Set Reference Manual"
* https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf
*
* For implementation see handle_interruption() in traps.c
*/
static const char * const trap_description[] = {
[1] = "High-priority machine check (HPMC)",
[2] = "Power failure interrupt",
[3] = "Recovery counter trap",
[5] = "Low-priority machine check",
[6] = "Instruction TLB miss fault",
[7] = "Instruction access rights / protection trap",
[8] = "Illegal instruction trap",
[9] = "Break instruction trap",
[10] = "Privileged operation trap",
[11] = "Privileged register trap",
[12] = "Overflow trap",
[13] = "Conditional trap",
[14] = "FP Assist Exception trap",
[15] = "Data TLB miss fault",
[16] = "Non-access ITLB miss fault",
[17] = "Non-access DTLB miss fault",
[18] = "Data memory protection/unaligned access trap",
[19] = "Data memory break trap",
[20] = "TLB dirty bit trap",
[21] = "Page reference trap",
[22] = "Assist emulation trap",
[25] = "Taken branch trap",
[26] = "Data memory access rights trap",
[27] = "Data memory protection ID trap",
[28] = "Unaligned data reference trap",
};
const char *trap_name(unsigned long code)
{
const char *t = NULL;
if (code < ARRAY_SIZE(trap_description))
t = trap_description[code];
return t ? t : "Unknown trap";
}
/*
* Print out info about fatal segfaults, if the show_unhandled_signals
* sysctl is set:
*/
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long code,
unsigned long address, struct task_struct *tsk,
struct vm_area_struct *vma)
{
if (!unhandled_signal(tsk, SIGSEGV))
return;
if (!printk_ratelimit())
return;
pr_warn("\n");
pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
tsk->comm, code, address);
print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
vma ? ',':'\n');
if (vma)
pr_cont(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
vma->vm_start, vma->vm_end);
show_regs(regs);
}
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
struct vm_area_struct *vma, *prev_vma;
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long acc_type;
vm_fault_t fault = 0;
unsigned int flags;
char *msg;
tsk = current;
mm = tsk->mm;
if (!mm) {
msg = "Page fault: no context";
goto no_context;
}
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
acc_type = parisc_acctyp(code, regs->iir);
if (acc_type & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start) {
if (!prev_vma || !(prev_vma->vm_flags & VM_GROWSUP))
goto bad_area;
vma = expand_stack(mm, address);
if (!vma)
goto bad_area_nosemaphore;
}
/*
* Ok, we have a good vm_area for this memory access. We still need to
* check the access permissions.
*/
if ((vma->vm_flags & acc_type) != acc_type)
goto bad_area;
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs)) {
msg = "Page fault: fault signal on kernel memory";
goto no_context;
}
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We hit a shared mapping outside of the file, or some
* other thing happened to us that made us unable to
* handle the page fault gracefully.
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
goto bad_area;
BUG();
}
if (fault & VM_FAULT_RETRY) {
/*
* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
flags |= FAULT_FLAG_TRIED;
goto retry;
}
mmap_read_unlock(mm);
return;
/*
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
if (user_mode(regs)) {
int signo, si_code;
switch (code) {
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
address < vma->vm_start || address >= vma->vm_end) {
signo = SIGSEGV;
si_code = SEGV_MAPERR;
break;
}
/* send SIGSEGV for wrong permissions */
if ((vma->vm_flags & acc_type) != acc_type) {
signo = SIGSEGV;
si_code = SEGV_ACCERR;
break;
}
/* the address is probably outside the mapped file */
fallthrough;
case 17: /* NA data TLB miss / page fault */
case 18: /* Unaligned access - PCXS only */
signo = SIGBUS;
si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
break;
case 16: /* Non-access instruction TLB miss fault */
case 26: /* PCXL: Data memory access rights trap */
default:
signo = SIGSEGV;
si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
break;
}
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
unsigned int lsb = 0;
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
tsk->comm, tsk->pid, address);
/*
* Either small page or large page may be poisoned.
* In other words, VM_FAULT_HWPOISON_LARGE and
* VM_FAULT_HWPOISON are mutually exclusive.
*/
if (fault & VM_FAULT_HWPOISON_LARGE)
lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
else if (fault & VM_FAULT_HWPOISON)
lsb = PAGE_SHIFT;
force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address,
lsb);
return;
}
#endif
show_signal_msg(regs, code, address, tsk, vma);
force_sig_fault(signo, si_code, (void __user *) address);
return;
}
msg = "Page fault: bad address";
no_context:
if (!user_mode(regs) && fixup_exception(regs)) {
return;
}
parisc_terminate(msg, regs, code, address);
out_of_memory:
mmap_read_unlock(mm);
if (!user_mode(regs)) {
msg = "Page fault: out of memory";
goto no_context;
}
pagefault_out_of_memory();
}
/* Handle non-access data TLB miss faults.
*
* For probe instructions, accesses to userspace are considered allowed
* if they lie in a valid VMA and the access type matches. We are not
* allowed to handle MM faults here so there may be situations where an
* actual access would fail even though a probe was successful.
*/
int
handle_nadtlb_fault(struct pt_regs *regs)
{
unsigned long insn = regs->iir;
int breg, treg, xreg, val = 0;
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
unsigned long address;
unsigned long acc_type;
switch (insn & 0x380) {
case 0x280:
/* FDC instruction */
fallthrough;
case 0x380:
/* PDC and FIC instructions */
if (DEBUG_NATLB && printk_ratelimit()) {
pr_warn("WARNING: nullifying cache flush/purge instruction\n");
show_regs(regs);
}
if (insn & 0x20) {
/* Base modification */
breg = (insn >> 21) & 0x1f;
xreg = (insn >> 16) & 0x1f;
if (breg && xreg)
regs->gr[breg] += regs->gr[xreg];
}
regs->gr[0] |= PSW_N;
return 1;
case 0x180:
/* PROBE instruction */
treg = insn & 0x1f;
if (regs->isr) {
tsk = current;
mm = tsk->mm;
if (mm) {
/* Search for VMA */
address = regs->ior;
mmap_read_lock(mm);
vma = vma_lookup(mm, address);
mmap_read_unlock(mm);
/*
* Check if access to the VMA is okay.
* We don't allow for stack expansion.
*/
acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
if (vma
&& (vma->vm_flags & acc_type) == acc_type)
val = 1;
}
}
if (treg)
regs->gr[treg] = val;
regs->gr[0] |= PSW_N;
return 1;
case 0x300:
/* LPA instruction */
if (insn & 0x20) {
/* Base modification */
breg = (insn >> 21) & 0x1f;
xreg = (insn >> 16) & 0x1f;
if (breg && xreg)
regs->gr[breg] += regs->gr[xreg];
}
treg = insn & 0x1f;
if (treg)
regs->gr[treg] = 0;
regs->gr[0] |= PSW_N;
return 1;
default:
break;
}
return 0;
}
| linux-master | arch/parisc/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Code for tracing calls in Linux kernel.
* Copyright (C) 2009-2016 Helge Deller <[email protected]>
*
* based on code for x86 which is:
* Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
*
* future possible enhancements:
* - add CONFIG_STACK_TRACER
*/
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/jump_label.h>
#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
#define __hot __section(".text.hot")
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
static void __hot prepare_ftrace_return(unsigned long *parent,
unsigned long self_addr)
{
unsigned long old;
extern int parisc_return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
return;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
if (!function_graph_enter(old, self_addr, 0, NULL))
/* activate parisc_return_to_handler() as return point */
*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ftrace_func_t ftrace_func;
asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long self_addr,
unsigned long org_sp_gr3,
struct ftrace_regs *fregs)
{
extern struct ftrace_ops *function_trace_op;
ftrace_func(self_addr, parent, function_trace_op, fregs);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (static_branch_unlikely(&ftrace_graph_enable)) {
unsigned long *parent_rp;
/* calculate pointer to %rp in stack */
parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
/* sanity check: parent_rp should hold parent */
if (*parent_rp != parent)
return;
prepare_ftrace_return(parent_rp, self_addr);
return;
}
#endif
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int ftrace_enable_ftrace_graph_caller(void)
{
static_key_enable(&ftrace_graph_enable.key);
return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
static_key_disable(&ftrace_graph_enable.key);
return 0;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_update_ftrace_func(ftrace_func_t func)
{
ftrace_func = func;
return 0;
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return 0;
}
unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}
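/* e.g. with N patchable entry NOPs per function (as emitted by
 * -fpatchable-function-entry), the address of the first NOP is moved
 * forward by (N - 1) * 4 bytes so that rec->ip points at the last NOP,
 * the slot ftrace_make_call() later turns into a branch. */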
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
u32 *tramp;
int size, ret, i;
void *ip;
#ifdef CONFIG_64BIT
unsigned long addr2 =
(unsigned long)dereference_function_descriptor((void *)addr);
u32 ftrace_trampoline[] = {
0x73c10208, /* std,ma r1,100(sp) */
0x0c2110c1, /* ldd -10(r1),r1 */
0xe820d002, /* bve,n (r1) */
addr2 >> 32,
addr2 & 0xffffffff,
0xe83f1fd7, /* b,l,n .-14,r1 */
};
u32 ftrace_trampoline_unaligned[] = {
addr2 >> 32,
addr2 & 0xffffffff,
0x37de0200, /* ldo 100(sp),sp */
0x73c13e01, /* std r1,-100(sp) */
0x34213ff9, /* ldo -4(r1),r1 */
0x50213fc1, /* ldd -20(r1),r1 */
0xe820d002, /* bve,n (r1) */
0xe83f1fcf, /* b,l,n .-20,r1 */
};
BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
u32 ftrace_trampoline[] = {
(u32)addr,
0x6fc10080, /* stw,ma r1,40(sp) */
0x48213fd1, /* ldw -18(r1),r1 */
0xe820c002, /* bv,n r0(r1) */
0xe83f1fdf, /* b,l,n .-c,r1 */
};
#endif
BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
FTRACE_PATCHABLE_FUNCTION_SIZE);
size = sizeof(ftrace_trampoline);
tramp = ftrace_trampoline;
#ifdef CONFIG_64BIT
if (rec->ip & 0x4) {
size = sizeof(ftrace_trampoline_unaligned);
tramp = ftrace_trampoline_unaligned;
}
#endif
ip = (void *)(rec->ip + 4 - size);
ret = copy_from_kernel_nofault(insn, ip, size);
if (ret)
return ret;
for (i = 0; i < size / 4; i++) {
if (insn[i] != INSN_NOP)
return -EINVAL;
}
__patch_text_multiple(ip, tramp, size);
return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
int i;
for (i = 0; i < ARRAY_SIZE(insn); i++)
insn[i] = INSN_NOP;
__patch_text((void *)rec->ip, INSN_NOP);
__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
insn, sizeof(insn)-4);
return 0;
}
#endif
#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
struct kprobe *p;
int bit;
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto out;
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
goto out;
}
__this_cpu_write(current_kprobe, p);
kcb = get_kprobe_ctlblk();
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
regs->iaoq[0] = ip;
regs->iaoq[1] = ip + 4;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
regs->iaoq[0] = ip + 4;
regs->iaoq[1] = ip + 8;
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
}
__this_cpu_write(current_kprobe, NULL);
out:
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
return 0;
}
#endif
| linux-master | arch/parisc/kernel/ftrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers.c
*
* Copyright (c) 1999 The Puffin Group
* Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
* Copyright (c) 2001-2023 Helge Deller <[email protected]>
* Copyright (c) 2001,2002 Ryan Bradetich
* Copyright (c) 2004-2005 Thibaut VARENE <[email protected]>
*
* The file handles registering devices and drivers, then matching them.
* It's the closest we get to a dating agency.
*
* If you're thinking about modifying this file, here are some gotchas to
* bear in mind:
* - 715/Mirage device paths have a dummy device between Lasi and its children
* - The EISA adapter may show up as a sibling or child of Wax
* - Dino has an optionally functional serial port. If firmware enables it,
* it shows up as a child of Dino. If firmware disables it, the buswalk
* finds it and it shows up as a child of Cujo
* - Dino has both parisc and pci devices as children
* - parisc devices are discovered in a random order, including children
* before parents in some cases.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/dma-map-ops.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
#include <asm/ropes.h>
/* See comments in include/asm-parisc/pci.h */
const struct dma_map_ops *hppa_dma_ops __ro_after_init;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
.init_name = "parisc",
};
static inline int check_dev(struct device *dev)
{
if (dev->bus == &parisc_bus_type) {
struct parisc_device *pdev;
pdev = to_parisc_device(dev);
return pdev->id.hw_type != HPHW_FAULTY;
}
return 1;
}
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath);
struct recurse_struct {
void * obj;
int (*fn)(struct device *, void *);
};
static int descend_children(struct device * dev, void * data)
{
struct recurse_struct * recurse_data = (struct recurse_struct *)data;
if (recurse_data->fn(dev, recurse_data->obj))
return 1;
else
return device_for_each_child(dev, recurse_data, descend_children);
}
/**
* for_each_padev - Iterate over all devices in the tree
* @fn: Function to call for each device.
* @data: Data to pass to the called function.
*
* This performs a depth-first traversal of the tree, calling the
* function passed for each node. It calls the function for parents
* before children.
*/
static int for_each_padev(int (*fn)(struct device *, void *), void * data)
{
struct recurse_struct recurse_data = {
.obj = data,
.fn = fn,
};
return device_for_each_child(&root, &recurse_data, descend_children);
}
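/*
 * Minimal usage sketch (hypothetical callback, not part of this
 * file): count every device in the tree.  Returning 0 keeps the
 * walk going; nonzero stops it.
 *
 *	static int count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	for_each_padev(count_one, &n);
 */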
/**
* match_device - Report whether this driver can handle this device
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
for (ids = driver->id_table; ids->sversion; ids++) {
if ((ids->sversion != SVERSION_ANY_ID) &&
(ids->sversion != dev->id.sversion))
continue;
if ((ids->hw_type != HWTYPE_ANY_ID) &&
(ids->hw_type != dev->id.hw_type))
continue;
if ((ids->hversion != HVERSION_ANY_ID) &&
(ids->hversion != dev->id.hversion))
continue;
return 1;
}
return 0;
}
static int parisc_driver_probe(struct device *dev)
{
int rc;
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
rc = pa_drv->probe(pa_dev);
if (!rc)
pa_dev->driver = pa_drv;
return rc;
}
static void __exit parisc_driver_remove(struct device *dev)
{
struct parisc_device *pa_dev = to_parisc_device(dev);
struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
if (pa_drv->remove)
pa_drv->remove(pa_dev);
}
/**
* register_parisc_driver - Register this driver if it can handle a device
* @driver: the PA-RISC driver to try
*/
int register_parisc_driver(struct parisc_driver *driver)
{
/* FIXME: we need this because apparently the sti
* driver can be registered twice */
if (driver->drv.name) {
pr_warn("BUG: skipping previously registered driver %s\n",
driver->name);
return 1;
}
if (!driver->probe) {
pr_warn("BUG: driver %s has no probe routine\n", driver->name);
return 1;
}
driver->drv.bus = &parisc_bus_type;
/* We install our own probe and remove routines */
WARN_ON(driver->drv.probe != NULL);
WARN_ON(driver->drv.remove != NULL);
driver->drv.name = driver->name;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(register_parisc_driver);
struct match_count {
struct parisc_driver * driver;
int count;
};
static int match_and_count(struct device * dev, void * data)
{
struct match_count * m = data;
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev)) {
if (match_device(m->driver, pdev))
m->count++;
}
return 0;
}
/**
* count_parisc_driver - count # of devices this driver would match
* @driver: the PA-RISC driver to try
*
 * Used by IOMMU support to "guess" the right IOPdir size.
 * The formula is something like memsize/(num_iommu * entry_size).
*/
int __init count_parisc_driver(struct parisc_driver *driver)
{
struct match_count m = {
.driver = driver,
.count = 0,
};
for_each_padev(match_and_count, &m);
return m.count;
}
/**
* unregister_parisc_driver - Unregister this driver from the list of drivers
* @driver: the PA-RISC driver to unregister
*/
int unregister_parisc_driver(struct parisc_driver *driver)
{
driver_unregister(&driver->drv);
return 0;
}
EXPORT_SYMBOL(unregister_parisc_driver);
struct find_data {
unsigned long hpa;
struct parisc_device * dev;
};
static int find_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct find_data * d = (struct find_data*)data;
if (check_dev(dev)) {
if (pdev->hpa.start == d->hpa) {
d->dev = pdev;
return 1;
}
}
return 0;
}
static struct parisc_device *find_device_by_addr(unsigned long hpa)
{
struct find_data d = {
.hpa = hpa,
};
int ret;
ret = for_each_padev(find_device, &d);
return ret ? d.dev : NULL;
}
static int __init is_IKE_device(struct device *dev, void *data)
{
struct parisc_device *pdev = to_parisc_device(dev);
if (!check_dev(dev))
return 0;
if (pdev->id.hw_type != HPHW_BCPORT)
return 0;
if (IS_IKE(pdev) ||
(pdev->id.hversion == REO_MERCED_PORT) ||
(pdev->id.hversion == REOG_MERCED_PORT)) {
return 1;
}
return 0;
}
int __init machine_has_merced_bus(void)
{
int ret;
ret = for_each_padev(is_IKE_device, NULL);
return ret ? 1 : 0;
}
/**
* find_pa_parent_type - Find a parent of a specific type
* @padev: The device to start searching from
* @type: The device type to search for.
*
* Walks up the device tree looking for a device of the specified type.
* If it finds it, it returns it. If not, it returns NULL.
*/
const struct parisc_device *
find_pa_parent_type(const struct parisc_device *padev, int type)
{
const struct device *dev = &padev->dev;
while (dev != &root) {
struct parisc_device *candidate = to_parisc_device(dev);
if (candidate->id.hw_type == type)
return candidate;
dev = dev->parent;
}
return NULL;
}
/*
* get_node_path fills in @path with the firmware path to the device.
* Note that if @node is a parisc device, we don't fill in the 'mod' field.
* This is because both callers pass the parent and fill in the mod
* themselves. If @node is a PCI device, we do fill it in, even though this
* is inconsistent.
*/
static void get_node_path(struct device *dev, struct hardware_path *path)
{
int i = 5;
memset(&path->bc, -1, 6);
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->mod = PCI_FUNC(devfn);
path->bc[i--] = PCI_SLOT(devfn);
dev = dev->parent;
}
while (dev != &root) {
if (dev_is_pci(dev)) {
unsigned int devfn = to_pci_dev(dev)->devfn;
path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
} else if (dev->bus == &parisc_bus_type) {
path->bc[i--] = to_parisc_device(dev)->hw_path;
}
dev = dev->parent;
}
}
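/*
 * Example (hypothetical device): for a module at hw_path 4 behind
 * bus converters 10 and 0, the caller ends up with
 * path->bc = { -1, -1, -1, -1, 10, 0 } and path->mod = 4, which
 * print_hwpath() below renders as "10/0/4".
 */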
static char *print_hwpath(struct hardware_path *path, char *output)
{
int i;
for (i = 0; i < 6; i++) {
if (path->bc[i] == -1)
continue;
output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
}
output += sprintf(output, "%u", (unsigned char) path->mod);
return output;
}
/**
* print_pa_hwpath - Returns hardware path for PA devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PA device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pa_hwpath(struct parisc_device *dev, char *output)
{
struct hardware_path path;
get_node_path(dev->dev.parent, &path);
path.mod = dev->hw_path;
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pa_hwpath);
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/**
* get_pci_node_path - Determines the hardware path for a PCI device
* @pdev: The device to return the path for
* @path: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the hardware_path structure with the route to
* the specified PCI device. This structure is suitable for passing to
* PDC calls.
*/
void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
{
get_node_path(&pdev->dev, path);
}
EXPORT_SYMBOL(get_pci_node_path);
/**
* print_pci_hwpath - Returns hardware path for PCI devices
* @dev: The device to return the path for
* @output: Pointer to a previously-allocated array to place the path in.
*
* This function fills in the output array with a human-readable path
* to a PCI device. This string is compatible with that used by PDC, and
* may be printed on the outside of the box.
*/
char *print_pci_hwpath(struct pci_dev *dev, char *output)
{
struct hardware_path path;
get_pci_node_path(dev, &path);
return print_hwpath(&path, output);
}
EXPORT_SYMBOL(print_pci_hwpath);
#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
static void setup_bus_id(struct parisc_device *padev)
{
struct hardware_path path;
char name[28];
char *output = name;
int i;
get_node_path(padev->dev.parent, &path);
for (i = 0; i < 6; i++) {
if (path.bc[i] == -1)
continue;
output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
}
sprintf(output, "%u", (unsigned char) padev->hw_path);
	dev_set_name(&padev->dev, "%s", name);
}
static struct parisc_device * __init create_tree_node(char id,
struct device *parent)
{
struct parisc_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
dev->hw_path = id;
dev->id.hw_type = HPHW_FAULTY;
dev->dev.parent = parent;
setup_bus_id(dev);
dev->dev.bus = &parisc_bus_type;
dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
/* make the generic dma mask a pointer to the parisc one */
dev->dev.dma_mask = &dev->dma_mask;
dev->dev.coherent_dma_mask = dev->dma_mask;
if (device_register(&dev->dev)) {
kfree(dev);
return NULL;
}
return dev;
}
struct match_id_data {
char id;
struct parisc_device * dev;
};
static int match_by_id(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
struct match_id_data * d = data;
if (pdev->hw_path == d->id) {
d->dev = pdev;
return 1;
}
return 0;
}
/**
* alloc_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @id: the element of the module path for this entry
*
* Checks all the children of @parent for a matching @id. If none
* found, it allocates a new device and returns it.
*/
static struct parisc_device * __init alloc_tree_node(
struct device *parent, char id)
{
struct match_id_data d = {
.id = id,
};
if (device_for_each_child(parent, &d, match_by_id))
return d.dev;
else
return create_tree_node(id, parent);
}
static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
}
return alloc_tree_node(parent, modpath->mod);
}
struct parisc_device * __init
alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
{
int status;
unsigned long bytecnt;
u8 iodc_data[32];
struct parisc_device *dev;
const char *name;
/* Check to make sure this device has not already been added - Ryan */
if (find_device_by_addr(hpa) != NULL)
return NULL;
status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
if (status != PDC_OK)
return NULL;
	dev = create_parisc_device(mod_path);
	if (!dev)
		return NULL;
	if (dev->id.hw_type != HPHW_FAULTY) {
pr_err("Two devices have hardware path [%s]. IODC data for second device: %7phN\n"
"Rearranging GSC cards sometimes helps\n",
parisc_pathname(dev), iodc_data);
return NULL;
}
dev->id.hw_type = iodc_data[3] & 0x1f;
dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
dev->id.hversion_rev = iodc_data[1] & 0x0f;
dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
(iodc_data[5] << 8) | iodc_data[6];
dev->hpa.start = hpa;
/* This is awkward. The STI spec says that gfx devices may occupy
* 32MB or 64MB. Unfortunately, we don't know how to tell whether
* it's the former or the latter. Assumptions either way can hurt us.
*/
if (hpa == 0xf4000000 || hpa == 0xf8000000) {
dev->hpa.end = hpa + 0x03ffffff;
} else if (hpa == 0xf6000000 || hpa == 0xfa000000) {
dev->hpa.end = hpa + 0x01ffffff;
} else {
dev->hpa.end = hpa + 0xfff;
}
dev->hpa.flags = IORESOURCE_MEM;
dev->hpa.name = dev->name;
name = parisc_hardware_description(&dev->id) ? : "unknown";
snprintf(dev->name, sizeof(dev->name), "%s [%s]",
name, parisc_pathname(dev));
/* Silently fail things like mouse ports which are subsumed within
* the keyboard controller
*/
if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
return dev;
}
static int parisc_generic_match(struct device *dev, struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
static ssize_t make_modalias(const struct device *dev, char *buf)
{
const struct parisc_device *padev = to_parisc_device(dev);
const struct parisc_device_id *id = &padev->id;
return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
(u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
(u32)id->sversion);
}
static int parisc_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct parisc_device *padev;
char modalias[40];
if (!dev)
return -ENODEV;
padev = to_parisc_device(dev);
if (!padev)
return -ENODEV;
if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
return -ENOMEM;
make_modalias(dev, modalias);
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
#define pa_dev_attr(name, field, format_string) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct parisc_device *padev = to_parisc_device(dev); \
return sprintf(buf, format_string, padev->field); \
} \
static DEVICE_ATTR_RO(name);
#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
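/*
 * Example expansion: pa_dev_attr_id(hw_type, "0x%02x\n") produces
 * hw_type_show(), printing padev->id.hw_type, plus the read-only
 * dev_attr_hw_type that appears as a "hw_type" sysfs attribute.
 */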
pa_dev_attr(irq, irq, "%u\n");
pa_dev_attr_id(hw_type, "0x%02x\n");
pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
pa_dev_attr_id(hversion, "0x%03x\n");
pa_dev_attr_id(sversion, "0x%05x\n");
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return make_modalias(dev, buf);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *parisc_device_attrs[] = {
&dev_attr_irq.attr,
&dev_attr_hw_type.attr,
&dev_attr_rev.attr,
&dev_attr_hversion.attr,
&dev_attr_sversion.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(parisc_device);
struct bus_type parisc_bus_type = {
.name = "parisc",
.match = parisc_generic_match,
.uevent = parisc_uevent,
.dev_groups = parisc_device_groups,
.probe = parisc_driver_probe,
.remove = __exit_p(parisc_driver_remove),
};
/**
* register_parisc_device - Locate a driver to manage this device.
* @dev: The parisc device.
*
* Search the driver list for a driver that is willing to manage
* this device.
*/
int __init register_parisc_device(struct parisc_device *dev)
{
if (!dev)
return 0;
if (dev->driver)
return 1;
return 0;
}
/**
* match_pci_device - Matches a pci device against a given hardware path
* entry.
* @dev: the generic device (known to be contained by a pci_dev).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_pci_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct pci_dev *pdev = to_pci_dev(dev);
int id;
if (index == 5) {
/* we are at the end of the path, and on the actual device */
unsigned int devfn = pdev->devfn;
return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
(modpath->mod == PCI_FUNC(devfn)));
}
/* index might be out of bounds for bc[] */
if (index >= 6)
return 0;
id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
return (modpath->bc[index] == id);
}
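/*
 * Worked example: PCI encodes devfn as (slot << 3) | func, so slot 3
 * function 1 gives devfn 0x19.  As a leaf (index 5) that matches
 * bc[5] == 3 with mod == 1; as an intermediate entry it is re-packed
 * as PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5) == 0x23.
 */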
/**
* match_parisc_device - Matches a parisc device against a given hardware
* path entry.
* @dev: the generic device (known to be contained by a parisc_device).
* @index: the current BC index
* @modpath: the hardware path.
* @return: true if the device matches the hardware path.
*/
static int match_parisc_device(struct device *dev, int index,
struct hardware_path *modpath)
{
struct parisc_device *curr = to_parisc_device(dev);
char id = (index == 6) ? modpath->mod : modpath->bc[index];
return (curr->hw_path == id);
}
struct parse_tree_data {
int index;
struct hardware_path * modpath;
struct device * dev;
};
static int check_parent(struct device * dev, void * data)
{
struct parse_tree_data * d = data;
if (check_dev(dev)) {
if (dev->bus == &parisc_bus_type) {
if (match_parisc_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev_is_pci(dev)) {
if (match_pci_device(dev, d->index, d->modpath))
d->dev = dev;
} else if (dev->bus == NULL) {
/* we are on a bus bridge */
struct device *new = parse_tree_node(dev, d->index, d->modpath);
if (new)
d->dev = new;
}
}
return d->dev != NULL;
}
/**
* parse_tree_node - returns a device entry in the iotree
* @parent: the parent node in the tree
* @index: the current BC index
* @modpath: the hardware_path struct to match a device against
* @return: The corresponding device if found, NULL otherwise.
*
* Checks all the children of @parent for a matching @id. If none
* found, it returns NULL.
*/
static struct device *
parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
{
struct parse_tree_data d = {
.index = index,
.modpath = modpath,
};
struct recurse_struct recurse_data = {
.obj = &d,
.fn = check_parent,
};
	/* The return value is ignored; any match is recorded in d.dev. */
	device_for_each_child(parent, &recurse_data, descend_children);
return d.dev;
}
/**
* hwpath_to_device - Finds the generic device corresponding to a given hardware path.
* @modpath: the hardware path.
* @return: The target device, NULL if not found.
*/
struct device *hwpath_to_device(struct hardware_path *modpath)
{
int i;
struct device *parent = &root;
for (i = 0; i < 6; i++) {
if (modpath->bc[i] == -1)
continue;
parent = parse_tree_node(parent, i, modpath);
if (!parent)
return NULL;
}
if (dev_is_pci(parent)) /* pci devices already parse MOD */
return parent;
else
return parse_tree_node(parent, 6, modpath);
}
EXPORT_SYMBOL(hwpath_to_device);
/**
* device_to_hwpath - Populates the hwpath corresponding to the given device.
* @dev: the target device
* @path: pointer to a previously allocated hwpath struct to be filled in
*/
void device_to_hwpath(struct device *dev, struct hardware_path *path)
{
struct parisc_device *padev;
if (dev->bus == &parisc_bus_type) {
padev = to_parisc_device(dev);
get_node_path(dev->parent, path);
path->mod = padev->hw_path;
} else if (dev_is_pci(dev)) {
get_node_path(dev, path);
}
}
EXPORT_SYMBOL(device_to_hwpath);
#define BC_PORT_MASK 0x8
#define BC_LOWER_PORT 0x8
#define BUS_CONVERTER(dev) \
((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
#define IS_LOWER_PORT(dev) \
((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \
& BC_PORT_MASK) == BC_LOWER_PORT)
#define MAX_NATIVE_DEVICES 64
#define NATIVE_DEVICE_OFFSET 0x1000
#define FLEX_MASK F_EXTEND(0xfffc0000)
#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW)
#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH)
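/*
 * The (unsigned long)(signed int) double cast sign-extends the
 * 32-bit register value, so bus addresses in the 0xfXXXXXXX range
 * come back as properly F_EXTENDed addresses on 64-bit kernels.
 */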
static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
struct device *parent);
static void __init walk_lower_bus(struct parisc_device *dev)
{
unsigned long io_io_low, io_io_high;
if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
return;
if (dev->id.hw_type == HPHW_IOA) {
io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
} else {
io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK;
}
walk_native_bus(io_io_low, io_io_high, &dev->dev);
}
/**
* walk_native_bus -- Probe a bus for devices
* @io_io_low: Base address of this bus.
* @io_io_high: Last address of this bus.
* @parent: The parent bus device.
*
* A native bus (eg Runway or GSC) may have up to 64 devices on it,
* spaced at intervals of 0x1000 bytes. PDC may not inform us of these
* devices, so we have to probe for them. Unfortunately, we may find
* devices which are not physically connected (such as extra serial &
* keyboard ports). This problem is not yet solved.
*/
static void __init walk_native_bus(unsigned long io_io_low,
unsigned long io_io_high, struct device *parent)
{
int i, devices_found = 0;
unsigned long hpa = io_io_low;
struct hardware_path path;
get_node_path(parent, &path);
do {
for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
struct parisc_device *dev;
/* Was the device already added by Firmware? */
dev = find_device_by_addr(hpa);
if (!dev) {
path.mod = i;
dev = alloc_pa_dev(hpa, &path);
if (!dev)
continue;
register_parisc_device(dev);
devices_found++;
}
walk_lower_bus(dev);
}
} while(!devices_found && hpa < io_io_high);
}
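/*
 * Note the loop shape above: the bus is scanned one 64-slot window
 * at a time, and scanning stops after the first window that yields
 * at least one device (or when io_io_high is reached).
 */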
#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
/**
* walk_central_bus - Find devices attached to the central bus
*
* PDC doesn't tell us about all devices in the system. This routine
* finds devices connected to the central bus.
*/
void __init walk_central_bus(void)
{
walk_native_bus(CENTRAL_BUS_ADDR,
CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
&root);
}
static __init void print_parisc_device(struct parisc_device *dev)
{
static int count __initdata;
pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
if (dev->num_addrs) {
int k;
pr_cont(", additional addresses: ");
for (k = 0; k < dev->num_addrs; k++)
pr_cont("0x%lx ", dev->addr[k]);
}
pr_cont("\n");
}
/**
* init_parisc_bus - Some preparation to be done before inventory
*/
void __init init_parisc_bus(void)
{
if (bus_register(&parisc_bus_type))
panic("Could not register PA-RISC bus type\n");
if (device_register(&root))
panic("Could not register PA-RISC root device\n");
get_device(&root);
}
static __init void qemu_header(void)
{
int num;
unsigned long *p;
pr_info("--- cut here ---\n");
pr_info("/* AUTO-GENERATED HEADER FILE FOR SEABIOS FIRMWARE */\n");
pr_cont("/* generated with Linux kernel */\n");
pr_cont("/* search for PARISC_QEMU_MACHINE_HEADER in Linux */\n\n");
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p
pr_info("#define PARISC_PDC_VERSION 0x%04lx\n\n",
boot_cpu_data.pdc.versions);
pr_info("#define PARISC_PDC_CPUID 0x%04lx\n\n",
boot_cpu_data.pdc.cpuid);
pr_info("#define PARISC_PDC_CAPABILITIES 0x%04lx\n\n",
boot_cpu_data.pdc.capabilities);
pr_info("#define PARISC_PDC_ENTRY_ORG 0x%04lx\n\n",
#ifdef CONFIG_64BIT
(unsigned long)(PAGE0->mem_pdc_hi) << 32 |
#endif
(unsigned long)PAGE0->mem_pdc);
pr_info("#define PARISC_PDC_CACHE_INFO");
p = (unsigned long *) &cache_info;
for (num = 0; num < sizeof(cache_info); num += sizeof(unsigned long)) {
if (((num % 5) == 0)) {
pr_cont(" \\\n");
pr_info("\t");
}
pr_cont("%s0x%04lx",
num?", ":"", *p++);
}
pr_cont("\n\n");
}
static __init int qemu_print_hpa(struct device *lin_dev, void *data)
{
struct parisc_device *dev = to_parisc_device(lin_dev);
unsigned long hpa = dev->hpa.start;
pr_cont("\t{\t.hpa = 0x%08lx,\\\n", hpa);
pr_cont("\t\t.iodc = &iodc_data_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.mod_info = &mod_info_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.mod_path = &mod_path_hpa_%08lx,\\\n", hpa);
pr_cont("\t\t.num_addr = HPA_%08lx_num_addr,\\\n", hpa);
pr_cont("\t\t.add_addr = { HPA_%08lx_add_addr } },\\\n", hpa);
return 0;
}
static __init void qemu_footer(void)
{
pr_info("\n\n#define PARISC_DEVICE_LIST \\\n");
for_each_padev(qemu_print_hpa, NULL);
pr_cont("\t{ 0, }\n");
pr_info("--- cut here ---\n");
}
/* print iodc data of the various hpa modules for qemu inclusion */
static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
{
struct parisc_device *dev = to_parisc_device(lin_dev);
unsigned long count;
unsigned long hpa = dev->hpa.start;
int status;
struct pdc_iodc iodc_data;
int mod_index;
struct pdc_system_map_mod_info pdc_mod_info;
struct pdc_module_path mod_path;
status = pdc_iodc_read(&count, hpa, 0,
&iodc_data, sizeof(iodc_data));
if (status != PDC_OK) {
pr_info("No IODC data for hpa 0x%08lx\n", hpa);
return 0;
}
pr_info("\n");
pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n",
hpa, parisc_hardware_description(&dev->id));
mod_index = 0;
do {
status = pdc_system_map_find_mods(&pdc_mod_info,
&mod_path, mod_index++);
} while (status == PDC_OK && pdc_mod_info.mod_addr != hpa);
pr_info("static struct pdc_system_map_mod_info"
" mod_info_hpa_%08lx = {\n", hpa);
#define DO(member) \
pr_cont("\t." #member " = 0x%x,\n", \
(unsigned int)pdc_mod_info.member)
DO(mod_addr);
DO(mod_pgs);
DO(add_addrs);
pr_cont("};\n");
#undef DO
pr_info("static struct pdc_module_path "
"mod_path_hpa_%08lx = {\n", hpa);
pr_cont("\t.path = { ");
pr_cont(".flags = 0x%x, ", mod_path.path.flags);
pr_cont(".bc = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }, ",
(unsigned char)mod_path.path.bc[0],
(unsigned char)mod_path.path.bc[1],
(unsigned char)mod_path.path.bc[2],
(unsigned char)mod_path.path.bc[3],
(unsigned char)mod_path.path.bc[4],
(unsigned char)mod_path.path.bc[5]);
pr_cont(".mod = 0x%x ", mod_path.path.mod);
pr_cont(" },\n");
pr_cont("\t.layers = { 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x }\n",
mod_path.layers[0], mod_path.layers[1], mod_path.layers[2],
mod_path.layers[3], mod_path.layers[4], mod_path.layers[5]);
pr_cont("};\n");
pr_info("static struct pdc_iodc iodc_data_hpa_%08lx = {\n", hpa);
#define DO(member) \
pr_cont("\t." #member " = 0x%04lx,\n", \
(unsigned long)iodc_data.member)
DO(hversion_model);
DO(hversion);
DO(spa);
DO(type);
DO(sversion_rev);
DO(sversion_model);
DO(sversion_opt);
DO(rev);
DO(dep);
DO(features);
DO(checksum);
DO(length);
#undef DO
pr_cont("\t/* pad: 0x%04x, 0x%04x */\n",
iodc_data.pad[0], iodc_data.pad[1]);
pr_cont("};\n");
pr_info("#define HPA_%08lx_num_addr %d\n", hpa, dev->num_addrs);
pr_info("#define HPA_%08lx_add_addr ", hpa);
count = 0;
if (dev->num_addrs == 0)
pr_cont("0");
while (count < dev->num_addrs) {
pr_cont("0x%08lx, ", dev->addr[count]);
count++;
}
pr_cont("\n\n");
return 0;
}
static __init int print_one_device(struct device * dev, void * data)
{
struct parisc_device * pdev = to_parisc_device(dev);
if (check_dev(dev))
print_parisc_device(pdev);
return 0;
}
/**
* print_parisc_devices - Print out a list of devices found in this system
*/
void __init print_parisc_devices(void)
{
for_each_padev(print_one_device, NULL);
#define PARISC_QEMU_MACHINE_HEADER 0
if (PARISC_QEMU_MACHINE_HEADER) {
qemu_header();
for_each_padev(qemu_print_iodc_data, NULL);
qemu_footer();
}
}
| linux-master | arch/parisc/kernel/drivers.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* inventory.c
*
* Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
* Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
*
* These are the routines to discover what hardware exists in this box.
* This task is complicated by there being 3 different ways of
* performing an inventory, depending largely on the age of the box.
* The recommended way to do this is to check to see whether the machine
* is a `Snake' first, then try System Map, then try PAT. We try System
* Map before checking for a Snake -- this probably doesn't cause any
* problems, but...
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>
/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT
int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;
/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;
void __init setup_pdc(void)
{
long status;
unsigned int bus_id;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
struct pdc_model model;
#ifdef CONFIG_64BIT
struct pdc_pat_cell_num cell_info;
#endif
/* Determine the pdc "type" used on this machine */
printk(KERN_INFO "Determining PDC firmware type: ");
status = pdc_system_map_find_mods(&module_result, &module_path, 0);
if (status == PDC_OK) {
pdc_type = PDC_TYPE_SYSTEM_MAP;
pr_cont("System Map.\n");
return;
}
/*
* If the machine doesn't support PDC_SYSTEM_MAP then either it
* is a pdc pat box, or it is an older box. All 64 bit capable
* machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
*/
/*
* TODO: We should test for 64 bit capability and give a
* clearer message.
*/
#ifdef CONFIG_64BIT
status = pdc_pat_cell_get_number(&cell_info);
if (status == PDC_OK) {
unsigned long legacy_rev, pat_rev;
pdc_type = PDC_TYPE_PAT;
pr_cont("64 bit PAT.\n");
parisc_cell_num = cell_info.cell_num;
parisc_cell_loc = cell_info.cell_loc;
pr_info("PAT: Running on cell %lu and location %lu.\n",
parisc_cell_num, parisc_cell_loc);
status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
&pat_rev, &parisc_pat_pdc_cap);
pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
legacy_rev, pat_rev, parisc_pat_pdc_cap,
parisc_pat_pdc_cap
& PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
parisc_pat_pdc_cap
& PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
return;
}
#endif
/* Check the CPU's bus ID. There's probably a better test. */
status = pdc_model_info(&model);
bus_id = (model.hversion >> (4 + 7)) & 0x1f;
switch (bus_id) {
case 0x4: /* 720, 730, 750, 735, 755 */
case 0x6: /* 705, 710 */
case 0x7: /* 715, 725 */
case 0x8: /* 745, 747, 742 */
case 0xA: /* 712 and similar */
case 0xC: /* 715/64, at least */
pdc_type = PDC_TYPE_SNAKE;
pr_cont("Snake.\n");
return;
default: /* Everything else */
pr_cont("Unsupported.\n");
panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
}
}
#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
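/*
 * Example: with 16kB kernel pages PAGE_SHIFT is 14, so
 * PDC_PAGE_ADJ_SHIFT is 2 and four PDC 4k pages make up each
 * kernel page (the pages4k >> 2 conversion below).
 */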
static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
unsigned long pages4k)
{
/* Rather than aligning and potentially throwing away
* memory, we'll assume that any ranges are already
* nicely aligned with any reasonable page size, and
* panic if they are not (it's more likely that the
* pdc info is bad in this case).
*/
if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
panic("Memory range doesn't align with page size!\n");
}
pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}
static void __init pagezero_memconfig(void)
{
unsigned long npages;
/* Use the 32 bit information from page zero to create a single
* entry in the pmem_ranges[] table.
*
* We currently don't support machines with contiguous memory
* >= 4 Gb, who report that memory using 64 bit only fields
* on page zero. It's not worth doing until it can be tested,
* and it is not clear we can support those machines for other
* reasons.
*
* If that support is done in the future, this is where it
* should be done.
*/
npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
set_pmem_entry(pmem_ranges,0UL,npages);
npmem_ranges = 1;
}
#ifdef CONFIG_64BIT
/* All of the PDC PAT specific code is 64-bit only */
/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs. This view will cause an invalid
** argument error for all other cell module types.
**
*/
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
unsigned long bytecnt;
unsigned long temp; /* 64-bit scratch value */
long status; /* PDC return value status */
struct parisc_device *dev;
pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
if (!pa_pdc_cell)
panic("couldn't allocate memory for PDC_PAT_CELL!");
/* return cell module (PA or Processor view) */
status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
PA_VIEW, pa_pdc_cell);
if (status != PDC_OK) {
/* no more cell modules or error */
kfree(pa_pdc_cell);
return status;
}
temp = pa_pdc_cell->cba;
dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
if (!dev) {
kfree(pa_pdc_cell);
return PDC_OK;
}
/* alloc_pa_dev sets dev->hpa */
/*
** save parameters in the parisc_device
** (The idea being the device driver will call pdc_pat_cell_module()
** and store the results in its own data structure.)
*/
dev->pcell_loc = pcell_loc;
dev->mod_index = mod_index;
/* save generic info returned from the call */
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
#ifdef DEBUG_PAT
/* dump what we see so far... */
switch (PAT_GET_ENTITY(dev->mod_info)) {
pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
unsigned long i;
case PAT_ENTITY_PROC:
printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
pa_pdc_cell->mod[0]);
break;
case PAT_ENTITY_MEM:
printk(KERN_DEBUG
"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
pa_pdc_cell->mod[2]);
break;
case PAT_ENTITY_CA:
printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
break;
case PAT_ENTITY_PBC:
printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
goto print_ranges;
case PAT_ENTITY_SBA:
printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
goto print_ranges;
case PAT_ENTITY_LBA:
printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
print_ranges:
pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
IO_VIEW, &io_pdc_cell);
printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
printk(KERN_DEBUG
" PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, pa_pdc_cell->mod[2 + i * 3], /* type */
pa_pdc_cell->mod[3 + i * 3], /* start */
pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
printk(KERN_DEBUG
" IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
i, io_pdc_cell.mod[2 + i * 3], /* type */
io_pdc_cell.mod[3 + i * 3], /* start */
io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
}
printk(KERN_DEBUG "\n");
break;
}
#endif /* DEBUG_PAT */
kfree(pa_pdc_cell);
return PDC_OK;
}
/* pat pdc can return information about a variety of different
* types of memory (e.g. firmware,i/o, etc) but we only care about
* the usable physical ram right now. Since the firmware specific
* information is allocated on the stack, we'll be generous, in
* case there is a lot of other information we don't care about.
*/
#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
static void __init pat_memconfig(void)
{
unsigned long actual_len;
struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
unsigned long length;
int i;
length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
if ((status != PDC_OK)
|| ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
/* The above pdc call shouldn't fail, but, just in
* case, just use the PAGE0 info.
*/
printk("\n\n\n");
printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
"All memory may not be used!\n\n\n");
pagezero_memconfig();
return;
}
entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
if (entries > PAT_MAX_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
}
/* Copy information into the firmware independent pmem_ranges
* array, skipping types we don't care about. Notice we said
* "may" above. We'll use all the entries that were returned.
*/
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
|| (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
|| (mtbl_ptr->pages == 0)
|| ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
continue;
}
if (npmem_ranges == MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
break;
}
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
static int __init pat_inventory(void)
{
int status;
ulong mod_index = 0;
struct pdc_pat_cell_num cell_info;
/*
	** Note: Prelude (and its successors: Lclass, A400/500) only
** implement PDC_PAT_CELL sub-options 0 and 2.
*/
status = pdc_pat_cell_get_number(&cell_info);
if (status != PDC_OK) {
return 0;
}
#ifdef DEBUG_PAT
printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
cell_info.cell_loc);
#endif
while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
mod_index++;
}
return mod_index;
}
/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
struct pdc_memory_table_raddr r_addr;
struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
struct pdc_memory_table *mtbl_ptr;
physmem_range_t *pmem_ptr;
long status;
int entries;
int i;
status = pdc_mem_mem_table(&r_addr,mem_table,
(unsigned long)MAX_PHYSMEM_RANGES);
if (status != PDC_OK) {
/* The above pdc call only works on boxes with sprockets
* firmware (newer B,C,J class). Other non PAT PDC machines
* do support more than 3.75 Gb of memory, but we don't
* support them yet.
*/
pagezero_memconfig();
return;
}
if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
printk(KERN_WARNING "Some memory will not be used!\n");
}
entries = (int)r_addr.entries_returned;
npmem_ranges = 0;
mtbl_ptr = mem_table;
pmem_ptr = pmem_ranges; /* Global firmware independent table */
for (i = 0; i < entries; i++,mtbl_ptr++) {
set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
npmem_ranges++;
}
}
#else /* !CONFIG_64BIT */
#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()
#endif /* !CONFIG_64BIT */
#ifndef CONFIG_PA20
/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
struct pdc_module_path *module_path)
{
struct parisc_device *dev;
int status = pdc_mem_map_hpa(r_addr, module_path);
if (status != PDC_OK)
return NULL;
dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
if (dev == NULL)
return NULL;
register_parisc_device(dev);
return dev;
}
/**
* snake_inventory
*
* Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
* To use it, we initialise the mod_path.bc to 0xff and try all values of
* mod to get the HPA for the top-level devices. Bus adapters may have
* sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
* module, then trying all possible functions.
*/
static void __init snake_inventory(void)
{
int mod;
for (mod = 0; mod < 16; mod++) {
struct parisc_device *dev;
struct pdc_module_path module_path;
struct pdc_memory_map r_addr;
unsigned int func;
memset(module_path.path.bc, 0xff, 6);
module_path.path.mod = mod;
dev = legacy_create_device(&r_addr, &module_path);
if ((!dev) || (dev->id.hw_type != HPHW_BA))
continue;
memset(module_path.path.bc, 0xff, 4);
module_path.path.bc[4] = mod;
for (func = 0; func < 16; func++) {
module_path.path.bc[5] = 0;
module_path.path.mod = func;
legacy_create_device(&r_addr, &module_path);
}
}
}
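/*
 * Illustrative probe sequence for module 2 (values assumed for the
 * example): the first pass queries path ff/ff/ff/ff/ff/ff mod 2; if
 * that HPA answers as a bus adapter, the second pass queries
 * ff/ff/ff/ff/2/0 with mod 0..15 to enumerate its children.
 */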
#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif /* CONFIG_PA20 */
/* Common 32/64 bit based code goes here */
/**
* add_system_map_addresses - Add additional addresses to the parisc device.
* @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
* @module_instance: The system_map module instance.
*
* This function adds any additional addresses reported by the system_map
* firmware to the parisc device.
*/
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
int module_instance)
{
int i;
long status;
struct pdc_system_map_addr_info addr_result;
dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
if(!dev->addr) {
printk(KERN_ERR "%s %s(): memory allocation failure\n",
__FILE__, __func__);
return;
}
for(i = 1; i <= num_addrs; ++i) {
status = pdc_system_map_find_addrs(&addr_result,
module_instance, i);
if(PDC_OK == status) {
dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
dev->num_addrs++;
} else {
printk(KERN_WARNING
"Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
status, i);
}
}
}
/**
* system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
*
* This function attempts to retrieve and register all the devices firmware
* knows about via the SYSTEM_MAP PDC call.
*/
static void __init system_map_inventory(void)
{
int i;
long status = PDC_OK;
for (i = 0; i < 256; i++) {
struct parisc_device *dev;
struct pdc_system_map_mod_info module_result;
struct pdc_module_path module_path;
status = pdc_system_map_find_mods(&module_result,
&module_path, i);
if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
break;
if (status != PDC_OK)
continue;
dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
if (!dev)
continue;
register_parisc_device(dev);
/* if available, get the additional addresses for a module */
if (!module_result.add_addrs)
continue;
add_system_map_addresses(dev, module_result.add_addrs, i);
}
walk_central_bus();
return;
}
void __init do_memory_inventory(void)
{
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_memconfig();
break;
case PDC_TYPE_SYSTEM_MAP:
sprockets_memconfig();
break;
case PDC_TYPE_SNAKE:
pagezero_memconfig();
return;
default:
panic("Unknown PDC type!\n");
}
if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
printk(KERN_WARNING "Bad memory configuration returned!\n");
printk(KERN_WARNING "Some memory may not be used!\n");
pagezero_memconfig();
}
}
void __init do_device_inventory(void)
{
printk(KERN_INFO "Searching for devices...\n");
init_parisc_bus();
switch (pdc_type) {
case PDC_TYPE_PAT:
pat_inventory();
break;
case PDC_TYPE_SYSTEM_MAP:
system_map_inventory();
break;
case PDC_TYPE_SNAKE:
snake_inventory();
break;
default:
panic("Unknown PDC type!\n");
}
printk(KERN_INFO "Found devices:\n");
print_parisc_devices();
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
pa_serialize_tlb_flushes = machine_has_merced_bus();
if (pa_serialize_tlb_flushes)
pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif
#if defined(CONFIG_FW_CFG_SYSFS)
if (running_on_qemu) {
struct resource res[3] = {0,};
unsigned int base;
base = ((unsigned long long) PAGE0->pad0[2] << 32)
| PAGE0->pad0[3]; /* SeaBIOS stored it here */
res[0].name = "fw_cfg";
res[0].start = base;
res[0].end = base + 8 - 1;
res[0].flags = IORESOURCE_MEM;
res[1].name = "ctrl";
res[1].start = 0;
res[1].flags = IORESOURCE_REG;
res[2].name = "data";
res[2].start = 4;
res[2].flags = IORESOURCE_REG;
if (base) {
pr_info("Found qemu fw_cfg interface at %#08x\n", base);
platform_device_register_simple("fw_cfg",
PLATFORM_DEVID_NONE, res, 3);
}
}
#endif
}
| linux-master | arch/parisc/kernel/inventory.c |
// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/core-api/dma-api-howto.rst for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <[email protected]>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h> /* get_order */
#include <linux/uaccess.h>
#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;
unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/
#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
u_long *res_ptr = (unsigned long *)pcxl_res_map;
u_long i = 0;
printk("res_map: ");
for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
printk("%08lx ", *res_ptr);
printk("\n");
}
#else
static inline void dump_resmap(void) { }
#endif
static inline int map_pte_uncached(pte_t * pte,
unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
unsigned long flags;
if (!pte_none(*pte))
printk(KERN_ERR "map_pte_uncached: page already exists\n");
purge_tlb_start(flags);
set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
pdtlb(SR_KERNEL, orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
(*paddr_ptr) += PAGE_SIZE;
pte++;
} while (vaddr < end);
return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
{
unsigned long end;
unsigned long orig_vaddr = vaddr;
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
pte_t * pte = pte_alloc_kernel(pmd, vaddr);
if (!pte)
return -ENOMEM;
if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
return -ENOMEM;
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
unsigned long paddr)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
p4d = p4d_offset(dir, vaddr);
pud = pud_offset(p4d, vaddr);
pmd = pmd_alloc(NULL, pud, vaddr);
if (!pmd)
return -ENOMEM;
if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
return -ENOMEM;
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
return 0;
}
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
unsigned long size)
{
pte_t * pte;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pmd_none(*pmd))
return;
if (pmd_bad(*pmd)) {
pmd_ERROR(*pmd);
pmd_clear(pmd);
return;
}
pte = pte_offset_kernel(pmd, vaddr);
vaddr &= ~PMD_MASK;
end = vaddr + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
unsigned long flags;
pte_t page = *pte;
pte_clear(&init_mm, vaddr, pte);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, orig_vaddr);
purge_tlb_end(flags);
vaddr += PAGE_SIZE;
orig_vaddr += PAGE_SIZE;
pte++;
if (pte_none(page) || pte_present(page))
continue;
printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
unsigned long size)
{
pmd_t * pmd;
unsigned long end;
unsigned long orig_vaddr = vaddr;
if (pgd_none(*dir))
return;
if (pgd_bad(*dir)) {
pgd_ERROR(*dir);
pgd_clear(dir);
return;
}
pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
do {
unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
orig_vaddr += PMD_SIZE;
pmd++;
} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
pgd_t * dir;
unsigned long end = vaddr + size;
dir = pgd_offset_k(vaddr);
do {
unmap_uncached_pmd(dir, vaddr, end - vaddr);
vaddr = vaddr + PGDIR_SIZE;
dir++;
} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size) \
for(; res_ptr < res_end; ++res_ptr) \
{ \
if(0 == ((*res_ptr) & mask)) { \
*res_ptr |= mask; \
idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
pcxl_res_hint = idx + (size >> 3); \
goto resource_found; \
} \
}
#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
res_ptr = (u##size *)&pcxl_res_map[0]; \
PCXL_SEARCH_LOOP(idx, mask, size); \
}
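/*
 * Worked example: one map bit covers one page.  A 3-page request
 * builds mask = ~0UL >> (BITS_PER_LONG - 3) = 0x7, and the 8-bit
 * search above claims the first byte with those bits clear by
 * OR-ing the mask into it.
 */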
static unsigned long
pcxl_alloc_range(size_t size)
{
int res_idx;
u_long mask, flags;
unsigned int pages_needed = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_needed;
DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
size, pages_needed, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_needed <= 8) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
} else if(pages_needed <= 16) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
} else if(pages_needed <= 32) {
PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
} else {
panic("%s: pcxl_alloc_range() Too many pages to map.\n",
__FILE__);
}
dump_resmap();
panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
__FILE__);
resource_found:
DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
res_idx, mask, pcxl_res_hint);
pcxl_used_pages += pages_needed;
pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
/*
** return the corresponding vaddr in the pcxl dma map
*/
return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
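/*
 * res_idx is a byte offset into the resource map and each map byte
 * covers 8 pages, so the mapping address above is
 * pcxl_dma_start + res_idx * 8 * PAGE_SIZE, i.e. res_idx shifted by
 * (PAGE_SHIFT + 3).
 */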
#define PCXL_FREE_MAPPINGS(idx, m, size) \
u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
/* BUG_ON((*res_ptr & m) != m); */ \
*res_ptr &= ~m;
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
u_long mask, flags;
unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
unsigned int pages_mapped = size >> PAGE_SHIFT;
mask = (u_long) -1L;
mask >>= BITS_PER_LONG - pages_mapped;
DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
res_idx, size, pages_mapped, mask);
spin_lock_irqsave(&pcxl_res_lock, flags);
if(pages_mapped <= 8) {
PCXL_FREE_MAPPINGS(res_idx, mask, 8);
} else if(pages_mapped <= 16) {
PCXL_FREE_MAPPINGS(res_idx, mask, 16);
} else if(pages_mapped <= 32) {
PCXL_FREE_MAPPINGS(res_idx, mask, 32);
} else {
panic("%s: pcxl_free_range() Too many pages to unmap.\n",
__FILE__);
}
pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
spin_unlock_irqrestore(&pcxl_res_lock, flags);
dump_resmap();
}
static int __maybe_unused proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
u_long i = 0;
unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */
seq_printf(m, "\nDMA Mapping Area size : %d bytes (%ld pages)\n",
PCXL_DMA_MAP_SIZE, total_pages);
seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
seq_puts(m, " total: free: used: % used:\n");
seq_printf(m, "blocks %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
(pcxl_used_bytes * 100) / pcxl_res_size);
seq_printf(m, "pages %8ld %8ld %8ld %8ld%%\n", total_pages,
total_pages - pcxl_used_pages, pcxl_used_pages,
(pcxl_used_pages * 100 / total_pages));
#if 0
seq_puts(m, "\nResource bitmap:");
for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
if ((i & 7) == 0)
seq_puts(m,"\n ");
		seq_printf(m, " %08lx", *res_ptr);
}
#endif
seq_putc(m, '\n');
return 0;
}
static int __init
pcxl_dma_init(void)
{
if (pcxl_dma_start == 0)
return 0;
pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					       get_order(pcxl_res_size));
	if (!pcxl_res_map)
		panic("pcxl_dma_init: unable to allocate DMA resource map\n");
	memset(pcxl_res_map, 0, pcxl_res_size);
proc_gsc_root = proc_mkdir("bus/gsc", NULL);
if (!proc_gsc_root)
printk(KERN_WARNING
"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
else {
struct proc_dir_entry* ent;
ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
proc_pcxl_dma_show);
if (!ent)
printk(KERN_WARNING
"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
}
return 0;
}
__initcall(pcxl_dma_init);
void *arch_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long vaddr;
unsigned long paddr;
int order;
if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
return NULL;
order = get_order(size);
size = 1 << (order + PAGE_SHIFT);
vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
	if (!paddr) {
		pcxl_free_range(vaddr, size);
		return NULL;
	}
	flush_kernel_dcache_range(paddr, size);
paddr = __pa(paddr);
map_uncached_pages(vaddr, size, paddr);
*dma_handle = (dma_addr_t) paddr;
return (void *)vaddr;
}
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
int order = get_order(size);
WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
boot_cpu_data.cpu_type != pcxl);
size = 1 << (order + PAGE_SHIFT);
unmap_uncached_pages((unsigned long)vaddr, size);
pcxl_free_range((unsigned long)vaddr, size);
free_pages((unsigned long)__va(dma_handle), order);
}
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
/*
* fdc: The data cache line is written back to memory, if and only if
* it is dirty, and then invalidated from the data cache.
*/
flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}
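/*
 * On the CPU side the direction matters: TO_DEVICE/BIDIRECTIONAL
 * buffers are flushed as above, while FROM_DEVICE buffers are
 * purged (invalidated without write-back) so stale dirty lines
 * cannot overwrite what the device just wrote.
 */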
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
unsigned long addr = (unsigned long) phys_to_virt(paddr);
switch (dir) {
case DMA_TO_DEVICE:
case DMA_BIDIRECTIONAL:
flush_kernel_dcache_range(addr, size);
return;
case DMA_FROM_DEVICE:
purge_kernel_dcache_range_asm(addr, addr + size);
return;
default:
BUG();
}
}
| linux-master | arch/parisc/kernel/pci-dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PARISC Architecture-dependent parts of process handling
* based on the work for i386
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2014 Helge Deller <[email protected]>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*/
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>
#include <linux/sched/hotplug.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
/*
** The Wright Brothers and Gecko systems have an H/W problem
** (Lasi...'nuf said) that may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
/*
** If user has modified the Firmware Selftest Bitmap,
** run the tests specified in the bitmap after the
** system is rebooted w/PDC_DO_RESET.
**
** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
**
** Using "directed resets" at each processor with the MEM_TOC
** vector cleared will also avoid running destructive
** memory self tests. (Not implemented yet)
*/
if (ftc_bitmap) {
pdc_do_firm_test_reset(ftc_bitmap);
}
#endif
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* "Normal" system reset */
pdc_do_reset();
/* Nope...box should reset with just CMD_RESET now */
gsc_writel(CMD_RESET, COMMAND_GLOBAL);
/* Wait for RESET to lay us to rest. */
while (1) ;
}
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
void machine_power_off(void)
{
/* Put the soft power button back under hardware control.
* If the user had already pressed the power button, the
* following call will immediately power off. */
pdc_soft_power_button(0);
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* ipmi_poweroff may have been installed. */
do_kernel_power_off();
/* It seems we have no way to power the system off via
* software. The user has to press the button himself. */
printk("Power off or press RETURN to reboot.\n");
/* prevent soft lockup/stalled CPU messages for endless loop. */
rcu_sysrq_start();
lockup_detector_soft_poweroff();
while (1) {
/* reboot if user presses RETURN key */
if (pdc_iodc_getc() == 13) {
printk("Rebooting...\n");
machine_restart(NULL);
}
}
}
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
void machine_halt(void)
{
machine_power_off();
}
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
** REVISIT: several arches implement a "lazy fpu state".
*/
}
/*
* Idle thread support
*
* Detect when running on QEMU with SeaBIOS PDC Firmware and let
* QEMU idle the host too.
*/
int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);
/*
 * Called from the idle thread for the CPU which has been shut down.
*/
void __noreturn arch_cpu_idle_dead(void)
{
#ifdef CONFIG_HOTPLUG_CPU
idle_task_exit();
local_irq_disable();
/* Tell the core that this CPU is now safe to dispose of. */
cpuhp_ap_report_dead();
/* Ensure that the cache lines are written out. */
flush_cache_all_local();
flush_tlb_all_local(NULL);
/* Let PDC firmware put CPU into firmware idle loop. */
__pdc_cpu_rendezvous();
pr_warn("PDC does not provide rendezvous function.\n");
#endif
while (1);
}
void __cpuidle arch_cpu_idle(void)
{
/* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::);
}
static int __init parisc_idle_init(void)
{
if (!running_on_qemu)
cpu_idle_poll_ctrl(1);
return 0;
}
arch_initcall(parisc_idle_init);
/*
* Copy architecture-specific thread state
*/
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
	/* We have to use void * instead of a function pointer, because
	 * on 64-bit a function pointer points to a function descriptor
	 * rather than to the code itself.
	 * Make them const so the compiler knows they live in .text */
extern void * const ret_from_kernel_thread;
extern void * const child_return;
if (unlikely(args->fn)) {
/* kernel thread */
memset(cregs, 0, sizeof(struct pt_regs));
if (args->idle) /* idle thread */
return 0;
/* Must exit via ret_from_kernel_thread in order
* to call schedule_tail()
*/
cregs->ksp = (unsigned long) stack + FRAME_SIZE + PT_SZ_ALGN;
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
* Copy function and argument to be called from
* ret_from_kernel_thread.
*/
#ifdef CONFIG_64BIT
cregs->gr[27] = ((unsigned long *)args->fn)[3];
cregs->gr[26] = ((unsigned long *)args->fn)[2];
#else
cregs->gr[26] = (unsigned long) args->fn;
#endif
cregs->gr[25] = (unsigned long) args->fn_arg;
} else {
/* user thread */
/* usp must be word aligned. This also prevents users from
* passing in the value 1 (which is the signal for a special
* return for a kernel thread) */
if (usp) {
usp = ALIGN(usp, 4);
if (likely(usp))
cregs->gr[30] = usp;
}
cregs->ksp = (unsigned long) stack + FRAME_SIZE;
cregs->kpc = (unsigned long) &child_return;
/* Setup thread TLS area */
if (clone_flags & CLONE_SETTLS)
cregs->cr27 = tls;
}
return 0;
}
unsigned long
__get_wchan(struct task_struct *p)
{
struct unwind_frame_info info;
unsigned long ip;
int count = 0;
/*
	 * These bracket the sleeping functions.
*/
unwind_frame_init_from_blocked_task(&info, p);
do {
if (unwind_once(&info) < 0)
return 0;
if (task_is_running(p))
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
return ip;
} while (count++ < MAX_UNWIND_ENTRIES);
return 0;
}
| linux-master | arch/parisc/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0
/*
* functions to patch RO kernel text during runtime
*
* Copyright (c) 2019 Sven Schnelle <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>
struct patch {
void *addr;
u32 *insn;
unsigned int len;
};
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
int *need_unmap)
{
unsigned long uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
struct page *page;
*need_unmap = 0;
if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
page = virt_to_page(addr);
else
return addr;
*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
raw_spin_lock_irqsave(&patch_lock, *flags);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);
raw_spin_unlock_irqrestore(&patch_lock, *flags);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
{
unsigned long start = (unsigned long)addr;
unsigned long end = (unsigned long)addr + len;
unsigned long flags;
u32 *p, *fixmap;
int mapped;
/* Make sure we don't have any aliases in cache */
flush_kernel_dcache_range_asm(start, end);
flush_kernel_icache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
while (len >= 4) {
*p++ = *insn++;
addr += sizeof(u32);
len -= sizeof(u32);
if (len && offset_in_page(addr) == 0) {
/*
* We're crossing a page boundary, so
* need to remap
*/
flush_kernel_dcache_range_asm((unsigned long)fixmap,
(unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap,
(unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
&mapped);
}
}
flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
}
void __kprobes __patch_text(void *addr, u32 insn)
{
__patch_text_multiple(addr, &insn, sizeof(insn));
}
static int __kprobes patch_text_stop_machine(void *data)
{
struct patch *patch = data;
__patch_text_multiple(patch->addr, patch->insn, patch->len);
return 0;
}
void __kprobes patch_text(void *addr, unsigned int insn)
{
struct patch patch = {
.addr = addr,
.insn = &insn,
.len = sizeof(insn),
};
stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}
void __kprobes patch_text_multiple(void *addr, u32 *insn, unsigned int len)
{
struct patch patch = {
.addr = addr,
.insn = insn,
.len = len
};
stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}
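/*
 * Usage sketch (illustrative): overwrite one instruction word with a
 * PA-RISC nop while other CPUs are held off by the stop_machine() call
 * inside patch_text(). The target address and the caller below are
 * assumptions, not taken from this file.
 */
#if 0
static void example_patch_nop(void *insn_addr)
{
	u32 nop = 0x08000240;	/* "or %r0,%r0,%r0", the canonical nop */
	patch_text(insn_addr, nop);
}
#endif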
| linux-master | arch/parisc/kernel/patch.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/audit_arch.h>
#include <asm/unistd.h>
unsigned int parisc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned int parisc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned int parisc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned int parisc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned int parisc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
| linux-master | arch/parisc/kernel/compat_audit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Code to handle x86 style IRQs plus some generic interrupt stuff.
*
* Copyright (C) 1992 Linus Torvalds
* Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, [email protected])
* Copyright (C) 1999-2000 Grant Grundler
* Copyright (c) 2005 Matthew Wilcox
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/sched/task_stack.h>
#include <asm/io.h>
#include <asm/softirq_stack.h>
#include <asm/smp.h>
#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
#define EIEM_MASK(irq) (1UL<<(CPU_IRQ_MAX - irq))
/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;
/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-entry while an interrupt is being processed.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
static void cpu_mask_irq(struct irq_data *d)
{
unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs. If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
* handle it, and the set_eiem() at the bottom will ensure it
* then gets disabled */
}
static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
cpu_eiem |= eirr_bit;
/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
smp_send_all_nop();
}
static void cpu_unmask_irq(struct irq_data *d)
{
__cpu_unmask_irq(d->irq);
}
void cpu_ack_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
per_cpu(local_ack_eiem, cpu) &= ~mask;
/* disable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
/* and now ack it */
mtctl(mask, 23);
}
void cpu_eoi_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
per_cpu(local_ack_eiem, cpu) |= mask;
/* enable the interrupt */
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
if (irqd_is_per_cpu(d))
return -EINVAL;
cpu_dest = cpumask_first_and(dest, cpu_online_mask);
if (cpu_dest >= nr_cpu_ids)
cpu_dest = cpumask_first(cpu_online_mask);
return cpu_dest;
}
#endif
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
.irq_mask = cpu_mask_irq,
.irq_unmask = cpu_unmask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = cpu_eoi_irq,
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
.irq_retrigger = NULL,
};
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing for arch specific interrupts
*/
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
seq_printf(p, "%*s: ", prec, "STK");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
seq_puts(p, " Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
seq_printf(p, "%*s: ", prec, "IST");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
seq_puts(p, " Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
seq_printf(p, "%*s: ", prec, "RES");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
seq_puts(p, " Rescheduling interrupts\n");
seq_printf(p, "%*s: ", prec, "CAL");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
seq_puts(p, " Function call interrupts\n");
}
#endif
seq_printf(p, "%*s: ", prec, "UAH");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
seq_puts(p, " Unaligned access handler traps\n");
seq_printf(p, "%*s: ", prec, "FPA");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
seq_puts(p, " Floating point assist traps\n");
seq_printf(p, "%*s: ", prec, "TLB");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
seq_puts(p, " TLB shootdowns\n");
return 0;
}
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
unsigned long flags;
if (i == 0) {
seq_puts(p, " ");
for_each_online_cpu(j)
seq_printf(p, " CPU%d", j);
#ifdef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
struct irq_desc *desc = irq_to_desc(i);
struct irqaction *action;
raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (!action)
goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j));
seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
while ((action = action->next))
seq_printf(p, ", %s", action->name);
#else
for ( ;action; action = action->next) {
unsigned int k, avg, min, max;
min = max = action->cr16_hist[0];
for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
int hist = action->cr16_hist[k];
if (hist) {
avg += hist;
} else
break;
if (hist > max) max = hist;
if (hist < min) min = hist;
}
			avg = k ? avg / k : 0;
seq_printf(p, " %s[%d/%d/%d]", action->name,
min,avg,max);
}
#endif
seq_putc(p, '\n');
skip:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
if (i == NR_IRQS)
arch_show_interrupts(p, 3);
return 0;
}
/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_has_action(irq))
return -EBUSY;
if (irq_get_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
irq_set_chip_and_handler(irq, type, handle_percpu_irq);
irq_set_chip_data(irq, data);
__cpu_unmask_irq(irq);
}
return 0;
}
int txn_claim_irq(int irq)
{
return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}
/*
* The bits_wide parameter accommodates the limitations of the HW/SW which
* use these bits:
* Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
* V-class (EPIC): 6 bits
* N/L/A-class (iosapic): 8 bits
* PCI 2.2 MSI: 16 bits
* Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
*
* On the service provider side:
* o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
* o PA 2.0 wide mode 6-bits (per processor)
* o IA64 8-bits (0-256 total)
*
* So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
* by the processor...and the N/L-class I/O subsystem supports more bits than
* PA2.0 has. The first case is the problem.
*/
int txn_alloc_irq(unsigned int bits_wide)
{
int irq;
	/* never return irq 0 because that's the interval timer */
for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
if (cpu_claim_irq(irq, NULL, NULL) < 0)
continue;
if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
continue;
return irq;
}
/* unlikely, but be prepared */
return -1;
}
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
struct irq_data *d = irq_get_irq_data(irq);
irq_data_update_affinity(d, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
}
unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
next_cpu++; /* assign to "next" CPU we want this bugger on */
/* validate entry */
while ((next_cpu < nr_cpu_ids) &&
(!per_cpu(cpu_data, next_cpu).txn_addr ||
!cpu_online(next_cpu)))
next_cpu++;
if (next_cpu >= nr_cpu_ids)
next_cpu = 0; /* nothing else, assign monarch */
return txn_affinity_addr(virt_irq, next_cpu);
}
unsigned int txn_alloc_data(unsigned int virt_irq)
{
return virt_irq - CPU_IRQ_BASE;
}
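/*
 * Sketch of the txn_*() sequence described above: allocate a virtual
 * IRQ wide enough for the device, then fetch the transaction
 * address/data pair to program into its interrupt registers.
 * example_dev_write() is a hypothetical device accessor.
 */
#if 0
static int example_setup_txn(unsigned int bits_wide)
{
	int virt_irq = txn_alloc_irq(bits_wide);
	if (virt_irq < 0)
		return -ENOSPC;
	example_dev_write(txn_alloc_addr(virt_irq),
			  txn_alloc_data(virt_irq));
	return virt_irq;
}
#endif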
static inline int eirr_to_irq(unsigned long eirr)
{
int bit = fls_long(eirr);
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
#ifdef CONFIG_IRQSTACKS
/*
* IRQ STACK - used for irq handler
*/
#ifdef CONFIG_64BIT
#define IRQ_STACK_SIZE (4096 << 4) /* 64k irq stack size */
#else
#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */
#endif
union irq_stack_union {
unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
volatile unsigned int slock[4];
volatile unsigned int lock[1];
};
static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif
int sysctl_panic_on_stackoverflow = 1;
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN (256*6)
unsigned long stack_start = (unsigned long) task_stack_page(current);
unsigned long sp = regs->gr[30];
unsigned long stack_usage;
unsigned int *last_usage;
int cpu = smp_processor_id();
/* if sr7 != 0, we interrupted a userspace process which we do not want
* to check for stack overflow. We will only check the kernel stack. */
if (regs->sr[7])
return;
/* exit if already in panic */
if (sysctl_panic_on_stackoverflow < 0)
return;
/* calculate kernel stack usage */
stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
if (likely(stack_usage <= THREAD_SIZE))
goto check_kernel_stack; /* found kernel stack */
/* check irq stack usage */
stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
stack_usage = sp - stack_start;
last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow irq stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
goto panic_check;
check_kernel_stack:
#endif
/* check kernel stack usage */
last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
if (unlikely(stack_usage > *last_usage))
*last_usage = stack_usage;
if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
return;
pr_emerg("stackcheck: %s will most likely overflow kernel stack "
"(sp:%lx, stk bottom-top:%lx-%lx)\n",
current->comm, sp, stack_start, stack_start + THREAD_SIZE);
#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
if (sysctl_panic_on_stackoverflow) {
sysctl_panic_on_stackoverflow = -1; /* disable further checks */
panic("low stack detected by irq handler - check messages\n");
}
#endif
}
#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
static void execute_on_irq_stack(void *func, unsigned long param1)
{
union irq_stack_union *union_ptr;
unsigned long irq_stack;
volatile unsigned int *irq_stack_in_use;
union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
irq_stack = (unsigned long) &union_ptr->stack;
irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
FRAME_ALIGN); /* align for stack frame usage */
	/* We may be called recursively. If we are already using the irq stack,
* just continue to use it. Use spinlocks to serialize
* the irq stack usage.
*/
irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
if (!__ldcw(irq_stack_in_use)) {
void (*direct_call)(unsigned long p1) = func;
/* We are using the IRQ stack already.
* Do direct call on current stack. */
direct_call(param1);
return;
}
/* This is where we switch to the IRQ stack. */
call_on_stack(param1, func, irq_stack);
/* free up irq stack usage. */
*irq_stack_in_use = 1;
}
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
#endif
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
asmlinkage void do_cpu_irq_mask(struct pt_regs *regs)
{
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
struct irq_data *irq_data;
#ifdef CONFIG_SMP
cpumask_t dest;
#endif
old_regs = set_irq_regs(regs);
local_irq_disable();
irq_enter();
eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
if (!eirr_val)
goto set_out;
irq = eirr_to_irq(eirr_val);
irq_data = irq_get_irq_data(irq);
/* Filter out spurious interrupts, mostly from serial port at bootup */
if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
goto set_out;
#ifdef CONFIG_SMP
cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
if (irqd_is_per_cpu(irq_data) &&
!cpumask_test_cpu(smp_processor_id(), &dest)) {
int cpu = cpumask_first(&dest);
printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
per_cpu(cpu_data, cpu).hpa);
goto set_out;
}
#endif
stack_overflow_check(regs);
#ifdef CONFIG_IRQSTACKS
execute_on_irq_stack(&generic_handle_irq, irq);
#else
generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
set_irq_regs(old_regs);
return;
set_out:
set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
goto out;
}
static void claim_cpu_irqs(void)
{
unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
int i;
for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
irq_set_chip_and_handler(i, &cpu_interrupt_type,
handle_percpu_irq);
}
irq_set_handler(TIMER_IRQ, handle_percpu_irq);
if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
pr_err("Failed to register timer interrupt\n");
#ifdef CONFIG_SMP
irq_set_handler(IPI_IRQ, handle_percpu_irq);
if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
pr_err("Failed to register IPI interrupt\n");
#endif
}
void init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
if (!cpu_eiem) {
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
}
#else
claim_cpu_irqs();
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
}
| linux-master | arch/parisc/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/parisc/kernel/firmware.c - safe PDC access routines
*
* PDC == Processor Dependent Code
*
* See PDC documentation at
* https://parisc.wiki.kernel.org/index.php/Technical_Documentation
* for documentation describing the entry points and calling
* conventions defined below.
*
* Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, [email protected])
* Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy)
* Copyright 2003 Grant Grundler <grundler parisc-linux org>
* Copyright 2003,2004 Ryan Bradetich <[email protected]>
* Copyright 2004,2006 Thibaut VARENE <[email protected]>
*/
/* I think it would be in everyone's best interest to follow these
 * guidelines when writing PDC wrappers:
*
* - the name of the pdc wrapper should match one of the macros
* used for the first two arguments
* - don't use caps for random parts of the name
* - use the static PDC result buffers and "copyout" to structs
* supplied by the caller to encapsulate alignment restrictions
* - hold pdc_lock while in PDC or using static result buffers
* - use __pa() to convert virtual (kernel) pointers to physical
* ones.
* - the name of the struct used for pdc return values should equal
* one of the macros used for the first two arguments to the
* corresponding PDC call
* - keep the order of arguments
* - don't be smart (setting trailing NUL bytes for strings, return
* something useful even if the call failed) unless you are sure
* it's not going to affect functionality or performance
*
* Example:
* int pdc_cache_info(struct pdc_cache_info *cache_info )
* {
* int retval;
*
* spin_lock_irq(&pdc_lock);
* retval = mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
* convert_to_wide(pdc_result);
* memcpy(cache_info, pdc_result, sizeof(*cache_info));
* spin_unlock_irq(&pdc_lock);
*
* return retval;
* }
* prumpf 991016
*/
#include <linux/stdarg.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h> /* for boot_cpu_data */
#if defined(BOOTLOADER)
# undef spin_lock_irqsave
# define spin_lock_irqsave(a, b) { b = 1; }
# undef spin_unlock_irqrestore
# define spin_unlock_irqrestore(a, b)
#else
static DEFINE_SPINLOCK(pdc_lock);
#endif
static unsigned long pdc_result[NUM_PDC_RESULT] __aligned(8);
static unsigned long pdc_result2[NUM_PDC_RESULT] __aligned(8);
#ifdef CONFIG_64BIT
#define WIDE_FIRMWARE 0x1
#define NARROW_FIRMWARE 0x2
/* Firmware needs to be initially set to narrow to determine the
* actual firmware width. */
int parisc_narrow_firmware __ro_after_init = 2;
#endif
/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
* and MEM_PDC calls are always the same width as the OS.
* Some PAT boxes may have 64-bit IODC I/O.
*
* Ryan Bradetich added the now obsolete CONFIG_PDC_NARROW to allow
* 64-bit kernels to run on systems with 32-bit MEM_PDC calls.
* This allowed wide kernels to run on Cxxx boxes.
* We now detect 32-bit-only PDC and dynamically switch to 32-bit mode
* when running a 64-bit kernel on such boxes (e.g. C200 or C360).
*/
#ifdef CONFIG_64BIT
long real64_call(unsigned long function, ...);
#endif
long real32_call(unsigned long function, ...);
#ifdef CONFIG_64BIT
# define MEM_PDC (unsigned long)(PAGE0->mem_pdc_hi) << 32 | PAGE0->mem_pdc
# define mem_pdc_call(args...) unlikely(parisc_narrow_firmware) ? real32_call(MEM_PDC, args) : real64_call(MEM_PDC, args)
#else
# define MEM_PDC (unsigned long)PAGE0->mem_pdc
# define mem_pdc_call(args...) real32_call(MEM_PDC, args)
#endif
/**
* f_extend - Convert PDC addresses to kernel addresses.
* @address: Address returned from PDC.
*
* This function is used to convert PDC addresses into kernel addresses
* when the PDC address size and kernel address size are different.
*/
static unsigned long f_extend(unsigned long address)
{
#ifdef CONFIG_64BIT
if(unlikely(parisc_narrow_firmware)) {
if((address & 0xff000000) == 0xf0000000)
return 0xf0f0f0f000000000UL | (u32)address;
if((address & 0xf0000000) == 0xf0000000)
return 0xffffffff00000000UL | (u32)address;
}
#endif
return address;
}
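/*
 * Example with narrow firmware on a wide kernel (values illustrative):
 * f_extend(0xf0101000) == 0xf0f0f0f0f0101000UL and
 * f_extend(0xf4000000) == 0xfffffffff4000000UL.
 */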
/**
* convert_to_wide - Convert the return buffer addresses into kernel addresses.
* @addr: The return buffer from PDC.
*
* This function is used to convert the return buffer addresses retrieved from PDC
* into kernel addresses when the PDC address size and kernel address size are
* different.
*/
static void convert_to_wide(unsigned long *addr)
{
#ifdef CONFIG_64BIT
int i;
unsigned int *p = (unsigned int *)addr;
if (unlikely(parisc_narrow_firmware)) {
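		/*
		 * Widen in place: narrow PDC packed 32-bit results into the
		 * low half of this buffer, so copy backwards to avoid
		 * clobbering entries before they have been read.
		 */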
for (i = (NUM_PDC_RESULT-1); i >= 0; --i)
addr[i] = p[i];
}
#endif
}
#ifdef CONFIG_64BIT
void set_firmware_width_unlocked(void)
{
int ret;
ret = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES,
__pa(pdc_result), 0);
if (ret < 0)
return;
convert_to_wide(pdc_result);
if (pdc_result[0] != NARROW_FIRMWARE)
parisc_narrow_firmware = 0;
}
/**
* set_firmware_width - Determine if the firmware is wide or narrow.
*
* This function must be called before any pdc_* function that uses the
* convert_to_wide function.
*/
void set_firmware_width(void)
{
unsigned long flags;
/* already initialized? */
if (parisc_narrow_firmware != 2)
return;
spin_lock_irqsave(&pdc_lock, flags);
set_firmware_width_unlocked();
spin_unlock_irqrestore(&pdc_lock, flags);
}
#else
void set_firmware_width_unlocked(void)
{
return;
}
void set_firmware_width(void)
{
return;
}
#endif /*CONFIG_64BIT*/
#if !defined(BOOTLOADER)
/**
* pdc_emergency_unlock - Unlock the linux pdc lock
*
* This call unlocks the linux pdc lock in case we need some PDC functions
* (like pdc_add_valid) during kernel stack dump.
*/
void pdc_emergency_unlock(void)
{
/* Spinlock DEBUG code freaks out if we unconditionally unlock */
if (spin_is_locked(&pdc_lock))
spin_unlock(&pdc_lock);
}
/**
* pdc_add_valid - Verify address can be accessed without causing a HPMC.
* @address: Address to be verified.
*
* This PDC call attempts to read from the specified address and verifies
* if the address is valid.
*
* The return value is PDC_OK (0) in case accessing this address is valid.
*/
int pdc_add_valid(unsigned long address)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_ADD_VALID, PDC_ADD_VALID_VERIFY, address);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_add_valid);
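/*
 * Usage sketch (illustrative): probe an HPA before touching it so a
 * missing device cannot raise an HPMC. gsc_readl() is assumed to be
 * the eventual accessor here.
 */
#if 0
static int example_probe_hpa(unsigned long hpa)
{
	if (pdc_add_valid(hpa) != PDC_OK)
		return -ENODEV;
	return gsc_readl(hpa);	/* now known to be safe to read */
}
#endif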
/**
* pdc_instr - Get instruction that invokes PDCE_CHECK in HPMC handler.
* @instr: Pointer to variable which will get instruction opcode.
*
* The return value is PDC_OK (0) in case call succeeded.
*/
int __init pdc_instr(unsigned int *instr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_INSTR, 0UL, __pa(pdc_result));
convert_to_wide(pdc_result);
*instr = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_chassis_info - Return chassis information.
* @chassis_info: The memory buffer address.
 * @led_info: The LED information buffer address.
 * @len: The size of the led_info buffer.
*
* An HVERSION dependent call for returning the chassis information.
*/
int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
memcpy(&pdc_result, chassis_info, sizeof(*chassis_info));
memcpy(&pdc_result2, led_info, len);
retval = mem_pdc_call(PDC_CHASSIS, PDC_RETURN_CHASSIS_INFO,
__pa(pdc_result), __pa(pdc_result2), len);
memcpy(chassis_info, pdc_result, sizeof(*chassis_info));
memcpy(led_info, pdc_result2, len);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
* @state: state of the machine
* @data: value for that state
*
* Must be correctly formatted or expect system crash
*/
#ifdef CONFIG_64BIT
int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
{
int retval = 0;
unsigned long flags;
if (!is_pdc_pat())
return -1;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
#endif
/**
* pdc_chassis_disp - Updates chassis code
* @disp: value to show on display
*/
int pdc_chassis_disp(unsigned long disp)
{
int retval = 0;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* __pdc_cpu_rendezvous - Stop currently executing CPU and do not return.
*/
int __pdc_cpu_rendezvous(void)
{
if (is_pdc_pat())
return mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_RENDEZVOUS);
else
return mem_pdc_call(PDC_PROC, 1, 0);
}
/**
* pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state
*/
void pdc_cpu_rendezvous_lock(void) __acquires(&pdc_lock)
{
spin_lock(&pdc_lock);
}
/**
* pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state
*/
void pdc_cpu_rendezvous_unlock(void) __releases(&pdc_lock)
{
spin_unlock(&pdc_lock);
}
/**
* pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU
* @pdc_entry: pointer to where the PDC entry point should be stored
*/
int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
{
int retval = 0;
unsigned long flags;
if (!IS_ENABLED(CONFIG_SMP) || !is_pdc_pat()) {
*pdc_entry = MEM_PDC;
return 0;
}
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_PDC_ENTRYPOINT,
__pa(pdc_result));
*pdc_entry = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_chassis_warn - Fetches chassis warnings
* @warn: The warning value to be shown
*/
int pdc_chassis_warn(unsigned long *warn)
{
int retval = 0;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_WARN, __pa(pdc_result));
*warn = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
{
int ret;
ret = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
convert_to_wide(pdc_result);
pdc_coproc_info->ccr_functional = pdc_result[0];
pdc_coproc_info->ccr_present = pdc_result[1];
pdc_coproc_info->revision = pdc_result[17];
pdc_coproc_info->model = pdc_result[18];
return ret;
}
/**
* pdc_coproc_cfg - To identify coprocessors attached to the processor.
* @pdc_coproc_info: Return buffer address.
*
* This PDC call returns the presence and status of all the coprocessors
* attached to the processor.
*/
int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
ret = pdc_coproc_cfg_unlocked(pdc_coproc_info);
spin_unlock_irqrestore(&pdc_lock, flags);
return ret;
}
/**
* pdc_iodc_read - Read data from the modules IODC.
* @actcnt: The actual number of bytes.
* @hpa: The HPA of the module for the iodc read.
* @index: The iodc entry point.
* @iodc_data: A buffer memory for the iodc options.
* @iodc_data_size: Size of the memory buffer.
*
* This PDC call reads from the IODC of the module specified by the hpa
* argument.
*/
int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
void *iodc_data, unsigned int iodc_data_size)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa,
index, __pa(pdc_result2), iodc_data_size);
convert_to_wide(pdc_result);
*actcnt = pdc_result[0];
memcpy(iodc_data, pdc_result2, iodc_data_size);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_iodc_read);
/**
* pdc_system_map_find_mods - Locate unarchitected modules.
* @pdc_mod_info: Return buffer address.
* @mod_path: pointer to dev path structure.
* @mod_index: fixed address module index.
*
* To locate and identify modules which reside at fixed I/O addresses, which
* do not self-identify via architected bus walks.
*/
int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
struct pdc_module_path *mod_path, long mod_index)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),
__pa(pdc_result2), mod_index);
convert_to_wide(pdc_result);
memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info));
memcpy(mod_path, pdc_result2, sizeof(*mod_path));
spin_unlock_irqrestore(&pdc_lock, flags);
pdc_mod_info->mod_addr = f_extend(pdc_mod_info->mod_addr);
return retval;
}
/**
* pdc_system_map_find_addrs - Retrieve additional address ranges.
* @pdc_addr_info: Return buffer address.
* @mod_index: Fixed address module index.
* @addr_index: Address range index.
*
* Retrieve additional information about subsequent address ranges for modules
* with multiple address ranges.
*/
int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
long mod_index, long addr_index)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_ADDRESS, __pa(pdc_result),
mod_index, addr_index);
convert_to_wide(pdc_result);
memcpy(pdc_addr_info, pdc_result, sizeof(*pdc_addr_info));
spin_unlock_irqrestore(&pdc_lock, flags);
pdc_addr_info->mod_addr = f_extend(pdc_addr_info->mod_addr);
return retval;
}
/**
* pdc_model_info - Return model information about the processor.
* @model: The return buffer.
*
* Returns the version numbers, identifiers, and capabilities from the processor module.
*/
int pdc_model_info(struct pdc_model *model)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_INFO, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
memcpy(model, pdc_result, sizeof(*model));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_model_sysmodel - Get the system model name.
* @os_id: The operating system ID asked for (an OS_ID_* value)
* @name: A char array of at least 81 characters.
*
* Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L).
* Using OS_ID_HPUX will return the equivalent of the 'modelname' command
* on HP/UX.
*/
int pdc_model_sysmodel(unsigned int os_id, char *name)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result),
os_id, __pa(name));
convert_to_wide(pdc_result);
if (retval == PDC_OK) {
name[pdc_result[0]] = '\0'; /* add trailing '\0' */
} else {
name[0] = 0;
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
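/*
 * Usage sketch: fetch the HP-UX style model string. The 81-byte buffer
 * follows the kernel-doc above; OS_ID_HPUX comes from <asm/pdc.h>.
 */
#if 0
static void example_print_model(void)
{
	char model[81];
	if (pdc_model_sysmodel(OS_ID_HPUX, model) == PDC_OK)
		pr_info("model: %s\n", model);
}
#endif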
/**
* pdc_model_versions - Identify the version number of each processor.
* @versions: The return buffer.
* @id: The id of the processor to check.
*
* Returns the version number for each processor component.
*
* This comment was here before, but I do not know what it means :( -RB
* id: 0 = cpu revision, 1 = boot-rom-version
*/
int pdc_model_versions(unsigned long *versions, int id)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_VERSIONS, __pa(pdc_result), id);
convert_to_wide(pdc_result);
*versions = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_model_cpuid - Returns the CPU_ID.
* @cpu_id: The return buffer.
*
* Returns the CPU_ID value which uniquely identifies the cpu portion of
* the processor module.
*/
int pdc_model_cpuid(unsigned long *cpu_id)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CPU_ID, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
*cpu_id = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_model_capabilities - Returns the platform capabilities.
* @capabilities: The return buffer.
*
* Returns information about platform support for 32- and/or 64-bit
* OSes, IO-PDIR coherency, and virtual aliasing.
*/
int pdc_model_capabilities(unsigned long *capabilities)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
if (retval == PDC_OK) {
*capabilities = pdc_result[0];
} else {
*capabilities = PDC_MODEL_OS32;
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_model_platform_info - Returns machine product and serial number.
* @orig_prod_num: Return buffer for original product number.
* @current_prod_num: Return buffer for current product number.
* @serial_no: Return buffer for serial number.
*
* Returns strings containing the original and current product numbers and the
* serial number of the system.
*/
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num,
char *serial_no)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_GET_PLATFORM_INFO,
__pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no));
convert_to_wide(pdc_result);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_cache_info - Return cache and TLB information.
* @cache_info: The return buffer.
*
* Returns information about the processor's cache and TLB.
*/
int pdc_cache_info(struct pdc_cache_info *cache_info)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_INFO, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
memcpy(cache_info, pdc_result, sizeof(*cache_info));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_spaceid_bits - Return whether Space ID hashing is turned on.
* @space_bits: Should be 0, if not, bad mojo!
*
* Returns information about Space ID hashing.
*/
int pdc_spaceid_bits(unsigned long *space_bits)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
pdc_result[0] = 0;
retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_RET_SPID, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
*space_bits = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
*
* Returns information about the hardware Block TLB.
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
memcpy(btlb, pdc_result, sizeof(*btlb));
spin_unlock_irqrestore(&pdc_lock, flags);
if(retval < 0) {
btlb->max_size = 0;
}
return retval;
}
int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
unsigned long entry_info, unsigned long slot)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
(unsigned long) vpage, physpage, len, entry_info, slot);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
int pdc_btlb_purge_all(void)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_mem_map_hpa - Find fixed module information.
* @address: The return buffer
* @mod_path: pointer to dev path structure.
*
* This call was developed for S700 workstations to allow the kernel to find
* the I/O devices (Core I/O). In the future (Kittyhawk and beyond) this
* call will be replaced (on workstations) by the architected PDC_SYSTEM_MAP
* call.
*
* This call is supported by all existing S700 workstations (up to Gecko).
*/
int pdc_mem_map_hpa(struct pdc_memory_map *address,
struct pdc_module_path *mod_path)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
__pa(pdc_result2));
memcpy(address, pdc_result, sizeof(*address));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_lan_station_id - Get the LAN address.
* @lan_addr: The return buffer.
* @hpa: The network device HPA.
*
* Get the LAN station address when it is not directly available from the LAN hardware.
*/
int pdc_lan_station_id(char *lan_addr, unsigned long hpa)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_LAN_STATION_ID, PDC_LAN_STATION_ID_READ,
__pa(pdc_result), hpa);
if (retval < 0) {
/* FIXME: else read MAC from NVRAM */
memset(lan_addr, 0, PDC_LAN_STATION_ID_SIZE);
} else {
memcpy(lan_addr, pdc_result, PDC_LAN_STATION_ID_SIZE);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_lan_station_id);
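/*
 * Usage sketch (illustrative): read the station address of a LAN
 * device at a given HPA. PDC_LAN_STATION_ID_SIZE is 6 bytes.
 */
#if 0
static void example_get_mac(unsigned long hpa)
{
	char mac[PDC_LAN_STATION_ID_SIZE];
	if (pdc_lan_station_id(mac, hpa) == PDC_OK)
		pr_info("MAC: %pM\n", mac);
}
#endif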
/**
* pdc_stable_read - Read data from Stable Storage.
* @staddr: Stable Storage address to access.
* @memaddr: The memory address where Stable Storage data shall be copied.
* @count: number of bytes to transfer. count is multiple of 4.
*
* This PDC call reads from the Stable Storage address supplied in staddr
* and copies count bytes to the memory address memaddr.
* The call will fail if staddr+count > PDC_STABLE size.
*/
int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_READ, staddr,
__pa(pdc_result), count);
convert_to_wide(pdc_result);
memcpy(memaddr, pdc_result, count);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_stable_read);
/**
* pdc_stable_write - Write data to Stable Storage.
* @staddr: Stable Storage address to access.
* @memaddr: The memory address where Stable Storage data shall be read from.
* @count: number of bytes to transfer. count is multiple of 4.
*
* This PDC call reads count bytes from the supplied memaddr address,
* and copies count bytes to the Stable Storage address staddr.
* The call will fail if staddr+count > PDC_STABLE size.
*/
int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result, memaddr, count);
convert_to_wide(pdc_result);
retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_WRITE, staddr,
__pa(pdc_result), count);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_stable_write);
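/*
 * Usage sketch: read-modify-write one 32-bit word of Stable Storage.
 * The offset is illustrative; count must be a multiple of 4, per the
 * kernel-doc above.
 */
#if 0
static int example_set_stable_bits(unsigned long offset, u32 bits)
{
	u32 word;
	int ret = pdc_stable_read(offset, &word, sizeof(word));
	if (ret != PDC_OK)
		return ret;
	word |= bits;
	return pdc_stable_write(offset, &word, sizeof(word));
}
#endif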
/**
* pdc_stable_get_size - Get Stable Storage size in bytes.
* @size: pointer where the size will be stored.
*
* This PDC call returns the number of bytes in the processor's Stable
* Storage, which is the number of contiguous bytes implemented in Stable
 * Storage starting from staddr=0. size is an unsigned 64-bit integer
* which is a multiple of four.
*/
int pdc_stable_get_size(unsigned long *size)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_RETURN_SIZE, __pa(pdc_result));
*size = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_stable_get_size);
/**
* pdc_stable_verify_contents - Checks that Stable Storage contents are valid.
*
* This PDC call is meant to be used to check the integrity of the current
* contents of Stable Storage.
*/
int pdc_stable_verify_contents(void)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_VERIFY_CONTENTS);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_stable_verify_contents);
/**
* pdc_stable_initialize - Sets Stable Storage contents to zero and initialize
* the validity indicator.
*
* This PDC call will erase all contents of Stable Storage. Use with care!
*/
int pdc_stable_initialize(void)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_INITIALIZE);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_stable_initialize);
/**
* pdc_get_initiator - Get the SCSI Interface Card params (SCSI ID, SDTR, SE or LVD)
* @hwpath: fully bc.mod style path to the device.
* @initiator: the array to return the result into
*
* Get the SCSI operational parameters from PDC.
* Needed since HPUX never used BIOS or symbios card NVRAM.
* Most ncr/sym cards won't have an entry and just use whatever
* capabilities of the card are (eg Ultra, LVD). But there are
* several cases where it's useful:
* o set SCSI id for Multi-initiator clusters,
* o cable too long (ie SE scsi 10Mhz won't support 6m length),
* o bus width exported is less than what the interface chip supports.
*/
int pdc_get_initiator(struct hardware_path *hwpath, struct pdc_initiator *initiator)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
/* BCJ-XXXX series boxes. E.G. "9000/785/C3000" */
#define IS_SPROCKETS() (strlen(boot_cpu_data.pdc.sys_model_name) == 14 && \
strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 8) == 0)
retval = mem_pdc_call(PDC_INITIATOR, PDC_GET_INITIATOR,
__pa(pdc_result), __pa(hwpath));
if (retval < PDC_OK)
goto out;
if (pdc_result[0] < 16) {
initiator->host_id = pdc_result[0];
} else {
initiator->host_id = -1;
}
/*
* Sprockets and Piranha return 20 or 40 (MT/s). Prelude returns
* 1, 2, 5 or 10 for 5, 10, 20 or 40 MT/s, respectively
*/
switch (pdc_result[1]) {
case 1: initiator->factor = 50; break;
case 2: initiator->factor = 25; break;
case 5: initiator->factor = 12; break;
case 25: initiator->factor = 10; break;
case 20: initiator->factor = 12; break;
case 40: initiator->factor = 10; break;
default: initiator->factor = -1; break;
}
if (IS_SPROCKETS()) {
initiator->width = pdc_result[4];
initiator->mode = pdc_result[5];
} else {
initiator->width = -1;
initiator->mode = -1;
}
out:
spin_unlock_irqrestore(&pdc_lock, flags);
return (retval >= PDC_OK);
}
EXPORT_SYMBOL(pdc_get_initiator);
/**
* pdc_pci_irt_size - Get the number of entries in the interrupt routing table.
* @num_entries: The return value.
* @hpa: The HPA for the device.
*
* This PDC function returns the number of entries in the specified cell's
* interrupt table.
* Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
*/
int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL_SIZE,
__pa(pdc_result), hpa);
convert_to_wide(pdc_result);
*num_entries = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pci_irt - Get the PCI interrupt routing table.
* @num_entries: The number of entries in the table.
* @hpa: The Hard Physical Address of the device.
 * @tbl: The memory buffer to hold the interrupt routing table.
*
* Get the PCI interrupt routing table for the device at the given HPA.
* Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
*/
int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
{
int retval;
unsigned long flags;
BUG_ON((unsigned long)tbl & 0x7);
spin_lock_irqsave(&pdc_lock, flags);
pdc_result[0] = num_entries;
retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL,
__pa(pdc_result), hpa, __pa(tbl));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
#if 0 /* UNTESTED CODE - left here in case someone needs it */
/**
* pdc_pci_config_read - read PCI config space.
* @hpa: Token from PDC to indicate which PCI device
* @cfg_addr: Configuration space address to read from
*
* Read PCI Configuration space *before* linux PCI subsystem is running.
*/
unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
pdc_result[0] = 0;
pdc_result[1] = 0;
retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_READ_CONFIG,
__pa(pdc_result), hpa, cfg_addr&~3UL, 4UL);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval ? ~0 : (unsigned int) pdc_result[0];
}
/**
 * pdc_pci_config_write - write PCI config space.
* @hpa: Token from PDC to indicate which PCI device
* @cfg_addr: Configuration space address to write
* @val: Value we want in the 32-bit register
*
* Write PCI Configuration space *before* linux PCI subsystem is running.
*/
void pdc_pci_config_write(void *hpa, unsigned long cfg_addr, unsigned int val)
{
	unsigned long flags;
	spin_lock_irqsave(&pdc_lock, flags);
	pdc_result[0] = 0;
	mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_WRITE_CONFIG,
				__pa(pdc_result), hpa,
				cfg_addr&~3UL, 4UL, (unsigned long) val);
	spin_unlock_irqrestore(&pdc_lock, flags);
}
#endif /* UNTESTED CODE */
/**
* pdc_tod_read - Read the Time-Of-Day clock.
* @tod: The return buffer:
*
* Read the Time-Of-Day clock
*/
int pdc_tod_read(struct pdc_tod *tod)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
memcpy(tod, pdc_result, sizeof(*tod));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_tod_read);
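/*
 * Usage sketch (field names per struct pdc_tod in <asm/pdc.h>):
 */
#if 0
static void example_read_tod(void)
{
	struct pdc_tod tod;
	if (pdc_tod_read(&tod) == PDC_OK)
		pr_info("TOD: %lu.%06lu\n", tod.tod_sec, tod.tod_usec);
}
#endif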
int pdc_mem_pdt_info(struct pdc_mem_retinfo *rinfo)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MEM, PDC_MEM_MEMINFO, __pa(pdc_result), 0);
convert_to_wide(pdc_result);
memcpy(rinfo, pdc_result, sizeof(*rinfo));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
int pdc_mem_pdt_read_entries(struct pdc_mem_read_pdt *pret,
unsigned long *pdt_entries_ptr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MEM, PDC_MEM_READ_PDT, __pa(pdc_result),
__pa(pdt_entries_ptr));
if (retval == PDC_OK) {
convert_to_wide(pdc_result);
memcpy(pret, pdc_result, sizeof(*pret));
}
spin_unlock_irqrestore(&pdc_lock, flags);
#ifdef CONFIG_64BIT
/*
* 64-bit kernels should not call this PDT function in narrow mode.
* The pdt_entries_ptr array above will now contain 32-bit values
*/
if (WARN_ON_ONCE((retval == PDC_OK) && parisc_narrow_firmware))
return PDC_ERROR;
#endif
return retval;
}
/**
* pdc_pim_toc11 - Fetch TOC PIM 1.1 data from firmware.
* @ret: pointer to return buffer
*/
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PIM, PDC_PIM_TOC, __pa(pdc_result),
__pa(ret), sizeof(*ret));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pim_toc20 - Fetch TOC PIM 2.0 data from firmware.
* @ret: pointer to return buffer
*/
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PIM, PDC_PIM_TOC, __pa(pdc_result),
__pa(ret), sizeof(*ret));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_tod_set - Set the Time-Of-Day clock.
* @sec: The number of seconds since epoch.
* @usec: The number of micro seconds.
*
* Set the Time-Of-Day clock.
*/
int pdc_tod_set(unsigned long sec, unsigned long usec)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_TOD, PDC_TOD_WRITE, sec, usec);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
EXPORT_SYMBOL(pdc_tod_set);
#ifdef CONFIG_64BIT
int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
struct pdc_memory_table *tbl, unsigned long entries)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_MEM, PDC_MEM_TABLE, __pa(pdc_result), __pa(pdc_result2), entries);
convert_to_wide(pdc_result);
memcpy(r_addr, pdc_result, sizeof(*r_addr));
memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
#endif /* CONFIG_64BIT */
/* FIXME: Is this pdc used? I could not find type reference to ftc_bitmap
 * so I guessed at unsigned long. Someone who knows what this does can fix
* it later. :)
*/
int pdc_do_firm_test_reset(unsigned long ftc_bitmap)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_FIRM_TEST_RESET,
PDC_FIRM_TEST_MAGIC, ftc_bitmap);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/*
* pdc_do_reset - Reset the system.
*
* Reset the system.
*/
int pdc_do_reset(void)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_RESET);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/*
* pdc_soft_power_info - Query the soft power switch register.
* @power_reg: address of soft power register
*
* Returns the absolute address of the soft power switch register.
*/
int __init pdc_soft_power_info(unsigned long *power_reg)
{
int retval;
unsigned long flags;
*power_reg = (unsigned long) (-1);
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_INFO, __pa(pdc_result), 0);
if (retval == PDC_OK) {
convert_to_wide(pdc_result);
*power_reg = f_extend(pdc_result[0]);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/*
* pdc_soft_power_button{_panic} - Control the soft power button behaviour
* @sw_control: 0 for hardware control, 1 for software control
*
* This PDC function places the soft power button under software or
* hardware control.
* Under software control the OS decides when to allow the system to
* be shut down. Under hardware control pressing the power button
* powers off the system immediately.
*
* The _panic version relies on spin_trylock to prevent deadlock
* on the panic path.
*/
int pdc_soft_power_button(int sw_control)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
int pdc_soft_power_button_panic(int sw_control)
{
int retval;
unsigned long flags;
if (!spin_trylock_irqsave(&pdc_lock, flags)) {
pr_emerg("Couldn't enable soft power button\n");
return -EBUSY; /* ignored by the panic notifier */
}
retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
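
/*
 * Illustrative sketch, not part of the original file: how a power-button
 * driver might hand the button over to software control at probe time.
 * The function name and the error mapping below are assumptions made for
 * illustration only.
 */
#if 0
static int example_powerbutton_probe(void)
{
	/* let the OS decide when a button press shuts the system down */
	if (pdc_soft_power_button(1) != PDC_OK)
		return -ENODEV;	/* hypothetical mapping of the PDC status */

	/* ... register the input handler here ... */
	return 0;
}
#endif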
/*
* pdc_io_reset - Hack to avoid overlapping range registers of Bridge devices.
* Primarily a problem on T600 (which parisc-linux doesn't support), but
* who knows what other platform firmware might do with this OS "hook".
*/
void pdc_io_reset(void)
{
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
mem_pdc_call(PDC_IO, PDC_IO_RESET, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
}
/*
* pdc_io_reset_devices - Hack to stop the USB controller
*
* If PDC used the USB controller, it may still be running, and its
* in-flight DMA will crash the machine during IOMMU setup. This PDC
* call stops the USB controller.
* Normally called after pdc_io_reset().
*/
void pdc_io_reset_devices(void)
{
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
mem_pdc_call(PDC_IO, PDC_IO_RESET_DEVICES, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
}
#endif /* defined(BOOTLOADER) */
/* locked by pdc_lock */
static char iodc_dbuf[4096] __page_aligned_bss;
/**
* pdc_iodc_print - Console print using IODC.
* @str: the string to output.
* @count: length of str
*
* Note that only these special chars are architected for console IODC io:
* BEL, BS, CR, and LF. Others are passed through.
* Since the HP console requires CR+LF to perform a 'newline', we translate
* "\n" to "\r\n".
*/
int pdc_iodc_print(const unsigned char *str, unsigned count)
{
unsigned int i, found = 0;
unsigned long flags;
count = min_t(unsigned int, count, sizeof(iodc_dbuf));
spin_lock_irqsave(&pdc_lock, flags);
for (i = 0; i < count;) {
switch(str[i]) {
case '\n':
iodc_dbuf[i+0] = '\r';
iodc_dbuf[i+1] = '\n';
i += 2;
found = 1;
goto print;
default:
iodc_dbuf[i] = str[i];
i++;
break;
}
}
print:
real32_call(PAGE0->mem_cons.iodc_io,
(unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
__pa(pdc_result), 0, __pa(iodc_dbuf), i, 0);
spin_unlock_irqrestore(&pdc_lock, flags);
return i - found;
}
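
/*
 * Illustrative sketch, not part of the original file: pdc_iodc_print()
 * stops after translating the first '\n' and returns the number of
 * *source* characters it consumed, so a console caller is expected to
 * loop until the whole buffer has been written. example_iodc_puts() is
 * an assumed name for illustration only.
 */
#if 0
static void example_iodc_puts(const char *s, unsigned int len)
{
	while (len) {
		int n = pdc_iodc_print((const unsigned char *)s, len);

		s += n;
		len -= n;
	}
}
#endif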
#if !defined(BOOTLOADER)
/**
* pdc_iodc_getc - Read a character (non-blocking) from the PDC console.
*
* Read a character (non-blocking) from the PDC console; returns -1 if
* no key press is pending.
*/
int pdc_iodc_getc(void)
{
int ch;
int status;
unsigned long flags;
/* Bail if no console input device. */
if (!PAGE0->mem_kbd.iodc_io)
return 0;
/* wait for a keyboard (rs232)-input */
spin_lock_irqsave(&pdc_lock, flags);
real32_call(PAGE0->mem_kbd.iodc_io,
(unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN,
PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers),
__pa(pdc_result), 0, __pa(iodc_dbuf), 1, 0);
ch = *iodc_dbuf;
/* like convert_to_wide() but for first return value only: */
status = *(int *)&pdc_result;
spin_unlock_irqrestore(&pdc_lock, flags);
if (status == 0)
return -1;
return ch;
}
int pdc_sti_call(unsigned long func, unsigned long flags,
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg, int do_call64)
{
int retval = 0;
unsigned long irqflags;
spin_lock_irqsave(&pdc_lock, irqflags);
if (IS_ENABLED(CONFIG_64BIT) && do_call64) {
#ifdef CONFIG_64BIT
retval = real64_call(func, flags, inptr, outputr, glob_cfg);
#else
WARN_ON(1);
#endif
} else {
retval = real32_call(func, flags, inptr, outputr, glob_cfg);
}
spin_unlock_irqrestore(&pdc_lock, irqflags);
return retval;
}
EXPORT_SYMBOL(pdc_sti_call);
#ifdef CONFIG_64BIT
/**
* pdc_pat_cell_get_number - Returns the cell number.
* @cell_info: The return buffer.
*
* This PDC call returns the cell number of the cell from which the call
* is made.
*/
int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_NUMBER, __pa(pdc_result));
memcpy(cell_info, pdc_result, sizeof(*cell_info));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_cell_module - Retrieve the cell's module information.
* @actcnt: The number of bytes written to mem_addr.
* @ploc: The physical location.
* @mod: The module index.
* @view_type: The view of the address type.
* @mem_addr: The return buffer.
*
* This PDC call returns information about each module attached to the cell
* at the specified location.
*/
int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
unsigned long view_type, void *mem_addr)
{
int retval;
unsigned long flags;
static struct pdc_pat_cell_mod_maddr_block result __attribute__ ((aligned (8)));
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_MODULE, __pa(pdc_result),
ploc, mod, view_type, __pa(&result));
if(!retval) {
*actcnt = pdc_result[0];
memcpy(mem_addr, &result, *actcnt);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_cell_info - Retrieve the cell's information.
* @info: The pointer to a struct pdc_pat_cell_info_rtn_block.
* @actcnt: The number of bytes which should be written to info.
* @offset: offset of the structure.
* @cell_number: The cell number which should be asked, or -1 for current cell.
*
* This PDC call returns information about the given cell (or all cells).
*/
int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
unsigned long *actcnt, unsigned long offset,
unsigned long cell_number)
{
int retval;
unsigned long flags;
struct pdc_pat_cell_info_rtn_block result;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_INFO,
__pa(pdc_result), __pa(&result), *actcnt,
offset, cell_number);
if (!retval) {
*actcnt = pdc_result[0];
memcpy(info, &result, *actcnt);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_cpu_get_number - Retrieve the cpu number.
* @cpu_info: The return buffer.
* @hpa: The Hard Physical Address of the CPU.
*
* Retrieve the cpu number for the cpu at the specified HPA.
*/
int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, unsigned long hpa)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_NUMBER,
__pa(&pdc_result), hpa);
memcpy(cpu_info, pdc_result, sizeof(*cpu_info));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_get_irt_size - Retrieve the number of entries in the cell's interrupt table.
* @num_entries: The return value.
* @cell_num: The target cell.
*
* This PDC function returns the number of entries in the specified cell's
* interrupt table.
*/
int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE,
__pa(pdc_result), cell_num);
*num_entries = pdc_result[0];
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_get_irt - Retrieve the cell's interrupt table.
* @r_addr: The return buffer.
* @cell_num: The target cell.
*
* This PDC function returns the actual interrupt table for the specified cell.
*/
int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE,
__pa(r_addr), cell_num);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
* @actual_len: The return buffer.
* @mem_addr: Pointer to the memory buffer.
* @count: The number of bytes to read from the buffer.
* @offset: The offset with respect to the beginning of the buffer.
*
*/
int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
unsigned long count, unsigned long offset)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_ADDR_MAP, __pa(pdc_result),
__pa(pdc_result2), count, offset);
*actual_len = pdc_result[0];
memcpy(mem_addr, pdc_result2, *actual_len);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_pd_get_pdc_revisions - Retrieve PDC interface revisions.
* @legacy_rev: The legacy revision.
* @pat_rev: The PAT revision.
* @pdc_cap: The PDC capabilities.
*
*/
int pdc_pat_pd_get_pdc_revisions(unsigned long *legacy_rev,
unsigned long *pat_rev, unsigned long *pdc_cap)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_PDC_INTERF_REV,
__pa(pdc_result));
if (retval == PDC_OK) {
*legacy_rev = pdc_result[0];
*pat_rev = pdc_result[1];
*pdc_cap = pdc_result[2];
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_io_pci_cfg_read - Read PCI configuration space.
* @pci_addr: PCI configuration space address for which the read request is being made.
* @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
* @mem_addr: Pointer to return memory buffer.
*
*/
int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
__pa(pdc_result), pci_addr, pci_size);
switch(pci_size) {
case 1: *(u8 *) mem_addr = (u8) pdc_result[0]; break;
case 2: *(u16 *)mem_addr = (u16) pdc_result[0]; break;
case 4: *(u32 *)mem_addr = (u32) pdc_result[0]; break;
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_io_pci_cfg_write - Write PCI configuration space.
* @pci_addr: PCI configuration space address for which the write request is being made.
* @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
* @val: The 1, 2, or 4 byte value, in the low-order end of the argument,
* to be written to PCI config space.
*
*/
int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
pci_addr, pci_size, val);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_mem_pdt_info - Retrieve information about page deallocation table
* @rinfo: memory pdt information
*
*/
int pdc_pat_mem_pdt_info(struct pdc_pat_mem_retinfo *rinfo)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_INFO,
__pa(&pdc_result));
if (retval == PDC_OK)
memcpy(rinfo, &pdc_result, sizeof(*rinfo));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_mem_pdt_cell_info - Retrieve information about page deallocation
* table of a cell
* @rinfo: memory pdt information
* @cell: cell number
*
*/
int pdc_pat_mem_pdt_cell_info(struct pdc_pat_mem_cell_pdt_retinfo *rinfo,
unsigned long cell)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_INFO,
__pa(&pdc_result), cell);
if (retval == PDC_OK)
memcpy(rinfo, &pdc_result, sizeof(*rinfo));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_mem_read_cell_pdt - Read PDT entries from (old) PAT firmware
* @pret: array of PDT entries
* @pdt_entries_ptr: ptr to hold number of PDT entries
* @max_entries: maximum number of entries to be read
*
*/
int pdc_pat_mem_read_cell_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long *pdt_entries_ptr, unsigned long max_entries)
{
int retval;
unsigned long flags, entries;
spin_lock_irqsave(&pdc_lock, flags);
/* PDC_PAT_MEM_CELL_READ is available on early PAT machines only */
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_CELL_READ,
__pa(&pdc_result), parisc_cell_num,
__pa(pdt_entries_ptr));
if (retval == PDC_OK) {
/* build up return value as for PDC_PAT_MEM_PD_READ */
entries = min(pdc_result[0], max_entries);
pret->pdt_entries = entries;
pret->actual_count_bytes = entries * sizeof(unsigned long);
}
spin_unlock_irqrestore(&pdc_lock, flags);
WARN_ON(retval == PDC_OK && pdc_result[0] > max_entries);
return retval;
}
/**
* pdc_pat_mem_read_pd_pdt - Read PDT entries from (newer) PAT firmware
* @pret: array of PDT entries
* @pdt_entries_ptr: ptr to hold number of PDT entries
* @count: number of bytes to read
* @offset: offset to start (in bytes)
*
*/
int pdc_pat_mem_read_pd_pdt(struct pdc_pat_mem_read_pd_retinfo *pret,
unsigned long *pdt_entries_ptr, unsigned long count,
unsigned long offset)
{
int retval;
unsigned long flags, entries;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_PD_READ,
__pa(&pdc_result), __pa(pdt_entries_ptr),
count, offset);
if (retval == PDC_OK) {
entries = min(pdc_result[0], count);
pret->actual_count_bytes = entries;
pret->pdt_entries = entries / sizeof(unsigned long);
}
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
* pdc_pat_mem_get_dimm_phys_location - Get physical DIMM slot via PAT firmware
* @pret: ptr to hold returned information
* @phys_addr: physical address to examine
*
*/
int pdc_pat_mem_get_dimm_phys_location(
struct pdc_pat_mem_phys_mem_location *pret,
unsigned long phys_addr)
{
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_PAT_MEM, PDC_PAT_MEM_ADDRESS,
__pa(&pdc_result), phys_addr);
if (retval == PDC_OK)
memcpy(pret, &pdc_result, sizeof(*pret));
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
#endif /* CONFIG_64BIT */
#endif /* defined(BOOTLOADER) */
/***************** 32-bit real-mode calls ***********/
/*
* The struct below is used to overlay real_stack (real2.S), preparing
* a 32-bit call frame. real32_call_asm() then uses this stack in
* narrow real mode.
*/
struct narrow_stack {
/* use int, not long which is 64 bits */
unsigned int arg13;
unsigned int arg12;
unsigned int arg11;
unsigned int arg10;
unsigned int arg9;
unsigned int arg8;
unsigned int arg7;
unsigned int arg6;
unsigned int arg5;
unsigned int arg4;
unsigned int arg3;
unsigned int arg2;
unsigned int arg1;
unsigned int arg0;
unsigned int frame_marker[8];
unsigned int sp;
/* in reality, there's nearly 8k of stack after this */
};
long real32_call(unsigned long fn, ...)
{
va_list args;
extern struct narrow_stack real_stack;
extern unsigned long real32_call_asm(unsigned int *,
unsigned int *,
unsigned int);
va_start(args, fn);
real_stack.arg0 = va_arg(args, unsigned int);
real_stack.arg1 = va_arg(args, unsigned int);
real_stack.arg2 = va_arg(args, unsigned int);
real_stack.arg3 = va_arg(args, unsigned int);
real_stack.arg4 = va_arg(args, unsigned int);
real_stack.arg5 = va_arg(args, unsigned int);
real_stack.arg6 = va_arg(args, unsigned int);
real_stack.arg7 = va_arg(args, unsigned int);
real_stack.arg8 = va_arg(args, unsigned int);
real_stack.arg9 = va_arg(args, unsigned int);
real_stack.arg10 = va_arg(args, unsigned int);
real_stack.arg11 = va_arg(args, unsigned int);
real_stack.arg12 = va_arg(args, unsigned int);
real_stack.arg13 = va_arg(args, unsigned int);
va_end(args);
return real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
}
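
/*
 * Illustrative sketch, not part of the original file: real32_call()
 * always reads 14 variadic argument slots, so callers with fewer real
 * arguments pad the remainder with zeros, exactly as the IODC calls
 * above do. The wrapper name is an assumption for illustration only.
 */
#if 0
static long example_real32(unsigned long fn)
{
	return real32_call(fn, 1UL, 2UL, 0, 0, 0, 0, 0,
			   0, 0, 0, 0, 0, 0, 0);
}
#endif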
#ifdef CONFIG_64BIT
/***************** 64-bit real-mode calls ***********/
struct wide_stack {
unsigned long arg0;
unsigned long arg1;
unsigned long arg2;
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
unsigned long arg6;
unsigned long arg7;
unsigned long arg8;
unsigned long arg9;
unsigned long arg10;
unsigned long arg11;
unsigned long arg12;
unsigned long arg13;
unsigned long frame_marker[2]; /* rp, previous sp */
unsigned long sp;
/* in reality, there's nearly 8k of stack after this */
};
long real64_call(unsigned long fn, ...)
{
va_list args;
extern struct wide_stack real64_stack;
extern unsigned long real64_call_asm(unsigned long *,
unsigned long *,
unsigned long);
va_start(args, fn);
real64_stack.arg0 = va_arg(args, unsigned long);
real64_stack.arg1 = va_arg(args, unsigned long);
real64_stack.arg2 = va_arg(args, unsigned long);
real64_stack.arg3 = va_arg(args, unsigned long);
real64_stack.arg4 = va_arg(args, unsigned long);
real64_stack.arg5 = va_arg(args, unsigned long);
real64_stack.arg6 = va_arg(args, unsigned long);
real64_stack.arg7 = va_arg(args, unsigned long);
real64_stack.arg8 = va_arg(args, unsigned long);
real64_stack.arg9 = va_arg(args, unsigned long);
real64_stack.arg10 = va_arg(args, unsigned long);
real64_stack.arg11 = va_arg(args, unsigned long);
real64_stack.arg12 = va_arg(args, unsigned long);
real64_stack.arg13 = va_arg(args, unsigned long);
va_end(args);
return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
}
#endif /* CONFIG_64BIT */
| linux-master | arch/parisc/kernel/firmware.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel support for the ptrace() and syscall tracing interfaces.
*
* Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
* Copyright (C) 2000 Matthew Wilcox <[email protected]>
* Copyright (C) 2000 David Huggins-Daines <[email protected]>
* Copyright (C) 2008-2016 Helge Deller <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/regset.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB)
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* These are our native regset flavors.
*/
enum parisc_regset {
REGSET_GENERAL,
REGSET_FP
};
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* make sure the trap bits are not set */
pa_psw(task)->r = 0;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
/*
* The following functions are called by ptrace_resume() when
* enabling or disabling single/block tracing.
*/
void user_disable_single_step(struct task_struct *task)
{
ptrace_disable(task);
}
void user_enable_single_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
if (pa_psw(task)->n) {
/* Nullified, just crank over the queue. */
task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
pa_psw(task)->n = 0;
pa_psw(task)->x = 0;
pa_psw(task)->y = 0;
pa_psw(task)->z = 0;
pa_psw(task)->b = 0;
ptrace_disable(task);
/* Don't wake up the task, but let the
parent know something happened. */
force_sig_fault_to_task(SIGTRAP, TRAP_TRACE,
(void __user *) (task_regs(task)->iaoq[0] & ~3),
task);
/* notify_parent(task, SIGCHLD); */
return;
}
/* Enable recovery counter traps. The recovery counter
* itself will be set to zero on a task switch. If the
* task is suspended on a syscall then the syscall return
* path will overwrite the recovery counter with a suitable
* value such that it traps once back in user space. We
* disable interrupts in the task's PSW here also, to avoid
* interrupts while the recovery counter is decrementing.
*/
pa_psw(task)->r = 1;
pa_psw(task)->t = 0;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
void user_enable_block_step(struct task_struct *task)
{
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
set_tsk_thread_flag(task, TIF_BLOCKSTEP);
/* Enable taken branch trap. */
pa_psw(task)->r = 0;
pa_psw(task)->t = 1;
pa_psw(task)->h = 0;
pa_psw(task)->l = 0;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
unsigned long tmp;
long ret = -EIO;
unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
#ifdef CONFIG_64BIT
if (is_compat_task())
user_regs_struct_size /= 2;
#endif
switch (request) {
/* Read the word at location addr in the USER area. For ptraced
processes, the kernel saves all regs on a syscall. */
case PTRACE_PEEKUSR:
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, datap);
break;
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
FIXME. There is a problem at the moment in that r3-r18 are only
saved if the process is ptraced on syscall entry, and even then
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
if (addr == PT_PSW) {
/* Allow writing to Nullify, Divide-step-correction,
* and carry/borrow bits.
* BEWARE, if you set N, and then single step, it won't
* stop on the nullified instruction.
*/
data &= USER_PSW_BITS;
task_regs(child)->gr[0] &= ~USER_PSW_BITS;
task_regs(child)->gr[0] |= data;
ret = 0;
break;
}
if ((addr & (sizeof(unsigned long)-1)) ||
addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
data |= PRIV_USER; /* ensure userspace privilege */
}
if ((addr >= PT_GR1 && addr <= PT_GR31) ||
addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
addr == PT_SAR) {
*(unsigned long *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, user_regs_struct_size,
datap);
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_GENERAL,
0, user_regs_struct_size,
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_fp_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child,
task_user_regset_view(current),
REGSET_FP,
0, sizeof(struct user_fp_struct),
datap);
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#ifdef CONFIG_COMPAT
/* This function is needed to translate 32 bit pt_regs offsets into
* 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
* will request offset 12 if it wants gr3, but the lower 32 bits of
* the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
* This code relies on a 32 bit pt_regs being comprised of 32 bit values
* except for the fp registers which (a) are 64 bits, and (b) follow
* the gr registers at the start of pt_regs. The 32 bit pt_regs should
* be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
* being 64 bit in both cases.
*/
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
compat_ulong_t pos;
if (offset < 32*4) /* gr[0..31] */
pos = offset * 2 + 4;
else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */
pos = (offset - 32*4) + PT_FR0;
else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
else
pos = sizeof(struct pt_regs);
return pos;
}
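
/*
 * Illustrative check, not part of the original file, of the mapping the
 * comment above describes: a 32-bit debugger asking for gr3 at offset
 * 3*4 == 12 must land on the low word of the 64-bit gr3 at 3*8+4 == 28.
 */
#if 0
static void example_check_translate(void)
{
	WARN_ON(translate_usr_offset(12) != 28);
}
#endif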
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t addr, compat_ulong_t data)
{
compat_uint_t tmp;
long ret = -EIO;
switch (request) {
case PTRACE_PEEKUSR:
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
break;
/* Write the word at location addr in the USER area. This will need
to change when the kernel no longer saves all regs on a syscall.
FIXME. There is a problem at the moment in that r3-r18 are only
saved if the process is ptraced on syscall entry, and even then
those values are overwritten by actual register values on syscall
exit. */
case PTRACE_POKEUSR:
/* Some register values written here may be ignored in
* entry.S:syscall_restore_rfi; e.g. iaoq is written with
* r31/r31+4, and not with the values in pt_regs.
*/
if (addr == PT_PSW) {
/* Since PT_PSW==0, it is valid for 32 bit processes
* under 64 bit kernels as well.
*/
ret = arch_ptrace(child, request, addr, data);
} else {
if (addr & (sizeof(compat_uint_t)-1))
break;
addr = translate_usr_offset(addr);
if (addr >= sizeof(struct pt_regs))
break;
if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
data |= PRIV_USER; /* ensure userspace privilege */
}
if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
/* Special case, fp regs are 64 bits anyway */
*(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
addr == PT_SAR+4) {
/* Zero the top 32 bits */
*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
*(__u32 *) ((char *) task_regs(child) + addr) = data;
ret = 0;
}
}
break;
case PTRACE_GETREGS:
case PTRACE_SETREGS:
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
return arch_ptrace(child, request, addr, data);
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#endif
long do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
int rc = ptrace_report_syscall_entry(regs);
/*
* As tracesys_next does not set %r28 to -ENOSYS
* when %r20 is set to -1, initialize it here.
*/
regs->gr[28] = -ENOSYS;
if (rc) {
/*
* A nonzero return code from
* ptrace_report_syscall_entry() tells us
* to prevent the syscall execution. Skip
* the syscall call and the syscall restart handling.
*
* Note that the tracer may also just change
* regs->gr[20] to an invalid syscall number,
* that is handled by tracesys_next.
*/
regs->gr[20] = -1UL;
return -1;
}
}
/* Do the secure computing check after ptrace. */
if (secure_computing() == -1)
return -1;
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gr[20]);
#endif
#ifdef CONFIG_64BIT
if (!is_compat_task())
audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
regs->gr[24], regs->gr[23]);
else
#endif
audit_syscall_entry(regs->gr[20] & 0xffffffff,
regs->gr[26] & 0xffffffff,
regs->gr[25] & 0xffffffff,
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
*/
return (int) ((u32) regs->gr[20]);
}
void do_syscall_trace_exit(struct pt_regs *regs)
{
int stepping = test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP);
audit_syscall_exit(regs);
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->gr[20]);
#endif
if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, stepping);
}
/*
* regset functions.
*/
static int fpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_regs(target);
return membuf_write(&to, regs->fr, ELF_NFPREG * sizeof(__u64));
}
static int fpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_regs(target);
const __u64 *k = kbuf;
const __u64 __user *u = ubuf;
__u64 reg;
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < ELF_NFPREG; --count)
regs->fr[pos++] = *k++;
else
for (; count > 0 && pos < ELF_NFPREG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs->fr[pos++] = reg;
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
ELF_NFPREG * sizeof(reg), -1);
return 0;
}
#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))
static unsigned long get_reg(struct pt_regs *regs, int num)
{
switch (num) {
case RI(gr[0]) ... RI(gr[31]): return regs->gr[num - RI(gr[0])];
case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
case RI(iasq[0]): return regs->iasq[0];
case RI(iasq[1]): return regs->iasq[1];
case RI(iaoq[0]): return regs->iaoq[0];
case RI(iaoq[1]): return regs->iaoq[1];
case RI(sar): return regs->sar;
case RI(iir): return regs->iir;
case RI(isr): return regs->isr;
case RI(ior): return regs->ior;
case RI(ipsw): return regs->ipsw;
case RI(cr27): return regs->cr27;
case RI(cr0): return mfctl(0);
case RI(cr24): return mfctl(24);
case RI(cr25): return mfctl(25);
case RI(cr26): return mfctl(26);
case RI(cr28): return mfctl(28);
case RI(cr29): return mfctl(29);
case RI(cr30): return mfctl(30);
case RI(cr31): return mfctl(31);
case RI(cr8): return mfctl(8);
case RI(cr9): return mfctl(9);
case RI(cr12): return mfctl(12);
case RI(cr13): return mfctl(13);
case RI(cr10): return mfctl(10);
case RI(cr15): return mfctl(15);
default: return 0;
}
}
static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
switch (num) {
case RI(gr[0]): /*
* PSW is in gr[0].
* Allow writing to Nullify, Divide-step-correction,
* and carry/borrow bits.
* BEWARE, if you set N, and then single step, it won't
* stop on the nullified instruction.
*/
val &= USER_PSW_BITS;
regs->gr[0] &= ~USER_PSW_BITS;
regs->gr[0] |= val;
return;
case RI(gr[1]) ... RI(gr[31]):
regs->gr[num - RI(gr[0])] = val;
return;
case RI(iaoq[0]):
case RI(iaoq[1]):
/* set 2 lowest bits to ensure userspace privilege: */
regs->iaoq[num - RI(iaoq[0])] = val | PRIV_USER;
return;
case RI(sar): regs->sar = val;
return;
default: return;
#if 0
/* do not allow to change any of the following registers (yet) */
case RI(sr[0]) ... RI(sr[7]): return regs->sr[num - RI(sr[0])];
case RI(iasq[0]): return regs->iasq[0];
case RI(iasq[1]): return regs->iasq[1];
case RI(iir): return regs->iir;
case RI(isr): return regs->isr;
case RI(ior): return regs->ior;
case RI(ipsw): return regs->ipsw;
case RI(cr27): return regs->cr27;
case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
}
}
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_regs(target);
unsigned int pos;
for (pos = 0; pos < ELF_NGREG; pos++)
membuf_store(&to, get_reg(regs, pos));
return 0;
}
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_regs(target);
const unsigned long *k = kbuf;
const unsigned long __user *u = ubuf;
unsigned long reg;
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < ELF_NGREG; --count)
set_reg(regs, pos++, *k++);
else
for (; count > 0 && pos < ELF_NGREG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
set_reg(regs, pos++, reg);
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
ELF_NGREG * sizeof(reg), -1);
return 0;
}
static const struct user_regset native_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.regset_get = gpr_get, .set = gpr_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(__u64), .align = sizeof(__u64),
.regset_get = fpr_get, .set = fpr_set
}
};
static const struct user_regset_view user_parisc_native_view = {
.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#ifdef CONFIG_64BIT
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_regs(target);
unsigned int pos;
for (pos = 0; pos < ELF_NGREG; pos++)
membuf_store(&to, (compat_ulong_t)get_reg(regs, pos));
return 0;
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_regs(target);
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < ELF_NGREG; --count)
set_reg(regs, pos++, *k++);
else
for (; count > 0 && pos < ELF_NGREG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
set_reg(regs, pos++, reg);
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
ELF_NGREG * sizeof(reg), -1);
return 0;
}
/*
* These are the regset flavors matching the 32bit native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.regset_get = gpr32_get, .set = gpr32_set
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(__u64), .align = sizeof(__u64),
.regset_get = fpr_get, .set = fpr_set
}
};
static const struct user_regset_view user_parisc_compat_view = {
.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
if (is_compat_task())
return &user_parisc_compat_view;
#endif
return &user_parisc_native_view;
}
/* HAVE_REGS_AND_STACK_ACCESS_API feature */
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_INDEX(r,i) {.name = #r#i, .offset = offsetof(struct pt_regs, r[i])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_INDEX(gr,0),
REG_OFFSET_INDEX(gr,1),
REG_OFFSET_INDEX(gr,2),
REG_OFFSET_INDEX(gr,3),
REG_OFFSET_INDEX(gr,4),
REG_OFFSET_INDEX(gr,5),
REG_OFFSET_INDEX(gr,6),
REG_OFFSET_INDEX(gr,7),
REG_OFFSET_INDEX(gr,8),
REG_OFFSET_INDEX(gr,9),
REG_OFFSET_INDEX(gr,10),
REG_OFFSET_INDEX(gr,11),
REG_OFFSET_INDEX(gr,12),
REG_OFFSET_INDEX(gr,13),
REG_OFFSET_INDEX(gr,14),
REG_OFFSET_INDEX(gr,15),
REG_OFFSET_INDEX(gr,16),
REG_OFFSET_INDEX(gr,17),
REG_OFFSET_INDEX(gr,18),
REG_OFFSET_INDEX(gr,19),
REG_OFFSET_INDEX(gr,20),
REG_OFFSET_INDEX(gr,21),
REG_OFFSET_INDEX(gr,22),
REG_OFFSET_INDEX(gr,23),
REG_OFFSET_INDEX(gr,24),
REG_OFFSET_INDEX(gr,25),
REG_OFFSET_INDEX(gr,26),
REG_OFFSET_INDEX(gr,27),
REG_OFFSET_INDEX(gr,28),
REG_OFFSET_INDEX(gr,29),
REG_OFFSET_INDEX(gr,30),
REG_OFFSET_INDEX(gr,31),
REG_OFFSET_INDEX(sr,0),
REG_OFFSET_INDEX(sr,1),
REG_OFFSET_INDEX(sr,2),
REG_OFFSET_INDEX(sr,3),
REG_OFFSET_INDEX(sr,4),
REG_OFFSET_INDEX(sr,5),
REG_OFFSET_INDEX(sr,6),
REG_OFFSET_INDEX(sr,7),
REG_OFFSET_INDEX(iasq,0),
REG_OFFSET_INDEX(iasq,1),
REG_OFFSET_INDEX(iaoq,0),
REG_OFFSET_INDEX(iaoq,1),
REG_OFFSET_NAME(cr27),
REG_OFFSET_NAME(ksp),
REG_OFFSET_NAME(kpc),
REG_OFFSET_NAME(sar),
REG_OFFSET_NAME(iir),
REG_OFFSET_NAME(isr),
REG_OFFSET_NAME(ior),
REG_OFFSET_NAME(ipsw),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
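
/*
 * Illustrative use, not part of the original file: resolving a register
 * offset by name, as e.g. kprobes argument fetching does. The check
 * below simply restates what the table above encodes.
 */
#if 0
static void example_query_offset(void)
{
	WARN_ON(regs_query_register_offset("sar") !=
		offsetof(struct pt_regs, sar));
}
#endif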
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr -= n;
if (!regs_within_kernel_stack(regs, (unsigned long)addr))
return 0;
return *addr;
}
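
/*
 * Illustrative use, not part of the original file: fetching the word at
 * the current kernel stack pointer (n == 0); out-of-stack indices come
 * back as 0 by design.
 */
#if 0
static void example_peek_stack(struct pt_regs *regs)
{
	pr_debug("top of kernel stack: 0x%lx\n",
		 regs_get_kernel_stack_nth(regs, 0));
}
#endif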
| linux-master | arch/parisc/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Page Deallocation Table (PDT) support
*
* The Page Deallocation Table (PDT) is maintained by firmware and holds a
* list of memory addresses in which memory errors were detected.
* The list contains both single-bit (correctable) and double-bit
* (uncorrectable) errors.
*
* Copyright 2017 by Helge Deller <[email protected]>
*
* possible future enhancements:
* - add userspace interface via procfs or sysfs to clear PDT
*/
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/initrd.h>
#include <linux/pgtable.h>
#include <linux/mm.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
enum pdt_access_type {
PDT_NONE,
PDT_PDC,
PDT_PAT_NEW,
PDT_PAT_CELL
};
static enum pdt_access_type pdt_type;
/* PDT poll interval: 1 minute if errors, 5 minutes if everything OK. */
#define PDT_POLL_INTERVAL_DEFAULT (5*60*HZ)
#define PDT_POLL_INTERVAL_SHORT (1*60*HZ)
static unsigned long pdt_poll_interval = PDT_POLL_INTERVAL_DEFAULT;
/* global PDT status information */
static struct pdc_mem_retinfo pdt_status;
#define MAX_PDT_TABLE_SIZE PAGE_SIZE
#define MAX_PDT_ENTRIES (MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
/*
* Constants for the pdt_entry format:
* A pdt_entry holds the physical address in bits 0-57, bits 58-61 are
* reserved, bit 62 is the perm bit and bit 63 is the error_type bit.
* The perm bit indicates whether the error has been verified as a permanent
* error (value of 1) or has not been verified, and may be transient (value
* of 0). The error_type bit indicates whether the error is a single bit error
* (value of 1) or a multiple bit error.
* On non-PAT machines phys_addr is encoded in bits 0-59 and error_type in bit
* 63. Those machines don't provide the perm bit.
*/
#define PDT_ADDR_PHYS_MASK (pdt_type != PDT_PDC ? ~0x3f : ~0x0f)
#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
#define PDT_ADDR_SINGLE_ERR 1UL
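
/*
 * Illustrative decode sketch, not part of the original file, showing how
 * the masks above pick a pdt_entry apart; report_mem_err() below does the
 * same when printing. The function name is an assumption for illustration.
 */
#if 0
static void example_decode_pde(unsigned long pde)
{
	unsigned long addr = pde & PDT_ADDR_PHYS_MASK;
	int permanent = !!(pde & PDT_ADDR_PERM_ERR); /* always 0 on PDT_PDC */
	int single_bit = !!(pde & PDT_ADDR_SINGLE_ERR);

	pr_debug("pde 0x%lx: addr 0x%lx perm=%d single=%d\n",
		 pde, addr, permanent, single_bit);
}
#endif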
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
if (pdt_type == PDT_NONE)
return;
seq_printf(m, "PDT_max_entries: %7lu\n",
pdt_status.pdt_size);
seq_printf(m, "PDT_cur_entries: %7lu\n",
pdt_status.pdt_entries);
}
static int get_info_pat_new(void)
{
struct pdc_pat_mem_retinfo pat_rinfo;
int ret;
/* newer PAT machines like C8000 report info for all cells */
if (is_pdc_pat())
ret = pdc_pat_mem_pdt_info(&pat_rinfo);
else
return PDC_BAD_PROC;
pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
pdt_status.pdt_status = 0;
pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
pdt_status.good_mem = pat_rinfo.good_mem;
return ret;
}
static int get_info_pat_cell(void)
{
struct pdc_pat_mem_cell_pdt_retinfo cell_rinfo;
int ret;
/* older PAT machines like rp5470 report cell info only */
if (is_pdc_pat())
ret = pdc_pat_mem_pdt_cell_info(&cell_rinfo, parisc_cell_num);
else
return PDC_BAD_PROC;
pdt_status.pdt_size = cell_rinfo.max_pdt_entries;
pdt_status.pdt_entries = cell_rinfo.current_pdt_entries;
pdt_status.pdt_status = 0;
pdt_status.first_dbe_loc = cell_rinfo.first_dbe_loc;
pdt_status.good_mem = cell_rinfo.good_mem;
return ret;
}
static void report_mem_err(unsigned long pde)
{
struct pdc_pat_mem_phys_mem_location loc;
unsigned long addr;
char dimm_txt[32];
addr = pde & PDT_ADDR_PHYS_MASK;
/* show DIMM slot description on PAT machines */
if (is_pdc_pat()) {
pdc_pat_mem_get_dimm_phys_location(&loc, addr);
sprintf(dimm_txt, "DIMM slot %02x, ", loc.dimm_slot);
} else
dimm_txt[0] = 0;
pr_warn("PDT: BAD MEMORY at 0x%08lx, %s%s%s-bit error.\n",
addr, dimm_txt,
pde & PDT_ADDR_PERM_ERR ? "permanent ":"",
pde & PDT_ADDR_SINGLE_ERR ? "single":"multi");
}
/*
* pdc_pdt_init()
*
* Initialize kernel PDT structures, read initial PDT table from firmware,
* report all current PDT entries and mark bad memory with memblock_reserve()
* to avoid that the kernel will use broken memory areas.
*
*/
void __init pdc_pdt_init(void)
{
int ret, i;
unsigned long entries;
struct pdc_mem_read_pdt pdt_read_ret;
pdt_type = PDT_PAT_NEW;
ret = get_info_pat_new();
if (ret != PDC_OK) {
pdt_type = PDT_PAT_CELL;
ret = get_info_pat_cell();
}
if (ret != PDC_OK) {
pdt_type = PDT_PDC;
/* non-PAT machines provide the standard PDC call */
ret = pdc_mem_pdt_info(&pdt_status);
}
if (ret != PDC_OK) {
pdt_type = PDT_NONE;
pr_info("PDT: Firmware does not provide any page deallocation"
" information.\n");
return;
}
entries = pdt_status.pdt_entries;
if (WARN_ON(entries > MAX_PDT_ENTRIES))
entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;
pr_info("PDT: type %s, size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
" good_mem %lu MB\n",
pdt_type == PDT_PDC ? __stringify(PDT_PDC) :
pdt_type == PDT_PAT_CELL ? __stringify(PDT_PAT_CELL)
: __stringify(PDT_PAT_NEW),
pdt_status.pdt_size, pdt_status.pdt_entries,
pdt_status.pdt_status, pdt_status.first_dbe_loc,
pdt_status.good_mem / 1024 / 1024);
if (entries == 0) {
pr_info("PDT: Firmware reports all memory OK.\n");
return;
}
if (pdt_status.first_dbe_loc &&
pdt_status.first_dbe_loc <= __pa((unsigned long)&_end))
pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n");
pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n",
entries);
if (pdt_type == PDT_PDC)
ret = pdc_mem_pdt_read_entries(&pdt_read_ret, pdt_entry);
else {
#ifdef CONFIG_64BIT
struct pdc_pat_mem_read_pd_retinfo pat_pret;
if (pdt_type == PDT_PAT_CELL)
ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
MAX_PDT_ENTRIES);
else
ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
MAX_PDT_TABLE_SIZE, 0);
#else
ret = PDC_BAD_PROC;
#endif
}
if (ret != PDC_OK) {
pdt_type = PDT_NONE;
pr_warn("PDT: Get PDT entries failed with %d\n", ret);
return;
}
for (i = 0; i < pdt_status.pdt_entries; i++) {
unsigned long addr;
report_mem_err(pdt_entry[i]);
addr = pdt_entry[i] & PDT_ADDR_PHYS_MASK;
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
addr >= initrd_start && addr < initrd_end)
pr_crit("CRITICAL: initrd possibly broken "
"due to bad memory!\n");
/* mark memory page bad */
memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
num_poisoned_pages_inc(addr >> PAGE_SHIFT);
}
}
/*
* This is the PDT kernel thread main loop.
*/
static int pdt_mainloop(void *unused)
{
struct pdc_mem_read_pdt pdt_read_ret;
struct pdc_pat_mem_read_pd_retinfo pat_pret __maybe_unused;
unsigned long old_num_entries;
unsigned long *bad_mem_ptr;
int num, ret;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
old_num_entries = pdt_status.pdt_entries;
schedule_timeout(pdt_poll_interval);
if (kthread_should_stop())
break;
/* Do we have new PDT entries? */
switch (pdt_type) {
case PDT_PAT_NEW:
ret = get_info_pat_new();
break;
case PDT_PAT_CELL:
ret = get_info_pat_cell();
break;
default:
ret = pdc_mem_pdt_info(&pdt_status);
break;
}
if (ret != PDC_OK) {
pr_warn("PDT: unexpected failure %d\n", ret);
return -EINVAL;
}
/* if no new PDT entries, just wait again */
num = pdt_status.pdt_entries - old_num_entries;
if (num <= 0)
continue;
/* decrease poll interval in case we found memory errors */
if (pdt_status.pdt_entries &&
pdt_poll_interval == PDT_POLL_INTERVAL_DEFAULT)
pdt_poll_interval = PDT_POLL_INTERVAL_SHORT;
/* limit entries to get */
if (num > MAX_PDT_ENTRIES) {
num = MAX_PDT_ENTRIES;
pdt_status.pdt_entries = old_num_entries + num;
}
/* get new entries */
switch (pdt_type) {
#ifdef CONFIG_64BIT
case PDT_PAT_CELL:
if (pdt_status.pdt_entries > MAX_PDT_ENTRIES) {
pr_crit("PDT: too many entries.\n");
return -ENOMEM;
}
ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
MAX_PDT_ENTRIES);
bad_mem_ptr = &pdt_entry[old_num_entries];
break;
case PDT_PAT_NEW:
ret = pdc_pat_mem_read_pd_pdt(&pat_pret,
pdt_entry,
num * sizeof(unsigned long),
old_num_entries * sizeof(unsigned long));
bad_mem_ptr = &pdt_entry[0];
break;
#endif
default:
ret = pdc_mem_pdt_read_entries(&pdt_read_ret,
pdt_entry);
bad_mem_ptr = &pdt_entry[old_num_entries];
break;
}
/* report and mark memory broken */
while (num--) {
unsigned long pde = *bad_mem_ptr++;
report_mem_err(pde);
#ifdef CONFIG_MEMORY_FAILURE
if ((pde & PDT_ADDR_PERM_ERR) ||
((pde & PDT_ADDR_SINGLE_ERR) == 0))
memory_failure(pde >> PAGE_SHIFT, 0);
else
soft_offline_page(pde >> PAGE_SHIFT, 0);
#else
pr_crit("PDT: memory error at 0x%lx ignored.\n"
"Rebuild kernel with CONFIG_MEMORY_FAILURE=y "
"for real handling.\n",
pde & PDT_ADDR_PHYS_MASK);
#endif
}
}
return 0;
}
static int __init pdt_initcall(void)
{
struct task_struct *kpdtd_task;
if (pdt_type == PDT_NONE)
return -ENODEV;
kpdtd_task = kthread_run(pdt_mainloop, NULL, "kpdtd");
return PTR_ERR_OR_ZERO(kpdtd_task);
}
late_initcall(pdt_initcall);
| linux-master | arch/parisc/kernel/pdt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/parisc/traps.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1999, 2000 Philipp Rumpf <[email protected]>
*/
/*
* 'Traps.c' handles hardware traps and faults after we have saved some
* state in 'asm.s'.
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kfence.h>
#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>
#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
#include <asm/spinlock.h>
#endif
#include "../math-emu/math-emu.h" /* for handle_fpe() */
static void parisc_show_stack(struct task_struct *task,
struct pt_regs *regs, const char *loglvl);
static int printbinary(char *buf, unsigned long x, int nbits)
{
unsigned long mask = 1UL << (nbits - 1);
while (mask != 0) {
*buf++ = (mask & x ? '1' : '0');
mask >>= 1;
}
*buf = '\0';
return nbits;
}
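
/*
 * Illustrative use, not part of the original file: printbinary() renders
 * the low nbits of x as ASCII '0'/'1', which is how the PSW and FPSR
 * dumps below line their bits up under the header strings.
 */
#if 0
static void example_printbinary(void)
{
	char buf[64];

	printbinary(buf, 0xa, 4);	/* buf now holds "1010" */
}
#endif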
#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx" /* fpregs are 64-bit always */
#define PRINTREGS(lvl,r,f,fmt,x) \
printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
(r)[(x)+2], (r)[(x)+3])
static void print_gr(const char *level, struct pt_regs *regs)
{
int i;
char buf[64];
printk("%s\n", level);
printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
printbinary(buf, regs->gr[0], 32);
printk("%sPSW: %s %s\n", level, buf, print_tainted());
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->gr, "r", RFMT, i);
}
static void print_fr(const char *level, struct pt_regs *regs)
{
int i;
char buf[64];
struct { u32 sw[2]; } s;
/* FR are 64bit everywhere. Need to use asm to get the content
* of fpsr/fper1, and we assume that we won't have a FP Identify
* in our way, otherwise we're screwed.
* The fldd is used to restore the T-bit if there was one, as the
* store clears it anyway.
* PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
asm volatile ("fstd %%fr0,0(%1) \n\t"
"fldd 0(%1),%%fr0 \n\t"
: "=m" (s) : "r" (&s) : "r0");
printk("%s\n", level);
printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
printbinary(buf, s.sw[0], 32);
printk("%sFPSR: %s\n", level, buf);
printk("%sFPER1: %08x\n", level, s.sw[1]);
/* here we'll print fr0 again, tho it'll be meaningless */
for (i = 0; i < 32; i += 4)
PRINTREGS(level, regs->fr, "fr", FFMT, i);
}
void show_regs(struct pt_regs *regs)
{
int i, user;
const char *level;
unsigned long cr30, cr31;
user = user_mode(regs);
level = user ? KERN_DEBUG : KERN_CRIT;
show_regs_print_info(level);
print_gr(level, regs);
for (i = 0; i < 8; i += 4)
PRINTREGS(level, regs->sr, "sr", RFMT, i);
if (user)
print_fr(level, regs);
cr30 = mfctl(30);
cr31 = mfctl(31);
printk("%s\n", level);
printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
level, regs->iir, regs->isr, regs->ior);
printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
level, task_cpu(current), cr30, cr31);
printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
if (user) {
printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
} else {
printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
parisc_show_stack(current, regs, KERN_DEFAULT);
}
}
static DEFINE_RATELIMIT_STATE(_hppa_rs,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
printk(fmt, ##__VA_ARGS__); \
show_regs(regs); \
} \
}
static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
int i = 1;
printk("%sBacktrace:\n", loglvl);
while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
printk("%s [<" RFMT ">] %pS\n",
loglvl, info->ip, (void *) info->ip);
i++;
}
}
printk("%s\n", loglvl);
}
static void parisc_show_stack(struct task_struct *task,
struct pt_regs *regs, const char *loglvl)
{
struct unwind_frame_info info;
unwind_frame_init_task(&info, task, regs);
do_show_stack(&info, loglvl);
}
void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
parisc_show_stack(t, NULL, loglvl);
}
int is_valid_bugaddr(unsigned long iaoq)
{
return 1;
}
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (user_mode(regs)) {
if (err == 0)
return; /* STFU */
parisc_printk_ratelimited(1, regs,
KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
return;
}
bust_spinlocks(1);
oops_enter();
/* Amuse the user in a SPARC fashion */
if (err) printk(KERN_CRIT
" _______________________________ \n"
" < Your System ate a SPARC! Gah! >\n"
" ------------------------------- \n"
" \\ ^__^\n"
" (__)\\ )\\/\\\n"
" U ||----w |\n"
" || ||\n");
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
if (err)
printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
current->comm, task_pid_nr(current), str, err);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
printk(KERN_CRIT "%s() recursion detected.\n", __func__);
local_irq_enable();
while (1);
}
current->thread.flags |= PARISC_KERNEL_DEATH;
show_regs(regs);
dump_stack();
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
make_task_dead(SIGSEGV);
}
/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
force_sig_fault(SIGTRAP, wot,
(void __user *) (regs->iaoq[0] & ~3));
}
static void handle_break(struct pt_regs *regs)
{
unsigned iir = regs->iir;
if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
/* check if a BUG() or WARN() trapped here. */
enum bug_trap_type tt;
tt = report_bug(regs->iaoq[0] & ~3, regs);
if (tt == BUG_TRAP_TYPE_WARN) {
regs->iaoq[0] += 4;
regs->iaoq[1] += 4;
return; /* return to next instruction when WARN_ON(). */
}
die_if_kernel("Unknown kernel breakpoint", regs,
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
#ifdef CONFIG_KPROBES
if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
parisc_kprobe_break_handler(regs);
return;
}
if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
parisc_kprobe_ss_handler(regs);
return;
}
#endif
#ifdef CONFIG_KGDB
if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
kgdb_handle_exception(9, SIGTRAP, 0, regs);
return;
}
#endif
#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
die_if_kernel("Spinlock was trashed", regs, 1);
}
#endif
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
task_pid_nr(current), current->comm);
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
}
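
/*
 * Illustrative decode, not part of the original file: a BREAK instruction
 * encodes im5 in bits 0-4 and im13 in bits 13-25, which is how the printk
 * above recovers "break 4,8" from GDB_BREAK_INSN (0x10004).
 */
#if 0
static void example_decode_break(unsigned int iir)
{
	unsigned int im5 = iir & 31;
	unsigned int im13 = (iir >> 13) & ((1 << 13) - 1);

	pr_debug("break %u,%u\n", im5, im13);
}
#endif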
static void default_trap(int code, struct pt_regs *regs)
{
printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
show_regs(regs);
}
static void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
register int i;
extern unsigned int hpmc_pim_data[];
struct pdc_hpmc_pim_11 *pim_narrow;
struct pdc_hpmc_pim_20 *pim_wide;
if (boot_cpu_data.cpu_type >= pcxu) {
pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
/*
* Note: The following code will probably generate a
* bunch of truncation error warnings from the compiler.
* Could be handled with an ifdef, but perhaps there
* is a better way.
*/
regs->gr[0] = pim_wide->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_wide->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_wide->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_wide->sr[i];
regs->iasq[0] = pim_wide->cr[17];
regs->iasq[1] = pim_wide->iasq_back;
regs->iaoq[0] = pim_wide->cr[18];
regs->iaoq[1] = pim_wide->iaoq_back;
regs->sar = pim_wide->cr[11];
regs->iir = pim_wide->cr[19];
regs->isr = pim_wide->cr[20];
regs->ior = pim_wide->cr[21];
}
else {
pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
regs->gr[0] = pim_narrow->cr[22];
for (i = 1; i < 32; i++)
regs->gr[i] = pim_narrow->gr[i];
for (i = 0; i < 32; i++)
regs->fr[i] = pim_narrow->fr[i];
for (i = 0; i < 8; i++)
regs->sr[i] = pim_narrow->sr[i];
regs->iasq[0] = pim_narrow->cr[17];
regs->iasq[1] = pim_narrow->iasq_back;
regs->iaoq[0] = pim_narrow->cr[18];
regs->iaoq[1] = pim_narrow->iaoq_back;
regs->sar = pim_narrow->cr[11];
regs->iir = pim_narrow->cr[19];
regs->isr = pim_narrow->cr[20];
regs->ior = pim_narrow->cr[21];
}
/*
* The following fields only have meaning if we came through
* another path. So just zero them here.
*/
regs->ksp = 0;
regs->kpc = 0;
regs->orig_r28 = 0;
}
/*
* This routine is called as a last resort when everything else
* has gone clearly wrong. We get called for faults in kernel space,
* and HPMC's.
*/
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
static DEFINE_SPINLOCK(terminate_lock);
(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
bust_spinlocks(1);
set_eiem(0);
local_irq_disable();
spin_lock(&terminate_lock);
/* unlock the pdc lock if necessary */
pdc_emergency_unlock();
/* Not all paths will gutter the processor... */
switch(code){
case 1:
transfer_pim_to_trap_frame(regs);
break;
default:
break;
}
{
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
do_show_stack(&info, KERN_CRIT);
}
printk("\n");
pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
msg, code, trap_name(code), offset);
show_regs(regs);
spin_unlock(&terminate_lock);
/* put soft power button back under hardware control;
* if the user had pressed it once at any time, the
* system will shut down immediately right here. */
pdc_soft_power_button(0);
/* Call kernel panic() so reboot timeouts work properly
* FIXME: This function should be on the list of
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
* parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/
panic(msg);
}
void notrace handle_interruption(int code, struct pt_regs *regs)
{
unsigned long fault_address = 0;
unsigned long fault_space = 0;
int si_code;
if (!irqs_disabled_flags(regs->gr[0]))
local_irq_enable();
/* Security check:
* If the priority level is still user, and the
* faulting space is not equal to the active space
* then the user is attempting something in a space
* that does not belong to them. Kill the process.
*
* This is normally the situation when the user
* attempts to jump into the kernel space at the
* wrong offset, be it at the gateway page or a
* random location.
*
* We cannot normally signal the process because it
* could *be* on the gateway page, and processes
* executing on the gateway page can't have signals
* delivered.
*
	 * We merely readjust the address into the user's
* space, at a destination address of zero, and
* allow processing to continue.
*/
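	/* On PA-RISC the low two bits of an IAOQ entry carry the privilege
	 * level (3 == user), which is why the test below checks
	 * iaoq[0] & 3 and the redirect target keeps those bits set. */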
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
regs->iaoq[0] = 0 | 3;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
return;
}
#if 0
printk(KERN_CRIT "Interruption # %d\n", code);
#endif
switch(code) {
case 1:
/* High-priority machine check (HPMC) */
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
parisc_terminate("High Priority Machine Check (HPMC)",
regs, code, 0);
/* NOT REACHED */
case 2:
/* Power failure interrupt */
printk(KERN_CRIT "Power failure interrupt !\n");
return;
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
#ifdef CONFIG_KGDB
if (kgdb_single_step) {
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return;
}
#endif
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */
return;
case 5:
/* Low-priority machine check */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
flush_cache_all();
flush_tlb_all();
default_trap(code, regs);
return;
case PARISC_ITLB_TRAP:
/* Instruction TLB miss fault/Instruction page fault */
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
break;
case 8:
/* Illegal instruction trap */
die_if_kernel("Illegal instruction", regs, code);
si_code = ILL_ILLOPC;
goto give_sigill;
case 9:
/* Break instruction trap */
handle_break(regs);
return;
case 10:
/* Privileged operation trap */
die_if_kernel("Privileged operation", regs, code);
si_code = ILL_PRVOPC;
goto give_sigill;
case 11:
/* Privileged register trap */
if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
/* This is a MFCTL cr26/cr27 to gr instruction.
* PCXS traps on this, so we need to emulate it.
*/
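			/* Bit 0x00200000 of the IIR selects the source
			 * register (set: cr27, clear: cr26); the low five
			 * bits give the destination GR, matching the
			 * extraction below. */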
if (regs->iir & 0x00200000)
regs->gr[regs->iir & 0x1f] = mfctl(27);
else
regs->gr[regs->iir & 0x1f] = mfctl(26);
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
regs->iasq[0] = regs->iasq[1];
return;
}
die_if_kernel("Privileged register usage", regs, code);
si_code = ILL_PRVREG;
give_sigill:
force_sig_fault(SIGILL, si_code,
(void __user *) regs->iaoq[0]);
return;
case 12:
/* Overflow Trap, let the userland signal handler do the cleanup */
force_sig_fault(SIGFPE, FPE_INTOVF,
(void __user *) regs->iaoq[0]);
return;
case 13:
/* Conditional Trap
The condition succeeds in an instruction which traps
on condition */
if(user_mode(regs)){
/* Let userspace app figure it out from the insn pointed
* to by si_addr.
*/
force_sig_fault(SIGFPE, FPE_CONDTRAP,
(void __user *) regs->iaoq[0]);
return;
}
/* The kernel doesn't want to handle condition codes */
break;
case 14:
/* Assist Exception Trap, i.e. floating point exception. */
die_if_kernel("Floating point exception", regs, 0); /* quiet */
__inc_irq_stat(irq_fpassist_count);
handle_fpe(regs);
return;
case 15:
/* Data TLB miss fault/Data page fault */
fallthrough;
case 16:
/* Non-access instruction TLB miss fault */
/* The instruction TLB entry needed for the target address of the FIC
	   is absent, and hardware can't find it, so we get to clean up */
fallthrough;
case 17:
/* Non-access data TLB miss fault/Non-access data page fault */
/* FIXME:
Still need to add slow path emulation code here!
If the insn used a non-shadow register, then the tlb
handlers could not have their side-effect (e.g. probe
writing to a target register) emulated since rfir would
erase the changes to said register. Instead we have to
	   set up everything, call this function we are in, and emulate
by hand. Technically we need to emulate:
fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
*/
if (code == 17 && handle_nadtlb_fault(regs))
return;
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
/* Check for unaligned access */
if (check_unaligned(regs)) {
handle_unaligned(regs);
return;
}
fallthrough;
case 26:
/* PCXL: Data memory access rights trap */
fault_address = regs->ior;
fault_space = regs->isr;
break;
case 19:
/* Data memory break trap */
regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
fallthrough;
case 21:
/* Page reference trap */
handle_gdb_break(regs, TRAP_HWBKPT);
return;
case 25:
/* Taken branch trap */
regs->gr[0] &= ~PSW_T;
if (user_space(regs))
handle_gdb_break(regs, TRAP_BRANCH);
/* else this must be the start of a syscall - just let it
* run.
*/
return;
case 7:
/* Instruction access rights */
/* PCXL: Instruction memory protection trap */
/*
* This could be caused by either: 1) a process attempting
* to execute within a vma that does not have execute
* permission, or 2) an access rights violation caused by a
* flush only translation set up by ptep_get_and_clear().
* So we check the vma permissions to differentiate the two.
* If the vma indicates we have execute permission, then
* the cause is the latter one. In this case, we need to
* call do_page_fault() to fix the problem.
*/
if (user_mode(regs)) {
struct vm_area_struct *vma;
mmap_read_lock(current->mm);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
mmap_read_unlock(current->mm);
break; /* call do_page_fault() */
}
mmap_read_unlock(current->mm);
}
/* CPU could not fetch instruction, so clear stale IIR value. */
regs->iir = 0xbaadf00d;
fallthrough;
case 27:
/* Data memory protection ID trap */
if (code == 27 && !user_mode(regs) &&
fixup_exception(regs))
return;
die_if_kernel("Protection id trap", regs, code);
force_sig_fault(SIGSEGV, SEGV_MAPERR,
(code == 7)?
((void __user *) regs->iaoq[0]) :
((void __user *) regs->ior));
return;
case 28:
/* Unaligned data reference trap */
handle_unaligned(regs);
return;
default:
if (user_mode(regs)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"handle_interruption() pid=%d command='%s'\n",
task_pid_nr(current), current->comm);
/* SIGBUS, for lack of a better one. */
force_sig_fault(SIGBUS, BUS_OBJERR,
(void __user *)regs->ior);
return;
}
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Unexpected interruption", regs, code, 0);
/* NOT REACHED */
}
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
parisc_printk_ratelimited(0, regs, KERN_DEBUG
"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
code, fault_space,
task_pid_nr(current), current->comm);
force_sig_fault(SIGSEGV, SEGV_MAPERR,
(void __user *)regs->ior);
return;
}
}
else {
/*
* The kernel should never fault on its own address space,
* unless pagefault_disable() was called before.
*/
if (faulthandler_disabled() || fault_space == 0)
{
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
return;
/* Clean up and return if handled by kfence. */
if (kfence_handle_page_fault(fault_address,
parisc_acctyp(code, regs->iir) == VM_WRITE, regs))
return;
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
}
}
do_page_fault(regs, code, fault_address);
}
static void __init initialize_ivt(const void *iva)
{
extern const u32 os_hpmc[];
int i;
u32 check = 0;
u32 *ivap;
u32 instr;
if (strcmp((const char *)iva, "cows can fly"))
panic("IVT invalid");
ivap = (u32 *)iva;
for (i = 0; i < 8; i++)
*ivap++ = 0;
/*
* Use PDC_INSTR firmware function to get instruction that invokes
* PDCE_CHECK in HPMC handler. See programming note at page 1-31 of
* the PA 1.1 Firmware Architecture document.
*/
if (pdc_instr(&instr) == PDC_OK)
ivap[0] = instr;
/*
* Rules for the checksum of the HPMC handler:
* 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
* its own IVA).
* 2. The word at IVA + 32 is nonzero.
* 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
* Address (IVA + 56) are word-aligned.
* 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
* the Length/4 words starting at Address is zero.
*/
/* Setup IVA and compute checksum for HPMC handler */
ivap[6] = (u32)__pa(os_hpmc);
for (i=0; i<8; i++)
check += ivap[i];
ivap[5] = -check;
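	/*
	 * The zeroing loop above advanced ivap to IVA + 32, so ivap[5] is
	 * the checksum word at IVA + 52.  Storing the negated sum makes the
	 * eight words starting at IVA + 32 wrap to zero modulo 2^32,
	 * satisfying rule 4 when Length (ivap[7]) is zero, provided the
	 * checksum slot itself was zero when it was summed.
	 */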
pr_debug("initialize_ivt: IVA[6] = 0x%08x\n", ivap[6]);
}
/* early_trap_init() is called before we set up kernel mappings and
* write-protect the kernel */
void __init early_trap_init(void)
{
extern const void fault_vector_20;
#ifndef CONFIG_64BIT
extern const void fault_vector_11;
initialize_ivt(&fault_vector_11);
#endif
initialize_ivt(&fault_vector_20);
}
| linux-master | arch/parisc/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Parisc performance counters
* Copyright (C) 2001 Randolph Chung <[email protected]>
*
* This code is derived, with permission, from HP/UX sources.
*/
/*
* Edited comment from original sources:
*
* This driver programs the PCX-U/PCX-W performance counters
 * on the PA-RISC 2.0 chips. The driver now keeps all images
 * internal to the kernel to help eliminate the possibility
* of a bad image halting the CPU. Also, there are different
* images for the PCX-W and later chips vs the PCX-U chips.
*
* Only 1 process is allowed to access the driver at any time,
* so the only protection that is needed is at open and close.
* A variable "perf_enabled" is used to hold the state of the
* driver. The spinlock "perf_lock" is used to protect the
* modification of the state during open/close operations so
* multiple processes don't get into the driver simultaneously.
*
* This driver accesses the processor directly vs going through
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
* on every box.
*/
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h> /* for __raw_read() */
#include "perf_images.h"
#define MAX_RDR_WORDS 24
#define PERF_VERSION 2 /* derived from hpux's PI v2 interface */
/* definition of RDR regs */
struct rdr_tbl_ent {
uint16_t width;
uint8_t num_words;
uint8_t write_control;
};
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static DEFINE_SPINLOCK(perf_lock);
static struct parisc_device *cpu_device __read_mostly;
/* RDRs to write for PCX-W */
static const int perf_rdrs_W[] =
{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
/* RDRs to write for PCX-U */
static const int perf_rdrs_U[] =
{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
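/*
 * Both lists are terminated by -1; perf_write_image() walks a list until
 * it reaches that sentinel.
 */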
/* RDR register descriptions for PCX-W */
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 16, 1, 16 }, /* RDR 1 */
{ 72, 2, 0 }, /* RDR 2 */
{ 81, 2, 0 }, /* RDR 3 */
{ 328, 6, 0 }, /* RDR 4 */
{ 160, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 164, 3, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 35, 1, 0 }, /* RDR 9 */
{ 6, 1, 0 }, /* RDR 10 */
{ 18, 1, 0 }, /* RDR 11 */
{ 13, 1, 0 }, /* RDR 12 */
{ 8, 1, 0 }, /* RDR 13 */
{ 8, 1, 0 }, /* RDR 14 */
{ 8, 1, 0 }, /* RDR 15 */
{ 1530, 24, 0 }, /* RDR 16 */
{ 16, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 152, 3, 24 }, /* RDR 20 */
{ 152, 3, 24 }, /* RDR 21 */
{ 233, 4, 48 }, /* RDR 22 */
{ 233, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 11, 1, 0 }, /* RDR 26 */
{ 18, 1, 0 }, /* RDR 27 */
{ 128, 2, 0 }, /* RDR 28 */
{ 0, 0, 0 }, /* RDR 29 */
{ 16, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/* RDR register descriptions for PCX-U */
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
{ 19, 1, 8 }, /* RDR 0 */
{ 32, 1, 16 }, /* RDR 1 */
{ 20, 1, 0 }, /* RDR 2 */
{ 0, 0, 0 }, /* RDR 3 */
{ 344, 6, 0 }, /* RDR 4 */
{ 176, 3, 0 }, /* RDR 5 */
{ 336, 6, 0 }, /* RDR 6 */
{ 0, 0, 0 }, /* RDR 7 */
{ 0, 0, 0 }, /* RDR 8 */
{ 0, 0, 0 }, /* RDR 9 */
{ 28, 1, 0 }, /* RDR 10 */
{ 33, 1, 0 }, /* RDR 11 */
{ 0, 0, 0 }, /* RDR 12 */
{ 230, 4, 0 }, /* RDR 13 */
{ 32, 1, 0 }, /* RDR 14 */
{ 128, 2, 0 }, /* RDR 15 */
{ 1494, 24, 0 }, /* RDR 16 */
{ 18, 1, 0 }, /* RDR 17 */
{ 4, 1, 0 }, /* RDR 18 */
{ 0, 0, 0 }, /* RDR 19 */
{ 158, 3, 24 }, /* RDR 20 */
{ 158, 3, 24 }, /* RDR 21 */
{ 194, 4, 48 }, /* RDR 22 */
{ 194, 4, 48 }, /* RDR 23 */
{ 71, 2, 0 }, /* RDR 24 */
{ 71, 2, 0 }, /* RDR 25 */
{ 28, 1, 0 }, /* RDR 26 */
{ 33, 1, 0 }, /* RDR 27 */
{ 88, 2, 0 }, /* RDR 28 */
{ 32, 1, 0 }, /* RDR 29 */
{ 24, 1, 0 }, /* RDR 30 */
{ 16, 1, 0 }, /* RDR 31 */
};
/*
* A non-zero write_control in the above tables is a byte offset into
* this array.
*/
static const uint64_t perf_bitmasks[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (152 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (233 bits) */
0xfffffffffffffffful,
0xfffffffffffffffcul,
0xff00000000000000ul
};
/*
 * Write control bitmasks for the PA-8700 (Piranha) processor,
 * where some of the field layouts have changed slightly.
*/
static const uint64_t perf_bitmasks_piranha[] = {
0x0000000000000000ul, /* first dbl word must be zero */
0xfdffe00000000000ul, /* RDR0 bitmask */
0x003f000000000000ul, /* RDR1 bitmask */
0x00fffffffffffffful, /* RDR20-RDR21 bitmask (158 bits) */
0xfffffffffffffffful,
0xfffffffc00000000ul,
0xfffffffffffffffful, /* RDR22-RDR23 bitmask (210 bits) */
0xfffffffffffffffful,
0xfffffffffffffffful,
0xfffc000000000000ul
};
static const uint64_t *bitmask_array; /* array of bitmasks to use */
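/*
 * Worked example: RDR 0 has write_control == 8 in both tables, so
 * perf_write_image() starts at bitmask_array[8 >> 3] == bitmask_array[1],
 * the RDR0 bitmask; RDR 20 (write_control == 24) starts at index 3, the
 * RDR20-RDR21 mask.
 */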
/******************************************************************************
* Function Prototypes
*****************************************************************************/
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
/* External Assembly Routines */
extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters (void);
extern void perf_intrigue_disable_perf_counters (void);
/******************************************************************************
* Function Definitions
*****************************************************************************/
/*
* configure:
*
* Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
{
long error;
uint32_t raddr[4];
/* Stop the counters*/
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
return -EINVAL;
}
printk("Preparing to write image\n");
/* Write the image to the chip */
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
return -EINVAL;
}
printk("Preparing to start counters\n");
/* Start the counters */
perf_start_counters();
return sizeof(uint32_t);
}
/*
* Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
static int perf_open(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
if (perf_enabled) {
spin_unlock(&perf_lock);
return -EBUSY;
}
perf_enabled = 1;
spin_unlock(&perf_lock);
return 0;
}
/*
* Close the device.
*/
static int perf_release(struct inode *inode, struct file *file)
{
spin_lock(&perf_lock);
perf_enabled = 0;
spin_unlock(&perf_lock);
return 0;
}
/*
* Read does nothing for this driver
*/
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
return 0;
}
/*
* write:
*
* This routine downloads the image to the chip. It must be
* called on the processor that the download should happen
* on.
*/
static ssize_t perf_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
size_t image_size __maybe_unused;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
else
return -EFAULT;
if (!perfmon_capable())
return -EACCES;
if (count != sizeof(uint32_t))
return -EIO;
if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
return -EFAULT;
/* Get the interface type and test type */
interface_type = (image_type >> 16) & 0xffff;
test = (image_type & 0xffff);
/* Make sure everything makes sense */
/* First check the machine type is correct for
the requested image */
if (((perf_processor_interface == CUDA_INTF) &&
(interface_type != CUDA_INTF)) ||
((perf_processor_interface == ONYX_INTF) &&
(interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
((interface_type == ONYX_INTF) &&
(test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
}
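/*
 * Hypothetical example of the encoding decoded above: writing the 32-bit
 * value (CUDA_INTF << 16) | 2 from userspace requests cuda_images[2] on a
 * PCX-W class machine (assuming 2 < MAX_CUDA_IMAGES).
 */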
/*
* Patch the images that need to know the IVA addresses.
*/
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
extern void $i_itlb_miss_2_0();
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
/*
* We can only use the lower 32-bits, the upper 32-bits should be 0
* anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
if (perf_processor_interface == ONYX_INTF) {
		/* clear low byte */
onyx_images[TLBMISS][15] &= 0xffffff00;
		/* set low byte */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
		/* clear low byte */
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		/* set low byte */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
		/* clear low byte */
onyx_images[BIG_CPI][15] &= 0xffffff00;
		/* set low byte */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[BIG_CPI][17] = itlb_addr;
onyx_images[PANIC][15] &= 0xffffff00; /* clear last 2 bytes */
onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
/* Unknown type */
}
#endif
}
/*
* ioctl routine
 * All routines affect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long error_start;
uint32_t raddr[4];
int error = 0;
switch (cmd) {
case PA_PERF_ON:
/* Start the counters */
perf_start_counters();
break;
case PA_PERF_OFF:
error_start = perf_stop_counters(raddr);
if (error_start != 0) {
printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
error = -EFAULT;
break;
}
/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
}
break;
case PA_PERF_VERSION:
/* Return the version # */
		error = put_user(PERF_VERSION, (int __user *)arg);
break;
default:
error = -ENOTTY;
}
return error;
}
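/*
 * Sketch of hypothetical userspace usage; the device node path is an
 * assumption, not something this driver creates itself:
 *
 *	uint32_t counts[4];
 *	int fd = open("/dev/perf", O_RDONLY);
 *
 *	ioctl(fd, PA_PERF_ON);
 *	(run the workload to be measured)
 *	if (ioctl(fd, PA_PERF_OFF, counts) == 0)
 *		counts[0..3] hold the four 32-bit counter values.
 */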
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.open = perf_open,
.release = perf_release
};
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
&perf_fops
};
/*
* Initialize the module
*/
static int __init perf_init(void)
{
int ret;
/* Determine correct processor interface to use */
bitmask_array = perf_bitmasks;
if (boot_cpu_data.cpu_type == pcxu ||
boot_cpu_data.cpu_type == pcxu_) {
perf_processor_interface = ONYX_INTF;
} else if (boot_cpu_data.cpu_type == pcxw ||
boot_cpu_data.cpu_type == pcxw_ ||
boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako ||
boot_cpu_data.cpu_type == mako2) {
perf_processor_interface = CUDA_INTF;
if (boot_cpu_data.cpu_type == pcxw2 ||
boot_cpu_data.cpu_type == mako ||
boot_cpu_data.cpu_type == mako2)
bitmask_array = perf_bitmasks_piranha;
} else {
perf_processor_interface = UNKNOWN_INTF;
printk("Performance monitoring counters not supported on this processor\n");
return -ENODEV;
}
ret = misc_register(&perf_dev);
if (ret) {
printk(KERN_ERR "Performance monitoring counters: "
"cannot register misc device.\n");
return ret;
}
/* Patch the images to match the system */
perf_patch_images();
	/* TODO: this only lets us access the first CPU; what to do for SMP? */
cpu_device = per_cpu(cpu_data, 0).dev;
printk("Performance monitoring counters enabled for %s\n",
per_cpu(cpu_data, 0).dev->name);
return 0;
}
device_initcall(perf_init);
/*
* perf_start_counters(void)
*
* Start the counters.
*/
static void perf_start_counters(void)
{
/* Enable performance monitor counters */
perf_intrigue_enable_perf_counters();
}
/*
* perf_stop_counters
*
* Stop the performance counters and save counts
* in a per_processor array.
*/
static int perf_stop_counters(uint32_t *raddr)
{
uint64_t userbuf[MAX_RDR_WORDS];
/* Disable performance counters */
perf_intrigue_disable_perf_counters();
if (perf_processor_interface == ONYX_INTF) {
uint64_t tmp64;
/*
* Read the counters
*/
if (!perf_rdr_read_ubuf(16, userbuf))
			return -EACCES;
/* Counter0 is bits 1398 to 1429 */
tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
/* OR sticky0 (bit 1430) to counter0 bit 32 */
tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
raddr[0] = (uint32_t)tmp64;
/* Counter1 is bits 1431 to 1462 */
tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
/* OR sticky1 (bit 1463) to counter1 bit 32 */
tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
raddr[1] = (uint32_t)tmp64;
/* Counter2 is bits 1464 to 1495 */
tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
raddr[3] = (uint32_t)tmp64;
/*
* Zero out the counters
*/
/*
* The counters and sticky-bits comprise the last 132 bits
* (1398 - 1529) of RDR16 on a U chip. We'll zero these
* out the easy way: zero out last 10 bits of dword 21,
* all of dword 22 and 58 bits (plus 6 don't care bits) of
* dword 23.
*/
userbuf[21] &= 0xfffffffffffffc00ul; /* 0 to last 10 bits */
userbuf[22] = 0;
userbuf[23] = 0;
/*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
perf_rdr_write(16, userbuf);
} else {
/*
* Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -EACCES;
}
/*
* Clear out the counters
*/
perf_rdr_clear(15);
/*
* Copy the counters
*/
raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
return 0;
}
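/*
 * Note on perf_stop_counters(): in both branches raddr[0..3] end up
 * holding the four 32-bit counter values; on the ONYX path each
 * counter's sticky bit is also folded into its top bit.
 */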
/*
* perf_rdr_get_entry
*
* Retrieve a pointer to the description of what this
* RDR contains.
*/
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
{
if (perf_processor_interface == ONYX_INTF) {
return &perf_rdr_tbl_U[rdr_num];
} else {
return &perf_rdr_tbl_W[rdr_num];
}
}
/*
* perf_rdr_read_ubuf
*
* Read the RDR value into the buffer specified.
*/
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
uint64_t data, data_mask = 0;
uint32_t width, xbits, i;
const struct rdr_tbl_ent *tentry;
tentry = perf_rdr_get_entry(rdr_num);
if ((width = tentry->width) == 0)
return 0;
/* Clear out buffer */
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
}
	/* Handle a width that is not an even multiple of 64 bits */
if ((xbits = width & 0x03f) != 0) {
data_mask = 1;
data_mask <<= (64 - xbits);
data_mask--;
}
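	/*
	 * Example: RDR 0 is 19 bits wide, so xbits == 19 and data_mask
	 * covers the low 64 - 19 == 45 bits, the part of each shifted-in
	 * doubleword that spills into the preceding buffer word below.
	 */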
/* Grab all of the data */
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
data = perf_rdr_shift_in_U(rdr_num, width);
} else {
data = perf_rdr_shift_in_W(rdr_num, width);
}
if (xbits) {
buffer[i] |= (data << (64 - xbits));
if (i) {
buffer[i-1] |= ((data >> xbits) & data_mask);
}
} else {
buffer[i] = data;
}
}
return 1;
}
/*
* perf_rdr_clear
*
* Zero out the given RDR register
*/
static int perf_rdr_clear(uint32_t rdr_num)
{
const struct rdr_tbl_ent *tentry;
int32_t i;
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0) {
return -1;
}
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, 0UL);
} else {
perf_rdr_shift_out_W(rdr_num, 0UL);
}
}
return 0;
}
/*
* perf_write_image
*
* Write the given image out to the processor
*/
static int perf_write_image(uint64_t *memaddr)
{
uint64_t buffer[MAX_RDR_WORDS];
uint64_t *bptr;
uint32_t dwords;
const uint32_t *intrigue_rdr;
const uint64_t *intrigue_bitmask;
uint64_t tmp64;
void __iomem *runway;
const struct rdr_tbl_ent *tentry;
int i;
/* Clear out counters */
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_clear(16);
/* Toggle performance monitor */
perf_intrigue_enable_perf_counters();
perf_intrigue_disable_perf_counters();
intrigue_rdr = perf_rdrs_U;
} else {
perf_rdr_clear(15);
intrigue_rdr = perf_rdrs_W;
}
/* Write all RDRs */
while (*intrigue_rdr != -1) {
tentry = perf_rdr_get_entry(*intrigue_rdr);
perf_rdr_read_ubuf(*intrigue_rdr, buffer);
bptr = &buffer[0];
dwords = tentry->num_words;
if (tentry->write_control) {
intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
while (dwords--) {
tmp64 = *intrigue_bitmask & *memaddr++;
tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
*bptr++ = tmp64;
}
} else {
while (dwords--) {
*bptr++ = *memaddr++;
}
}
perf_rdr_write(*intrigue_rdr, buffer);
intrigue_rdr++;
}
/*
* Now copy out the Runway stuff which is not in RDRs
*/
if (cpu_device == NULL)
{
printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
return -1;
}
runway = ioremap(cpu_device->hpa.start, 4096);
if (!runway) {
pr_err("perf_write_image: ioremap failed!\n");
return -ENOMEM;
}
/* Merge intrigue bits into Runway STATUS 0 */
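	/* The two masks are complementary in the top 16 bits
	 * (0xffec | 0x0013 == 0xffff), so this read-modify-write replaces
	 * only the intrigue bits and preserves the rest of the register. */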
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
return 0;
}
/*
* perf_rdr_write
*
* Write the given RDR register with the contents
* of the given buffer.
*/
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
const struct rdr_tbl_ent *tentry;
int32_t i;
printk("perf_rdr_write\n");
tentry = perf_rdr_get_entry(rdr_num);
if (tentry->width == 0) { return; }
i = tentry->num_words;
while (i--) {
if (perf_processor_interface == ONYX_INTF) {
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
}
}
printk("perf_rdr_write done\n");
}
| linux-master | arch/parisc/kernel/perf.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hardware descriptions for HP 9000 based hardware, including
* system types, SCSI controllers, DMA controllers, HPPB controllers
* and lots more.
*
* Based on the document "PA-RISC 1.1 I/O Firmware Architecture
* Reference Specification", March 7, 1999, version 0.96. This
* is available at
* https://parisc.wiki.kernel.org/index.php/Technical_Documentation
*
* Copyright 1999 by Alex deVries <[email protected]>
* and copyright 1999 The Puffin Group Inc.
*/
#include <asm/hardware.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
/*
* HP PARISC Hardware Database
 * Access to this database is only possible during bootup,
 * so don't reference this table after starting the init process.
*/
static struct hp_hardware hp_hardware_list[] __initdata = {
{HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"},
{HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"},
{HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"},
{HPHW_NPROC,0xB,0x4,0x01,"Technical Shogun (845, 645)"},
{HPHW_NPROC,0xF,0x4,0x01,"Commercial Shogun (949)"},
{HPHW_NPROC,0xC,0x4,0x01,"Cheetah (850, 950)"},
{HPHW_NPROC,0x80,0x4,0x01,"Cheetah (950S)"},
{HPHW_NPROC,0x81,0x4,0x01,"Jaguar (855, 955)"},
{HPHW_NPROC,0x82,0x4,0x01,"Cougar (860, 960)"},
{HPHW_NPROC,0x83,0x4,0x13,"Panther (865, 870, 980)"},
{HPHW_NPROC,0x100,0x4,0x01,"Burgundy (810)"},
{HPHW_NPROC,0x101,0x4,0x01,"SilverFox Low (822, 922)"},
{HPHW_NPROC,0x102,0x4,0x01,"SilverFox High (832, 932)"},
{HPHW_NPROC,0x103,0x4,0x01,"Lego, SilverLite (815, 808, 920)"},
{HPHW_NPROC,0x104,0x4,0x03,"SilverBullet Low (842, 948)"},
{HPHW_NPROC,0x105,0x4,0x03,"SilverBullet High (852, 958)"},
{HPHW_NPROC,0x106,0x4,0x81,"Oboe"},
{HPHW_NPROC,0x180,0x4,0x12,"Dragon"},
{HPHW_NPROC,0x181,0x4,0x13,"Chimera (890, 990, 992)"},
{HPHW_NPROC,0x182,0x4,0x91,"TNT 100 (891,T500)"},
{HPHW_NPROC,0x183,0x4,0x91,"TNT 120 (892,T520)"},
{HPHW_NPROC,0x184,0x4,0x91,"Jade 180 U (893,T540)"},
{HPHW_NPROC,0x1FF,0x4,0x91,"Hitachi X Processor"},
{HPHW_NPROC,0x200,0x4,0x81,"Cobra (720)"},
{HPHW_NPROC,0x201,0x4,0x81,"Coral (750)"},
{HPHW_NPROC,0x202,0x4,0x81,"King Cobra (730)"},
{HPHW_NPROC,0x203,0x4,0x81,"Hardball (735/99)"},
{HPHW_NPROC,0x204,0x4,0x81,"Coral II (755/99)"},
{HPHW_NPROC,0x205,0x4,0x81,"Coral II (755/125)"},
{HPHW_NPROC,0x205,0x4,0x91,"Snake Eagle "},
{HPHW_NPROC,0x206,0x4,0x81,"Snake Cheetah (735/130)"},
{HPHW_NPROC,0x280,0x4,0x81,"Nova Low (817, 827, 957, 957LX)"},
{HPHW_NPROC,0x281,0x4,0x81,"Nova High (837, 847, 857, 967, 967LX)"},
{HPHW_NPROC,0x282,0x4,0x81,"Nova8 (807, 917, 917LX, 927,927LX, 937, 937LX, 947,947LX)"},
{HPHW_NPROC,0x283,0x4,0x81,"Nova64 (867, 877, 977)"},
{HPHW_NPROC,0x284,0x4,0x81,"TNova (887, 897, 987)"},
{HPHW_NPROC,0x285,0x4,0x81,"TNova64"},
{HPHW_NPROC,0x286,0x4,0x91,"Hydra64 (Nova)"},
{HPHW_NPROC,0x287,0x4,0x91,"Hydra96 (Nova)"},
{HPHW_NPROC,0x288,0x4,0x81,"TNova96"},
{HPHW_NPROC,0x300,0x4,0x81,"Bushmaster (710)"},
{HPHW_NPROC,0x302,0x4,0x81,"Flounder (705)"},
{HPHW_NPROC,0x310,0x4,0x81,"Scorpio (715/50)"},
{HPHW_NPROC,0x311,0x4,0x81,"Scorpio Jr.(715/33)"},
{HPHW_NPROC,0x312,0x4,0x81,"Strider-50 (715S/50)"},
{HPHW_NPROC,0x313,0x4,0x81,"Strider-33 (715S/33)"},
{HPHW_NPROC,0x314,0x4,0x81,"Trailways-50 (715T/50)"},
{HPHW_NPROC,0x315,0x4,0x81,"Trailways-33 (715T/33)"},
{HPHW_NPROC,0x316,0x4,0x81,"Scorpio Sr.(715/75)"},
{HPHW_NPROC,0x317,0x4,0x81,"Scorpio 100 (715/100)"},
{HPHW_NPROC,0x318,0x4,0x81,"Spectra (725/50)"},
{HPHW_NPROC,0x319,0x4,0x81,"Spectra (725/75)"},
{HPHW_NPROC,0x320,0x4,0x81,"Spectra (725/100)"},
{HPHW_NPROC,0x401,0x4,0x81,"Pace (745i, 747i)"},
{HPHW_NPROC,0x402,0x4,0x81,"Sidewinder (742i)"},
{HPHW_NPROC,0x403,0x4,0x81,"Fast Pace"},
{HPHW_NPROC,0x480,0x4,0x81,"Orville (E23)"},
{HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"},
{HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"},
{HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"},
{HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
{HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"},
{HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"},
{HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"},
{HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"},
{HPHW_NPROC,0x504,0x4,0x81,"Merlin L2+ 180 (9000/778/B180L)"},
{HPHW_NPROC,0x505,0x4,0x81,"Raven L2 132 (9000/778/C132L)"},
{HPHW_NPROC,0x506,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
{HPHW_NPROC,0x507,0x4,0x81,"Raven L2 180 (9000/779/C180L)"},
{HPHW_NPROC,0x508,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
{HPHW_NPROC,0x509,0x4,0x81,"712/132 L2 Upgrade"},
{HPHW_NPROC,0x50A,0x4,0x81,"712/160 L2 Upgrade"},
{HPHW_NPROC,0x50B,0x4,0x81,"715/132 L2 Upgrade"},
{HPHW_NPROC,0x50C,0x4,0x81,"715/160 L2 Upgrade"},
{HPHW_NPROC,0x50D,0x4,0x81,"Rocky2 L2 120"},
{HPHW_NPROC,0x50E,0x4,0x81,"Rocky2 L2 150"},
{HPHW_NPROC,0x50F,0x4,0x81,"Anole L2 132 (744)"},
{HPHW_NPROC,0x510,0x4,0x81,"Anole L2 165 (744)"},
{HPHW_NPROC,0x511,0x4,0x81,"Kiji L2 132"},
{HPHW_NPROC,0x512,0x4,0x81,"UL L2 132 (803/D220,D320)"},
{HPHW_NPROC,0x513,0x4,0x81,"UL L2 160 (813/D220,D320)"},
{HPHW_NPROC,0x514,0x4,0x81,"Merlin Jr L2 132"},
{HPHW_NPROC,0x515,0x4,0x81,"Staccato L2 132"},
{HPHW_NPROC,0x516,0x4,0x81,"Staccato L2 180 (A Class 180)"},
{HPHW_NPROC,0x580,0x4,0x81,"KittyHawk DC2-100 (K100)"},
{HPHW_NPROC,0x581,0x4,0x91,"KittyHawk DC3-120 (K210)"},
{HPHW_NPROC,0x582,0x4,0x91,"KittyHawk DC3 100 (K400)"},
{HPHW_NPROC,0x583,0x4,0x91,"KittyHawk DC3 120 (K410)"},
{HPHW_NPROC,0x584,0x4,0x91,"LighteningHawk T120"},
{HPHW_NPROC,0x585,0x4,0x91,"SkyHawk 100"},
{HPHW_NPROC,0x586,0x4,0x91,"SkyHawk 120"},
{HPHW_NPROC,0x587,0x4,0x81,"UL Proc 1-way T'120"},
{HPHW_NPROC,0x588,0x4,0x91,"UL Proc 2-way T'120"},
{HPHW_NPROC,0x589,0x4,0x81,"UL Proc 1-way T'100 (821/D250,D350)"},
{HPHW_NPROC,0x58A,0x4,0x91,"UL Proc 2-way T'100 (831/D250,D350)"},
{HPHW_NPROC,0x58B,0x4,0x91,"KittyHawk DC2 100 (K200)"},
{HPHW_NPROC,0x58C,0x4,0x91,"ThunderHawk DC3- 120 1M (K220)"},
{HPHW_NPROC,0x58D,0x4,0x91,"ThunderHawk DC3 120 1M (K420)"},
{HPHW_NPROC,0x58E,0x4,0x81,"Raven 120 T'"},
{HPHW_NPROC,0x58F,0x4,0x91,"Mohawk 160 U 1M DC3 (K450)"},
{HPHW_NPROC,0x590,0x4,0x91,"Mohawk 180 U 1M DC3 (K460)"},
{HPHW_NPROC,0x591,0x4,0x91,"Mohawk 200 U 1M DC3"},
{HPHW_NPROC,0x592,0x4,0x81,"Raven 100 T'"},
{HPHW_NPROC,0x593,0x4,0x91,"FireHawk 160 U"},
{HPHW_NPROC,0x594,0x4,0x91,"FireHawk 180 U"},
{HPHW_NPROC,0x595,0x4,0x91,"FireHawk 220 U"},
{HPHW_NPROC,0x596,0x4,0x91,"FireHawk 240 U"},
{HPHW_NPROC,0x597,0x4,0x91,"SPP2000 processor"},
{HPHW_NPROC,0x598,0x4,0x81,"Raven U 230 (9000/780/C230)"},
{HPHW_NPROC,0x599,0x4,0x81,"Raven U 240 (9000/780/C240)"},
{HPHW_NPROC,0x59A,0x4,0x91,"Unlisted but reserved"},
{HPHW_NPROC,0x59A,0x4,0x81,"Unlisted but reserved"},
{HPHW_NPROC,0x59B,0x4,0x81,"Raven U 160 (9000/780/C160)"},
{HPHW_NPROC,0x59C,0x4,0x81,"Raven U 180 (9000/780/C180)"},
{HPHW_NPROC,0x59D,0x4,0x81,"Raven U 200 (9000/780/C200)"},
{HPHW_NPROC,0x59E,0x4,0x91,"ThunderHawk T' 120"},
{HPHW_NPROC,0x59F,0x4,0x91,"Raven U 180+ (9000/780)"},
{HPHW_NPROC,0x5A0,0x4,0x81,"UL 1w T120 1MB/1MB (841/D260,D360)"},
{HPHW_NPROC,0x5A1,0x4,0x91,"UL 2w T120 1MB/1MB (851/D260,D360)"},
{HPHW_NPROC,0x5A2,0x4,0x81,"UL 1w U160 512K/512K (861/D270,D370)"},
{HPHW_NPROC,0x5A3,0x4,0x91,"UL 2w U160 512K/512K (871/D270,D370)"},
{HPHW_NPROC,0x5A4,0x4,0x91,"Mohawk 160 U 1M DC3- (K250)"},
{HPHW_NPROC,0x5A5,0x4,0x91,"Mohawk 180 U 1M DC3- (K260)"},
{HPHW_NPROC,0x5A6,0x4,0x91,"Mohawk 200 U 1M DC3-"},
{HPHW_NPROC,0x5A7,0x4,0x81,"UL proc 1-way U160 1M/1M"},
{HPHW_NPROC,0x5A8,0x4,0x91,"UL proc 2-way U160 1M/1M"},
{HPHW_NPROC,0x5A9,0x4,0x81,"UL proc 1-way U180 1M/1M"},
{HPHW_NPROC,0x5AA,0x4,0x91,"UL proc 2-way U180 1M/1M"},
{HPHW_NPROC,0x5AB,0x4,0x91,"Obsolete"},
{HPHW_NPROC,0x5AB,0x4,0x81,"Obsolete"},
{HPHW_NPROC,0x5AC,0x4,0x91,"Obsolete"},
{HPHW_NPROC,0x5AC,0x4,0x81,"Obsolete"},
{HPHW_NPROC,0x5AD,0x4,0x91,"BraveHawk 180MHz DC3-"},
{HPHW_NPROC,0x5AE,0x4,0x91,"BraveHawk 200MHz DC3- (898/K370)"},
{HPHW_NPROC,0x5AF,0x4,0x91,"BraveHawk 220MHz DC3-"},
{HPHW_NPROC,0x5B0,0x4,0x91,"BraveHawk 180MHz DC3"},
{HPHW_NPROC,0x5B1,0x4,0x91,"BraveHawk 200MHz DC3 (899/K570)"},
{HPHW_NPROC,0x5B2,0x4,0x91,"BraveHawk 220MHz DC3"},
{HPHW_NPROC,0x5B3,0x4,0x91,"FireHawk 200"},
{HPHW_NPROC,0x5B4,0x4,0x91,"SPP2500"},
{HPHW_NPROC,0x5B5,0x4,0x91,"SummitHawk U+"},
{HPHW_NPROC,0x5B6,0x4,0x91,"DragonHawk U+ 240 DC3"},
{HPHW_NPROC,0x5B7,0x4,0x91,"DragonHawk U+ 240 DC3-"},
{HPHW_NPROC,0x5B8,0x4,0x91,"SPP2250 240 MHz"},
{HPHW_NPROC,0x5B9,0x4,0x81,"UL 1w U+/240 (350/550)"},
{HPHW_NPROC,0x5BA,0x4,0x91,"UL 2w U+/240 (350/550)"},
{HPHW_NPROC,0x5BB,0x4,0x81,"AllegroHigh W"},
{HPHW_NPROC,0x5BC,0x4,0x91,"AllegroLow W"},
{HPHW_NPROC,0x5BD,0x4,0x91,"Forte W 2-way"},
{HPHW_NPROC,0x5BE,0x4,0x91,"Prelude W"},
{HPHW_NPROC,0x5BF,0x4,0x91,"Forte W 4-way"},
{HPHW_NPROC,0x5C0,0x4,0x91,"M2250"},
{HPHW_NPROC,0x5C1,0x4,0x91,"M2500"},
{HPHW_NPROC,0x5C2,0x4,0x91,"Sonata 440"},
{HPHW_NPROC,0x5C3,0x4,0x91,"Sonata 360"},
{HPHW_NPROC,0x5C4,0x4,0x91,"Rhapsody 440"},
{HPHW_NPROC,0x5C5,0x4,0x91,"Rhapsody 360"},
{HPHW_NPROC,0x5C6,0x4,0x91,"Raven W 360 (9000/780)"},
{HPHW_NPROC,0x5C7,0x4,0x91,"Halfdome W 440"},
{HPHW_NPROC,0x5C8,0x4,0x81,"Lego 360 processor"},
{HPHW_NPROC,0x5C9,0x4,0x91,"Rhapsody DC- 440"},
{HPHW_NPROC,0x5CA,0x4,0x91,"Rhapsody DC- 360"},
{HPHW_NPROC,0x5CB,0x4,0x91,"Crescendo 440"},
{HPHW_NPROC,0x5CC,0x4,0x91,"Prelude W 440"},
{HPHW_NPROC,0x5CD,0x4,0x91,"SPP2600"},
{HPHW_NPROC,0x5CE,0x4,0x91,"M2600"},
{HPHW_NPROC,0x5CF,0x4,0x81,"Allegro W+"},
{HPHW_NPROC,0x5D0,0x4,0x81,"Kazoo W+"},
{HPHW_NPROC,0x5D1,0x4,0x91,"Forte W+ 2w"},
{HPHW_NPROC,0x5D2,0x4,0x91,"Forte W+ 4w"},
{HPHW_NPROC,0x5D3,0x4,0x91,"Prelude W+ 540"},
{HPHW_NPROC,0x5D4,0x4,0x91,"Duet W+"},
{HPHW_NPROC,0x5D5,0x4,0x91,"Crescendo 550"},
{HPHW_NPROC,0x5D6,0x4,0x81,"Crescendo DC- 440"},
{HPHW_NPROC,0x5D7,0x4,0x91,"Keystone W+"},
{HPHW_NPROC,0x5D8,0x4,0x91,"Rhapsody wave 2 W+ DC-"},
{HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"},
{HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"},
{HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"},
{HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"},
{HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
{HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
{HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
{HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+ (rp5470)"},
{HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
{HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
{HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
{HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"},
{HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"},
{HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"},
{HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
{HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
{HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
{HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
{HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
{HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
{HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
{HPHW_NPROC,0x602,0x4,0x81,"Gecko 100 (712/100)"},
{HPHW_NPROC,0x603,0x4,0x81,"Anole 64 (743/64)"},
{HPHW_NPROC,0x604,0x4,0x81,"Anole 100 (743/100)"},
{HPHW_NPROC,0x605,0x4,0x81,"Gecko 120 (712/120)"},
{HPHW_NPROC,0x606,0x4,0x81,"Gila 80"},
{HPHW_NPROC,0x607,0x4,0x81,"Gila 100"},
{HPHW_NPROC,0x608,0x4,0x81,"Gila 120"},
{HPHW_NPROC,0x609,0x4,0x81,"Scorpio-L 80"},
{HPHW_NPROC,0x60A,0x4,0x81,"Mirage Jr (715/64)"},
{HPHW_NPROC,0x60B,0x4,0x81,"Mirage 100"},
{HPHW_NPROC,0x60C,0x4,0x81,"Mirage 100+"},
{HPHW_NPROC,0x60D,0x4,0x81,"Electra 100"},
{HPHW_NPROC,0x60E,0x4,0x81,"Electra 120"},
{HPHW_NPROC,0x610,0x4,0x81,"Scorpio-L 100"},
{HPHW_NPROC,0x611,0x4,0x81,"Scorpio-L 120"},
{HPHW_NPROC,0x612,0x4,0x81,"Spectra-L 80"},
{HPHW_NPROC,0x613,0x4,0x81,"Spectra-L 100"},
{HPHW_NPROC,0x614,0x4,0x81,"Spectra-L 120"},
{HPHW_NPROC,0x615,0x4,0x81,"Piranha 100"},
{HPHW_NPROC,0x616,0x4,0x81,"Piranha 120"},
{HPHW_NPROC,0x617,0x4,0x81,"Jason 50"},
{HPHW_NPROC,0x618,0x4,0x81,"Jason 100"},
{HPHW_NPROC,0x619,0x4,0x81,"Mirage 80"},
{HPHW_NPROC,0x61A,0x4,0x81,"SAIC L-80"},
{HPHW_NPROC,0x61B,0x4,0x81,"Rocky1 L-60"},
{HPHW_NPROC,0x61C,0x4,0x81,"Anole T (743/T)"},
{HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
{HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
{HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
{HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
{HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
{HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
{HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
{HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
{HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
{HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
{HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
{HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
{HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
{HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak Slow"},
{HPHW_NPROC,0x88B,0x4,0x91,"Crestone Peak Fast"},
{HPHW_NPROC,0x88C,0x4,0x91,"Orca Mako+"},
{HPHW_NPROC,0x88D,0x4,0x91,"Rainier/Medel Mako+ Slow"},
{HPHW_NPROC,0x88E,0x4,0x91,"Rainier/Medel Mako+ Fast"},
{HPHW_NPROC,0x892,0x4,0x91,"Mt. Hamilton Slow Mako+"},
{HPHW_NPROC,0x894,0x4,0x91,"Mt. Hamilton Fast Mako+"},
{HPHW_NPROC,0x895,0x4,0x91,"Storm Peak Slow Mako+"},
{HPHW_NPROC,0x896,0x4,0x91,"Storm Peak Fast Mako+"},
{HPHW_NPROC,0x897,0x4,0x91,"Storm Peak DC- Slow Mako+"},
{HPHW_NPROC,0x898,0x4,0x91,"Storm Peak DC- Fast Mako+"},
{HPHW_NPROC,0x899,0x4,0x91,"Mt. Hamilton Slow Mako+"},
{HPHW_NPROC,0x89B,0x4,0x91,"Crestone Peak Mako+ Slow"},
{HPHW_NPROC,0x89C,0x4,0x91,"Crestone Peak Mako+ Fast"},
{HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"},
{HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"},
{HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"},
{HPHW_A_DIRECT, 0x007, 0x0000D, 0x00, "Dino AP"},
{HPHW_A_DIRECT, 0x009, 0x0000D, 0x00, "Solaris Direct Connect MUX (J2092A)"},
{HPHW_A_DIRECT, 0x00A, 0x0000D, 0x00, "Solaris RS-422/423 MUX (J2093A)"},
{HPHW_A_DIRECT, 0x00B, 0x0000D, 0x00, "Solaris RS-422/423 Quadriloops MUX"},
{HPHW_A_DIRECT, 0x00C, 0x0000D, 0x00, "Solaris Modem MUX (J2094A)"},
{HPHW_A_DIRECT, 0x00D, 0x0000D, 0x00, "Twins Direct Connect MUX"},
{HPHW_A_DIRECT, 0x00E, 0x0000D, 0x00, "Twins Modem MUX"},
{HPHW_A_DIRECT, 0x00F, 0x0000D, 0x00, "Nautilus RS-485"},
{HPHW_A_DIRECT, 0x010, 0x0000D, 0x00, "UltraLight CAP/MUX"},
{HPHW_A_DIRECT, 0x015, 0x0000D, 0x00, "Eole CAP/MUX"},
{HPHW_A_DIRECT, 0x024, 0x0000D, 0x00, "Sahp Kiuh AP/MUX"},
{HPHW_A_DIRECT, 0x034, 0x0000D, 0x00, "Sahp Kiuh Low AP/MUX"},
{HPHW_A_DIRECT, 0x044, 0x0000D, 0x00, "Sahp Baat Kiuh AP/MUX"},
{HPHW_A_DIRECT, 0x004, 0x0000E, 0x80, "Burgundy RS-232"},
{HPHW_A_DIRECT, 0x005, 0x0000E, 0x80, "Silverfox RS-232"},
{HPHW_A_DIRECT, 0x006, 0x0000E, 0x80, "Lego RS-232"},
{HPHW_A_DIRECT, 0x004, 0x0000F, 0x00, "Peacock Graphics"},
{HPHW_A_DIRECT, 0x004, 0x00014, 0x80, "Burgundy HIL"},
{HPHW_A_DIRECT, 0x005, 0x00014, 0x80, "Peacock HIL"},
{HPHW_A_DIRECT, 0x004, 0x00015, 0x80, "Leonardo"},
{HPHW_A_DIRECT, 0x004, 0x00016, 0x80, "HP-PB HRM"},
{HPHW_A_DIRECT, 0x004, 0x00017, 0x80, "HP-PB HRC"},
{HPHW_A_DIRECT, 0x004, 0x0003A, 0x80, "Skunk Centronics (28655A)"},
{HPHW_A_DIRECT, 0x024, 0x0003A, 0x80, "Sahp Kiuh Centronics"},
{HPHW_A_DIRECT, 0x044, 0x0003A, 0x80, "Sahp Baat Kiuh Centronics"},
{HPHW_A_DIRECT, 0x004, 0x0004E, 0x80, "AT&T DataKit (AMSO)"},
{HPHW_A_DIRECT, 0x004, 0x0009B, 0x80, "Test&Meas GSC HPIB"},
{HPHW_A_DIRECT, 0x004, 0x000A8, 0x00, "Rocky2-120 Front Keyboard"},
{HPHW_A_DIRECT, 0x005, 0x000A8, 0x00, "Rocky2-150 Front Keyboard"},
{HPHW_A_DIRECT, 0x004, 0x00101, 0x80, "Hitachi Console Module"},
{HPHW_A_DIRECT, 0x004, 0x00102, 0x80, "Hitachi Boot Module"},
{HPHW_A_DIRECT, 0x004, 0x00203, 0x80, "MELCO HBMLA MLAIT"},
{HPHW_A_DIRECT, 0x004, 0x00208, 0x80, "MELCO HBDPC"},
{HPHW_A_DIRECT, 0x004, 0x00300, 0x00, "DCI TWINAX TERM IO MUX"},
{HPHW_A_DMA, 0x004, 0x00039, 0x80, "Skunk SCSI (28655A)"},
{HPHW_A_DMA, 0x005, 0x00039, 0x80, "KittyHawk CSY Core SCSI"},
{HPHW_A_DMA, 0x014, 0x00039, 0x80, "Diablo SCSI"},
{HPHW_A_DMA, 0x024, 0x00039, 0x80, "Sahp Kiuh SCSI"},
{HPHW_A_DMA, 0x034, 0x00039, 0x80, "Sahp Kiuh Low SCSI"},
{HPHW_A_DMA, 0x044, 0x00039, 0x80, "Sahp Baat Kiuh SCSI"},
{HPHW_A_DMA, 0x004, 0x0003B, 0x80, "Wizard SCSI"},
{HPHW_A_DMA, 0x005, 0x0003B, 0x80, "KittyHawk CSY Core FW-SCSI"},
{HPHW_A_DMA, 0x006, 0x0003B, 0x80, "Symbios EPIC FW-SCSI"},
{HPHW_A_DMA, 0x004, 0x00040, 0x80, "HP-PB Shazam HPIB (28650A)"},
{HPHW_A_DMA, 0x005, 0x00040, 0x80, "Burgundy HPIB"},
{HPHW_A_DMA, 0x004, 0x00041, 0x80, "HP-PB HP-FL"},
{HPHW_A_DMA, 0x004, 0x00042, 0x80, "HP-PB LoQuix HPIB (28650B)"},
{HPHW_A_DMA, 0x004, 0x00043, 0x80, "HP-PB Crypt LoQuix"},
{HPHW_A_DMA, 0x004, 0x00044, 0x80, "HP-PB Shazam GPIO (28651A)"},
{HPHW_A_DMA, 0x004, 0x00045, 0x80, "HP-PB LoQuix GPIO"},
{HPHW_A_DMA, 0x004, 0x00046, 0x80, "2-Port X.25 NIO_ACC (AMSO)"},
{HPHW_A_DMA, 0x004, 0x00047, 0x80, "4-Port X.25 NIO_ACC (AMSO)"},
{HPHW_A_DMA, 0x004, 0x0004B, 0x80, "LGB Control"},
{HPHW_A_DMA, 0x004, 0x0004C, 0x80, "Martian RTI (AMSO)"},
{HPHW_A_DMA, 0x004, 0x0004D, 0x80, "ACC Mux (AMSO)"},
{HPHW_A_DMA, 0x004, 0x00050, 0x80, "Lanbrusca 802.3 (36967A)"},
{HPHW_A_DMA, 0x004, 0x00056, 0x80, "HP-PB LoQuix FDDI"},
{HPHW_A_DMA, 0x004, 0x00057, 0x80, "HP-PB LoQuix FDDI (28670A)"},
{HPHW_A_DMA, 0x004, 0x0005E, 0x00, "Gecko Add-on Token Ring"},
{HPHW_A_DMA, 0x012, 0x00089, 0x80, "Barracuda Add-on FW-SCSI"},
{HPHW_A_DMA, 0x013, 0x00089, 0x80, "Bluefish Add-on FW-SCSI"},
{HPHW_A_DMA, 0x014, 0x00089, 0x80, "Shrike Add-on FW-SCSI"},
{HPHW_A_DMA, 0x015, 0x00089, 0x80, "KittyHawk GSY Core FW-SCSI"},
{HPHW_A_DMA, 0x017, 0x00089, 0x80, "Shrike Jade Add-on FW-SCSI (A3644A)"},
{HPHW_A_DMA, 0x01F, 0x00089, 0x80, "SkyHawk 100/120 FW-SCSI"},
{HPHW_A_DMA, 0x027, 0x00089, 0x80, "Piranha 100 FW-SCSI"},
{HPHW_A_DMA, 0x032, 0x00089, 0x80, "Raven T' Core FW-SCSI"},
{HPHW_A_DMA, 0x03B, 0x00089, 0x80, "Raven U/L2 Core FW-SCSI"},
{HPHW_A_DMA, 0x03C, 0x00089, 0x80, "Merlin 132 Core FW-SCSI"},
{HPHW_A_DMA, 0x03D, 0x00089, 0x80, "Merlin 160 Core FW-SCSI"},
{HPHW_A_DMA, 0x044, 0x00089, 0x80, "Mohawk Core FW-SCSI"},
{HPHW_A_DMA, 0x051, 0x00089, 0x80, "Firehawk FW-SCSI"},
{HPHW_A_DMA, 0x058, 0x00089, 0x80, "FireHawk 200 FW-SCSI"},
{HPHW_A_DMA, 0x05C, 0x00089, 0x80, "SummitHawk 230 Ultra-SCSI"},
{HPHW_A_DMA, 0x014, 0x00091, 0x80, "Baby Hugo Add-on Net FC (A3406A)"},
{HPHW_A_DMA, 0x020, 0x00091, 0x80, "Baby Jade Add-on Net FC (A3638A)"},
{HPHW_A_DMA, 0x004, 0x00092, 0x80, "GSC+ YLIASTER ATM"},
{HPHW_A_DMA, 0x004, 0x00095, 0x80, "Hamlyn GSC+ Network Card"},
{HPHW_A_DMA, 0x004, 0x00098, 0x80, "Lo-fat Emulator"},
{HPHW_A_DMA, 0x004, 0x0009A, 0x80, "GSC+ Venus ATM"},
{HPHW_A_DMA, 0x005, 0x0009A, 0x80, "GSC+ Samorobrive ATM"},
{HPHW_A_DMA, 0x004, 0x0009D, 0x80, "HP HSC-PCI Cards"},
{HPHW_A_DMA, 0x004, 0x0009E, 0x80, "Alaxis GSC+ 155Mb ATM"},
{HPHW_A_DMA, 0x005, 0x0009E, 0x80, "Alaxis GSC+ 622Mb ATM"},
{HPHW_A_DMA, 0x05C, 0x0009F, 0x80, "SummitHawk 230 USB"},
{HPHW_A_DMA, 0x05C, 0x000A0, 0x80, "SummitHawk 230 100BaseT"},
{HPHW_A_DMA, 0x015, 0x000A7, 0x80, "Baby Hugo Add-on mass FC (A3404A)"},
{HPHW_A_DMA, 0x018, 0x000A7, 0x80, "Mombasa GS Add-on mass FC (A3591)"},
{HPHW_A_DMA, 0x021, 0x000A7, 0x80, "Baby Jade Add-on mass FC (A3636A)"},
{HPHW_A_DMA, 0x004, 0x00201, 0x80, "MELCO HCMAP"},
{HPHW_A_DMA, 0x004, 0x00202, 0x80, "MELCO HBMLA MLAMA"},
{HPHW_A_DMA, 0x004, 0x00205, 0x80, "MELCO HBRFU"},
{HPHW_A_DMA, 0x004, 0x00380, 0x80, "Interphase NIO-FC"},
{HPHW_A_DMA, 0x004, 0x00381, 0x80, "Interphase NIO-ATM"},
{HPHW_A_DMA, 0x004, 0x00382, 0x80, "Interphase NIO-100BaseTX"},
{HPHW_BA, 0x004, 0x00070, 0x0, "Cobra Core BA"},
{HPHW_BA, 0x005, 0x00070, 0x0, "Coral Core BA"},
{HPHW_BA, 0x006, 0x00070, 0x0, "Bushmaster Core BA"},
{HPHW_BA, 0x007, 0x00070, 0x0, "Scorpio Core BA"},
{HPHW_BA, 0x008, 0x00070, 0x0, "Flounder Core BA"},
{HPHW_BA, 0x009, 0x00070, 0x0, "Outfield Core BA"},
{HPHW_BA, 0x00A, 0x00070, 0x0, "CoralII Core BA"},
{HPHW_BA, 0x00B, 0x00070, 0x0, "Scorpio Jr. Core BA"},
{HPHW_BA, 0x00C, 0x00070, 0x0, "Strider-50 Core BA"},
{HPHW_BA, 0x00D, 0x00070, 0x0, "Strider-33 Core BA"},
{HPHW_BA, 0x00E, 0x00070, 0x0, "Trailways-50 Core BA"},
{HPHW_BA, 0x00F, 0x00070, 0x0, "Trailways-33 Core BA"},
{HPHW_BA, 0x010, 0x00070, 0x0, "Pace Core BA"},
{HPHW_BA, 0x011, 0x00070, 0x0, "Sidewinder Core BA"},
{HPHW_BA, 0x019, 0x00070, 0x0, "Scorpio Sr. Core BA"},
{HPHW_BA, 0x020, 0x00070, 0x0, "Scorpio 100 Core BA"},
{HPHW_BA, 0x021, 0x00070, 0x0, "Spectra 50 Core BA"},
{HPHW_BA, 0x022, 0x00070, 0x0, "Spectra 75 Core BA"},
{HPHW_BA, 0x023, 0x00070, 0x0, "Spectra 100 Core BA"},
{HPHW_BA, 0x024, 0x00070, 0x0, "Fast Pace Core BA"},
{HPHW_BA, 0x026, 0x00070, 0x0, "CoralII Jaguar Core BA"},
{HPHW_BA, 0x004, 0x00076, 0x0, "Cobra EISA BA"},
{HPHW_BA, 0x005, 0x00076, 0x0, "Coral EISA BA"},
{HPHW_BA, 0x007, 0x00076, 0x0, "Scorpio EISA BA"},
{HPHW_BA, 0x00A, 0x00076, 0x0, "CoralII EISA BA"},
{HPHW_BA, 0x00B, 0x00076, 0x0, "Scorpio Jr. EISA BA"},
{HPHW_BA, 0x00C, 0x00076, 0x0, "Strider-50 Core EISA"},
{HPHW_BA, 0x00D, 0x00076, 0x0, "Strider-33 Core EISA"},
{HPHW_BA, 0x00E, 0x00076, 0x0, "Trailways-50 Core EISA"},
{HPHW_BA, 0x00F, 0x00076, 0x0, "Trailways-33 Core EISA"},
{HPHW_BA, 0x010, 0x00076, 0x0, "Pace Core EISA"},
{HPHW_BA, 0x019, 0x00076, 0x0, "Scorpio Sr. EISA BA"},
{HPHW_BA, 0x020, 0x00076, 0x0, "Scorpio 100 EISA BA"},
{HPHW_BA, 0x021, 0x00076, 0x0, "Spectra 50 EISA BA"},
{HPHW_BA, 0x022, 0x00076, 0x0, "Spectra 75 EISA BA"},
{HPHW_BA, 0x023, 0x00076, 0x0, "Spectra 100 EISA BA"},
{HPHW_BA, 0x026, 0x00076, 0x0, "CoralII Jaguar EISA BA"},
{HPHW_BA, 0x010, 0x00078, 0x0, "Pace VME BA"},
{HPHW_BA, 0x011, 0x00078, 0x0, "Sidewinder VME BA"},
{HPHW_BA, 0x01A, 0x00078, 0x0, "Anole 64 VME BA"},
{HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"},
{HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"},
{HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"},
{HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"},
{HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"},
{HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"},
{HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"},
{HPHW_BA, 0x013, 0x00081, 0x0, "Wilbur UX Core BA"},
{HPHW_BA, 0x014, 0x00081, 0x0, "WB-80 Core BA"},
{HPHW_BA, 0x015, 0x00081, 0x0, "KittyHawk GSY Core BA"},
{HPHW_BA, 0x016, 0x00081, 0x0, "Gecko Core BA"},
{HPHW_BA, 0x018, 0x00081, 0x0, "Gecko Optional BA"},
{HPHW_BA, 0x01A, 0x00081, 0x0, "Anole 64 Core BA"},
{HPHW_BA, 0x01B, 0x00081, 0x0, "Anole 100 Core BA"},
{HPHW_BA, 0x01C, 0x00081, 0x0, "Gecko 80 Core BA"},
{HPHW_BA, 0x01D, 0x00081, 0x0, "Gecko 100 Core BA"},
{HPHW_BA, 0x01F, 0x00081, 0x0, "SkyHawk 100/120 Core BA"},
{HPHW_BA, 0x027, 0x00081, 0x0, "Piranha 100 Core BA"},
{HPHW_BA, 0x028, 0x00081, 0x0, "Mirage Jr Core BA"},
{HPHW_BA, 0x029, 0x00081, 0x0, "Mirage Core BA"},
{HPHW_BA, 0x02A, 0x00081, 0x0, "Electra Core BA"},
{HPHW_BA, 0x02B, 0x00081, 0x0, "Mirage 80 Core BA"},
{HPHW_BA, 0x02C, 0x00081, 0x0, "Mirage 100+ Core BA"},
{HPHW_BA, 0x02E, 0x00081, 0x0, "UL 350 Lasi Core BA"},
{HPHW_BA, 0x02F, 0x00081, 0x0, "UL 550 Lasi Core BA"},
{HPHW_BA, 0x032, 0x00081, 0x0, "Raven T' Core BA"},
{HPHW_BA, 0x033, 0x00081, 0x0, "Anole T Core BA"},
{HPHW_BA, 0x034, 0x00081, 0x0, "SAIC L-80 Core BA"},
{HPHW_BA, 0x035, 0x00081, 0x0, "PCX-L2 712/132 Core BA"},
{HPHW_BA, 0x036, 0x00081, 0x0, "PCX-L2 712/160 Core BA"},
{HPHW_BA, 0x03B, 0x00081, 0x0, "Raven U/L2 Core BA"},
{HPHW_BA, 0x03C, 0x00081, 0x0, "Merlin 132 Core BA"},
{HPHW_BA, 0x03D, 0x00081, 0x0, "Merlin 160 Core BA"},
{HPHW_BA, 0x03E, 0x00081, 0x0, "Merlin+ 132 Core BA"},
{HPHW_BA, 0x03F, 0x00081, 0x0, "Merlin+ 180 Core BA"},
{HPHW_BA, 0x044, 0x00081, 0x0, "Mohawk Core BA"},
{HPHW_BA, 0x045, 0x00081, 0x0, "Rocky1 Core BA"},
{HPHW_BA, 0x046, 0x00081, 0x0, "Rocky2 120 Core BA"},
{HPHW_BA, 0x047, 0x00081, 0x0, "Rocky2 150 Core BA"},
{HPHW_BA, 0x04B, 0x00081, 0x0, "Anole L2 132 Core BA"},
{HPHW_BA, 0x04D, 0x00081, 0x0, "Anole L2 165 Core BA"},
{HPHW_BA, 0x04E, 0x00081, 0x0, "Kiji L2 132 Core BA"},
{HPHW_BA, 0x050, 0x00081, 0x0, "Merlin Jr 132 Core BA"},
{HPHW_BA, 0x051, 0x00081, 0x0, "Firehawk Core BA"},
{HPHW_BA, 0x056, 0x00081, 0x0, "Raven+ w SE FWSCSI Core BA"},
{HPHW_BA, 0x057, 0x00081, 0x0, "Raven+ w Diff FWSCSI Core BA"},
{HPHW_BA, 0x058, 0x00081, 0x0, "FireHawk 200 Core BA"},
{HPHW_BA, 0x05C, 0x00081, 0x0, "SummitHawk 230 Core BA"},
{HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 132 Core BA"},
{HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 180 Core BA"},
{HPHW_BA, 0x05F, 0x00081, 0x0, "Staccato 180 Lasi"},
{HPHW_BA, 0x800, 0x00081, 0x0, "Hitachi Tiny 64 Core BA"},
{HPHW_BA, 0x801, 0x00081, 0x0, "Hitachi Tiny 80 Core BA"},
{HPHW_BA, 0x004, 0x0008B, 0x0, "Anole Optional PCMCIA BA"},
{HPHW_BA, 0x004, 0x0008E, 0x0, "GSC ITR Wax BA"},
{HPHW_BA, 0x00C, 0x0008E, 0x0, "Gecko Optional Wax BA"},
{HPHW_BA, 0x010, 0x0008E, 0x0, "Pace Wax BA"},
{HPHW_BA, 0x011, 0x0008E, 0x0, "SuperPace Wax BA"},
{HPHW_BA, 0x012, 0x0008E, 0x0, "Mirage Jr Wax BA"},
{HPHW_BA, 0x013, 0x0008E, 0x0, "Mirage Wax BA"},
{HPHW_BA, 0x014, 0x0008E, 0x0, "Electra Wax BA"},
{HPHW_BA, 0x017, 0x0008E, 0x0, "Raven Backplane Wax BA"},
{HPHW_BA, 0x01E, 0x0008E, 0x0, "Raven T' Wax BA"},
{HPHW_BA, 0x01F, 0x0008E, 0x0, "SkyHawk Wax BA"},
{HPHW_BA, 0x023, 0x0008E, 0x0, "Rocky1 Wax BA"},
{HPHW_BA, 0x02B, 0x0008E, 0x0, "Mirage 80 Wax BA"},
{HPHW_BA, 0x02C, 0x0008E, 0x0, "Mirage 100+ Wax BA"},
{HPHW_BA, 0x030, 0x0008E, 0x0, "UL 350 Core Wax BA"},
{HPHW_BA, 0x031, 0x0008E, 0x0, "UL 550 Core Wax BA"},
{HPHW_BA, 0x034, 0x0008E, 0x0, "SAIC L-80 Wax BA"},
{HPHW_BA, 0x03A, 0x0008E, 0x0, "Merlin+ Wax BA"},
{HPHW_BA, 0x040, 0x0008E, 0x0, "Merlin 132 Wax BA"},
{HPHW_BA, 0x041, 0x0008E, 0x0, "Merlin 160 Wax BA"},
{HPHW_BA, 0x043, 0x0008E, 0x0, "Merlin 132/160 Wax BA"},
{HPHW_BA, 0x052, 0x0008E, 0x0, "Raven+ Hi Power Backplane w/EISA Wax BA"},
{HPHW_BA, 0x054, 0x0008E, 0x0, "Raven+ Lo Power Backplane w/EISA Wax BA"},
{HPHW_BA, 0x059, 0x0008E, 0x0, "FireHawk 200 Wax BA"},
{HPHW_BA, 0x05A, 0x0008E, 0x0, "Raven+ L2 Backplane w/EISA Wax BA"},
{HPHW_BA, 0x05D, 0x0008E, 0x0, "SummitHawk Wax BA"},
{HPHW_BA, 0x800, 0x0008E, 0x0, "Hitachi Tiny 64 Wax BA"},
{HPHW_BA, 0x801, 0x0008E, 0x0, "Hitachi Tiny 80 Wax BA"},
{HPHW_BA, 0x011, 0x00090, 0x0, "SuperPace Wax EISA BA"},
{HPHW_BA, 0x017, 0x00090, 0x0, "Raven Backplane Wax EISA BA"},
{HPHW_BA, 0x01E, 0x00090, 0x0, "Raven T' Wax EISA BA"},
{HPHW_BA, 0x01F, 0x00090, 0x0, "SkyHawk 100/120 Wax EISA BA"},
{HPHW_BA, 0x027, 0x00090, 0x0, "Piranha 100 Wax EISA BA"},
{HPHW_BA, 0x028, 0x00090, 0x0, "Mirage Jr Wax EISA BA"},
{HPHW_BA, 0x029, 0x00090, 0x0, "Mirage Wax EISA BA"},
{HPHW_BA, 0x02A, 0x00090, 0x0, "Electra Wax EISA BA"},
{HPHW_BA, 0x02B, 0x00090, 0x0, "Mirage 80 Wax EISA BA"},
{HPHW_BA, 0x02C, 0x00090, 0x0, "Mirage 100+ Wax EISA BA"},
{HPHW_BA, 0x030, 0x00090, 0x0, "UL 350 Wax EISA BA"},
{HPHW_BA, 0x031, 0x00090, 0x0, "UL 550 Wax EISA BA"},
{HPHW_BA, 0x034, 0x00090, 0x0, "SAIC L-80 Wax EISA BA"},
{HPHW_BA, 0x03A, 0x00090, 0x0, "Merlin+ Wax EISA BA"},
{HPHW_BA, 0x040, 0x00090, 0x0, "Merlin 132 Wax EISA BA"},
{HPHW_BA, 0x041, 0x00090, 0x0, "Merlin 160 Wax EISA BA"},
{HPHW_BA, 0x043, 0x00090, 0x0, "Merlin 132/160 Wax EISA BA"},
{HPHW_BA, 0x052, 0x00090, 0x0, "Raven Hi Power Backplane Wax EISA BA"},
{HPHW_BA, 0x054, 0x00090, 0x0, "Raven Lo Power Backplane Wax EISA BA"},
{HPHW_BA, 0x059, 0x00090, 0x0, "FireHawk 200 Wax EISA BA"},
{HPHW_BA, 0x05A, 0x00090, 0x0, "Raven L2 Backplane Wax EISA BA"},
{HPHW_BA, 0x05D, 0x00090, 0x0, "SummitHawk Wax EISA BA"},
{HPHW_BA, 0x800, 0x00090, 0x0, "Hitachi Tiny 64 Wax EISA BA"},
{HPHW_BA, 0x801, 0x00090, 0x0, "Hitachi Tiny 80 Wax EISA BA"},
{HPHW_BA, 0x01A, 0x00093, 0x0, "Anole 64 TIMI BA"},
{HPHW_BA, 0x01B, 0x00093, 0x0, "Anole 100 TIMI BA"},
{HPHW_BA, 0x034, 0x00093, 0x0, "Anole T TIMI BA"},
{HPHW_BA, 0x04A, 0x00093, 0x0, "Anole L2 132 TIMI BA"},
{HPHW_BA, 0x04C, 0x00093, 0x0, "Anole L2 165 TIMI BA"},
{HPHW_BA, 0x582, 0x000A5, 0x00, "Epic PCI Bridge"},
{HPHW_BCPORT, 0x504, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
{HPHW_BCPORT, 0x505, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
{HPHW_BCPORT, 0x503, 0x0000C, 0x00, "Java BC GSC+ Port"},
{HPHW_BCPORT, 0x57F, 0x0000C, 0x00, "Hitachi Ghostview GSC+ Port"},
{HPHW_BCPORT, 0x501, 0x0000C, 0x00, "U2-IOA BC GSC+ Port"},
{HPHW_BCPORT, 0x502, 0x0000C, 0x00, "Uturn-IOA BC GSC+ Port"},
{HPHW_BCPORT, 0x780, 0x0000C, 0x00, "Astro BC Ropes Port"},
{HPHW_BCPORT, 0x506, 0x0000C, 0x00, "NEC-IOS BC HSC Port"},
{HPHW_BCPORT, 0x004, 0x0000C, 0x00, "Cheetah BC SMB Port"},
{HPHW_BCPORT, 0x006, 0x0000C, 0x00, "Cheetah BC MID_BUS Port"},
{HPHW_BCPORT, 0x005, 0x0000C, 0x00, "Condor BC MID_BUS Port"},
{HPHW_BCPORT, 0x100, 0x0000C, 0x00, "Condor BC HP-PB Port"},
{HPHW_BCPORT, 0x184, 0x0000C, 0x00, "Summit BC Port"},
{HPHW_BCPORT, 0x101, 0x0000C, 0x00, "Summit BC HP-PB Port"},
{HPHW_BCPORT, 0x102, 0x0000C, 0x00, "HP-PB Port (prefetch)"},
{HPHW_BCPORT, 0x500, 0x0000C, 0x00, "Gecko BOA BC GSC+ Port"},
{HPHW_BCPORT, 0x103, 0x0000C, 0x00, "Gecko BOA BC HP-PB Port"},
{HPHW_BCPORT, 0x507, 0x0000C, 0x00, "Keyaki BC GSC+ Port"},
{HPHW_BCPORT, 0x508, 0x0000C, 0x00, "Keyaki-DX BC GSC+ Port"},
{HPHW_BCPORT, 0x584, 0x0000C, 0x10, "DEW BC Runway Port"},
{HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"},
{HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"},
{HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus Interface Merced Bus1"},
{HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"},
{HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"},
{HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
{HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
{HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
{HPHW_BRIDGE, 0x05D, 0x0000A, 0x00, "SummitHawk Dino PCI Bridge"},
{HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
{HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
{HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
{HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"},
{HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"},
{HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"},
{HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"},
{HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"},
{HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"},
{HPHW_B_DMA, 0x004, 0x0002F, 0x80, "HP-PB Transit PSI (36960A)"},
{HPHW_B_DMA, 0x008, 0x00051, 0x80, "HP-PB Transit 802.3"},
{HPHW_B_DMA, 0x004, 0x00052, 0x80, "Miura LAN/Console (J2146A)"},
{HPHW_B_DMA, 0x008, 0x00058, 0x80, "HP-PB Transit 802.4"},
{HPHW_B_DMA, 0x005, 0x00060, 0x80, "KittyHawk CSY Core LAN/Console"},
{HPHW_B_DMA, 0x014, 0x00060, 0x80, "Diablo LAN/Console"},
{HPHW_B_DMA, 0x054, 0x00060, 0x80, "Countach LAN/Console"},
{HPHW_B_DMA, 0x004, 0x00094, 0x80, "KittyHawk GSC+ Exerciser"},
{HPHW_B_DMA, 0x004, 0x00100, 0x80, "HP-PB HF Interface"},
{HPHW_B_DMA, 0x000, 0x00206, 0x80, "MELCO HMPHA"},
{HPHW_B_DMA, 0x005, 0x00206, 0x80, "MELCO HMPHA_10"},
{HPHW_B_DMA, 0x006, 0x00206, 0x80, "MELCO HMQHA"},
{HPHW_B_DMA, 0x007, 0x00206, 0x80, "MELCO HMQHA_10"},
{HPHW_B_DMA, 0x004, 0x00207, 0x80, "MELCO HNDWA MDWS-70"},
{HPHW_CIO, 0x004, 0x00010, 0x00, "VLSI CIO"},
{HPHW_CIO, 0x005, 0x00010, 0x00, "Silverfox CIO"},
{HPHW_CIO, 0x006, 0x00010, 0x00, "Emerald CIO"},
{HPHW_CIO, 0x008, 0x00010, 0x00, "Discrete CIO"},
{HPHW_CONSOLE, 0x004, 0x0001C, 0x00, "Cheetah console"},
{HPHW_CONSOLE, 0x005, 0x0001C, 0x00, "Emerald console"},
{HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
{HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
{HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
{HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
{HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
{HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
{HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
{HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
{HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
{HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
{HPHW_FIO, 0x004, 0x00071, 0x0, "Cobra Core SCSI"},
{HPHW_FIO, 0x005, 0x00071, 0x0, "Coral Core SCSI"},
{HPHW_FIO, 0x006, 0x00071, 0x0, "Bushmaster Core SCSI"},
{HPHW_FIO, 0x007, 0x00071, 0x0, "Scorpio Core SCSI"},
{HPHW_FIO, 0x008, 0x00071, 0x0, "Flounder Core SCSI"},
{HPHW_FIO, 0x009, 0x00071, 0x0, "Outfield Core SCSI"},
{HPHW_FIO, 0x00A, 0x00071, 0x0, "CoralII Core SCSI"},
{HPHW_FIO, 0x00B, 0x00071, 0x0, "Scorpio Jr. Core SCSI"},
{HPHW_FIO, 0x00C, 0x00071, 0x0, "Strider-50 Core SCSI"},
{HPHW_FIO, 0x00D, 0x00071, 0x0, "Strider-33 Core SCSI"},
{HPHW_FIO, 0x00E, 0x00071, 0x0, "Trailways-50 Core SCSI"},
{HPHW_FIO, 0x00F, 0x00071, 0x0, "Trailways-33 Core SCSI"},
{HPHW_FIO, 0x010, 0x00071, 0x0, "Pace Core SCSI"},
{HPHW_FIO, 0x011, 0x00071, 0x0, "Sidewinder Core SCSI"},
{HPHW_FIO, 0x019, 0x00071, 0x0, "Scorpio Sr. Core SCSI"},
{HPHW_FIO, 0x020, 0x00071, 0x0, "Scorpio 100 Core SCSI"},
{HPHW_FIO, 0x021, 0x00071, 0x0, "Spectra 50 Core SCSI"},
{HPHW_FIO, 0x022, 0x00071, 0x0, "Spectra 75 Core SCSI"},
{HPHW_FIO, 0x023, 0x00071, 0x0, "Spectra 100 Core SCSI"},
{HPHW_FIO, 0x024, 0x00071, 0x0, "Fast Pace Core SCSI"},
{HPHW_FIO, 0x026, 0x00071, 0x0, "CoralII Jaguar Core SCSI"},
{HPHW_FIO, 0x004, 0x00072, 0x0, "Cobra Core LAN (802.3)"},
{HPHW_FIO, 0x005, 0x00072, 0x0, "Coral Core LAN (802.3)"},
{HPHW_FIO, 0x006, 0x00072, 0x0, "Bushmaster Core LAN (802.3)"},
{HPHW_FIO, 0x007, 0x00072, 0x0, "Scorpio Core LAN (802.3)"},
{HPHW_FIO, 0x008, 0x00072, 0x0, "Flounder Core LAN (802.3)"},
{HPHW_FIO, 0x009, 0x00072, 0x0, "Outfield Core LAN (802.3)"},
{HPHW_FIO, 0x00A, 0x00072, 0x0, "CoralII Core LAN (802.3)"},
{HPHW_FIO, 0x00B, 0x00072, 0x0, "Scorpio Jr. Core LAN (802.3)"},
{HPHW_FIO, 0x00C, 0x00072, 0x0, "Strider-50 Core LAN (802.3)"},
{HPHW_FIO, 0x00D, 0x00072, 0x0, "Strider-33 Core LAN (802.3)"},
{HPHW_FIO, 0x00E, 0x00072, 0x0, "Trailways-50 Core LAN (802.3)"},
{HPHW_FIO, 0x00F, 0x00072, 0x0, "Trailways-33 Core LAN (802.3)"},
{HPHW_FIO, 0x010, 0x00072, 0x0, "Pace Core LAN (802.3)"},
{HPHW_FIO, 0x011, 0x00072, 0x0, "Sidewinder Core LAN (802.3)"},
{HPHW_FIO, 0x019, 0x00072, 0x0, "Scorpio Sr. Core LAN (802.3)"},
{HPHW_FIO, 0x020, 0x00072, 0x0, "Scorpio 100 Core LAN (802.3)"},
{HPHW_FIO, 0x021, 0x00072, 0x0, "Spectra 50 Core LAN (802.3)"},
{HPHW_FIO, 0x022, 0x00072, 0x0, "Spectra 75 Core LAN (802.3)"},
{HPHW_FIO, 0x023, 0x00072, 0x0, "Spectra 100 Core LAN (802.3)"},
{HPHW_FIO, 0x024, 0x00072, 0x0, "Fast Pace Core LAN (802.3)"},
{HPHW_FIO, 0x026, 0x00072, 0x0, "CoralII Jaguar Core LAN (802.3)"},
{HPHW_FIO, 0x004, 0x00073, 0x0, "Cobra Core HIL"},
{HPHW_FIO, 0x005, 0x00073, 0x0, "Coral Core HIL"},
{HPHW_FIO, 0x006, 0x00073, 0x0, "Bushmaster Core HIL"},
{HPHW_FIO, 0x007, 0x00073, 0x0, "Scorpio Core HIL"},
{HPHW_FIO, 0x008, 0x00073, 0x0, "Flounder Core HIL"},
{HPHW_FIO, 0x009, 0x00073, 0x0, "Outfield Core HIL"},
{HPHW_FIO, 0x00A, 0x00073, 0x0, "CoralII Core HIL"},
{HPHW_FIO, 0x00B, 0x00073, 0x0, "Scorpio Jr. Core HIL"},
{HPHW_FIO, 0x00C, 0x00073, 0x0, "Strider-50 Core HIL"},
{HPHW_FIO, 0x00D, 0x00073, 0x0, "Strider-33 Core HIL"},
{HPHW_FIO, 0x00E, 0x00073, 0x0, "Trailways-50 Core HIL"},
{HPHW_FIO, 0x00F, 0x00073, 0x0, "Trailways-33 Core HIL"},
{HPHW_FIO, 0x010, 0x00073, 0x0, "Pace Core HIL"},
{HPHW_FIO, 0x011, 0x00073, 0xcc, "SuperPace Wax HIL"},
{HPHW_FIO, 0x012, 0x00073, 0x0, "Mirage Jr Wax HIL"},
{HPHW_FIO, 0x013, 0x00073, 0x0, "Mirage 100 Wax HIL"},
{HPHW_FIO, 0x014, 0x00073, 0x0, "Electra Wax HIL"},
{HPHW_FIO, 0x017, 0x00073, 0x0, "Raven Backplane Wax HIL"},
{HPHW_FIO, 0x019, 0x00073, 0x0, "Scorpio Sr. Core HIL"},
{HPHW_FIO, 0x01E, 0x00073, 0x0, "Raven T' Wax HIL"},
{HPHW_FIO, 0x01F, 0x00073, 0x0, "SkyHawk 100/120 Wax HIL"},
{HPHW_FIO, 0x020, 0x00073, 0x0, "Scorpio 100 Core HIL"},
{HPHW_FIO, 0x021, 0x00073, 0x0, "Spectra 50 Core HIL"},
{HPHW_FIO, 0x022, 0x00073, 0x0, "Spectra 75 Core HIL"},
{HPHW_FIO, 0x023, 0x00073, 0x0, "Spectra 100 Core HIL"},
{HPHW_FIO, 0x024, 0x00073, 0x0, "Fast Pace Core HIL"},
{HPHW_FIO, 0x026, 0x00073, 0x0, "CoralII Jaguar Core HIL"},
{HPHW_FIO, 0x02B, 0x00073, 0x0, "Mirage 80 Wax HIL"},
{HPHW_FIO, 0x02C, 0x00073, 0x0, "Mirage 100+ Wax HIL"},
{HPHW_FIO, 0x03A, 0x00073, 0x0, "Merlin+ Wax HIL"},
{HPHW_FIO, 0x040, 0x00073, 0x0, "Merlin 132 Wax HIL"},
{HPHW_FIO, 0x041, 0x00073, 0x0, "Merlin 160 Wax HIL"},
{HPHW_FIO, 0x043, 0x00073, 0x0, "Merlin 132/160 Wax HIL"},
{HPHW_FIO, 0x052, 0x00073, 0x0, "Raven+ Hi Power Backplane w/EISA Wax HIL"},
{HPHW_FIO, 0x053, 0x00073, 0x0, "Raven+ Hi Power Backplane wo/EISA Wax HIL"},
{HPHW_FIO, 0x054, 0x00073, 0x0, "Raven+ Lo Power Backplane w/EISA Wax HIL"},
{HPHW_FIO, 0x055, 0x00073, 0x0, "Raven+ Lo Power Backplane wo/EISA Wax HIL"},
{HPHW_FIO, 0x059, 0x00073, 0x0, "FireHawk 200 Wax HIL"},
{HPHW_FIO, 0x05A, 0x00073, 0x0, "Raven+ L2 Backplane w/EISA Wax HIL"},
{HPHW_FIO, 0x05B, 0x00073, 0x0, "Raven+ L2 Backplane wo/EISA Wax HIL"},
{HPHW_FIO, 0x05D, 0x00073, 0x0, "SummitHawk Wax HIL"},
{HPHW_FIO, 0x800, 0x00073, 0x0, "Hitachi Tiny 64 Wax HIL"},
{HPHW_FIO, 0x801, 0x00073, 0x0, "Hitachi Tiny 80 Wax HIL"},
{HPHW_FIO, 0x004, 0x00074, 0x0, "Cobra Core Centronics"},
{HPHW_FIO, 0x005, 0x00074, 0x0, "Coral Core Centronics"},
{HPHW_FIO, 0x006, 0x00074, 0x0, "Bushmaster Core Centronics"},
{HPHW_FIO, 0x007, 0x00074, 0x0, "Scorpio Core Centronics"},
{HPHW_FIO, 0x008, 0x00074, 0x0, "Flounder Core Centronics"},
{HPHW_FIO, 0x009, 0x00074, 0x0, "Outfield Core Centronics"},
{HPHW_FIO, 0x00A, 0x00074, 0x0, "CoralII Core Centronics"},
{HPHW_FIO, 0x00B, 0x00074, 0x0, "Scorpio Jr. Core Centronics"},
{HPHW_FIO, 0x00C, 0x00074, 0x0, "Strider-50 Core Centronics"},
{HPHW_FIO, 0x00D, 0x00074, 0x0, "Strider-33 Core Centronics"},
{HPHW_FIO, 0x00E, 0x00074, 0x0, "Trailways-50 Core Centronics"},
{HPHW_FIO, 0x00F, 0x00074, 0x0, "Trailways-33 Core Centronics"},
{HPHW_FIO, 0x010, 0x00074, 0x0, "Pace Core Centronics"},
{HPHW_FIO, 0x011, 0x00074, 0x0, "Sidewinder Core Centronics"},
{HPHW_FIO, 0x015, 0x00074, 0x0, "KittyHawk GSY Core Centronics"},
{HPHW_FIO, 0x016, 0x00074, 0x0, "Gecko Core Centronics"},
{HPHW_FIO, 0x019, 0x00074, 0x0, "Scorpio Sr. Core Centronics"},
{HPHW_FIO, 0x01A, 0x00074, 0x0, "Anole 64 Core Centronics"},
{HPHW_FIO, 0x01B, 0x00074, 0x0, "Anole 100 Core Centronics"},
{HPHW_FIO, 0x01C, 0x00074, 0x0, "Gecko 80 Core Centronics"},
{HPHW_FIO, 0x01D, 0x00074, 0x0, "Gecko 100 Core Centronics"},
{HPHW_FIO, 0x01F, 0x00074, 0x0, "SkyHawk 100/120 Core Centronics"},
{HPHW_FIO, 0x020, 0x00074, 0x0, "Scorpio 100 Core Centronics"},
{HPHW_FIO, 0x021, 0x00074, 0x0, "Spectra 50 Core Centronics"},
{HPHW_FIO, 0x022, 0x00074, 0x0, "Spectra 75 Core Centronics"},
{HPHW_FIO, 0x023, 0x00074, 0x0, "Spectra 100 Core Centronics"},
{HPHW_FIO, 0x024, 0x00074, 0x0, "Fast Pace Core Centronics"},
{HPHW_FIO, 0x026, 0x00074, 0x0, "CoralII Jaguar Core Centronics"},
{HPHW_FIO, 0x027, 0x00074, 0x0, "Piranha 100 Core Centronics"},
{HPHW_FIO, 0x028, 0x00074, 0x0, "Mirage Jr Core Centronics"},
{HPHW_FIO, 0x029, 0x00074, 0x0, "Mirage Core Centronics"},
{HPHW_FIO, 0x02A, 0x00074, 0x0, "Electra Core Centronics"},
{HPHW_FIO, 0x02B, 0x00074, 0x0, "Mirage 80 Core Centronics"},
{HPHW_FIO, 0x02C, 0x00074, 0x0, "Mirage 100+ Core Centronics"},
{HPHW_FIO, 0x02E, 0x00074, 0x0, "UL 350 Core Centronics"},
{HPHW_FIO, 0x02F, 0x00074, 0x0, "UL 550 Core Centronics"},
{HPHW_FIO, 0x032, 0x00074, 0x0, "Raven T' Core Centronics"},
{HPHW_FIO, 0x033, 0x00074, 0x0, "Anole T Core Centronics"},
{HPHW_FIO, 0x034, 0x00074, 0x0, "SAIC L-80 Core Centronics"},
{HPHW_FIO, 0x035, 0x00074, 0x0, "PCX-L2 712/132 Core Centronics"},
{HPHW_FIO, 0x036, 0x00074, 0x0, "PCX-L2 712/160 Core Centronics"},
{HPHW_FIO, 0x03B, 0x00074, 0x0, "Raven U/L2 Core Centronics"},
{HPHW_FIO, 0x03C, 0x00074, 0x0, "Merlin 132 Core Centronics"},
{HPHW_FIO, 0x03D, 0x00074, 0x0, "Merlin 160 Core Centronics"},
{HPHW_FIO, 0x03E, 0x00074, 0x0, "Merlin+ 132 Core Centronics"},
{HPHW_FIO, 0x03F, 0x00074, 0x0, "Merlin+ 180 Core Centronics"},
{HPHW_FIO, 0x044, 0x00074, 0x0, "Mohawk Core Centronics"},
{HPHW_FIO, 0x045, 0x00074, 0x0, "Rocky1 Core Centronics"},
{HPHW_FIO, 0x046, 0x00074, 0x0, "Rocky2 120 Core Centronics"},
{HPHW_FIO, 0x047, 0x00074, 0x0, "Rocky2 150 Core Centronics"},
{HPHW_FIO, 0x04B, 0x00074, 0x0, "Anole L2 132 Core Centronics"},
{HPHW_FIO, 0x04D, 0x00074, 0x0, "Anole L2 165 Core Centronics"},
{HPHW_FIO, 0x050, 0x00074, 0x0, "Merlin Jr 132 Core Centronics"},
{HPHW_FIO, 0x051, 0x00074, 0x0, "Firehawk Core Centronics"},
{HPHW_FIO, 0x056, 0x00074, 0x0, "Raven+ w SE FWSCSI Core Centronics"},
{HPHW_FIO, 0x057, 0x00074, 0x0, "Raven+ w Diff FWSCSI Core Centronics"},
{HPHW_FIO, 0x058, 0x00074, 0x0, "FireHawk 200 Core Centronics"},
{HPHW_FIO, 0x05C, 0x00074, 0x0, "SummitHawk 230 Core Centronics"},
{HPHW_FIO, 0x800, 0x00074, 0x0, "Hitachi Tiny 64 Core Centronics"},
{HPHW_FIO, 0x801, 0x00074, 0x0, "Hitachi Tiny 80 Core Centronics"},
{HPHW_FIO, 0x004, 0x00075, 0x0, "Cobra Core RS-232"},
{HPHW_FIO, 0x005, 0x00075, 0x0, "Coral Core RS-232"},
{HPHW_FIO, 0x006, 0x00075, 0x0, "Bushmaster Core RS-232"},
{HPHW_FIO, 0x007, 0x00075, 0x0, "Scorpio Core RS-232"},
{HPHW_FIO, 0x008, 0x00075, 0x0, "Flounder Core RS-232"},
{HPHW_FIO, 0x009, 0x00075, 0x0, "Outfield Core RS-232"},
{HPHW_FIO, 0x00A, 0x00075, 0x0, "CoralII Core RS-232"},
{HPHW_FIO, 0x00B, 0x00075, 0x0, "Scorpio Jr. Core RS-232"},
{HPHW_FIO, 0x00C, 0x00075, 0x0, "Strider-50 Core RS-232"},
{HPHW_FIO, 0x00D, 0x00075, 0x0, "Strider-33 Core RS-232"},
{HPHW_FIO, 0x00E, 0x00075, 0x0, "Trailways-50 Core RS-232"},
{HPHW_FIO, 0x00F, 0x00075, 0x0, "Trailways-33 Core RS-232"},
{HPHW_FIO, 0x010, 0x00075, 0x0, "Pace Core RS-232"},
{HPHW_FIO, 0x011, 0x00075, 0x0, "Sidewinder Core RS-232"},
{HPHW_FIO, 0x019, 0x00075, 0x0, "Scorpio Sr. Core RS-232"},
{HPHW_FIO, 0x020, 0x00075, 0x0, "Scorpio 100 Core RS-232"},
{HPHW_FIO, 0x021, 0x00075, 0x0, "Spectra 50 Core RS-232"},
{HPHW_FIO, 0x022, 0x00075, 0x0, "Spectra 75 Core RS-232"},
{HPHW_FIO, 0x023, 0x00075, 0x0, "Spectra 100 Core RS-232"},
{HPHW_FIO, 0x024, 0x00075, 0x0, "Fast Pace Core RS-232"},
{HPHW_FIO, 0x026, 0x00075, 0x0, "CoralII Jaguar Core RS-232"},
{HPHW_FIO, 0x004, 0x00077, 0x0, "Coral SGC Graphics"},
{HPHW_FIO, 0x005, 0x00077, 0x0, "Hyperdrive Optional Graphics"},
{HPHW_FIO, 0x006, 0x00077, 0x0, "Stinger Optional Graphics"},
{HPHW_FIO, 0x007, 0x00077, 0x0, "Scorpio Builtin Graphics"},
{HPHW_FIO, 0x008, 0x00077, 0x0, "Anole Hyperdrive Optional Graphics"},
{HPHW_FIO, 0x009, 0x00077, 0x0, "Thunder II graphics EISA form"},
{HPHW_FIO, 0x00A, 0x00077, 0x0, "Thunder II graphics GSA form"},
{HPHW_FIO, 0x00B, 0x00077, 0x0, "Scorpio Jr Builtin Graphics"},
{HPHW_FIO, 0x00C, 0x00077, 0x0, "Strider-50 SSC Graphics"},
{HPHW_FIO, 0x00D, 0x00077, 0x0, "Strider-33 SSC Graphics"},
{HPHW_FIO, 0x00E, 0x00077, 0x0, "Trailways-50 SSC Graphics"},
{HPHW_FIO, 0x00F, 0x00077, 0x0, "Trailways-33 SSC Graphics"},
{HPHW_FIO, 0x010, 0x00077, 0x0, "Pace SGC Graphics"},
{HPHW_FIO, 0x011, 0x00077, 0x0, "Mohawk Opt. 2D Graphics (Kid)"},
{HPHW_FIO, 0x012, 0x00077, 0x0, "Raven Opt. 2D Graphics (Goat)"},
{HPHW_FIO, 0x016, 0x00077, 0x0, "Lego 24 SGC Graphics"},
{HPHW_FIO, 0x017, 0x00077, 0x0, "Lego 24Z SGC Graphics"},
{HPHW_FIO, 0x018, 0x00077, 0x0, "Lego 48Z SGC Graphics"},
{HPHW_FIO, 0x019, 0x00077, 0x0, "Scorpio Sr Builtin Graphics"},
{HPHW_FIO, 0x020, 0x00077, 0x0, "Scorpio 100 Builtin Graphics"},
{HPHW_FIO, 0x021, 0x00077, 0x0, "Spectra 50 Builtin Graphics"},
{HPHW_FIO, 0x022, 0x00077, 0x0, "Spectra 75 Builtin Graphics"},
{HPHW_FIO, 0x023, 0x00077, 0x0, "Spectra 100 Builtin Graphics"},
{HPHW_FIO, 0x024, 0x00077, 0x0, "Fast Pace SGC Graphics"},
{HPHW_FIO, 0x006, 0x0007A, 0x0, "Bushmaster Audio"},
{HPHW_FIO, 0x008, 0x0007A, 0x0, "Flounder Audio"},
{HPHW_FIO, 0x004, 0x0007B, 0x0, "UL Optional Audio"},
{HPHW_FIO, 0x007, 0x0007B, 0x0, "Scorpio Audio"},
{HPHW_FIO, 0x00B, 0x0007B, 0x0, "Scorpio Jr. Audio"},
{HPHW_FIO, 0x00C, 0x0007B, 0x0, "Strider-50 Audio"},
{HPHW_FIO, 0x00D, 0x0007B, 0x0, "Strider-33 Audio"},
{HPHW_FIO, 0x00E, 0x0007B, 0x0, "Trailways-50 Audio"},
{HPHW_FIO, 0x00F, 0x0007B, 0x0, "Trailways-33 Audio"},
{HPHW_FIO, 0x015, 0x0007B, 0x0, "KittyHawk GSY Core Audio"},
{HPHW_FIO, 0x016, 0x0007B, 0x0, "Gecko Audio"},
{HPHW_FIO, 0x019, 0x0007B, 0x0, "Scorpio Sr. Audio"},
{HPHW_FIO, 0x01A, 0x0007B, 0x0, "Anole 64 Audio"},
{HPHW_FIO, 0x01B, 0x0007B, 0x0, "Anole 100 Audio"},
{HPHW_FIO, 0x01C, 0x0007B, 0x0, "Gecko 80 Audio"},
{HPHW_FIO, 0x01D, 0x0007B, 0x0, "Gecko 100 Audio"},
{HPHW_FIO, 0x01F, 0x0007B, 0x0, "SkyHawk 100/120 Audio"},
{HPHW_FIO, 0x020, 0x0007B, 0x0, "Scorpio 100 Audio"},
{HPHW_FIO, 0x021, 0x0007B, 0x0, "Spectra 50 Audio"},
{HPHW_FIO, 0x022, 0x0007B, 0x0, "Spectra 75 Audio"},
{HPHW_FIO, 0x023, 0x0007B, 0x0, "Spectra 100 Audio"},
{HPHW_FIO, 0x028, 0x0007B, 0x0, "Mirage Jr Audio"},
{HPHW_FIO, 0x029, 0x0007B, 0x0, "Mirage Audio"},
{HPHW_FIO, 0x02A, 0x0007B, 0x0, "Electra Audio"},
{HPHW_FIO, 0x02B, 0x0007B, 0x0, "Mirage 80 Audio"},
{HPHW_FIO, 0x02C, 0x0007B, 0x0, "Mirage 100+ Audio"},
{HPHW_FIO, 0x032, 0x0007B, 0x0, "Raven T' Audio"},
{HPHW_FIO, 0x034, 0x0007B, 0x0, "SAIC L-80 Audio"},
{HPHW_FIO, 0x035, 0x0007B, 0x0, "PCX-L2 712/132 Core Audio"},
{HPHW_FIO, 0x036, 0x0007B, 0x0, "PCX-L2 712/160 Core Audio"},
{HPHW_FIO, 0x03B, 0x0007B, 0x0, "Raven U/L2 Core Audio"},
{HPHW_FIO, 0x03C, 0x0007B, 0x0, "Merlin 132 Core Audio"},
{HPHW_FIO, 0x03D, 0x0007B, 0x0, "Merlin 160 Core Audio"},
{HPHW_FIO, 0x03E, 0x0007B, 0x0, "Merlin+ 132 Core Audio"},
{HPHW_FIO, 0x03F, 0x0007B, 0x0, "Merlin+ 180 Core Audio"},
{HPHW_FIO, 0x044, 0x0007B, 0x0, "Mohawk Core Audio"},
{HPHW_FIO, 0x046, 0x0007B, 0x0, "Rocky2 120 Core Audio"},
{HPHW_FIO, 0x047, 0x0007B, 0x0, "Rocky2 150 Core Audio"},
{HPHW_FIO, 0x04B, 0x0007B, 0x0, "Anole L2 132 Core Audio"},
{HPHW_FIO, 0x04D, 0x0007B, 0x0, "Anole L2 165 Core Audio"},
{HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"},
{HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"},
{HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"},
{HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"},
{HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"},
{HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"},
{HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"},
{HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"},
{HPHW_FIO, 0x801, 0x0007B, 0x0, "Hitachi Tiny 80 Audio"},
{HPHW_FIO, 0x009, 0x0007C, 0x0, "Outfield FW SCSI"},
{HPHW_FIO, 0x00A, 0x0007C, 0x0, "CoralII FW SCSI"},
{HPHW_FIO, 0x026, 0x0007C, 0x0, "CoralII Jaguar FW SCSI"},
{HPHW_FIO, 0x009, 0x0007D, 0x0, "Outfield FDDI"},
{HPHW_FIO, 0x00A, 0x0007D, 0x0, "CoralII FDDI"},
{HPHW_FIO, 0x026, 0x0007D, 0x0, "CoralII Jaguar FDDI"},
{HPHW_FIO, 0x010, 0x0007E, 0x0, "Pace Audio"},
{HPHW_FIO, 0x024, 0x0007E, 0x0, "Fast Pace Audio"},
{HPHW_FIO, 0x009, 0x0007F, 0x0, "Outfield Audio"},
{HPHW_FIO, 0x00A, 0x0007F, 0x0, "CoralII Audio"},
{HPHW_FIO, 0x026, 0x0007F, 0x0, "CoralII Jaguar Audio"},
{HPHW_FIO, 0x010, 0x00080, 0x0, "Pace Core HPIB"},
{HPHW_FIO, 0x024, 0x00080, 0x0, "Fast Pace Core HPIB"},
{HPHW_FIO, 0x015, 0x00082, 0x0, "KittyHawk GSY Core SCSI"},
{HPHW_FIO, 0x016, 0x00082, 0x0, "Gecko Core SCSI"},
{HPHW_FIO, 0x01A, 0x00082, 0x0, "Anole 64 Core SCSI"},
{HPHW_FIO, 0x01B, 0x00082, 0x0, "Anole 100 Core SCSI"},
{HPHW_FIO, 0x01C, 0x00082, 0x0, "Gecko 80 Core SCSI"},
{HPHW_FIO, 0x01D, 0x00082, 0x0, "Gecko 100 Core SCSI"},
{HPHW_FIO, 0x01F, 0x00082, 0x0, "SkyHawk 100/120 Core SCSI"},
{HPHW_FIO, 0x027, 0x00082, 0x0, "Piranha 100 Core SCSI"},
{HPHW_FIO, 0x028, 0x00082, 0x0, "Mirage Jr Core SCSI"},
{HPHW_FIO, 0x029, 0x00082, 0x0, "Mirage Core SCSI"},
{HPHW_FIO, 0x02A, 0x00082, 0x0, "Electra Core SCSI"},
{HPHW_FIO, 0x02B, 0x00082, 0x0, "Mirage 80 Core SCSI"},
{HPHW_FIO, 0x02C, 0x00082, 0x0, "Mirage 100+ Core SCSI"},
{HPHW_FIO, 0x02E, 0x00082, 0x0, "UL 350 Core SCSI"},
{HPHW_FIO, 0x02F, 0x00082, 0x0, "UL 550 Core SCSI"},
{HPHW_FIO, 0x032, 0x00082, 0x0, "Raven T' Core SCSI"},
{HPHW_FIO, 0x033, 0x00082, 0x0, "Anole T Core SCSI"},
{HPHW_FIO, 0x034, 0x00082, 0x0, "SAIC L-80 Core SCSI"},
{HPHW_FIO, 0x035, 0x00082, 0x0, "PCX-L2 712/132 Core SCSI"},
{HPHW_FIO, 0x036, 0x00082, 0x0, "PCX-L2 712/160 Core SCSI"},
{HPHW_FIO, 0x03B, 0x00082, 0x0, "Raven U/L2 Core SCSI"},
{HPHW_FIO, 0x03C, 0x00082, 0x0, "Merlin 132 Core SCSI"},
{HPHW_FIO, 0x03D, 0x00082, 0x0, "Merlin 160 Core SCSI"},
{HPHW_FIO, 0x03E, 0x00082, 0x0, "Merlin+ 132 Core SCSI"},
{HPHW_FIO, 0x03F, 0x00082, 0x0, "Merlin+ 180 Core SCSI"},
{HPHW_FIO, 0x044, 0x00082, 0x0, "Mohawk Core SCSI"},
{HPHW_FIO, 0x045, 0x00082, 0x0, "Rocky1 Core SCSI"},
{HPHW_FIO, 0x046, 0x00082, 0x0, "Rocky2 120 Core SCSI"},
{HPHW_FIO, 0x047, 0x00082, 0x0, "Rocky2 150 Core SCSI"},
{HPHW_FIO, 0x04B, 0x00082, 0x0, "Anole L2 132 Core SCSI"},
{HPHW_FIO, 0x04D, 0x00082, 0x0, "Anole L2 165 Core SCSI"},
{HPHW_FIO, 0x04E, 0x00082, 0x0, "Kiji L2 132 Core SCSI"},
{HPHW_FIO, 0x050, 0x00082, 0x0, "Merlin Jr 132 Core SCSI"},
{HPHW_FIO, 0x051, 0x00082, 0x0, "Firehawk Core SCSI"},
{HPHW_FIO, 0x056, 0x00082, 0x0, "Raven+ w SE FWSCSI Core SCSI"},
{HPHW_FIO, 0x057, 0x00082, 0x0, "Raven+ w Diff FWSCSI Core SCSI"},
{HPHW_FIO, 0x058, 0x00082, 0x0, "FireHawk 200 Core SCSI"},
{HPHW_FIO, 0x05C, 0x00082, 0x0, "SummitHawk 230 Core SCSI"},
{HPHW_FIO, 0x05E, 0x00082, 0x0, "Staccato 132 Core SCSI"},
{HPHW_FIO, 0x05F, 0x00082, 0x0, "Staccato 180 Core SCSI"},
{HPHW_FIO, 0x800, 0x00082, 0x0, "Hitachi Tiny 64 Core SCSI"},
{HPHW_FIO, 0x801, 0x00082, 0x0, "Hitachi Tiny 80 Core SCSI"},
{HPHW_FIO, 0x016, 0x00083, 0x0, "Gecko Core PC Floppy"},
{HPHW_FIO, 0x01C, 0x00083, 0x0, "Gecko 80 Core PC Floppy"},
{HPHW_FIO, 0x01D, 0x00083, 0x0, "Gecko 100 Core PC Floppy"},
{HPHW_FIO, 0x051, 0x00083, 0x0, "Firehawk Core PC Floppy"},
{HPHW_FIO, 0x058, 0x00083, 0x0, "FireHawk 200 Core PC Floppy"},
{HPHW_FIO, 0x027, 0x00083, 0x0, "Piranha 100 Core PC Floppy"},
{HPHW_FIO, 0x028, 0x00083, 0x0, "Mirage Jr Core PC Floppy"},
{HPHW_FIO, 0x029, 0x00083, 0x0, "Mirage Core PC Floppy"},
{HPHW_FIO, 0x02A, 0x00083, 0x0, "Electra Core PC Floppy"},
{HPHW_FIO, 0x02B, 0x00083, 0x0, "Mirage 80 Core PC Floppy"},
{HPHW_FIO, 0x02C, 0x00083, 0x0, "Mirage 100+ Core PC Floppy"},
{HPHW_FIO, 0x02E, 0x00083, 0x0, "UL 350 Core PC Floppy"},
{HPHW_FIO, 0x02F, 0x00083, 0x0, "UL 550 Core PC Floppy"},
{HPHW_FIO, 0x032, 0x00083, 0x0, "Raven T' Core PC Floppy"},
{HPHW_FIO, 0x034, 0x00083, 0x0, "SAIC L-80 Core PC Floppy"},
{HPHW_FIO, 0x035, 0x00083, 0x0, "PCX-L2 712/132 Core Floppy"},
{HPHW_FIO, 0x036, 0x00083, 0x0, "PCX-L2 712/160 Core Floppy"},
{HPHW_FIO, 0x03B, 0x00083, 0x0, "Raven U/L2 Core PC Floppy"},
{HPHW_FIO, 0x03C, 0x00083, 0x0, "Merlin 132 Core PC Floppy"},
{HPHW_FIO, 0x03D, 0x00083, 0x0, "Merlin 160 Core PC Floppy"},
{HPHW_FIO, 0x03E, 0x00083, 0x0, "Merlin+ 132 Core PC Floppy"},
{HPHW_FIO, 0x03F, 0x00083, 0x0, "Merlin+ 180 Core PC Floppy"},
{HPHW_FIO, 0x045, 0x00083, 0x0, "Rocky1 Core PC Floppy"},
{HPHW_FIO, 0x046, 0x00083, 0x0, "Rocky2 120 Core PC Floppy"},
{HPHW_FIO, 0x047, 0x00083, 0x0, "Rocky2 150 Core PC Floppy"},
{HPHW_FIO, 0x04E, 0x00083, 0x0, "Kiji L2 132 Core PC Floppy"},
{HPHW_FIO, 0x050, 0x00083, 0x0, "Merlin Jr 132 Core PC Floppy"},
{HPHW_FIO, 0x056, 0x00083, 0x0, "Raven+ w SE FWSCSI Core PC Floppy"},
{HPHW_FIO, 0x057, 0x00083, 0x0, "Raven+ w Diff FWSCSI Core PC Floppy"},
{HPHW_FIO, 0x800, 0x00083, 0x0, "Hitachi Tiny 64 Core PC Floppy"},
{HPHW_FIO, 0x801, 0x00083, 0x0, "Hitachi Tiny 80 Core PC Floppy"},
{HPHW_FIO, 0x015, 0x00084, 0x0, "KittyHawk GSY Core PS/2 Port"},
{HPHW_FIO, 0x016, 0x00084, 0x0, "Gecko Core PS/2 Port"},
{HPHW_FIO, 0x018, 0x00084, 0x0, "Gecko Optional PS/2 Port"},
{HPHW_FIO, 0x01A, 0x00084, 0x0, "Anole 64 Core PS/2 Port"},
{HPHW_FIO, 0x01B, 0x00084, 0x0, "Anole 100 Core PS/2 Port"},
{HPHW_FIO, 0x01C, 0x00084, 0x0, "Gecko 80 Core PS/2 Port"},
{HPHW_FIO, 0x01D, 0x00084, 0x0, "Gecko 100 Core PS/2 Port"},
{HPHW_FIO, 0x01F, 0x00084, 0x0, "SkyHawk 100/120 Core PS/2 Port"},
{HPHW_FIO, 0x027, 0x00084, 0x0, "Piranha 100 Core PS/2 Port"},
{HPHW_FIO, 0x028, 0x00084, 0x0, "Mirage Jr Core PS/2 Port"},
{HPHW_FIO, 0x029, 0x00084, 0x0, "Mirage Core PS/2 Port"},
{HPHW_FIO, 0x02A, 0x00084, 0x0, "Electra Core PS/2 Port"},
{HPHW_FIO, 0x02B, 0x00084, 0x0, "Mirage 80 Core PS/2 Port"},
{HPHW_FIO, 0x02C, 0x00084, 0x0, "Mirage 100+ Core PS/2 Port"},
{HPHW_FIO, 0x02E, 0x00084, 0x0, "UL 350 Core PS/2 Port"},
{HPHW_FIO, 0x02F, 0x00084, 0x0, "UL 550 Core PS/2 Port"},
{HPHW_FIO, 0x032, 0x00084, 0x0, "Raven T' Core PS/2 Port"},
{HPHW_FIO, 0x033, 0x00084, 0x0, "Anole T Core PS/2 Port"},
{HPHW_FIO, 0x034, 0x00084, 0x0, "SAIC L-80 Core PS/2 Port"},
{HPHW_FIO, 0x035, 0x00084, 0x0, "PCX-L2 712/132 Core PS/2 Port"},
{HPHW_FIO, 0x036, 0x00084, 0x0, "PCX-L2 712/160 Core PS/2 Port"},
{HPHW_FIO, 0x03B, 0x00084, 0x0, "Raven U/L2 Core PS/2 Port"},
{HPHW_FIO, 0x03C, 0x00084, 0x0, "Merlin 132 Core PS/2 Port"},
{HPHW_FIO, 0x03D, 0x00084, 0x0, "Merlin 160 Core PS/2 Port"},
{HPHW_FIO, 0x03E, 0x00084, 0x0, "Merlin+ 132 Core PS/2 Port"},
{HPHW_FIO, 0x03F, 0x00084, 0x0, "Merlin+ 180 Core PS/2 Port"},
{HPHW_FIO, 0x044, 0x00084, 0x0, "Mohawk Core PS/2 Port"},
{HPHW_FIO, 0x045, 0x00084, 0x0, "Rocky1 Core PS/2 Port"},
{HPHW_FIO, 0x046, 0x00084, 0x0, "Rocky2 120 Core PS/2 Port"},
{HPHW_FIO, 0x047, 0x00084, 0x0, "Rocky2 150 Core PS/2 Port"},
{HPHW_FIO, 0x048, 0x00084, 0x0, "Rocky2 120 Dino PS/2 Port"},
{HPHW_FIO, 0x049, 0x00084, 0x0, "Rocky2 150 Dino PS/2 Port"},
{HPHW_FIO, 0x04B, 0x00084, 0x0, "Anole L2 132 Core PS/2 Port"},
{HPHW_FIO, 0x04D, 0x00084, 0x0, "Anole L2 165 Core PS/2 Port"},
{HPHW_FIO, 0x04E, 0x00084, 0x0, "Kiji L2 132 Core PS/2 Port"},
{HPHW_FIO, 0x050, 0x00084, 0x0, "Merlin Jr 132 Core PS/2 Port"},
{HPHW_FIO, 0x051, 0x00084, 0x0, "Firehawk Core PS/2 Port"},
{HPHW_FIO, 0x056, 0x00084, 0x0, "Raven+ w SE FWSCSI Core PS/2 Port"},
{HPHW_FIO, 0x057, 0x00084, 0x0, "Raven+ w Diff FWSCSI Core PS/2 Port"},
{HPHW_FIO, 0x058, 0x00084, 0x0, "FireHawk 200 Core PS/2 Port"},
{HPHW_FIO, 0x05C, 0x00084, 0x0, "SummitHawk 230 Core PS/2 Port"},
{HPHW_FIO, 0x800, 0x00084, 0x0, "Hitachi Tiny 64 Core PS/2 Port"},
{HPHW_FIO, 0x801, 0x00084, 0x0, "Hitachi Tiny 80 Core PS/2 Port"},
{HPHW_FIO, 0x004, 0x00085, 0x0, "Solo GSC Optional Graphics"},
{HPHW_FIO, 0x005, 0x00085, 0x0, "Duet GSC Optional Graphics"},
{HPHW_FIO, 0x008, 0x00085, 0x0, "Anole Artist Optional Graphics"},
{HPHW_FIO, 0x010, 0x00085, 0x0, "Mirage 80 GSC Builtin Graphics"},
{HPHW_FIO, 0x011, 0x00085, 0x0, "Mirage 100+ GSC Builtin Graphics"},
{HPHW_FIO, 0x012, 0x00085, 0x0, "Mirage Jr GSC Builtin Graphics"},
{HPHW_FIO, 0x013, 0x00085, 0x0, "Mirage GSC Builtin Graphics"},
{HPHW_FIO, 0x014, 0x00085, 0x0, "Electra GSC Builtin Graphics"},
{HPHW_FIO, 0x016, 0x00085, 0x0, "Gecko GSC Core Graphics"},
{HPHW_FIO, 0x017, 0x00085, 0x0, "Gecko GSC Optional Graphics"},
{HPHW_FIO, 0x01A, 0x00085, 0x0, "Anole 64 Artist Builtin Graphics"},
{HPHW_FIO, 0x01B, 0x00085, 0x0, "Anole 100 Artist Builtin Graphics"},
{HPHW_FIO, 0x01C, 0x00085, 0x0, "Gecko 80 GSC Core Graphics"},
{HPHW_FIO, 0x01D, 0x00085, 0x0, "Gecko 100 GSC Core Graphics"},
{HPHW_FIO, 0x032, 0x00085, 0x0, "Raven T' GSC Core Graphics"},
{HPHW_FIO, 0x033, 0x00085, 0x0, "Anole T Artist Builtin Graphics"},
{HPHW_FIO, 0x034, 0x00085, 0x0, "SAIC L-80 GSC Core Graphics"},
{HPHW_FIO, 0x035, 0x00085, 0x0, "PCX-L2 712/132 Core Graphics"},
{HPHW_FIO, 0x036, 0x00085, 0x0, "PCX-L2 712/160 Core Graphics"},
{HPHW_FIO, 0x03B, 0x00085, 0x0, "Raven U/L2 Core Graphics"},
{HPHW_FIO, 0x03C, 0x00085, 0x0, "Merlin 132 Core Graphics"},
{HPHW_FIO, 0x03D, 0x00085, 0x0, "Merlin 160 Core Graphics"},
{HPHW_FIO, 0x03E, 0x00085, 0x0, "Merlin+ 132 Core Graphics"},
{HPHW_FIO, 0x03F, 0x00085, 0x0, "Merlin+ 180 Core Graphics"},
{HPHW_FIO, 0x045, 0x00085, 0x0, "Rocky1 Core Graphics"},
{HPHW_FIO, 0x046, 0x00085, 0x0, "Rocky2 120 Core Graphics"},
{HPHW_FIO, 0x047, 0x00085, 0x0, "Rocky2 150 Core Graphics"},
{HPHW_FIO, 0x04B, 0x00085, 0x0, "Anole L2 132 Core Graphics"},
{HPHW_FIO, 0x04D, 0x00085, 0x0, "Anole L2 165 Core Graphics"},
{HPHW_FIO, 0x04E, 0x00085, 0x0, "Kiji L2 132 Core Graphics"},
{HPHW_FIO, 0x050, 0x00085, 0x0, "Merlin Jr 132 Core Graphics"},
{HPHW_FIO, 0x056, 0x00085, 0x0, "Raven+ w SE FWSCSI Core Graphics"},
{HPHW_FIO, 0x057, 0x00085, 0x0, "Raven+ w Diff FWSCSI Core Graphics"},
{HPHW_FIO, 0x800, 0x00085, 0x0, "Hitachi Tiny 64 Core Graphics"},
{HPHW_FIO, 0x801, 0x00085, 0x0, "Hitachi Tiny 80 Core Graphics"},
{HPHW_FIO, 0x004, 0x00086, 0x0, "GSC IBM Token Ring"},
{HPHW_FIO, 0x015, 0x00087, 0x0, "Gecko Optional ISDN"},
{HPHW_FIO, 0x016, 0x00087, 0x0, "Gecko Core ISDN"},
{HPHW_FIO, 0x01C, 0x00087, 0x0, "Gecko 80 Core ISDN"},
{HPHW_FIO, 0x01D, 0x00087, 0x0, "Gecko 100 Core ISDN"},
{HPHW_FIO, 0x010, 0x00088, 0x0, "Pace VME Networking"},
{HPHW_FIO, 0x011, 0x00088, 0x0, "Sidewinder VME Networking"},
{HPHW_FIO, 0x01A, 0x00088, 0x0, "Anole 64 VME Networking"},
{HPHW_FIO, 0x01B, 0x00088, 0x0, "Anole 100 VME Networking"},
{HPHW_FIO, 0x024, 0x00088, 0x0, "Fast Pace VME Networking"},
{HPHW_FIO, 0x034, 0x00088, 0x0, "Anole T VME Networking"},
{HPHW_FIO, 0x04A, 0x00088, 0x0, "Anole L2 132 VME Networking"},
{HPHW_FIO, 0x04C, 0x00088, 0x0, "Anole L2 165 VME Networking"},
{HPHW_FIO, 0x011, 0x0008A, 0x0, "WB-96 Core LAN (802.3)"},
{HPHW_FIO, 0x012, 0x0008A, 0x0, "Orville Core LAN (802.3)"},
{HPHW_FIO, 0x013, 0x0008A, 0x0, "Wilbur Core LAN (802.3)"},
{HPHW_FIO, 0x014, 0x0008A, 0x0, "WB-80 Core LAN (802.3)"},
{HPHW_FIO, 0x015, 0x0008A, 0x0, "KittyHawk GSY Core LAN (802.3)"},
{HPHW_FIO, 0x016, 0x0008A, 0x0, "Gecko Core LAN (802.3)"},
{HPHW_FIO, 0x018, 0x0008A, 0x0, "Gecko Optional LAN (802.3)"},
{HPHW_FIO, 0x01A, 0x0008A, 0x0, "Anole 64 Core LAN (802.3)"},
{HPHW_FIO, 0x01B, 0x0008A, 0x0, "Anole 100 Core LAN (802.3)"},
{HPHW_FIO, 0x01C, 0x0008A, 0x0, "Gecko 80 Core LAN (802.3)"},
{HPHW_FIO, 0x01D, 0x0008A, 0x0, "Gecko 100 Core LAN (802.3)"},
{HPHW_FIO, 0x01F, 0x0008A, 0x0, "SkyHawk 100/120 Core LAN (802.3)"},
{HPHW_FIO, 0x027, 0x0008A, 0x0, "Piranha 100 Core LAN (802.3)"},
{HPHW_FIO, 0x028, 0x0008A, 0x0, "Mirage Jr Core LAN (802.3)"},
{HPHW_FIO, 0x029, 0x0008A, 0x0, "Mirage Core LAN (802.3)"},
{HPHW_FIO, 0x02A, 0x0008A, 0x0, "Electra Core LAN (802.3)"},
{HPHW_FIO, 0x02B, 0x0008A, 0x0, "Mirage 80 Core LAN (802.3)"},
{HPHW_FIO, 0x02C, 0x0008A, 0x0, "Mirage 100+ Core LAN (802.3)"},
{HPHW_FIO, 0x02E, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"},
{HPHW_FIO, 0x02F, 0x0008A, 0x0, "UL 550 Core LAN (802.3)"},
{HPHW_FIO, 0x032, 0x0008A, 0x0, "Raven T' Core LAN (802.3)"},
{HPHW_FIO, 0x033, 0x0008A, 0x0, "Anole T Core LAN (802.3)"},
{HPHW_FIO, 0x034, 0x0008A, 0x0, "SAIC L-80 Core LAN (802.3)"},
{HPHW_FIO, 0x035, 0x0008A, 0x0, "PCX-L2 712/132 Core LAN (802.3)"},
{HPHW_FIO, 0x036, 0x0008A, 0x0, "PCX-L2 712/160 Core LAN (802.3)"},
{HPHW_FIO, 0x03B, 0x0008A, 0x0, "Raven U/L2 Core LAN (802.3)"},
{HPHW_FIO, 0x03C, 0x0008A, 0x0, "Merlin 132 Core LAN (802.3)"},
{HPHW_FIO, 0x03D, 0x0008A, 0x0, "Merlin 160 Core LAN (802.3)"},
{HPHW_FIO, 0x044, 0x0008A, 0x0, "Mohawk Core LAN (802.3)"},
{HPHW_FIO, 0x045, 0x0008A, 0x0, "Rocky1 Core LAN (802.3)"},
{HPHW_FIO, 0x046, 0x0008A, 0x0, "Rocky2 120 Core LAN (802.3)"},
{HPHW_FIO, 0x047, 0x0008A, 0x0, "Rocky2 150 Core LAN (802.3)"},
{HPHW_FIO, 0x04B, 0x0008A, 0x0, "Anole L2 132 Core LAN (802.3)"},
{HPHW_FIO, 0x04D, 0x0008A, 0x0, "Anole L2 165 Core LAN (802.3)"},
{HPHW_FIO, 0x04E, 0x0008A, 0x0, "Kiji L2 132 Core LAN (802.3)"},
{HPHW_FIO, 0x050, 0x0008A, 0x0, "Merlin Jr 132 Core LAN (802.3)"},
{HPHW_FIO, 0x058, 0x0008A, 0x0, "FireHawk 200 Core LAN (802.3)"},
{HPHW_FIO, 0x800, 0x0008A, 0x0, "Hitachi Tiny 64 Core LAN (802.3)"},
{HPHW_FIO, 0x801, 0x0008A, 0x0, "Hitachi Tiny 80 Core LAN (802.3)"},
{HPHW_FIO, 0x004, 0x0008C, 0x0, "SkyHawk 100/120 Wax RS-232"},
{HPHW_FIO, 0x005, 0x0008C, 0x0, "SAIC L-80 Wax RS-232"},
{HPHW_FIO, 0x006, 0x0008C, 0x0, "Raven U/L2 Dino RS-232"},
{HPHW_FIO, 0x007, 0x0008C, 0x0, "Dino RS-232"},
{HPHW_FIO, 0x008, 0x0008C, 0x0, "Merlin 132 Dino RS-232"},
{HPHW_FIO, 0x009, 0x0008C, 0x0, "Merlin 160 Dino RS-232"},
{HPHW_FIO, 0x00A, 0x0008C, 0x0, "Merlin Jr 132 Dino RS-232"},
{HPHW_FIO, 0x010, 0x0008C, 0x0, "Mirage 80 Wax RS-232"},
{HPHW_FIO, 0x011, 0x0008C, 0x0, "Mirage 100+ Wax RS-232"},
{HPHW_FIO, 0x012, 0x0008C, 0x0, "Mirage Jr Wax RS-232"},
{HPHW_FIO, 0x013, 0x0008C, 0x0, "Mirage Wax RS-232"},
{HPHW_FIO, 0x014, 0x0008C, 0x0, "Electra Wax RS-232"},
{HPHW_FIO, 0x015, 0x0008C, 0x0, "KittyHawk GSY Core RS-232"},
{HPHW_FIO, 0x016, 0x0008C, 0x0, "Gecko Core RS-232"},
{HPHW_FIO, 0x017, 0x0008C, 0x0, "Raven Backplane RS-232"},
{HPHW_FIO, 0x018, 0x0008C, 0x0, "Gecko Optional RS-232"},
{HPHW_FIO, 0x019, 0x0008C, 0x0, "Merlin+ 180 Dino RS-232"},
{HPHW_FIO, 0x01A, 0x0008C, 0x0, "Anole 64 Core RS-232"},
{HPHW_FIO, 0x01B, 0x0008C, 0x0, "Anole 100 Core RS-232"},
{HPHW_FIO, 0x01C, 0x0008C, 0x0, "Gecko 80 Core RS-232"},
{HPHW_FIO, 0x01D, 0x0008C, 0x0, "Gecko 100 Core RS-232"},
{HPHW_FIO, 0x01E, 0x0008C, 0x0, "Raven T' Wax RS-232"},
{HPHW_FIO, 0x01F, 0x0008C, 0x0, "SkyHawk 100/120 Core RS-232"},
{HPHW_FIO, 0x020, 0x0008C, 0x0, "Anole 64 TIMI RS-232"},
{HPHW_FIO, 0x021, 0x0008C, 0x0, "Anole 100 TIMI RS-232"},
{HPHW_FIO, 0x022, 0x0008C, 0x0, "Merlin+ 132 Dino RS-232"},
{HPHW_FIO, 0x023, 0x0008C, 0x0, "Rocky1 Wax RS-232"},
{HPHW_FIO, 0x025, 0x0008C, 0x0, "Armyknife Optional RS-232"},
{HPHW_FIO, 0x026, 0x0008C, 0x0, "Piranha 100 Wax RS-232"},
{HPHW_FIO, 0x027, 0x0008C, 0x0, "Piranha 100 Core RS-232"},
{HPHW_FIO, 0x028, 0x0008C, 0x0, "Mirage Jr Core RS-232"},
{HPHW_FIO, 0x029, 0x0008C, 0x0, "Mirage Core RS-232"},
{HPHW_FIO, 0x02A, 0x0008C, 0x0, "Electra Core RS-232"},
{HPHW_FIO, 0x02B, 0x0008C, 0x0, "Mirage 80 Core RS-232"},
{HPHW_FIO, 0x02C, 0x0008C, 0x0, "Mirage 100+ Core RS-232"},
{HPHW_FIO, 0x02E, 0x0008C, 0x0, "UL 350 Lasi Core RS-232"},
{HPHW_FIO, 0x02F, 0x0008C, 0x0, "UL 550 Lasi Core RS-232"},
{HPHW_FIO, 0x030, 0x0008C, 0x0, "UL 350 Wax Core RS-232"},
{HPHW_FIO, 0x031, 0x0008C, 0x0, "UL 550 Wax Core RS-232"},
{HPHW_FIO, 0x032, 0x0008C, 0x0, "Raven T' Lasi Core RS-232"},
{HPHW_FIO, 0x033, 0x0008C, 0x0, "Anole T Core RS-232"},
{HPHW_FIO, 0x034, 0x0008C, 0x0, "SAIC L-80 Core RS-232"},
{HPHW_FIO, 0x035, 0x0008C, 0x0, "PCX-L2 712/132 Core RS-232"},
{HPHW_FIO, 0x036, 0x0008C, 0x0, "PCX-L2 712/160 Core RS-232"},
{HPHW_FIO, 0x03A, 0x0008C, 0x0, "Merlin+ Wax RS-232"},
{HPHW_FIO, 0x03B, 0x0008C, 0x0, "Raven U/L2 Core RS-232"},
{HPHW_FIO, 0x03C, 0x0008C, 0x0, "Merlin 132 Core RS-232"},
{HPHW_FIO, 0x03D, 0x0008C, 0x0, "Merlin 160 Core RS-232"},
{HPHW_FIO, 0x03E, 0x0008C, 0x0, "Merlin+ 132 Core RS-232"},
{HPHW_FIO, 0x03F, 0x0008C, 0x0, "Merlin+ 180 Core RS-232"},
{HPHW_FIO, 0x040, 0x0008C, 0x0, "Merlin 132 Wax RS-232"},
{HPHW_FIO, 0x041, 0x0008C, 0x0, "Merlin 160 Wax RS-232"},
{HPHW_FIO, 0x043, 0x0008C, 0x0, "Merlin 132/160 Wax RS-232"},
{HPHW_FIO, 0x044, 0x0008C, 0x0, "Mohawk Core RS-232"},
{HPHW_FIO, 0x045, 0x0008C, 0x0, "Rocky1 Core RS-232"},
{HPHW_FIO, 0x046, 0x0008C, 0x0, "Rocky2 120 Core RS-232"},
{HPHW_FIO, 0x047, 0x0008C, 0x0, "Rocky2 150 Core RS-232"},
{HPHW_FIO, 0x048, 0x0008C, 0x0, "Rocky2 120 Dino RS-232"},
{HPHW_FIO, 0x049, 0x0008C, 0x0, "Rocky2 150 Dino RS-232"},
{HPHW_FIO, 0x04A, 0x0008C, 0x0, "Anole L2 132 TIMI RS-232"},
{HPHW_FIO, 0x04B, 0x0008C, 0x0, "Anole L2 132 Core RS-232"},
{HPHW_FIO, 0x04C, 0x0008C, 0x0, "Anole L2 165 TIMI RS-232"},
{HPHW_FIO, 0x04D, 0x0008C, 0x0, "Anole L2 165 Core RS-232"},
{HPHW_FIO, 0x04E, 0x0008C, 0x0, "Kiji L2 132 Core RS-232"},
{HPHW_FIO, 0x04F, 0x0008C, 0x0, "Kiji L2 132 Dino RS-232"},
{HPHW_FIO, 0x050, 0x0008C, 0x0, "Merlin Jr 132 Core RS-232"},
{HPHW_FIO, 0x051, 0x0008C, 0x0, "Firehawk Core RS-232"},
{HPHW_FIO, 0x052, 0x0008C, 0x0, "Raven+ Hi Power Backplane w EISA RS-232"},
{HPHW_FIO, 0x053, 0x0008C, 0x0, "Raven+ Hi Power Backplane w/o EISA RS-232"},
{HPHW_FIO, 0x054, 0x0008C, 0x0, "Raven+ Lo Power Backplane w EISA RS-232"},
{HPHW_FIO, 0x055, 0x0008C, 0x0, "Raven+ Lo Power Backplane w/o EISA RS-232"},
{HPHW_FIO, 0x056, 0x0008C, 0x0, "Raven+ w SE FWSCSI Core RS-232"},
{HPHW_FIO, 0x057, 0x0008C, 0x0, "Raven+ w Diff FWSCSI Core RS-232"},
{HPHW_FIO, 0x058, 0x0008C, 0x0, "FireHawk 200 Core RS-232"},
{HPHW_FIO, 0x059, 0x0008C, 0x0, "FireHawk 200 Wax RS-232"},
{HPHW_FIO, 0x05A, 0x0008C, 0x0, "Raven+ L2 Backplane w EISA RS-232"},
{HPHW_FIO, 0x05B, 0x0008C, 0x0, "Raven+ L2 Backplane w/o EISA RS-232"},
{HPHW_FIO, 0x05D, 0x0008C, 0x0, "SummitHawk Dino RS-232"},
{HPHW_FIO, 0x05E, 0x0008C, 0x0, "Staccato 132 Core LAN RS-232"},
{HPHW_FIO, 0x05F, 0x0008C, 0x0, "Staccato 180 Core LAN RS-232"},
{HPHW_FIO, 0x800, 0x0008C, 0x0, "Hitachi Tiny 64 Core RS-232"},
{HPHW_FIO, 0x801, 0x0008C, 0x0, "Hitachi Tiny 80 Core RS-232"},
{HPHW_FIO, 0x015, 0x0008D, 0x0, "Gecko Optional RJ-16"},
{HPHW_FIO, 0x016, 0x0008D, 0x0, "Gecko Core RJ-16"},
{HPHW_FIO, 0x01C, 0x0008D, 0x0, "Gecko 80 Core RJ-16"},
{HPHW_FIO, 0x01D, 0x0008D, 0x0, "Gecko 100 Core RJ-16"},
{HPHW_FIO, 0x004, 0x0008F, 0x0, "Anole Boot Rom"},
{HPHW_FIO, 0x005, 0x0008F, 0x0, "Rocky1 Boot Rom"},
{HPHW_FIO, 0x006, 0x0008F, 0x0, "Rocky2 120 Boot Rom"},
{HPHW_FIO, 0x007, 0x0008F, 0x0, "Rocky2 150 Boot Rom"},
{HPHW_FIO, 0x01B, 0x0008F, 0x0, "Anole 100 Boot Rom"},
{HPHW_FIO, 0x006, 0x00096, 0x0, "Raven U/L2 Dino PS/2 Port"},
{HPHW_FIO, 0x007, 0x00096, 0x0, "Dino PS/2 Port"},
{HPHW_FIO, 0x008, 0x00096, 0x0, "Merlin 132 Dino PS/2 Port"},
{HPHW_FIO, 0x009, 0x00096, 0x0, "Merlin 160 Dino PS/2 Port"},
{HPHW_FIO, 0x00A, 0x00096, 0x0, "Merlin Jr 132 Dino PS/2 Port"},
{HPHW_FIO, 0x019, 0x00096, 0x0, "Merlin+ 180 Dino PS/2 Port"},
{HPHW_FIO, 0x022, 0x00096, 0x0, "Merlin+ 132 Dino PS/2 Port"},
{HPHW_FIO, 0x004, 0x00097, 0x0, "Cascade EISA 100VG LAN"},
{HPHW_FIO, 0x023, 0x00099, 0x0, "Rocky1 Wax HPIB"},
{HPHW_FIO, 0x048, 0x00099, 0x0, "Rocky2 120 Clark/Dino HPIB"},
{HPHW_FIO, 0x049, 0x00099, 0x0, "Rocky2 150 Clark/Dino HPIB"},
{HPHW_FIO, 0x004, 0x000A1, 0x0, "SPP2000 Console TTY"},
{HPHW_FIO, 0x004, 0x000A2, 0x0, "Forte Core PCI 10/100BT LAN"},
{HPHW_FIO, 0x005, 0x000A2, 0x0, "AllegroLow PCI 10/100BT LAN"},
{HPHW_FIO, 0x006, 0x000A2, 0x0, "AllegroHigh Core PCI 10/100BT LAN"},
{HPHW_FIO, 0x007, 0x000A2, 0x0, "PCI Plug-in LAN"},
{HPHW_FIO, 0x00A, 0x000A2, 0x0, "Lego 360 Core PCI 10/100BT LAN"},
{HPHW_FIO, 0x03E, 0x000A2, 0x0, "Merlin+ 132 Core PCI LAN"},
{HPHW_FIO, 0x03F, 0x000A2, 0x0, "Merlin+ 180 Core PCI LAN"},
{HPHW_FIO, 0x056, 0x000A2, 0x0, "Raven+ w SE FWSCSI Core PCI LAN"},
{HPHW_FIO, 0x057, 0x000A2, 0x0, "Raven+ w Diff FWSCSI Core PCI LAN"},
{HPHW_FIO, 0x05E, 0x000A2, 0x0, "Staccato 132 PCI LAN"},
{HPHW_FIO, 0x05F, 0x000A2, 0x0, "Staccato 180 PCI LAN"},
{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI LVD Ultra2 SCSI"},
{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI SE UltraSCSI"},
{HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI IDE/ATAPI CD-ROM"},
{HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI LVD Ultra2 SCSI"},
{HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI IDE/ATAPI CD-ROM"},
{HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI LVD Ultra2 SCSI"},
{HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI IDE/ATAPI CD-ROM"},
{HPHW_FIO, 0x007, 0x000A3, 0x0, "PCI Plug-in Disk"},
{HPHW_FIO, 0x008, 0x000A3, 0x0, "A5158A S FC Tachlite HBA"},
{HPHW_FIO, 0x009, 0x000A3, 0x0, "A5157A D FC HBA"},
{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI LVD Ultra2 SCSI"},
{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI NSE UltraSCSI"},
{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI WSE UltraSCSI"},
{HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI IDE/ATAPI CD-ROM"},
{HPHW_FIO, 0x03E, 0x000A3, 0x0, "Merlin+ 132 Core SE FWSCSI PCI Disk"},
{HPHW_FIO, 0x03F, 0x000A3, 0x0, "Merlin+ 180 Core SE FWSCSI PCI Disk"},
{HPHW_FIO, 0x056, 0x000A3, 0x0, "Raven+ w SE FWSCSI Core PCI Disk"},
{HPHW_FIO, 0x057, 0x000A3, 0x0, "Raven+ w Diff FWSCSI Core PCI Disk"},
{HPHW_FIO, 0x004, 0x000A4, 0x0, "SPP2000 Core BA"},
{HPHW_FIO, 0x004, 0x000A6, 0x0, "Sonic Ethernet 802.3 Card"},
{HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI SuperIO RS-232"},
{HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI USB KB"},
{HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI SuperIO RS-232"},
{HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI USB KB"},
{HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI SuperIO RS-232"},
{HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI USB KB"},
{HPHW_FIO, 0x007, 0x000A9, 0x0, "Miscellaneous PCI Plug-in"},
{HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI SuperIO RS-232"},
{HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI USB KB"},
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
{HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
{HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast Core RS-232"},
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
{HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
{HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
{HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
{HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
{HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"},
{HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"},
{HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"},
{HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"},
{HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
{HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
{HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"},
{HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"},
{HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"},
{HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"},
{HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"},
{HPHW_FAULTY, 0, } /* Special Marker for last entry */
};
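/*
 * Table for mapping the PDC-reported CPU model number to a cpu_type.
 * An entry matches when (model & mask) == entry.model, so a mask with
 * trailing zero bits covers a whole range: { 0x0580, 0x0ff8, pcxt_ },
 * for example, matches models 0x0580-0x0587.  The table is scanned
 * top to bottom and the first match wins.
 */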
static struct hp_cpu_type_mask {
unsigned short model;
unsigned short mask;
enum cpu_type cpu;
} hp_cpu_type_mask_list[] __initdata = {
{ 0x0000, 0x0ff0, pcx }, /* 0x0000 - 0x000f */
{ 0x0040, 0x0ff0, pcxl }, /* 0x0040 - 0x004f */
{ 0x0080, 0x0ff0, pcx }, /* 0x0080 - 0x008f */
{ 0x0100, 0x0ff0, pcx }, /* 0x0100 - 0x010f */
{ 0x0182, 0x0ffe, pcx }, /* 0x0182 - 0x0183 */
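/* identical model/mask to the entry above; since the first match wins, the pcxt entry below is unreachable */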
{ 0x0182, 0x0ffe, pcxt }, /* 0x0182 - 0x0183 */
{ 0x0184, 0x0fff, pcxu }, /* 0x0184 - 0x0184 */
{ 0x0200, 0x0ffe, pcxs }, /* 0x0200 - 0x0201 */
{ 0x0202, 0x0fff, pcxs }, /* 0x0202 - 0x0202 */
{ 0x0203, 0x0fff, pcxt }, /* 0x0203 - 0x0203 */
{ 0x0204, 0x0ffc, pcxt }, /* 0x0204 - 0x0207 */
{ 0x0280, 0x0ffc, pcxs }, /* 0x0280 - 0x0283 */
{ 0x0284, 0x0ffc, pcxt }, /* 0x0284 - 0x0287 */
{ 0x0288, 0x0fff, pcxt }, /* 0x0288 - 0x0288 */
{ 0x0300, 0x0ffc, pcxs }, /* 0x0300 - 0x0303 */
{ 0x0310, 0x0ff0, pcxt }, /* 0x0310 - 0x031f */
{ 0x0320, 0x0ff0, pcxt }, /* 0x0320 - 0x032f */
{ 0x0400, 0x0ff0, pcxt }, /* 0x0400 - 0x040f */
{ 0x0480, 0x0ff0, pcxl }, /* 0x0480 - 0x048f */
{ 0x0500, 0x0ff0, pcxl2 }, /* 0x0500 - 0x050f */
{ 0x0510, 0x0ff0, pcxl2 }, /* 0x0510 - 0x051f */
{ 0x0580, 0x0ff8, pcxt_ }, /* 0x0580 - 0x0587 */
{ 0x0588, 0x0ffc, pcxt_ }, /* 0x0588 - 0x058b */
{ 0x058c, 0x0ffe, pcxt_ }, /* 0x058c - 0x058d */
{ 0x058e, 0x0fff, pcxt_ }, /* 0x058e - 0x058e */
{ 0x058f, 0x0fff, pcxu }, /* 0x058f - 0x058f */
{ 0x0590, 0x0ffe, pcxu }, /* 0x0590 - 0x0591 */
{ 0x0592, 0x0fff, pcxt_ }, /* 0x0592 - 0x0592 */
{ 0x0593, 0x0fff, pcxu }, /* 0x0593 - 0x0593 */
{ 0x0594, 0x0ffc, pcxu }, /* 0x0594 - 0x0597 */
{ 0x0598, 0x0ffe, pcxu_ }, /* 0x0598 - 0x0599 */
{ 0x059a, 0x0ffe, pcxu }, /* 0x059a - 0x059b */
{ 0x059c, 0x0fff, pcxu }, /* 0x059c - 0x059c */
{ 0x059d, 0x0fff, pcxu_ }, /* 0x059d - 0x059d */
{ 0x059e, 0x0fff, pcxt_ }, /* 0x059e - 0x059e */
{ 0x059f, 0x0fff, pcxu }, /* 0x059f - 0x059f */
{ 0x05a0, 0x0ffe, pcxt_ }, /* 0x05a0 - 0x05a1 */
{ 0x05a2, 0x0ffe, pcxu }, /* 0x05a2 - 0x05a3 */
{ 0x05a4, 0x0ffc, pcxu }, /* 0x05a4 - 0x05a7 */
{ 0x05a8, 0x0ffc, pcxu }, /* 0x05a8 - 0x05ab */
{ 0x05ad, 0x0fff, pcxu_ }, /* 0x05ad - 0x05ad */
{ 0x05ae, 0x0ffe, pcxu_ }, /* 0x05ae - 0x05af */
{ 0x05b0, 0x0ffe, pcxu_ }, /* 0x05b0 - 0x05b1 */
{ 0x05b2, 0x0fff, pcxu_ }, /* 0x05b2 - 0x05b2 */
{ 0x05b3, 0x0fff, pcxu }, /* 0x05b3 - 0x05b3 */
{ 0x05b4, 0x0fff, pcxw }, /* 0x05b4 - 0x05b4 */
{ 0x05b5, 0x0fff, pcxu_ }, /* 0x05b5 - 0x05b5 */
{ 0x05b6, 0x0ffe, pcxu_ }, /* 0x05b6 - 0x05b7 */
{ 0x05b8, 0x0ffe, pcxu_ }, /* 0x05b8 - 0x05b9 */
{ 0x05ba, 0x0fff, pcxu_ }, /* 0x05ba - 0x05ba */
{ 0x05bb, 0x0fff, pcxw }, /* 0x05bb - 0x05bb */
{ 0x05bc, 0x0ffc, pcxw }, /* 0x05bc - 0x05bf */
{ 0x05c0, 0x0ffc, pcxw }, /* 0x05c0 - 0x05c3 */
{ 0x05c4, 0x0ffe, pcxw }, /* 0x05c4 - 0x05c5 */
{ 0x05c6, 0x0fff, pcxw }, /* 0x05c6 - 0x05c6 */
{ 0x05c7, 0x0fff, pcxw_ }, /* 0x05c7 - 0x05c7 */
{ 0x05c8, 0x0ffc, pcxw }, /* 0x05c8 - 0x05cb */
{ 0x05cc, 0x0ffe, pcxw }, /* 0x05cc - 0x05cd */
{ 0x05ce, 0x0ffe, pcxw_ }, /* 0x05ce - 0x05cf */
{ 0x05d0, 0x0ffc, pcxw_ }, /* 0x05d0 - 0x05d3 */
{ 0x05d4, 0x0ffe, pcxw_ }, /* 0x05d4 - 0x05d5 */
{ 0x05d6, 0x0fff, pcxw }, /* 0x05d6 - 0x05d6 */
{ 0x05d7, 0x0fff, pcxw_ }, /* 0x05d7 - 0x05d7 */
{ 0x05d8, 0x0ffc, pcxw_ }, /* 0x05d8 - 0x05db */
{ 0x05dc, 0x0ffe, pcxw2 }, /* 0x05dc - 0x05dd */
{ 0x05de, 0x0fff, pcxw_ }, /* 0x05de - 0x05de */
{ 0x05df, 0x0fff, pcxw2 }, /* 0x05df - 0x05df */
{ 0x05e0, 0x0ffc, pcxw2 }, /* 0x05e0 - 0x05e3 */
{ 0x05e4, 0x0fff, pcxw2 }, /* 0x05e4 - 0x05e4 */
{ 0x05e5, 0x0fff, pcxw_ }, /* 0x05e5 - 0x05e5 */
{ 0x05e6, 0x0ffe, pcxw2 }, /* 0x05e6 - 0x05e7 */
{ 0x05e8, 0x0ff8, pcxw2 }, /* 0x05e8 - 0x05ef */
{ 0x05f0, 0x0ff0, pcxw2 }, /* 0x05f0 - 0x05ff */
{ 0x0600, 0x0fe0, pcxl }, /* 0x0600 - 0x061f */
{ 0x0880, 0x0ff0, mako }, /* 0x0880 - 0x088f */
{ 0x0890, 0x0ff0, mako2 }, /* 0x0890 - 0x089f */
{ 0x0000, 0x0000, pcx } /* terminate table */
};
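/*
 * Human-readable CPU names, indexed by enum cpu_type; the second
 * column is the PA-RISC architecture level implemented by the chip.
 */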
const char * const cpu_name_version[][2] = {
[pcx] = { "PA7000 (PCX)", "1.0" },
[pcxs] = { "PA7000 (PCX-S)", "1.1a" },
[pcxt] = { "PA7100 (PCX-T)", "1.1b" },
[pcxt_] = { "PA7200 (PCX-T')", "1.1c" },
[pcxl] = { "PA7100LC (PCX-L)", "1.1d" },
[pcxl2] = { "PA7300LC (PCX-L2)", "1.1e" },
[pcxu] = { "PA8000 (PCX-U)", "2.0" },
[pcxu_] = { "PA8200 (PCX-U+)", "2.0" },
[pcxw] = { "PA8500 (PCX-W)", "2.0" },
[pcxw_] = { "PA8600 (PCX-W+)", "2.0" },
[pcxw2] = { "PA8700 (PCX-W2)", "2.0" },
[mako] = { "PA8800 (Mako)", "2.0" },
[mako2] = { "PA8900 (Shortfin)", "2.0" }
};
const char * __init parisc_hardware_description(struct parisc_device_id *id)
{
struct hp_hardware *listptr;
for (listptr = hp_hardware_list; listptr->hw_type != HPHW_FAULTY; listptr++) {
if ((listptr->hw_type == id->hw_type) &&
(listptr->hversion == id->hversion) &&
(listptr->sversion == id->sversion)){
return listptr->name;
}
}
/*
* ok, the above hardware table isn't complete, and we haven't found
* our device in this table. So let's now try to find a generic name
* to describe the given hardware...
*/
switch (id->hw_type) {
case HPHW_NPROC:
return "Unknown machine";
case HPHW_A_DIRECT:
switch (id->sversion) {
case 0x0D: return "MUX port";
case 0x0E: return "RS-232 port";
}
break;
case HPHW_MEMORY:
return "Memory";
}
return "unknown device";
}
/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
enum cpu_type __init
parisc_get_cpu_type(unsigned long hversion)
{
struct hp_cpu_type_mask *ptr;
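/* drop the low four bits of hversion (the chip revision) to leave the model number */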
unsigned short model = ((unsigned short) (hversion)) >> 4;
for (ptr = hp_cpu_type_mask_list; 0 != ptr->mask; ptr++) {
if (ptr->model == (model & ptr->mask))
return ptr->cpu;
}
panic("could not identify CPU type\n");
return pcx; /* not reached */
}
| linux-master | arch/parisc/kernel/hardware.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Sam Creasey <[email protected]>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
#include <linux/pgtable.h>
#include <asm/assembly.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <uapi/asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <linux/uaccess.h>
#include "signal32.h"
/* Add FRAME_SIZE to the size x and align it to y. All definitions
* that use align_frame will include space for a frame.
*/
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
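/*
 * Worked example: align_frame(x, y) is round_up(x, y) + FRAME_SIZE.
 * The (x + y - 1) % y term strips the remainder, e.g. for x = 100,
 * y = 64: (100 + 64 - 1) - (163 % 64) = 163 - 35 = 128, plus the
 * FRAME_SIZE added up front.
 */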
int main(void)
{
DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
#ifdef CONFIG_SMP
DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
#endif
DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
DEFINE(TASK_PAGEFAULT_DISABLED, offsetof(struct task_struct, pagefault_disabled));
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
/* PT_SZ_ALGN includes space for a stack frame. */
DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PRE_COUNT, offsetof(struct task_struct, thread_info.preempt_count));
BLANK();
DEFINE(ASM_SIGFRAME_SIZE, PARISC_RT_SIGFRAME_SIZE);
DEFINE(SIGFRAME_CONTEXT_REGS, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
#ifdef CONFIG_64BIT
DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE32);
DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct compat_rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE32);
#else
DEFINE(ASM_SIGFRAME_SIZE32, PARISC_RT_SIGFRAME_SIZE);
DEFINE(SIGFRAME_CONTEXT_REGS32, offsetof(struct rt_sigframe, uc.uc_mcontext) - PARISC_RT_SIGFRAME_SIZE);
#endif
BLANK();
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
#elif !defined(CONFIG_64BIT)
DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
BLANK();
DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
BLANK();
return 0;
}
| linux-master | arch/parisc/kernel/asm-offsets.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1997, 1998 Ralf Baechle
* Copyright (C) 1999 SuSE GmbH
* Copyright (C) 1999-2001 Hewlett-Packard Company
* Copyright (C) 1999-2001 Grant Grundler
*/
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(x...) printk(KERN_DEBUG x)
#else
# define DBGC(x...)
#endif
#if DEBUG_RESOURCES
#define DBG_RES(x...) printk(KERN_DEBUG x)
#else
#define DBG_RES(x...)
#endif
struct pci_port_ops *pci_port __ro_after_init;
struct pci_bios_ops *pci_bios __ro_after_init;
static int pci_hba_count __ro_after_init;
/* parisc_pci_hba used by pci_port->in/out() ops to look up bus data. */
#define PCI_HBA_MAX 32
static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __ro_after_init;
/********************************************************************
**
** I/O port space support
**
*********************************************************************/
/* EISA port numbers and PCI port numbers share the same interface. Some
* machines have both EISA and PCI adapters installed. Rather than turn
* pci_port into an array, we reserve bus 0 for EISA and call the EISA
* routines if the access is to a port on bus 0. We don't want to fix
* EISA and ISA drivers which assume port space is <= 0xffff.
*/
#ifdef CONFIG_EISA
#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
#else
#define EISA_IN(size)
#define EISA_OUT(size)
#endif
#define PCI_PORT_IN(type, size) \
u##size in##type (int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_IN(size); \
if (!parisc_pci_hba[b]) return (u##size) -1; \
return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
} \
EXPORT_SYMBOL(in##type);
PCI_PORT_IN(b, 8)
PCI_PORT_IN(w, 16)
PCI_PORT_IN(l, 32)
#define PCI_PORT_OUT(type, size) \
void out##type (u##size d, int addr) \
{ \
int b = PCI_PORT_HBA(addr); \
EISA_OUT(size); \
if (!parisc_pci_hba[b]) return; \
pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
} \
EXPORT_SYMBOL(out##type);
PCI_PORT_OUT(b, 8)
PCI_PORT_OUT(w, 16)
PCI_PORT_OUT(l, 32)
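/*
* Sketch (kept out of the build): what PCI_PORT_IN(b, 8) above expands
* to, written out by hand for illustration with CONFIG_EISA enabled.
*/
#if 0
u8 inb(int addr)
{
int b = PCI_PORT_HBA(addr);
if (EISA_bus && (b == 0)) /* EISA_IN(8) */
return eisa_in8(addr);
if (!parisc_pci_hba[b])
return (u8) -1;
return pci_port->inb(parisc_pci_hba[b], PCI_PORT_ADDR(addr));
}
EXPORT_SYMBOL(inb);
#endif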
/*
* BIOS32 replacement.
*/
static int __init pcibios_init(void)
{
if (!pci_bios)
return -1;
if (pci_bios->init) {
pci_bios->init();
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
/* Set the CLS for PCI as early as possible. */
pci_cache_line_size = pci_dfl_cache_line_size;
return 0;
}
/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
if (pci_bios->fixup_bus) {
pci_bios->fixup_bus(bus);
} else {
printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
}
}
/*
* Called by pci_set_master() - a driver interface.
*
* Legacy PDC guarantees to set:
* Map Memory BAR's into PA IO space.
* Map Expansion ROM BAR into one common PA IO space per bus.
* Map IO BAR's into PCI IO space.
* Command (see below)
* Cache Line Size
* Latency Timer
* Interrupt Line
* PPB: secondary latency timer, io/mmio base/limit,
* bus numbers, bridge control
*
*/
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
/* If someone already mucked with this, don't touch it. */
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat >= 16) return;
/*
** HP generally has fewer devices on the bus than other architectures.
** The upper byte of the 16-bit write lands in PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
(0x80 << 8) | pci_cache_line_size);
}
/*
* pcibios_init_bridge() initializes cache line and default latency
* for pci controllers and pci-pci bridges
*/
void __ref pcibios_init_bridge(struct pci_dev *dev)
{
unsigned short bridge_ctl, bridge_ctl_new;
/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;
/* PCI-PCI bridge - set the cache line and default latency
* (32) for primary and secondary buses.
*/
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl_new = bridge_ctl | PCI_BRIDGE_CTL_PARITY |
PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_MASTER_ABORT;
dev_info(&dev->dev, "Changing bridge control from 0x%08x to 0x%08x\n",
bridge_ctl, bridge_ctl_new);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl_new);
}
/*
* pcibios_align_resource() is called every time the generic PCI code
* wants to generate a new address. While looking for an available
* address, each candidate is first "aligned" and then checked for
* availability until a match is found.
*
* Since we are just checking candidates, don't use any fields other
* than res->start.
*/
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t alignment)
{
resource_size_t mask, align, start = res->start;
DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
pci_name(((struct pci_dev *) data)),
res->parent, res->start, res->end,
(int) res->flags, size, alignment);
/* If it's not IO, then it's gotta be MEM */
align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
/* Align to largest of MIN or input size */
mask = max(alignment, align) - 1;
start += mask;
start &= ~mask;
return start;
}
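/*
* Worked example (illustrative numbers): for an IO candidate with
* res->start == 0x1234 and alignment == 0x1000 (assuming PCIBIOS_MIN_IO
* <= 0x1000), mask == 0xfff and the candidate is rounded up to
* (0x1234 + 0xfff) & ~0xfff == 0x2000.
*/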
/*
* A driver is enabling the device. We make sure that all the appropriate
* bits are set to allow the device to operate as the driver is expecting.
* We enable the port IO and memory IO bits if the device has any BARs of
* that type, and we enable the PERR and SERR bits unconditionally.
* Drivers that do not need parity (eg graphics and possibly networking)
* can clear these bits if they want.
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
u16 cmd, old_cmd;
err = pci_enable_resources(dev, mask);
if (err < 0)
return err;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
#if 0
/* If bridge/bus controller has FBB enabled, child must too. */
if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
cmd |= PCI_COMMAND_FAST_BACK;
#endif
if (cmd != old_cmd) {
dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
/* PA-RISC specific */
void pcibios_register_hba(struct pci_hba_data *hba)
{
if (pci_hba_count >= PCI_HBA_MAX) {
printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
return;
}
parisc_pci_hba[pci_hba_count] = hba;
hba->hba_num = pci_hba_count++;
}
subsys_initcall(pcibios_init);
| linux-master | arch/parisc/kernel/pci.c |
/*
* arch/parisc/kernel/topology.c
*
* Copyright (C) 2017 Helge Deller <[email protected]>
*
* based on arch/arm/kernel/topology.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/cpu.h>
#include <asm/topology.h>
#include <asm/sections.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
/*
* store_cpu_topology() is called at boot, when only one CPU is running,
* and later, once several CPUs have booted, with the mutex
* cpu_hotplug.lock held, which prevents simultaneous write access to
* the cpu_topology array
*/
void store_cpu_topology(unsigned int cpuid)
{
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
struct cpuinfo_parisc *p;
int max_socket = -1;
unsigned long cpu;
/* If the cpu topology has already been set, just return */
if (cpuid_topo->core_id != -1)
return;
#ifdef CONFIG_HOTPLUG_CPU
per_cpu(cpu_devices, cpuid).hotpluggable = 1;
#endif
if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
pr_warn("Failed to register CPU%d device", cpuid);
/* create cpu topology mapping */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0;
p = &per_cpu(cpu_data, cpuid);
for_each_online_cpu(cpu) {
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
if (cpu == cpuid) /* ignore current cpu */
continue;
if (cpuinfo->cpu_loc == p->cpu_loc) {
cpuid_topo->core_id = cpu_topology[cpu].core_id;
if (p->cpu_loc) {
cpuid_topo->core_id++;
cpuid_topo->package_id = cpu_topology[cpu].package_id;
continue;
}
}
if (cpuid_topo->package_id == -1)
max_socket = max(max_socket, cpu_topology[cpu].package_id);
}
if (cpuid_topo->package_id == -1)
cpuid_topo->package_id = max_socket + 1;
update_siblings_masks(cpuid);
pr_info("CPU%u: cpu core %d of socket %d\n",
cpuid,
cpu_topology[cpuid].core_id,
cpu_topology[cpuid].package_id);
}
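/*
* Illustrative outcome (hypothetical two-way box): if CPU1 reports the
* same non-zero cpu_loc as CPU0, the loop above assigns CPU1
* core_id == cpu_topology[0].core_id + 1 within CPU0's package;
* otherwise CPU1 opens a new package with id max_socket + 1.
*/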
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevents simultaneous write access to the cpu_topology array
*/
void __init init_cpu_topology(void)
{
reset_cpu_topology();
}
| linux-master | arch/parisc/kernel/topology.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
extern void relocate_new_kernel(unsigned long head,
unsigned long start,
unsigned long phys);
extern const unsigned int relocate_new_kernel_size;
extern unsigned int kexec_initrd_start_offset;
extern unsigned int kexec_initrd_end_offset;
extern unsigned int kexec_cmdline_offset;
extern unsigned int kexec_free_mem_offset;
static void kexec_show_segment_info(const struct kimage *kimage,
unsigned long n)
{
pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
n,
kimage->segment[n].mem,
kimage->segment[n].mem + kimage->segment[n].memsz,
(unsigned long)kimage->segment[n].memsz,
(unsigned long)kimage->segment[n].memsz / PAGE_SIZE);
}
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug(" type: %d\n", kimage->type);
pr_debug(" start: %lx\n", kimage->start);
pr_debug(" head: %lx\n", kimage->head);
pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++)
kexec_show_segment_info(kimage, i);
#ifdef CONFIG_KEXEC_FILE
if (kimage->file_mode) {
pr_debug("cmdline: %.*s\n", (int)kimage->cmdline_buf_len,
kimage->cmdline_buf);
}
#endif
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void machine_crash_shutdown(struct pt_regs *regs)
{
}
void machine_shutdown(void)
{
smp_send_stop();
while (num_online_cpus() > 1) {
cpu_relax();
mdelay(1);
}
}
void machine_kexec(struct kimage *image)
{
#ifdef CONFIG_64BIT
Elf64_Fdesc desc;
#endif
void (*reloc)(unsigned long head,
unsigned long start,
unsigned long phys);
unsigned long phys = page_to_phys(image->control_code_page);
void *virt = (void *)__fix_to_virt(FIX_TEXT_KEXEC);
struct kimage_arch *arch = &image->arch;
set_fixmap(FIX_TEXT_KEXEC, phys);
flush_cache_all();
#ifdef CONFIG_64BIT
reloc = (void *)&desc;
desc.addr = (long long)virt;
#else
reloc = (void *)virt;
#endif
memcpy(virt, dereference_function_descriptor(relocate_new_kernel),
relocate_new_kernel_size);
*(unsigned long *)(virt + kexec_cmdline_offset) = arch->cmdline;
*(unsigned long *)(virt + kexec_initrd_start_offset) = arch->initrd_start;
*(unsigned long *)(virt + kexec_initrd_end_offset) = arch->initrd_end;
*(unsigned long *)(virt + kexec_free_mem_offset) = PAGE0->mem_free;
flush_cache_all();
flush_tlb_all();
local_irq_disable();
reloc(image->head & PAGE_MASK, image->start, phys);
}
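/*
* Note on the CONFIG_64BIT branch above: 64-bit parisc calls functions
* through function descriptors, so a local Elf64_Fdesc is built by hand
* and its address serves as the callable "pointer" for reloc().
*/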
int machine_kexec_prepare(struct kimage *image)
{
kexec_image_info(image);
return 0;
}
| linux-master | arch/parisc/kernel/kexec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Architecture-specific kernel symbols
*
* Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Dave Kennedy
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
* Copyright (C) 2002-2007 Helge Deller <deller with parisc-linux.org>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/libgcc.h>
#include <linux/string.h>
EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(__xchg64);
#endif
#include <linux/uaccess.h>
EXPORT_SYMBOL(lclear_user);
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
EXPORT_SYMBOL(memcpy_toio);
EXPORT_SYMBOL(memcpy_fromio);
EXPORT_SYMBOL(memset_io);
extern void $$divI(void);
extern void $$divU(void);
extern void $$remI(void);
extern void $$remU(void);
extern void $$mulI(void);
extern void $$divU_3(void);
extern void $$divU_5(void);
extern void $$divU_6(void);
extern void $$divU_9(void);
extern void $$divU_10(void);
extern void $$divU_12(void);
extern void $$divU_7(void);
extern void $$divU_14(void);
extern void $$divU_15(void);
extern void $$divI_3(void);
extern void $$divI_5(void);
extern void $$divI_6(void);
extern void $$divI_7(void);
extern void $$divI_9(void);
extern void $$divI_10(void);
extern void $$divI_12(void);
extern void $$divI_14(void);
extern void $$divI_15(void);
EXPORT_SYMBOL($$divI);
EXPORT_SYMBOL($$divU);
EXPORT_SYMBOL($$remI);
EXPORT_SYMBOL($$remU);
EXPORT_SYMBOL($$mulI);
EXPORT_SYMBOL($$divU_3);
EXPORT_SYMBOL($$divU_5);
EXPORT_SYMBOL($$divU_6);
EXPORT_SYMBOL($$divU_9);
EXPORT_SYMBOL($$divU_10);
EXPORT_SYMBOL($$divU_12);
EXPORT_SYMBOL($$divU_7);
EXPORT_SYMBOL($$divU_14);
EXPORT_SYMBOL($$divU_15);
EXPORT_SYMBOL($$divI_3);
EXPORT_SYMBOL($$divI_5);
EXPORT_SYMBOL($$divI_6);
EXPORT_SYMBOL($$divI_7);
EXPORT_SYMBOL($$divI_9);
EXPORT_SYMBOL($$divI_10);
EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
#ifdef CONFIG_64BIT
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __umoddi3(void);
extern void __moddi3(void);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__moddi3);
#endif
#ifndef CONFIG_64BIT
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
#endif
/* from pacache.S -- needed for clear/copy_page */
EXPORT_SYMBOL(clear_page_asm);
EXPORT_SYMBOL(copy_page_asm);
| linux-master | arch/parisc/kernel/parisc_ksyms.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel dynamically loadable module help for PARISC.
*
* The best reference for this stuff is probably the Processor-
* Specific ELF Supplement for PA-RISC:
* https://parisc.wiki.kernel.org/index.php/File:Elf-pa-hp.pdf
*
* Linux/PA-RISC Project
* Copyright (C) 2003 Randolph Chung <tausq at debian . org>
* Copyright (C) 2008 Helge Deller <[email protected]>
*
* Notes:
* - PLT stub handling
* On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
* ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
* fail to reach their PLT stub if we only create one big stub array for
* all sections at the beginning of the core or init section.
* Instead we now insert individual PLT stub entries directly in front
* of the code sections where the stubs are actually called.
* This reduces the distance between the PCREL location and the stub entry
* so that the relocations can be fulfilled.
* While calculating the final layout of the kernel module in memory, the
* kernel module loader calls arch_mod_section_prepend() to request the
* amount of memory to be reserved in front of each individual section.
*
* - SEGREL32 handling
* We are not doing SEGREL32 handling correctly. According to the ABI, we
* should do a value offset, like this:
* if (in_init(me, (void *)val))
* val -= (uint32_t)me->mem[MOD_INIT_TEXT].base;
* else
* val -= (uint32_t)me->mem[MOD_TEXT].base;
* However, SEGREL32 is used only for PARISC unwind entries, and we want
* those entries to have an absolute address, and not just an offset.
*
* The unwind table mechanism has the ability to specify an offset for
* the unwind table; however, because we split off the init functions into
* a different piece of memory, it is not possible to do this using a
* single offset. Instead, we use the above hack for now.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/unwind.h>
#include <asm/sections.h>
#define RELOC_REACHABLE(val, bits) \
(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ? \
0 : 1)
#define CHECK_RELOC(val, bits) \
if (!RELOC_REACHABLE(val, bits)) { \
printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
me->name, strtab + sym->st_name, (unsigned long)val, bits); \
return -ENOEXEC; \
}
/* Maximum number of GOT entries. We use a long displacement ldd from
* the bottom of the table, which has a maximum signed displacement of
* 0x3fff; however, since we're only going forward, this becomes
* 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
* at most 1023 entries.
* To overcome this 14bit displacement with some kernel modules, we
* instead use the unusual 16bit displacement method (see reassemble_16a)
* which gives us a maximum positive displacement of 0x7fff, and as such
* allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS 4095
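/*
* Worked out from the comment above: 0x7fff / 8 == 4095 in integer
* division, hence the MAX_GOTS value.
*/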
#ifndef CONFIG_64BIT
struct got_entry {
Elf32_Addr addr;
};
struct stub_entry {
Elf32_Word insns[2]; /* each stub entry has two insns */
};
#else
struct got_entry {
Elf64_Addr addr;
};
struct stub_entry {
Elf64_Word insns[4]; /* each stub entry has four insns */
};
#endif
/* Field selection types defined by hppa */
#define rnd(x) (((x)+0x1000)&~0x1fff)
/* fsel: full 32 bits */
#define fsel(v,a) ((v)+(a))
/* lsel: select left 21 bits */
#define lsel(v,a) (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a) (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a) (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
#define mask(x,sz) ((x) & ~((1<<(sz))-1))
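/*
* Worked example for the selectors (illustrative address): with
* v + a == 0x12345678, lsel() yields 0x12345678 >> 11 == 0x2468a and
* rsel() yields 0x12345678 & 0x7ff == 0x678; shifting the left part
* back by 11 and adding the right part reconstructs the address.
*/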
/* The reassemble_* functions prepare an immediate value for
insertion into an opcode. pa-risc uses all sorts of weird bitfields
in the instruction to hold the value. */
static inline int sign_unext(int x, int len)
{
int len_ones;
len_ones = (1 << len) - 1;
return x & len_ones;
}
static inline int low_sign_unext(int x, int len)
{
int sign, temp;
sign = (x >> (len-1)) & 1;
temp = sign_unext(x, len-1);
return (temp << 1) | sign;
}
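/*
* Example, evaluated by hand: low_sign_unext(-2, 5) == 0x1d -- the four
* magnitude bits land in bits 4..1 and the sign in bit 0, which is how
* pa-risc encodes low-sign-extended 5-bit immediates.
*/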
static inline int reassemble_14(int as14)
{
return (((as14 & 0x1fff) << 1) |
((as14 & 0x2000) >> 13));
}
static inline int reassemble_16a(int as16)
{
int s, t;
/* Unusual 16-bit encoding, for wide mode only. */
t = (as16 << 1) & 0xffff;
s = (as16 & 0x8000);
return (t ^ s ^ (s >> 1)) | (s >> 15);
}
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
((as17 & 0x0f800) << 5) |
((as17 & 0x00400) >> 8) |
((as17 & 0x003ff) << 3));
}
static inline int reassemble_21(int as21)
{
return (((as21 & 0x100000) >> 20) |
((as21 & 0x0ffe00) >> 8) |
((as21 & 0x000180) << 7) |
((as21 & 0x00007c) << 14) |
((as21 & 0x000003) << 12));
}
static inline int reassemble_22(int as22)
{
return (((as22 & 0x200000) >> 21) |
((as22 & 0x1f0000) << 5) |
((as22 & 0x00f800) << 5) |
((as22 & 0x000400) >> 8) |
((as22 & 0x0003ff) << 3));
}
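/*
* Sanity sketch (kept out of the build): the reassemble_* helpers only
* permute bits, e.g. the lowest bit of a 17-bit branch displacement
* lands in instruction bit 3, and positive 16a values are simply
* shifted left with the sign moving to bit 0.
*/
#if 0
BUILD_BUG_ON(reassemble_17(1) != (1 << 3));
BUILD_BUG_ON(reassemble_16a(0x10) != 0x20);
#endif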
void *module_alloc(unsigned long size)
{
/* using RWX means less protection for modules, but it's
* easier than trying to map the text, data, init_text and
* init_data correctly */
return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
GFP_KERNEL,
PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
return 0;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF32_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL17F:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#else
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_LTOFF21L:
case R_PARISC_LTOFF14R:
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_FPTR64:
cnt++;
}
}
return cnt;
}
static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
unsigned long cnt = 0;
for (; n > 0; n--, rela++)
{
switch (ELF64_R_TYPE(rela->r_info)) {
case R_PARISC_PCREL22F:
cnt++;
}
}
return cnt;
}
#endif
void module_arch_freeing_init(struct module *mod)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
}
/* Additional bytes needed in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod,
unsigned int section)
{
/* size needed for all stubs of this section (including
* one additional for correct alignment of the stubs) */
return (mod->arch.section[section].stub_entries + 1)
* sizeof(struct stub_entry);
}
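/*
* Worked example (32-bit): a code section needing 3 stubs gets
* (3 + 1) * sizeof(struct stub_entry) == 32 bytes prepended; the spare
* entry leaves room to align the stub array itself.
*/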
#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
CONST Elf_Shdr *sechdrs,
CONST char *secstrings,
struct module *me)
{
unsigned long gots = 0, fdescs = 0, len;
unsigned int i;
struct module_memory *mod_mem;
len = hdr->e_shnum * sizeof(me->arch.section[0]);
me->arch.section = kzalloc(len, GFP_KERNEL);
if (!me->arch.section)
return -ENOMEM;
for (i = 1; i < hdr->e_shnum; i++) {
const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
unsigned int count, s;
if (strncmp(secstrings + sechdrs[i].sh_name,
".PARISC.unwind", 14) == 0)
me->arch.unwind_section = i;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
/* some of these are not relevant for 32-bit/64-bit;
* we leave them here to make the code common. The
* compiler will do its thing and optimize out the
* stuff we don't need
*/
gots += count_gots(rels, nrels);
fdescs += count_fdescs(rels, nrels);
/* XXX: By sorting the relocs and finding duplicate entries
* we could reduce the number of necessary stubs and save
* some memory. */
count = count_stubs(rels, nrels);
if (!count)
continue;
/* so we need relocation stubs. reserve necessary memory. */
/* sh_info gives the section for which we need to add stubs. */
s = sechdrs[i].sh_info;
/* each code section should only have one relocation section */
WARN_ON(me->arch.section[s].stub_entries);
/* store number of stubs we need for this section */
me->arch.section[s].stub_entries += count;
}
mod_mem = &me->mem[MOD_TEXT];
/* align things a bit */
mod_mem->size = ALIGN(mod_mem->size, 16);
me->arch.got_offset = mod_mem->size;
mod_mem->size += gots * sizeof(struct got_entry);
mod_mem->size = ALIGN(mod_mem->size, 16);
me->arch.fdesc_offset = mod_mem->size;
mod_mem->size += fdescs * sizeof(Elf_Fdesc);
me->arch.got_max = gots;
me->arch.fdesc_max = fdescs;
return 0;
}
#ifdef CONFIG_64BIT
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
unsigned int i;
struct got_entry *got;
value += addend;
BUG_ON(value == 0);
got = me->mem[MOD_TEXT].base + me->arch.got_offset;
for (i = 0; got[i].addr; i++)
if (got[i].addr == value)
goto out;
BUG_ON(++me->arch.got_count > me->arch.got_max);
got[i].addr = value;
out:
pr_debug("GOT ENTRY %d[%lx] val %lx\n", i, i*sizeof(struct got_entry),
value);
return i * sizeof(struct got_entry);
}
#endif /* CONFIG_64BIT */
#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
Elf_Fdesc *fdesc = me->mem[MOD_TEXT].base + me->arch.fdesc_offset;
if (!value) {
printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
return 0;
}
/* Look for existing fdesc entry. */
while (fdesc->addr) {
if (fdesc->addr == value)
return (Elf_Addr)fdesc;
fdesc++;
}
BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
/* Create new one */
fdesc->addr = value;
fdesc->gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */
enum elf_stub_type {
ELF_STUB_GOT,
ELF_STUB_MILLI,
ELF_STUB_DIRECT,
};
static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
struct stub_entry *stub;
int __maybe_unused d;
/* initialize stub_offset to point in front of the section */
if (!me->arch.section[targetsec].stub_offset) {
loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
sizeof(struct stub_entry);
/* get correct alignment for the stubs */
loc0 = ALIGN(loc0, sizeof(struct stub_entry));
me->arch.section[targetsec].stub_offset = loc0;
}
/* get address of stub entry */
stub = (void *) me->arch.section[targetsec].stub_offset;
me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);
/* do not write outside available stub area */
BUG_ON(0 == me->arch.section[targetsec].stub_entries--);
#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
* ldil L'XXX,%r1
* be,n R'XXX(%sr4,%r1)
*/
//value = *(unsigned long *)((value + addend) & ~3); /* why? */
stub->insns[0] = 0x20200000; /* ldil L'XXX,%r1 */
stub->insns[1] = 0xe0202002; /* be,n R'XXX(%sr4,%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
#else
/* for 64-bit we have three kinds of stubs:
* for normal function calls:
* ldd 0(%dp),%dp
* ldd 10(%dp), %r1
* bve (%r1)
* ldd 18(%dp), %dp
*
* for millicode:
* ldil 0, %r1
* ldo 0(%r1), %r1
* ldd 10(%r1), %r1
* bve,n (%r1)
*
* for direct branches (jumps between different section of the
* same module):
* ldil 0, %r1
* ldo 0(%r1), %r1
* bve,n (%r1)
*/
switch (stub_type) {
case ELF_STUB_GOT:
d = get_got(me, value, addend);
if (d <= 15) {
/* Format 5 */
stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
stub->insns[0] |= low_sign_unext(d, 5) << 16;
} else {
/* Format 3 */
stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
stub->insns[0] |= reassemble_16a(d);
}
stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
stub->insns[2] = 0xe820d000; /* bve (%r1) */
stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
break;
case ELF_STUB_MILLI:
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
stub->insns[2] = 0x50210020; /* ldd 10(%r1),%r1 */
stub->insns[3] = 0xe820d002; /* bve,n (%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_14(rrsel(value, addend));
break;
case ELF_STUB_DIRECT:
stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
stub->insns[2] = 0xe820d002; /* bve,n (%r1) */
stub->insns[0] |= reassemble_21(lrsel(value, addend));
stub->insns[1] |= reassemble_14(rrsel(value, addend));
break;
}
#endif
return (Elf_Addr)stub;
}
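/*
* Illustration (hypothetical target): a 32-bit ELF_STUB_DIRECT stub for
* value + addend == 0x40001000 is assembled above as
* ldil L'0x40001000,%r1
* be,n R'0x40001000(%sr4,%r1)
* where lrsel() supplies the left 21 bits for the ldil and rrsel()/4
* supplies the be,n displacement.
*/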
#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf32_Word *loc;
Elf32_Addr val;
Elf32_Sword addend;
Elf32_Addr dot;
Elf_Addr loc0;
unsigned int targetsec = sechdrs[relsec].sh_info;
//unsigned long dp = (unsigned long)$global$;
register unsigned long dp asm ("r27");
pr_debug("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
/* This is the start of the target section */
loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf32_Addr)loc & ~0x03;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
pr_debug("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
strtab + sym->st_name,
(uint32_t)loc, val, addend,
r(R_PARISC_PLABEL32)
r(R_PARISC_DIR32)
r(R_PARISC_DIR21L)
r(R_PARISC_DIR14R)
r(R_PARISC_SEGREL32)
r(R_PARISC_DPREL21L)
r(R_PARISC_DPREL14R)
r(R_PARISC_PCREL17F)
r(R_PARISC_PCREL22F)
"UNKNOWN");
#undef r
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_PARISC_PLABEL32:
/* 32-bit function address */
/* no function descriptors... */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR32:
/* direct 32-bit ref */
*loc = fsel(val, addend);
break;
case R_PARISC_DIR21L:
/* left 21 bits of effective address */
val = lrsel(val, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DIR14R:
/* right 14 bits of effective address */
val = rrsel(val, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
*loc = fsel(val, addend);
break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_DPREL14R:
/* right 14 bit of relative address */
val = rrsel(val - dp, addend);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL17F:
/* 17-bit PC relative address */
/* calculate direct call offset */
val += addend;
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 17)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value, addend,
ELF_STUB_DIRECT, loc0, targetsec);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 17);
}
*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
break;
case R_PARISC_PCREL22F:
/* 22-bit PC relative address; only defined for pa20 */
/* calculate direct call offset */
val += addend;
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 22)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value, addend,
ELF_STUB_DIRECT, loc0, targetsec);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 22);
}
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_PCREL32:
/* 32-bit PC relative address */
*loc = val - dot - 8 + addend;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#else
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
int i;
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
Elf64_Word *loc;
Elf64_Xword *loc64;
Elf64_Addr val;
Elf64_Sxword addend;
Elf64_Addr dot;
Elf_Addr loc0;
unsigned int targetsec = sechdrs[relsec].sh_info;
pr_debug("Applying relocate section %u to %u\n", relsec,
targetsec);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[targetsec].sh_addr
+ rel[i].r_offset;
/* This is the start of the target section */
loc0 = sechdrs[targetsec].sh_addr;
/* This is the symbol it is referring to */
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
if (!sym->st_value) {
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
dot = (Elf64_Addr)loc & ~0x03;
loc64 = (Elf64_Xword *)loc;
val = sym->st_value;
addend = rel[i].r_addend;
#if 0
#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
strtab + sym->st_name,
loc, val, addend,
r(R_PARISC_LTOFF14R)
r(R_PARISC_LTOFF21L)
r(R_PARISC_PCREL22F)
r(R_PARISC_DIR64)
r(R_PARISC_SEGREL32)
r(R_PARISC_FPTR64)
"UNKNOWN");
#undef r
#endif
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_PARISC_LTOFF21L:
/* LT-relative; left 21 bits */
val = get_got(me, val, addend);
pr_debug("LTOFF21L Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
val = lrsel(val, 0);
*loc = mask(*loc, 21) | reassemble_21(val);
break;
case R_PARISC_LTOFF14R:
/* L(ltoff(val+addend)) */
/* LT-relative; right 14 bits */
val = get_got(me, val, addend);
val = rrsel(val, 0);
pr_debug("LTOFF14R Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
*loc = mask(*loc, 14) | reassemble_14(val);
break;
case R_PARISC_PCREL22F:
/* PC-relative; 22 bits */
pr_debug("PCREL22F Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
val += addend;
/* can we reach it locally? */
if (within_module(val, me)) {
/* this is the case where the symbol is local
* to the module, but in a different section,
* so stub the jump in case it's more than 22
* bits away */
val = (val - dot - 8)/4;
if (!RELOC_REACHABLE(val, 22)) {
/* direct distance too far, create
* stub entry instead */
val = get_stub(me, sym->st_value,
addend, ELF_STUB_DIRECT,
loc0, targetsec);
} else {
/* Ok, we can reach it directly. */
val = sym->st_value;
val += addend;
}
} else {
val = sym->st_value;
if (strncmp(strtab + sym->st_name, "$$", 2)
== 0)
val = get_stub(me, val, addend, ELF_STUB_MILLI,
loc0, targetsec);
else
val = get_stub(me, val, addend, ELF_STUB_GOT,
loc0, targetsec);
}
pr_debug("STUB FOR %s loc %px, val %llx+%llx at %llx\n",
strtab + sym->st_name, loc, sym->st_value,
addend, val);
val = (val - dot - 8)/4;
CHECK_RELOC(val, 22);
*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
break;
case R_PARISC_PCREL32:
/* 32-bit PC relative address */
*loc = val - dot - 8 + addend;
break;
case R_PARISC_PCREL64:
/* 64-bit PC relative address */
*loc64 = val - dot - 8 + addend;
break;
case R_PARISC_DIR64:
/* 64-bit effective address */
*loc64 = val + addend;
break;
case R_PARISC_SEGREL32:
/* 32-bit segment relative address */
/* See note about special handling of SEGREL32 at
* the beginning of this file.
*/
*loc = fsel(val, addend);
break;
case R_PARISC_SECREL32:
/* 32-bit section relative address. */
*loc = fsel(val, addend);
break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if (within_module(val + addend, me)) {
*loc64 = get_fdesc(me, val+addend);
pr_debug("FDESC for %s at %llx points to %llx\n",
strtab + sym->st_name, *loc64,
((Elf_Fdesc *)*loc64)->addr);
} else {
/* if the symbol is not local to this
* module then val+addend is a pointer
* to the function descriptor */
pr_debug("Non local FPTR64 Symbol %s loc %p val %llx\n",
strtab + sym->st_name,
loc, val);
*loc64 = val + addend;
}
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
#endif
static void
register_unwind_table(struct module *me,
const Elf_Shdr *sechdrs)
{
unsigned char *table, *end;
unsigned long gp;
if (!me->arch.unwind_section)
return;
table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
end = table + sechdrs[me->arch.unwind_section].sh_size;
gp = (Elf_Addr)me->mem[MOD_TEXT].base + me->arch.got_offset;
pr_debug("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
me->arch.unwind_section, table, end, gp);
me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}
static void
deregister_unwind_table(struct module *me)
{
if (me->arch.unwind)
unwind_table_remove(me->arch.unwind);
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
int i;
unsigned long nsyms;
const char *strtab = NULL;
const Elf_Shdr *s;
char *secstrings;
int symindex __maybe_unused = -1;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
Elf_Fdesc *entry;
u32 *addr;
entry = (Elf_Fdesc *)me->init;
printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
entry->gp, entry->addr);
addr = (u32 *)entry->addr;
printk("INSNS: %x %x %x %x\n",
addr[0], addr[1], addr[2], addr[3]);
printk("got entries used %ld, gots max %ld\n"
"fdescs used %ld, fdescs max %ld\n",
me->arch.got_count, me->arch.got_max,
me->arch.fdesc_count, me->arch.fdesc_max);
#endif
register_unwind_table(me, sechdrs);
/* haven't filled in me->symtab yet, so have to find it
* ourselves */
for (i = 1; i < hdr->e_shnum; i++) {
if(sechdrs[i].sh_type == SHT_SYMTAB
&& (sechdrs[i].sh_flags & SHF_ALLOC)) {
int strindex = sechdrs[i].sh_link;
symindex = i;
/* FIXME: AWFUL HACK
* The cast is to drop the const from
* the sechdrs pointer */
symhdr = (Elf_Shdr *)&sechdrs[i];
strtab = (char *)sechdrs[strindex].sh_addr;
break;
}
}
pr_debug("module %s: strtab %p, symhdr %p\n",
me->name, strtab, symhdr);
if(me->arch.got_count > MAX_GOTS) {
printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
me->name, me->arch.got_count, MAX_GOTS);
return -EINVAL;
}
kfree(me->arch.section);
me->arch.section = NULL;
/* no symbol table */
if(symhdr == NULL)
return 0;
oldptr = (void *)symhdr->sh_addr;
newptr = oldptr + 1; /* we start counting at 1 */
nsyms = symhdr->sh_size / sizeof(Elf_Sym);
pr_debug("OLD num_symtab %lu\n", nsyms);
for (i = 1; i < nsyms; i++) {
oldptr++; /* note, count starts at 1 so preincrement */
if(strncmp(strtab + oldptr->st_name,
".L", 2) == 0)
continue;
if(newptr != oldptr)
*newptr++ = *oldptr;
else
newptr++;
}
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
pr_debug("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
/* find .altinstructions section */
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
void *aseg = (void *) s->sh_addr;
char *secname = secstrings + s->sh_name;
if (!strcmp(".altinstructions", secname))
/* patch .altinstructions */
apply_alternatives(aseg, aseg + s->sh_size, me->name);
#ifdef CONFIG_DYNAMIC_FTRACE
/* For 32 bit kernels we're compiling modules with
* -ffunction-sections so we must relocate the addresses in the
* ftrace callsite section.
*/
if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) {
int err;
if (s->sh_type == SHT_REL)
err = apply_relocate((Elf_Shdr *)sechdrs,
strtab, symindex,
s - sechdrs, me);
else if (s->sh_type == SHT_RELA)
err = apply_relocate_add((Elf_Shdr *)sechdrs,
strtab, symindex,
s - sechdrs, me);
if (err)
return err;
}
#endif
}
return 0;
}
void module_arch_cleanup(struct module *mod)
{
deregister_unwind_table(mod);
}
#ifdef CONFIG_64BIT
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
unsigned long start_opd = (Elf64_Addr)mod->mem[MOD_TEXT].base +
mod->arch.fdesc_offset;
unsigned long end_opd = start_opd +
mod->arch.fdesc_count * sizeof(Elf64_Fdesc);
if (ptr < (void *)start_opd || ptr >= (void *)end_opd)
return ptr;
return dereference_function_descriptor(ptr);
}
#endif
| linux-master | arch/parisc/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* interfaces to Chassis Codes via PDC (firmware)
*
* Copyright (C) 2002 Laurent Canet <[email protected]>
* Copyright (C) 2002-2006 Thibaut VARENE <[email protected]>
*
* TODO: poll chassis warns, trigger (configurable) machine shutdown when
* needed.
* Find out how to get Chassis warnings out of PAT boxes?
*/
#undef PDC_CHASSIS_DEBUG
#ifdef PDC_CHASSIS_DEBUG
#define DPRINTK(fmt, args...) printk(fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/pdc_chassis.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/led.h>
#define PDC_CHASSIS_VER "0.05"
#ifdef CONFIG_PDC_CHASSIS
static unsigned int pdc_chassis_enabled __read_mostly = 1;
/**
* pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
* @str: configuration param: 0 to disable chassis log
* Return: 1
*/
static int __init pdc_chassis_setup(char *str)
{
/*panic_timeout = simple_strtoul(str, NULL, 0);*/
get_option(&str, &pdc_chassis_enabled);
return 1;
}
__setup("pdcchassis=", pdc_chassis_setup);
/**
* pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
*
* Currently, only E class and A180 are known to work with this.
* Inspired by Christoph Plattner
*/
#if 0
static void __init pdc_chassis_checkold(void)
{
switch(CPU_HVERSION) {
case 0x480: /* E25 */
case 0x481: /* E35 */
case 0x482: /* E45 */
case 0x483: /* E55 */
case 0x516: /* A180 */
break;
default:
break;
}
DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
}
#endif
/**
* pdc_chassis_panic_event() - Called by the panic handler.
* @this: unused
* @event: unused
* @ptr: unused
*
* As soon as a panic occurs, we should inform the PDC.
*/
static int pdc_chassis_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_panic_block = {
.notifier_call = pdc_chassis_panic_event,
.priority = INT_MAX,
};
/**
* pdc_chassis_reboot_event() - Called by the reboot handler.
* @this: unused
* @event: unused
* @ptr: unused
*
* As soon as a reboot occurs, we should inform the PDC.
*/
static int pdc_chassis_reboot_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
return NOTIFY_DONE;
}
static struct notifier_block pdc_chassis_reboot_block = {
.notifier_call = pdc_chassis_reboot_event,
.priority = INT_MAX,
};
#endif /* CONFIG_PDC_CHASSIS */
/**
* parisc_pdc_chassis_init() - Called at boot time.
*/
void __init parisc_pdc_chassis_init(void)
{
#ifdef CONFIG_PDC_CHASSIS
if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
/* Let's see if we have something to handle... */
printk(KERN_INFO "Enabling %s chassis codes support v%s\n",
is_pdc_pat() ? "PDC_PAT" : "regular",
PDC_CHASSIS_VER);
/* initialize panic notifier chain */
atomic_notifier_chain_register(&panic_notifier_list,
&pdc_chassis_panic_block);
/* initialize reboot notifier chain */
register_reboot_notifier(&pdc_chassis_reboot_block);
}
#endif /* CONFIG_PDC_CHASSIS */
}
/**
* pdc_chassis_send_status() - Sends a predefined message to the chassis,
* and changes the front panel LEDs according to the new system state
* @message: Type of message, one of PDC_CHASSIS_DIRECT_* values.
*
* Only machines with 64-bit PDC PAT and those reported in
* pdc_chassis_checkold() are supported at the moment.
*
* returns 0 if no error, -1 if no supported PDC is present or invalid message,
* else returns the appropriate PDC error code.
*
* For a list of predefined messages, see asm-parisc/pdc_chassis.h
*/
int pdc_chassis_send_status(int message)
{
/* Maybe we should do this in another way? */
int retval = 0;
#ifdef CONFIG_PDC_CHASSIS
if (likely(pdc_chassis_enabled)) {
DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
switch(message) {
case PDC_CHASSIS_DIRECT_BSTART:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
break;
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
break;
case PDC_CHASSIS_DIRECT_HPMC:
retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
break;
default:
retval = -1;
}
} else retval = -1;
#else
if (1) {
switch (message) {
case PDC_CHASSIS_DIRECT_BSTART:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_INIT));
break;
case PDC_CHASSIS_DIRECT_BCOMPLETE:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
break;
case PDC_CHASSIS_DIRECT_SHUTDOWN:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
break;
case PDC_CHASSIS_DIRECT_HPMC:
case PDC_CHASSIS_DIRECT_PANIC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
break;
case PDC_CHASSIS_DIRECT_LPMC:
retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
break;
default:
retval = -1;
}
} else retval = -1;
#endif /* CONFIG_64BIT */
} /* if (pdc_chassis_enabled) */
/* if system has LCD display, update current string */
if (retval != -1 && IS_ENABLED(CONFIG_CHASSIS_LCD_LED))
lcd_print(NULL);
#endif /* CONFIG_PDC_CHASSIS */
return retval;
}
#ifdef CONFIG_PDC_CHASSIS_WARN
#ifdef CONFIG_PROC_FS
static int pdc_chassis_warn_show(struct seq_file *m, void *v)
{
unsigned long warn;
u32 warnreg;
if (pdc_chassis_warn(&warn) != PDC_OK)
return -EIO;
warnreg = (warn & 0xFFFFFFFF);
if ((warnreg >> 24) & 0xFF)
seq_printf(m, "Chassis component failure! (eg fan or PSU): 0x%.2x\n",
(warnreg >> 24) & 0xFF);
seq_printf(m, "Battery: %s\n", (warnreg & 0x04) ? "Low!" : "OK");
seq_printf(m, "Temp low: %s\n", (warnreg & 0x02) ? "Exceeded!" : "OK");
seq_printf(m, "Temp mid: %s\n", (warnreg & 0x01) ? "Exceeded!" : "OK");
return 0;
}
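/*
* Example decode (made-up register value): warn == 0x01000004 would
* report component failure code 0x01 and "Battery: Low!", since bits
* 31..24 carry the failing component and bit 2 the battery flag.
*/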
static int __init pdc_chassis_create_procfs(void)
{
unsigned long test;
int ret;
ret = pdc_chassis_warn(&test);
if ((ret == PDC_BAD_PROC) || (ret == PDC_BAD_OPTION)) {
/* seems that some boxes (eg L1000) do not implement this */
printk(KERN_INFO "Chassis warnings not supported.\n");
return 0;
}
printk(KERN_INFO "Enabling PDC chassis warnings support v%s\n",
PDC_CHASSIS_VER);
proc_create_single("chassis", 0400, NULL, pdc_chassis_warn_show);
return 0;
}
__initcall(pdc_chassis_create_procfs);
#endif /* CONFIG_PROC_FS */
#endif /* CONFIG_PDC_CHASSIS_WARN */
| linux-master | arch/parisc/kernel/pdc_chassis.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Load ELF vmlinux file for the kexec_file_load syscall.
*
* Copyright (c) 2019 Sven Schnelle <[email protected]>
*
*/
#include <linux/elf.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/module.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/types.h>
static void *elf_load(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
unsigned long initrd_len, char *cmdline,
unsigned long cmdline_len)
{
int ret, i;
unsigned long kernel_load_addr;
struct elfhdr ehdr;
struct kexec_elf_info elf_info;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = -1UL, };
ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
goto out;
ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr);
if (ret)
goto out;
image->start = __pa(elf_info.ehdr->e_entry);
for (i = 0; i < image->nr_segments; i++)
image->segment[i].mem = __pa(image->segment[i].mem);
pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n",
kernel_load_addr, image->start);
if (initrd != NULL) {
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = false;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem);
image->arch.initrd_start = kbuf.mem;
image->arch.initrd_end = kbuf.mem + initrd_len;
}
if (cmdline != NULL) {
kbuf.buffer = cmdline;
kbuf.bufsz = kbuf.memsz = ALIGN(cmdline_len, 8);
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = false;
kbuf.buf_min = PAGE0->mem_free + PAGE_SIZE;
kbuf.buf_max = kernel_load_addr;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem);
image->arch.cmdline = kbuf.mem;
}
out:
return NULL;
}
const struct kexec_file_ops kexec_elf_ops = {
.probe = kexec_elf_probe,
.load = elf_load,
};
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_elf_ops,
NULL
};
| linux-master | arch/parisc/kernel/kexec_file.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Helge Deller <[email protected]>
*
* based on arch/s390/kernel/vdso.c which is
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky ([email protected])
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/timekeeper_internal.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/cacheflush.h>
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
static int vdso_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *vma)
{
current->mm->context.vdso_base = vma->vm_start;
return 0;
}
#ifdef CONFIG_64BIT
static struct vm_special_mapping vdso64_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
#endif
static struct vm_special_mapping vdso32_mapping = {
.name = "[vdso]",
.mremap = vdso_mremap,
};
/*
* This is called from binfmt_elf; we create the special vma for the
* vDSO and insert it into the mm struct tree.
*/
int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack)
{
unsigned long vdso_text_start, vdso_text_len, map_base;
struct vm_special_mapping *vdso_mapping;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int rc;
if (mmap_write_lock_killable(mm))
return -EINTR;
#ifdef CONFIG_64BIT
if (!is_compat_task()) {
vdso_text_len = &vdso64_end - &vdso64_start;
vdso_mapping = &vdso64_mapping;
} else
#endif
{
vdso_text_len = &vdso32_end - &vdso32_start;
vdso_mapping = &vdso32_mapping;
}
map_base = mm->mmap_base;
if (current->flags & PF_RANDOMIZE)
map_base -= get_random_u32_below(0x20) * PAGE_SIZE;
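/*
 * Note: get_random_u32_below(0x20) returns 0..31, so the search base
 * is lowered by at most 31 pages.
 */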
vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
/* VM_MAYWRITE for COW so gdb can set breakpoints */
vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso_mapping);
if (IS_ERR(vma)) {
do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL);
rc = PTR_ERR(vma);
} else {
current->mm->context.vdso_base = vdso_text_start;
rc = 0;
}
mmap_write_unlock(mm);
return rc;
}
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int pages = (end - start) >> PAGE_SHIFT;
struct page **pagelist;
int i;
pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!pagelist)
panic("%s: Cannot allocate page list for VDSO", __func__);
for (i = 0; i < pages; i++)
pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
return pagelist;
}
static int __init vdso_init(void)
{
#ifdef CONFIG_64BIT
vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
#endif
if (IS_ENABLED(CONFIG_COMPAT) || !IS_ENABLED(CONFIG_64BIT))
vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
return 0;
}
arch_initcall(vdso_init);
| linux-master | arch/parisc/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Alternative live-patching for parisc.
* Copyright (C) 2018 Helge Deller <[email protected]>
*
*/
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <linux/module.h>
static int no_alternatives;
static int __init setup_no_alternatives(char *str)
{
no_alternatives = 1;
return 1;
}
__setup("no-alternatives", setup_no_alternatives);
void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end, const char *module_name)
{
struct alt_instr *entry;
int index = 0, applied = 0;
int num_cpus = num_present_cpus();
u16 cond_check;
cond_check = ALT_COND_ALWAYS |
((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) |
((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) |
(running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) |
((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) |
/*
* If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
* set (bit #61, big endian), we have to flush and sync every
* time IO-PDIR is changed in Ike/Astro.
*/
(((boot_cpu_data.cpu_type > pcxw_) &&
((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0))
? ALT_COND_NO_IOC_FDC : 0);
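/*
 * Illustrative note: cond_check now holds every condition that is true
 * on this machine. An entry below is patched iff (cond & cond_check)
 * is non-zero; e.g. on a single-CPU system cond_check contains
 * ALT_COND_NO_SMP, so entries marked ALT_COND_NO_SMP get applied.
 */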
for (entry = start; entry < end; entry++, index++) {
u32 *from, replacement;
u16 cond;
s16 len;
from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
len = entry->len;
cond = entry->cond;
replacement = entry->replacement;
WARN_ON(!cond);
if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives)
continue;
pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
index, cond, len, from, replacement);
/* Bounce out if none of the conditions are true. */
if ((cond & cond_check) == 0)
continue;
/* Want to replace pdtlb by a pdtlb,l instruction? */
if (replacement == INSN_PxTLB) {
replacement = *from;
if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
replacement |= (1 << 10); /* set el bit */
}
/*
* Replace instruction with NOPs?
* For long distance insert a branch instruction instead.
*/
if (replacement == INSN_NOP && len > 1)
replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
pr_debug("ALTERNATIVE %3d: Cond %2x, Replace %2d instructions to 0x%08x @ 0x%px (%pS)\n",
index, cond, len, replacement, from, from);
if (len < 0) {
/* Replace multiple instruction by new code */
u32 *source;
len = -len;
source = (u32 *)((ulong)&entry->replacement + entry->replacement);
memcpy(from, source, 4 * len);
} else {
/* Replace by one instruction */
*from = replacement;
}
applied++;
}
pr_info("%s%salternatives: applied %d out of %d patches\n",
module_name ? : "", module_name ? " " : "",
applied, index);
}
void __init apply_alternatives_all(void)
{
set_kernel_text_rw(1);
apply_alternatives((struct alt_instr *) &__alt_instructions,
(struct alt_instr *) &__alt_instructions_end, NULL);
if (cache_info.dc_size == 0 && cache_info.ic_size == 0) {
pr_info("alternatives: optimizing cache-flushes.\n");
static_branch_disable(&parisc_has_cache);
}
if (cache_info.dc_size == 0)
static_branch_disable(&parisc_has_dcache);
if (cache_info.ic_size == 0)
static_branch_disable(&parisc_has_icache);
set_kernel_text_rw(0);
}
| linux-master | arch/parisc/kernel/alternative.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Kernel unwinding support
*
* (c) 2002-2004 Randolph Chung <[email protected]>
*
* Derived partially from the IA64 implementation. The PA-RISC
* Runtime Architecture Document is also a useful reference to
* understand what is happening here
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) pr_debug(x)
#else
#define dbg(x...) do { } while (0)
#endif
#define KERNEL_START (KERNEL_BINARY_TEXT_START)
extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];
static DEFINE_SPINLOCK(unwind_lock);
/*
* the kernel unwind block is not dynamically allocated so that
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
const struct unwind_table_entry *e = NULL;
unsigned long lo, hi, mid;
lo = 0;
hi = table->length - 1;
while (lo <= hi) {
mid = (hi - lo) / 2 + lo;
e = &table->table[mid];
if (addr < e->region_start)
hi = mid - 1;
else if (addr > e->region_end)
lo = mid + 1;
else
return e;
}
return NULL;
}
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
struct unwind_table *table;
const struct unwind_table_entry *e = NULL;
if (addr >= kernel_unwind_table.start &&
addr <= kernel_unwind_table.end)
e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
else {
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_for_each_entry(table, &unwind_tables, list) {
if (addr >= table->start &&
addr <= table->end)
e = find_unwind_entry_in_table(table, addr);
if (e) {
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
}
}
spin_unlock_irqrestore(&unwind_lock, flags);
}
return e;
}
static void
unwind_table_init(struct unwind_table *table, const char *name,
unsigned long base_addr, unsigned long gp,
void *table_start, void *table_end)
{
struct unwind_table_entry *start = table_start;
struct unwind_table_entry *end =
(struct unwind_table_entry *)table_end - 1;
table->name = name;
table->base_addr = base_addr;
table->gp = gp;
table->start = base_addr + start->region_start;
table->end = base_addr + end->region_end;
table->table = (struct unwind_table_entry *)table_start;
table->length = end - start + 1;
INIT_LIST_HEAD(&table->list);
for (; start <= end; start++) {
if (start < end &&
start->region_end > (start+1)->region_start) {
pr_warn("Out of order unwind entry! %px and %px\n",
start, start+1);
}
start->region_start += base_addr;
start->region_end += base_addr;
}
}
static int cmp_unwind_table_entry(const void *a, const void *b)
{
return ((const struct unwind_table_entry *)a)->region_start
- ((const struct unwind_table_entry *)b)->region_start;
}
static void
unwind_table_sort(struct unwind_table_entry *start,
struct unwind_table_entry *finish)
{
sort(start, finish - start, sizeof(struct unwind_table_entry),
cmp_unwind_table_entry, NULL);
}
struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
unsigned long gp,
void *start, void *end)
{
struct unwind_table *table;
unsigned long flags;
struct unwind_table_entry *s = (struct unwind_table_entry *)start;
struct unwind_table_entry *e = (struct unwind_table_entry *)end;
unwind_table_sort(s, e);
table = kmalloc(sizeof(struct unwind_table), GFP_USER);
if (table == NULL)
return NULL;
unwind_table_init(table, name, base_addr, gp, start, end);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&table->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
return table;
}
void unwind_table_remove(struct unwind_table *table)
{
unsigned long flags;
spin_lock_irqsave(&unwind_lock, flags);
list_del(&table->list);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(table);
}
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
long start __maybe_unused, stop __maybe_unused;
register unsigned long gp __asm__ ("r27");
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
gp,
&__start___unwind[0], &__stop___unwind[0]);
#if 0
{
int i;
for (i = 0; i < 10; i++)
{
printk("region 0x%x-0x%x\n",
__start___unwind[i].region_start,
__start___unwind[i].region_end);
}
}
#endif
return 0;
}
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
/*
* We have to use void * instead of a function pointer, because
* function pointers aren't pointers to the function on 64-bit.
* Make them const so the compiler knows they live in .text
* Note: We could use dereference_kernel_function_descriptor()
* instead but we want to keep it simple here.
*/
extern void * const ret_from_kernel_thread;
extern void * const syscall_exit;
extern void * const intr_return;
extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */
void *ptr;
ptr = dereference_kernel_function_descriptor(&handle_interruption);
if (pc_is_kernel_fn(pc, ptr)) {
struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
dbg("Unwinding through handle_interruption()\n");
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
return 1;
}
if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
pc_is_kernel_fn(pc, syscall_exit)) {
info->prev_sp = info->prev_ip = 0;
return 1;
}
if (pc_is_kernel_fn(pc, intr_return)) {
struct pt_regs *regs;
dbg("Found intr_return()\n");
regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
info->prev_sp = regs->gr[30];
info->prev_ip = regs->iaoq[0];
info->rp = regs->gr[2];
return 1;
}
if (pc_is_kernel_fn(pc, _switch_to) ||
pc_is_kernel_fn(pc, _switch_to_ret)) {
info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
return 1;
}
#ifdef CONFIG_IRQSTACKS
if (pc_is_kernel_fn(pc, _call_on_stack)) {
info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
return 1;
}
#endif
return 0;
}
static void unwind_frame_regs(struct unwind_frame_info *info)
{
const struct unwind_table_entry *e;
unsigned long npc;
unsigned int insn;
long frame_size = 0;
int looking_for_rp, rpoffset = 0;
e = find_unwind_entry(info->ip);
if (e == NULL) {
unsigned long sp;
dbg("Cannot find unwind entry for %pS; forced unwinding\n",
(void *) info->ip);
/* Since we are doing the unwinding blind, we don't know if
we are adjusting the stack correctly or extracting the rp
correctly. The rp is checked to see if it belongs to the
kernel text section; if not, we assume we don't have a
correct stack frame and we continue to unwind the stack.
This is not quite correct, and will fail for loadable
modules. */
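/* Sketch of the blind scan below, assuming the usual parisc ABI: the
stack grows upward and frames are 64-byte aligned, so candidate
previous frames are probed downward in 64-byte steps from sp & ~63. */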
sp = info->sp & ~63;
do {
unsigned long tmp;
info->prev_sp = sp - 64;
info->prev_ip = 0;
/* Check if stack is inside kernel stack area */
if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
>= THREAD_SIZE) {
info->prev_sp = 0;
break;
}
if (copy_from_kernel_nofault(&tmp,
(void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
break;
info->prev_ip = tmp;
sp = info->prev_sp;
} while (!kernel_text_address(info->prev_ip));
info->rp = 0;
dbg("analyzing func @ %lx with no unwind info, setting "
"prev_sp=%lx prev_ip=%lx\n", info->ip,
info->prev_sp, info->prev_ip);
} else {
dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
"Save_RP = %d, Millicode = %d size = %u\n",
e->region_start, e->region_end, e->Save_SP, e->Save_RP,
e->Millicode, e->Total_frame_size);
looking_for_rp = e->Save_RP;
for (npc = e->region_start;
(frame_size < (e->Total_frame_size << 3) ||
looking_for_rp) &&
npc < info->ip;
npc += 4) {
insn = *(unsigned int *)npc;
if ((insn & 0xffffc001) == 0x37de0000 ||
(insn & 0xffe00001) == 0x6fc00000) {
/* ldo X(sp), sp, or stwm X,D(sp) */
frame_size += (insn & 0x3fff) >> 1;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if ((insn & 0xffe00009) == 0x73c00008) {
/* std,ma X,D(sp) */
frame_size += ((insn >> 4) & 0x3ff) << 3;
dbg("analyzing func @ %lx, insn=%08x @ "
"%lx, frame_size = %ld\n", info->ip,
insn, npc, frame_size);
} else if (insn == 0x6bc23fd9) {
/* stw rp,-20(sp) */
rpoffset = 20;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=stw rp,"
"-20(sp) @ %lx\n", info->ip, npc);
} else if (insn == 0x0fc212c1) {
/* std rp,-16(sr0,sp) */
rpoffset = 16;
looking_for_rp = 0;
dbg("analyzing func @ %lx, insn=std rp,"
"-16(sp) @ %lx\n", info->ip, npc);
}
}
if (frame_size > e->Total_frame_size << 3)
frame_size = e->Total_frame_size << 3;
if (!unwind_special(info, e->region_start, frame_size)) {
info->prev_sp = info->sp - frame_size;
if (e->Millicode)
info->rp = info->r31;
else if (rpoffset)
info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
info->prev_ip = info->rp;
info->rp = 0;
}
dbg("analyzing func @ %lx, setting prev_sp=%lx "
"prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
info->prev_ip, npc);
}
}
void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
struct pt_regs *regs)
{
memset(info, 0, sizeof(struct unwind_frame_info));
info->t = t;
info->sp = regs->gr[30];
info->ip = regs->iaoq[0];
info->rp = regs->gr[2];
info->r31 = regs->gr[31];
dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
t ? (int)t->pid : -1, info->sp, info->ip);
}
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
struct pt_regs *r = &t->thread.regs;
struct pt_regs *r2;
r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
if (!r2)
return;
*r2 = *r;
r2->gr[30] = r->ksp;
r2->iaoq[0] = r->kpc;
unwind_frame_init(info, t, r2);
kfree(r2);
}
#define get_parisc_stackpointer() ({ \
unsigned long sp; \
__asm__("copy %%r30, %0" : "=r"(sp)); \
(sp); \
})
void unwind_frame_init_task(struct unwind_frame_info *info,
struct task_struct *task, struct pt_regs *regs)
{
task = task ? task : current;
if (task == current) {
struct pt_regs r;
if (!regs) {
memset(&r, 0, sizeof(r));
r.iaoq[0] = _THIS_IP_;
r.gr[2] = _RET_IP_;
r.gr[30] = get_parisc_stackpointer();
regs = &r;
}
unwind_frame_init(info, task, regs);
} else {
unwind_frame_init_from_blocked_task(info, task);
}
}
int unwind_once(struct unwind_frame_info *next_frame)
{
unwind_frame_regs(next_frame);
if (next_frame->prev_sp == 0 ||
next_frame->prev_ip == 0)
return -1;
next_frame->sp = next_frame->prev_sp;
next_frame->ip = next_frame->prev_ip;
next_frame->prev_sp = 0;
next_frame->prev_ip = 0;
dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
next_frame->t ? (int)next_frame->t->pid : -1,
next_frame->sp, next_frame->ip);
return 0;
}
int unwind_to_user(struct unwind_frame_info *info)
{
int ret;
do {
ret = unwind_once(info);
} while (!ret && !(info->ip & 3));
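/*
 * The low two bits of an IAOQ value hold the privilege level (0 for
 * the kernel, 3 for user space), hence the (info->ip & 3) test above
 * detects that the unwind has crossed into user space.
 */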
return ret;
}
unsigned long return_address(unsigned int level)
{
struct unwind_frame_info info;
/* initialize unwind info */
unwind_frame_init_task(&info, current, NULL);
/* unwind stack */
level += 2;
do {
if (unwind_once(&info) < 0 || info.ip == 0)
return 0;
if (!kernel_text_address(info.ip))
return 0;
} while (info.ip && level--);
return info.ip;
}
| linux-master | arch/parisc/kernel/unwind.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999 Helge Deller <[email protected]>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <[email protected]>
* Modifications copyright 2000 Philipp Rumpf <[email protected]>
* Modifications copyright 2001 Ryan Bradetich <[email protected]>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*/
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#define PCI_DEBUG
#include <linux/pci.h>
#undef PCI_DEBUG
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/start_kernel.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <asm/pdc_chassis.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/unwind.h>
#include <asm/smp.h>
static char __initdata command_line[COMMAND_LINE_SIZE];
static void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
char *p;
*cmdline_p = command_line;
/* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
if (boot_args[0] < 64)
return; /* return if called from hpux boot loader */
/* Collect stuff passed in from the boot loader */
strscpy(boot_command_line, (char *)__va(boot_args[1]),
COMMAND_LINE_SIZE);
/* autodetect console type (if not done by palo yet) */
p = boot_command_line;
if (!str_has_prefix(p, "console=") && !strstr(p, " console=")) {
strlcat(p, " console=", COMMAND_LINE_SIZE);
if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
strlcat(p, "ttyS0", COMMAND_LINE_SIZE);
else
strlcat(p, "tty0", COMMAND_LINE_SIZE);
}
/* default to use early console */
if (!strstr(p, "earlycon"))
strlcat(p, " earlycon=pdc", COMMAND_LINE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
/* did palo pass us a ramdisk? */
if (boot_args[2] != 0) {
initrd_start = (unsigned long)__va(boot_args[2]);
initrd_end = (unsigned long)__va(boot_args[3]);
}
#endif
strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
}
#ifdef CONFIG_PA11
static void __init dma_ops_init(void)
{
switch (boot_cpu_data.cpu_type) {
case pcx:
/*
* We've got way too many dependencies on 1.1 semantics
* to support 1.0 boxes at this point.
*/
panic( "PA-RISC Linux currently only supports machines that conform to\n"
"the PA-RISC 1.1 or 2.0 architecture specification.\n");
case pcxl2:
default:
break;
}
}
#endif
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_64BIT
extern int parisc_narrow_firmware;
#endif
unwind_init();
init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
#ifdef CONFIG_64BIT
printk(KERN_INFO "The 64-bit Kernel has started...\n");
#else
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
(int)(PAGE_SIZE / 1024));
#ifdef CONFIG_HUGETLB_PAGE
printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
#else
printk(KERN_CONT "disabled");
#endif
printk(KERN_CONT ".\n");
/*
* Check if initial kernel page mappings are sufficient.
* panic early if not, else we may access kernel functions
* and variables which can't be reached.
*/
if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
panic("KERNEL_INITIAL_ORDER too small!");
#ifdef CONFIG_64BIT
if (parisc_narrow_firmware) {
printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
}
#endif
setup_pdc();
setup_cmdline(cmdline_p);
collect_boot_cpu_data();
do_memory_inventory(); /* probe for physical memory */
parisc_cache_init();
paging_init();
#ifdef CONFIG_PA11
dma_ops_init();
#endif
clear_sched_clock_stable();
}
/*
* Display CPU info for all CPUs.
*/
static void *
c_start (struct seq_file *m, loff_t *pos)
{
/* Looks like the caller will call repeatedly until we return
* 0, signaling EOF perhaps. This could be used to sequence
* through CPUs for example. Since we print all cpu info in our
* show_cpuinfo() disregarding 'pos' (which I assume is 'v' above)
* we only allow for one "position". */
return ((long)*pos < 1) ? (void *)1 : NULL;
}
static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void
c_stop (struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo
};
static struct resource central_bus = {
.name = "Central Bus",
.start = F_EXTEND(0xfff80000),
.end = F_EXTEND(0xfffaffff),
.flags = IORESOURCE_MEM,
};
static struct resource local_broadcast = {
.name = "Local Broadcast",
.start = F_EXTEND(0xfffb0000),
.end = F_EXTEND(0xfffdffff),
.flags = IORESOURCE_MEM,
};
static struct resource global_broadcast = {
.name = "Global Broadcast",
.start = F_EXTEND(0xfffe0000),
.end = F_EXTEND(0xffffffff),
.flags = IORESOURCE_MEM,
};
static int __init parisc_init_resources(void)
{
int result;
result = request_resource(&iomem_resource, ¢ral_bus);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, central_bus.name);
return result;
}
result = request_resource(&iomem_resource, &local_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, local_broadcast.name);
return result;
}
result = request_resource(&iomem_resource, &global_broadcast);
if (result < 0) {
printk(KERN_ERR
"%s: failed to claim %s address space!\n",
__FILE__, global_broadcast.name);
return result;
}
return 0;
}
static int __init parisc_init(void)
{
u32 osid = (OS_ID_LINUX << 16);
parisc_init_resources();
do_device_inventory(); /* probe for hardware */
parisc_pdc_chassis_init();
/* set up a new LED state on systems shipped with an LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
/* tell PDC we're Linux. Never mind failure. */
pdc_stable_write(0x40, &osid, sizeof(osid));
/* start with known state */
flush_cache_all_local();
flush_tlb_all_local(NULL);
processor_init();
#ifdef CONFIG_SMP
pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
num_online_cpus(), num_present_cpus(),
#else
pr_info("CPU(s): 1 x %s at %d.%06d MHz\n",
#endif
boot_cpu_data.cpu_name,
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
/* Don't serialize TLB flushes if we run on one CPU only. */
if (num_online_cpus() == 1)
pa_serialize_tlb_flushes = 0;
#endif
apply_alternatives_all();
parisc_setup_cache_timing();
return 0;
}
arch_initcall(parisc_init);
void __init start_parisc(void)
{
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
/* check QEMU/SeaBIOS marker in PAGE0 */
running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
cpunum = smp_processor_id();
init_cpu_topology();
set_firmware_width_unlocked();
ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
if (ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10);
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
asm volatile ("fstd %fr0,8(%sp)");
} else {
panic("must have an fpu to boot linux");
}
early_trap_init(); /* initialize checksum of fault_vector */
start_kernel();
// not reached
}
| linux-master | arch/parisc/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PARISC specific syscalls
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
* Copyright (C) 1999-2020 Helge Deller <[email protected]>
*/
#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/elf-randomize.h>
/*
* Construct an artificial page offset for the mapping based on the physical
* address of the kernel file mapping variable.
*/
#define GET_FILP_PGOFF(filp) \
(filp ? (((unsigned long) filp->f_mapping) >> 8) \
& ((SHM_COLOUR-1) >> PAGE_SHIFT) : 0UL)
static unsigned long shared_align_offset(unsigned long filp_pgoff,
unsigned long pgoff)
{
return (filp_pgoff + pgoff) << PAGE_SHIFT;
}
static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned long filp_pgoff, unsigned long pgoff)
{
unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
unsigned long off = (SHM_COLOUR-1) &
shared_align_offset(filp_pgoff, pgoff);
return base + off;
}
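/*
 * Worked example (hypothetical values, assuming the parisc SHM_COLOUR
 * of 4 MB): for addr = 0x10123000, base rounds up to 0x10400000 and
 * the low SHM_COLOUR bits of the shared alignment offset are added
 * back, so every mapping of a given file offset lands on the same
 * cache colour.
 */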
#define STACK_SIZE_DEFAULT (USER_WIDE_MODE \
? (1 << 30) /* 1 GB */ \
: (CONFIG_STACK_MAX_DEFAULT_SIZE_MB*1024*1024))
unsigned long calc_max_stack_size(unsigned long stack_max)
{
#ifdef CONFIG_COMPAT
if (!USER_WIDE_MODE && (stack_max == COMPAT_RLIM_INFINITY))
stack_max = STACK_SIZE_DEFAULT;
else
#endif
if (stack_max == RLIM_INFINITY)
stack_max = STACK_SIZE_DEFAULT;
return stack_max;
}
/*
* Top of mmap area (just below the process stack).
*/
/*
* When called from arch_get_unmapped_area(), rlim_stack will be NULL,
* indicating that "current" should be used instead of a passed-in
* value from the exec bprm as done with arch_pick_mmap_layout().
*/
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
unsigned long stack_base;
/* Limit stack size - see setup_arg_pages() in fs/exec.c */
stack_base = rlim_stack ? rlim_stack->rlim_max
: rlimit_max(RLIMIT_STACK);
stack_base = calc_max_stack_size(stack_base);
/* Add space for stack randomization. */
if (current->flags & PF_RANDOMIZE)
stack_base += (STACK_RND_MASK << PAGE_SHIFT);
return PAGE_ALIGN(STACK_TOP - stack_base);
}
enum mmap_allocation_direction {UP, DOWN};
static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
unsigned long filp_pgoff;
int do_color_align;
struct vm_unmapped_area_info info;
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
filp_pgoff = GET_FILP_PGOFF(filp);
if (flags & MAP_FIXED) {
/* Even MAP_FIXED mappings must reside within TASK_SIZE */
if (TASK_SIZE - len < addr)
return -EINVAL;
if ((flags & MAP_SHARED) && filp &&
(addr - shared_align_offset(filp_pgoff, pgoff))
& (SHM_COLOUR - 1))
return -EINVAL;
return addr;
}
if (addr) {
if (do_color_align)
addr = COLOR_ALIGN(addr, filp_pgoff, pgoff);
else
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
}
info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(filp_pgoff, pgoff);
if (dir == DOWN) {
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
return addr;
VM_BUG_ON(addr != -ENOMEM);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
}
info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_upper_limit(NULL);
return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, UP);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, DOWN);
}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long pgoff)
{
/* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
we have. */
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
pgoff >> (PAGE_SHIFT - 12));
}
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags, unsigned long fd,
unsigned long offset)
{
if (!(offset & ~PAGE_MASK)) {
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> PAGE_SHIFT);
} else {
return -EINVAL;
}
}
/* Fucking broken ABI */
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return ksys_truncate(path, (long)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return ksys_ftruncate(fd, (long)high << 32 | low);
}
/* stubs for the benefit of the syscall_table since truncate64 and truncate
* are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
return ksys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
return ksys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
return sys_fcntl(fd, cmd, arg);
}
#else
asmlinkage long parisc_truncate64(const char __user * path,
unsigned int high, unsigned int low)
{
return ksys_truncate(path, (loff_t)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
unsigned int high, unsigned int low)
{
return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
unsigned int high, unsigned int low)
{
return ksys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
size_t count, unsigned int high, unsigned int low)
{
return ksys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
size_t count)
{
return ksys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
unsigned int high_off, unsigned int low_off,
unsigned int high_len, unsigned int low_len, int advice)
{
return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage long parisc_sync_file_range(int fd,
u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
unsigned int flags)
{
return ksys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
u32 lenhi, u32 lenlo)
{
return ksys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
((u64)lenhi << 32) | lenlo);
}
asmlinkage long parisc_personality(unsigned long personality)
{
long err;
if (personality(current->personality) == PER_LINUX32
&& personality(personality) == PER_LINUX)
personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
if (personality(err) == PER_LINUX32)
err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
/*
* Up to kernel v5.9 we defined O_NONBLOCK as 000200004,
* since then O_NONBLOCK is defined as 000200000.
*
* The following wrapper functions mask out the old
* O_NDELAY bit from calls which use O_NONBLOCK.
*
* XXX: Remove those in year 2022 (or later)?
*/
#define O_NONBLOCK_OLD 000200004
#define O_NONBLOCK_MASK_OUT (O_NONBLOCK_OLD & ~O_NONBLOCK)
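/*
 * Arithmetic note: O_NONBLOCK_OLD & ~O_NONBLOCK equals octal
 * 000000004, i.e. only the historic extra bit is stripped while the
 * current O_NONBLOCK bit (000200000) passes through untouched.
 */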
static int FIX_O_NONBLOCK(int flags)
{
if ((flags & O_NONBLOCK_MASK_OUT) &&
!test_thread_flag(TIF_NONBLOCK_WARNING)) {
set_thread_flag(TIF_NONBLOCK_WARNING);
pr_warn("%s(%d) uses a deprecated O_NONBLOCK value."
" Please recompile with newer glibc.\n",
current->comm, current->pid);
}
return flags & ~O_NONBLOCK_MASK_OUT;
}
asmlinkage long parisc_timerfd_create(int clockid, int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_timerfd_create(clockid, flags);
}
asmlinkage long parisc_signalfd4(int ufd, sigset_t __user *user_mask,
size_t sizemask, int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_signalfd4(ufd, user_mask, sizemask, flags);
}
#ifdef CONFIG_COMPAT
asmlinkage long parisc_compat_signalfd4(int ufd,
compat_sigset_t __user *user_mask,
compat_size_t sizemask, int flags)
{
flags = FIX_O_NONBLOCK(flags);
return compat_sys_signalfd4(ufd, user_mask, sizemask, flags);
}
#endif
asmlinkage long parisc_eventfd2(unsigned int count, int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_eventfd2(count, flags);
}
asmlinkage long parisc_userfaultfd(int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_userfaultfd(flags);
}
asmlinkage long parisc_pipe2(int __user *fildes, int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_pipe2(fildes, flags);
}
asmlinkage long parisc_inotify_init1(int flags)
{
flags = FIX_O_NONBLOCK(flags);
return sys_inotify_init1(flags);
}
/*
* madvise() wrapper
*
* Up to kernel v6.1 parisc has different values than all other
* platforms for the MADV_xxx flags listed below.
* To keep binary compatibility with existing userspace programs
* translate the former values to the new values.
*
* XXX: Remove this wrapper in year 2025 (or later)
*/
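/*
 * Example: an old binary that passes 65 (the former parisc-specific
 * MADV_MERGEABLE value) is transparently given the generic
 * MADV_MERGEABLE behavior below.
 */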
asmlinkage notrace long parisc_madvise(unsigned long start, size_t len_in, int behavior)
{
switch (behavior) {
case 65: behavior = MADV_MERGEABLE; break;
case 66: behavior = MADV_UNMERGEABLE; break;
case 67: behavior = MADV_HUGEPAGE; break;
case 68: behavior = MADV_NOHUGEPAGE; break;
case 69: behavior = MADV_DONTDUMP; break;
case 70: behavior = MADV_DODUMP; break;
case 71: behavior = MADV_WIPEONFORK; break;
case 72: behavior = MADV_KEEPONFORK; break;
case 73: behavior = MADV_COLLAPSE; break;
}
return sys_madvise(start, len_in, behavior);
}
| linux-master | arch/parisc/kernel/sys_parisc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Unaligned memory access handler
*
* Copyright (C) 2001 Randolph Chung <[email protected]>
* Copyright (C) 2022 Helge Deller <[email protected]>
* Significantly tweaked by LaMont Jones <[email protected]>
*/
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <asm/unaligned.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
/* #define DEBUG_UNALIGNED 1 */
#ifdef DEBUG_UNALIGNED
#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
#define RFMT "%#08lx"
/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
#define OPCODE2(a,b) ((a)<<26|(b)<<1)
#define OPCODE3(a,b) ((a)<<26|(b)<<2)
#define OPCODE4(a) ((a)<<26)
#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
#define OPCODE2_MASK OPCODE2(0x3f,1)
#define OPCODE3_MASK OPCODE3(0x3f,1)
#define OPCODE4_MASK OPCODE4(0x3f)
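/*
 * Decoding example: OPCODE_LDH_I below expands to
 * OPCODE1(0x03,0,0x1) = 0x03<<26 | 0<<12 | 0x1<<6 = 0x0c000040;
 * OPCODE1_MASK is 0xfc0013c0 (the bit pattern shown above), keeping
 * exactly the fields that identify such an instruction.
 */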
/* skip LDB - never unaligned (index) */
#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
/* skip LDB - never unaligned (short) */
#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
/* skip STB - never unaligned */
#define OPCODE_STH OPCODE1(0x03,1,0x9)
#define OPCODE_STW OPCODE1(0x03,1,0xa)
#define OPCODE_STD OPCODE1(0x03,1,0xb)
/* skip STBY - never unaligned */
/* skip STDBY - never unaligned */
#define OPCODE_STWA OPCODE1(0x03,1,0xe)
#define OPCODE_STDA OPCODE1(0x03,1,0xf)
#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
#define OPCODE_LDD_L OPCODE2(0x14,0)
#define OPCODE_FLDD_L OPCODE2(0x14,1)
#define OPCODE_STD_L OPCODE2(0x1c,0)
#define OPCODE_FSTD_L OPCODE2(0x1c,1)
#define OPCODE_LDW_M OPCODE3(0x17,1)
#define OPCODE_FLDW_L OPCODE3(0x17,0)
#define OPCODE_FSTW_L OPCODE3(0x1f,0)
#define OPCODE_STW_M OPCODE3(0x1f,1)
#define OPCODE_LDH_L OPCODE4(0x11)
#define OPCODE_LDW_L OPCODE4(0x12)
#define OPCODE_LDWM OPCODE4(0x13)
#define OPCODE_STH_L OPCODE4(0x19)
#define OPCODE_STW_L OPCODE4(0x1A)
#define OPCODE_STWM OPCODE4(0x1B)
#define MAJOR_OP(i) (((i)>>26)&0x3f)
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
#define IM14(i) IM((i),14)
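/*
 * Sign-extension example: low-sign immediates keep the sign in bit 0,
 * so IM14(0x3fff) = (0x3fff>>1 & 0x1fff) | (-1L<<13) = -1.
 */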
#define ERR_NOTHANDLED -1
int unaligned_enabled __read_mostly = 1;
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
unsigned long saddr = regs->ior;
unsigned long val = 0, temp1;
ASM_EXCEPTIONTABLE_VAR(ret);
DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
regs->isr, regs->ior, toreg);
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
"1: ldbs 0(%%sr1,%3), %2\n"
"2: ldbs 1(%%sr1,%3), %0\n"
" depw %2, 23, 24, %0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
: "+r" (val), "+r" (ret), "=&r" (temp1)
: "r" (saddr), "r" (regs->isr) );
DPRINTF("val = " RFMT "\n", val);
if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
unsigned long val = 0, temp1, temp2;
ASM_EXCEPTIONTABLE_VAR(ret);
DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
regs->isr, regs->ior, toreg);
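/*
 * The assembly below loads the two aligned words that straddle the
 * target and funnel-shifts them together: for a byte offset of 1, SAR
 * is set to 24 and "vshd" yields (w0 << 8) | (w1 >> 24), which is the
 * unaligned 32-bit value.
 */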
__asm__ __volatile__ (
" zdep %4,28,2,%2\n" /* r19=(ofs&3)*8 */
" mtsp %5, %%sr1\n"
" depw %%r0,31,2,%4\n"
"1: ldw 0(%%sr1,%4),%0\n"
"2: ldw 4(%%sr1,%4),%3\n"
" subi 32,%2,%2\n"
" mtctl %2,11\n"
" vshd %0,%3,%0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
: "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2)
: "r" (saddr), "r" (regs->isr) );
DPRINTF("val = " RFMT "\n", val);
if (flop)
((__u32*)(regs->fr))[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
__u64 val = 0;
ASM_EXCEPTIONTABLE_VAR(ret);
DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
regs->isr, regs->ior, toreg);
if (!IS_ENABLED(CONFIG_64BIT) && !flop)
return ERR_NOTHANDLED;
#ifdef CONFIG_64BIT
__asm__ __volatile__ (
" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
" mtsp %4, %%sr1\n"
" depd %%r0,63,3,%3\n"
"1: ldd 0(%%sr1,%3),%0\n"
"2: ldd 8(%%sr1,%3),%%r20\n"
" subi 64,%%r19,%%r19\n"
" mtsar %%r19\n"
" shrpd %0,%%r20,%%sar,%0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
: "=r" (val), "+r" (ret)
: "0" (val), "r" (saddr), "r" (regs->isr)
: "r19", "r20" );
#else
{
unsigned long shift, temp1;
__asm__ __volatile__ (
" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */
" mtsp %5, %%sr1\n"
" dep %%r0,31,2,%2\n"
"1: ldw 0(%%sr1,%2),%0\n"
"2: ldw 4(%%sr1,%2),%R0\n"
"3: ldw 8(%%sr1,%2),%4\n"
" subi 32,%3,%3\n"
" mtsar %3\n"
" vshd %0,%R0,%0\n"
" vshd %R0,%4,%R0\n"
"4: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b)
: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
: "r" (regs->isr) );
}
#endif
DPRINTF("val = 0x%llx\n", val);
if (flop)
regs->fr[toreg] = val;
else if (toreg)
regs->gr[toreg] = val;
return ret;
}
static int emulate_sth(struct pt_regs *regs, int frreg)
{
unsigned long val = regs->gr[frreg], temp1;
ASM_EXCEPTIONTABLE_VAR(ret);
if (!frreg)
val = 0;
DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
val, regs->isr, regs->ior);
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" extrw,u %2, 23, 8, %1\n"
"1: stb %1, 0(%%sr1, %3)\n"
"2: stb %2, 1(%%sr1, %3)\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
: "+r" (ret), "=&r" (temp1)
: "r" (val), "r" (regs->ior), "r" (regs->isr) );
return ret;
}
static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
{
unsigned long val;
ASM_EXCEPTIONTABLE_VAR(ret);
if (flop)
val = ((__u32*)(regs->fr))[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
val, regs->isr, regs->ior);
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" zdep %2, 28, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" depwi,z -2, %%sar, 32, %%r19\n"
"1: ldw 0(%%sr1,%2),%%r20\n"
"2: ldw 4(%%sr1,%2),%%r21\n"
" vshd %%r0, %1, %%r22\n"
" vshd %1, %%r0, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
" stw %%r20,0(%%sr1,%2)\n"
" stw %%r21,4(%%sr1,%2)\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b)
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
__u64 val;
ASM_EXCEPTIONTABLE_VAR(ret);
if (flop)
val = regs->fr[frreg];
else if (frreg)
val = regs->gr[frreg];
else
val = 0;
DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
val, regs->isr, regs->ior);
if (!IS_ENABLED(CONFIG_64BIT) && !flop)
return ERR_NOTHANDLED;
#ifdef CONFIG_64BIT
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" depd,z %2, 60, 3, %%r19\n"
" depd %%r0, 63, 3, %2\n"
" mtsar %%r19\n"
" depdi,z -2, %%sar, 64, %%r19\n"
"1: ldd 0(%%sr1,%2),%%r20\n"
"2: ldd 8(%%sr1,%2),%%r21\n"
" shrpd %%r0, %1, %%sar, %%r22\n"
" shrpd %1, %%r0, %%sar, %%r1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %%r22, %%r20, %%r20\n"
" or %%r1, %%r21, %%r21\n"
"3: std %%r20,0(%%sr1,%2)\n"
"4: std %%r21,8(%%sr1,%2)\n"
"5: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b)
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1" );
#else
{
__asm__ __volatile__ (
" mtsp %3, %%sr1\n"
" zdep %R1, 29, 2, %%r19\n"
" dep %%r0, 31, 2, %2\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%2),%%r20\n"
"2: ldw 8(%%sr1,%2),%%r21\n"
" vshd %1, %R1, %%r1\n"
" vshd %%r0, %1, %1\n"
" vshd %R1, %%r0, %R1\n"
" and %%r20, %%r19, %%r20\n"
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %R1, %%r21, %R1\n"
"3: stw %1,0(%%sr1,%2)\n"
"4: stw %%r1,4(%%sr1,%2)\n"
"5: stw %R1,8(%%sr1,%2)\n"
"6: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
: "+r" (ret)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r1" );
}
#endif
return ret;
}
void handle_unaligned(struct pt_regs *regs)
{
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
int modify = 0;
int ret = ERR_NOTHANDLED;
__inc_irq_stat(irq_unaligned_count);
/* log a message with pacing */
if (user_mode(regs)) {
if (current->thread.flags & PARISC_UAC_SIGBUS) {
goto force_sigbus;
}
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
__ratelimit(&ratelimit)) {
printk(KERN_WARNING "%s(%d): unaligned access to " RFMT
" at ip " RFMT " (iir " RFMT ")\n",
current->comm, task_pid_nr(current), regs->ior,
regs->iaoq[0], regs->iir);
#ifdef DEBUG_UNALIGNED
show_regs(regs);
#endif
}
if (!unaligned_enabled)
goto force_sigbus;
}
/* handle modification - OK, it's ugly, see the instruction manual */
switch (MAJOR_OP(regs->iir))
{
case 0x03:
case 0x09:
case 0x0b:
if (regs->iir&0x20)
{
modify = 1;
if (regs->iir&0x1000) /* short loads */
if (regs->iir&0x200)
newbase += IM5_3(regs->iir);
else
newbase += IM5_2(regs->iir);
else if (regs->iir&0x2000) /* scaled indexed */
{
int shift=0;
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
shift= 1; break;
case OPCODE_LDW_I:
shift= 2; break;
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
shift= 3; break;
}
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
} else /* simple indexed */
newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
}
break;
case 0x13:
case 0x1b:
modify = 1;
newbase += IM14(regs->iir);
break;
case 0x14:
case 0x1c:
if (regs->iir&8)
{
modify = 1;
newbase += IM14(regs->iir&~0xe);
}
break;
case 0x16:
case 0x1e:
modify = 1;
newbase += IM14(regs->iir&6);
break;
case 0x17:
case 0x1f:
if (regs->iir&4)
{
modify = 1;
newbase += IM14(regs->iir&~4);
}
break;
}
/* TODO: make this cleaner... */
switch (regs->iir & OPCODE1_MASK)
{
case OPCODE_LDH_I:
case OPCODE_LDH_S:
ret = emulate_ldh(regs, R3(regs->iir));
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
ret = emulate_ldw(regs, R3(regs->iir), 0);
break;
case OPCODE_STH:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW:
case OPCODE_STWA:
ret = emulate_stw(regs, R2(regs->iir), 0);
break;
#ifdef CONFIG_64BIT
case OPCODE_LDD_I:
case OPCODE_LDDA_I:
case OPCODE_LDD_S:
case OPCODE_LDDA_S:
ret = emulate_ldd(regs, R3(regs->iir), 0);
break;
case OPCODE_STD:
case OPCODE_STDA:
ret = emulate_std(regs, R2(regs->iir), 0);
break;
#endif
case OPCODE_FLDWX:
case OPCODE_FLDWS:
case OPCODE_FLDWXR:
case OPCODE_FLDWSR:
ret = emulate_ldw(regs, FR3(regs->iir), 1);
break;
case OPCODE_FLDDX:
case OPCODE_FLDDS:
ret = emulate_ldd(regs, R3(regs->iir), 1);
break;
case OPCODE_FSTWX:
case OPCODE_FSTWS:
case OPCODE_FSTWXR:
case OPCODE_FSTWSR:
ret = emulate_stw(regs, FR3(regs->iir), 1);
break;
case OPCODE_FSTDX:
case OPCODE_FSTDS:
ret = emulate_std(regs, R3(regs->iir), 1);
break;
case OPCODE_LDCD_I:
case OPCODE_LDCW_I:
case OPCODE_LDCD_S:
case OPCODE_LDCW_S:
ret = ERR_NOTHANDLED; /* "undefined", but let's kill them. */
break;
}
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
ret = emulate_ldd(regs,R2(regs->iir),1);
break;
case OPCODE_FSTD_L:
ret = emulate_std(regs, R2(regs->iir),1);
break;
#ifdef CONFIG_64BIT
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
#endif
}
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
ret = emulate_ldw(regs, R2(regs->iir), 1);
break;
case OPCODE_LDW_M:
ret = emulate_ldw(regs, R2(regs->iir), 0);
break;
case OPCODE_FSTW_L:
ret = emulate_stw(regs, R2(regs->iir),1);
break;
case OPCODE_STW_M:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
switch (regs->iir & OPCODE4_MASK)
{
case OPCODE_LDH_L:
ret = emulate_ldh(regs, R2(regs->iir));
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
ret = emulate_ldw(regs, R2(regs->iir),0);
break;
case OPCODE_STH_L:
ret = emulate_sth(regs, R2(regs->iir));
break;
case OPCODE_STW_L:
case OPCODE_STWM:
ret = emulate_stw(regs, R2(regs->iir),0);
break;
}
if (ret == 0 && modify && R1(regs->iir))
regs->gr[R1(regs->iir)] = newbase;
if (ret == ERR_NOTHANDLED)
printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
DPRINTF("ret = %d\n", ret);
if (ret)
{
/*
* The unaligned handler failed.
* If we were called by __get_user() or __put_user() jump
* to its exception fixup handler instead of crashing.
*/
if (!user_mode(regs) && fixup_exception(regs))
return;
printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
die_if_kernel("Unaligned data reference", regs, 28);
if (ret == -EFAULT)
{
force_sig_fault(SIGSEGV, SEGV_MAPERR,
(void __user *)regs->ior);
}
else
{
force_sigbus:
/* couldn't handle it ... */
force_sig_fault(SIGBUS, BUS_ADRALN,
(void __user *)regs->ior);
}
return;
}
/* else we handled it, let life go on. */
regs->gr[0]|=PSW_N;
}
/*
* NB: check_unaligned() is only used for PCXS processors right
* now, so we only check for PA1.1 encodings at this point.
*/
int
check_unaligned(struct pt_regs *regs)
{
unsigned long align_mask;
/* Get alignment mask */
align_mask = 0UL;
switch (regs->iir & OPCODE1_MASK) {
case OPCODE_LDH_I:
case OPCODE_LDH_S:
case OPCODE_STH:
align_mask = 1UL;
break;
case OPCODE_LDW_I:
case OPCODE_LDWA_I:
case OPCODE_LDW_S:
case OPCODE_LDWA_S:
case OPCODE_STW:
case OPCODE_STWA:
align_mask = 3UL;
break;
default:
switch (regs->iir & OPCODE4_MASK) {
case OPCODE_LDH_L:
case OPCODE_STH_L:
align_mask = 1UL;
break;
case OPCODE_LDW_L:
case OPCODE_LDWM:
case OPCODE_STW_L:
case OPCODE_STWM:
align_mask = 3UL;
break;
}
break;
}
return (int)(regs->ior & align_mask);
}
| linux-master | arch/parisc/kernel/unaligned.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
if (arch == AUDIT_ARCH_PARISC)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
switch (syscall) {
case __NR_open:
return AUDITSC_OPEN;
case __NR_openat:
return AUDITSC_OPENAT;
case __NR_execve:
return AUDITSC_EXECVE;
case __NR_openat2:
return AUDITSC_OPENAT2;
default:
#ifdef CONFIG_COMPAT
if (abi == AUDIT_ARCH_PARISC)
return AUDITSC_COMPAT;
#endif
return AUDITSC_NATIVE;
}
}
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
extern __u32 parisc32_dir_class[];
extern __u32 parisc32_write_class[];
extern __u32 parisc32_read_class[];
extern __u32 parisc32_chattr_class[];
extern __u32 parisc32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);
| linux-master | arch/parisc/kernel/audit.c |
// SPDX-License-Identifier: GPL-2.0
/*
* linux/arch/parisc/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
* Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, [email protected])
*
* 1994-07-02 Alan Modra
* fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
* 1998-12-20 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>
#include <linux/timex.h>
int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
* a pair of registers; one is read-only and one is write-only; both
* accessed through CR16. The read-only register is 32 or 64 bits wide,
* and increments by 1 every CPU clock tick. The architecture only
* guarantees us a rate between 0.5 and 2, but all implementations use a
* rate of 1. The write-only register is 32-bits wide. When the lowest
* 32 bits of the read-only register compare equal to the write-only
* register, it raises a maskable external interrupt. Each processor has
* an Interval Timer of its own and they are not synchronised.
*
* We want to generate an interrupt every 1/HZ seconds. So we program
* CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
* is programmed with the intended time of the next tick. We can be
* held off for an arbitrarily long period of time by interrupts being
* disabled, so we may miss one or more ticks.
*/
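/*
 * Catch-up example (hypothetical numbers): if interrupts were held off
 * for 3.5 tick periods, the do/while loop below bumps next_tick by
 * 4 * cpt and reports ticks_elapsed = 4, so no tick is lost.
 */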
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
unsigned long now;
unsigned long next_tick;
unsigned long ticks_elapsed = 0;
unsigned int cpu = smp_processor_id();
struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
/* Initialize next_tick to the old expected tick time. */
next_tick = cpuinfo->it_value;
/* Calculate how many ticks have elapsed. */
now = mfctl(16);
do {
++ticks_elapsed;
next_tick += cpt;
} while (next_tick - now > cpt);
/* Store (in CR16 cycles) up to when we are accounting right now. */
cpuinfo->it_value = next_tick;
/* Go do system house keeping. */
if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
ticks_elapsed = 0;
legacy_timer_tick(ticks_elapsed);
/* Skip clockticks on purpose if we know we would miss those.
* The new CR16 must be "later" than current CR16 otherwise
* itimer would not fire until CR16 wrapped - e.g. 4 seconds
* later on a 1 GHz processor. We'll account for the missed
* ticks on the next timer interrupt.
* We want IT to fire modulo clocktick even if we miss/skip some.
* But those interrupts don't in fact get delivered that regularly.
*
* "next_tick - now" will always give the difference regardless
* if one or the other wrapped. If "now" is "bigger" we'll end up
* with a very large unsigned number.
*/
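/* For instance (32-bit view, hypothetical values): with now = 0xfffffff0
* and next_tick = 0x00000010, next_tick - now = 0x20, so the loop below
* still advances next_tick correctly across the wrap.
*/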
now = mfctl(16);
while (next_tick - now > cpt)
next_tick += cpt;
/* Program the IT when to deliver the next interrupt.
* Only bottom 32-bits of next_tick are writable in CR16!
* Timer interrupt will be delivered at least a few hundred cycles
* after the IT fires, so if we are too close (<= 8000 cycles) to the
* next cycle, simply skip it.
*/
if (next_tick - now <= 8000)
next_tick += cpt;
mtctl(next_tick, 16);
return IRQ_HANDLED;
}
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
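/* PSW_N set means the instruction at pc will be nullified; credit the
* sample to the preceding (branch) instruction instead.
*/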
if (regs->gr[0] & PSW_N)
pc -= 4;
#ifdef CONFIG_SMP
if (in_lock_functions(pc))
pc = regs->gr[2];
#endif
return pc;
}
EXPORT_SYMBOL(profile_pc);
/* clock source code */
static u64 notrace read_cr16(struct clocksource *cs)
{
return get_cycles();
}
static struct clocksource clocksource_cr16 = {
.name = "cr16",
.rating = 300,
.read = read_cr16,
.mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
void start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
per_cpu(cpu_data, cpu).it_value = next_tick;
}
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
struct pdc_tod tod_data;
memset(tm, 0, sizeof(*tm));
if (pdc_tod_read(&tod_data) < 0)
return -EOPNOTSUPP;
/* we treat tod_sec as unsigned, so this can work until year 2106 */
rtc_time64_to_tm(tod_data.tod_sec, tm);
return 0;
}
static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
time64_t secs = rtc_tm_to_time64(tm);
int ret;
/* hppa has Y2K38 problem: pdc_tod_set() takes a u32 value! */
ret = pdc_tod_set(secs, 0);
if (ret != 0) {
pr_warn("pdc_tod_set(%lld) returned error %d\n", secs, ret);
if (ret == PDC_INVALID_ARG)
return -EINVAL;
return -EOPNOTSUPP;
}
return 0;
}
static const struct rtc_class_ops rtc_generic_ops = {
.read_time = rtc_generic_get_time,
.set_time = rtc_generic_set_time,
};
static int __init rtc_init(void)
{
struct platform_device *pdev;
pdev = platform_device_register_data(NULL, "rtc-generic", -1,
&rtc_generic_ops,
sizeof(rtc_generic_ops));
return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif
void read_persistent_clock64(struct timespec64 *ts)
{
static struct pdc_tod tod_data;
if (pdc_tod_read(&tod_data) == 0) {
ts->tv_sec = tod_data.tod_sec;
ts->tv_nsec = tod_data.tod_usec * 1000;
} else {
printk(KERN_ERR "Error reading tod clock\n");
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
}
static u64 notrace read_cr16_sched_clock(void)
{
return get_cycles();
}
/*
* timer interrupt and sched_clock() initialization
*/
void __init time_init(void)
{
unsigned long cr16_hz;
clocktick = (100 * PAGE0->mem_10msec) / HZ;
start_cpu_itimer(); /* get CPU 0 started */
cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
/* register as sched_clock source */
sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
}
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not synchronized across CPUs.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
}
/* register at clocksource framework */
clocksource_register_hz(&clocksource_cr16,
100 * PAGE0->mem_10msec);
return 0;
}
device_initcall(init_cr16_clocksource);
| linux-master | arch/parisc/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Helge Deller <[email protected]>
*
* Based on arch/arm64/kernel/jump_label.c
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <asm/alternative.h>
#include <asm/patch.h>
static inline int reassemble_17(int as17)
{
return (((as17 & 0x10000) >> 16) |
((as17 & 0x0f800) << 5) |
((as17 & 0x00400) >> 8) |
((as17 & 0x003ff) << 3));
}
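/*
* reassemble_17() scatters a 17-bit signed word displacement into the
* branch instruction's split immediate fields:
*   bit 16      -> insn bit 0  (sign bit)
*   bits 15..11 -> insn bits 20..16
*   bit 10      -> insn bit 2
*   bits 9..0   -> insn bits 12..3
* e.g. (hypothetical) as17 = 0x00401 reassembles to 0x4 | 0x8 = 0xc.
*/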
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
if (type == JUMP_LABEL_JMP) {
void *target = (void *)jump_entry_target(entry);
int distance = target - addr;
/*
* Encode the PA1.1 "b,n" instruction with a 17-bit
* displacement. In case we hit the BUG(), we could use
* another branch instruction with a 22-bit displacement on
* 64-bit CPUs instead. But this seems sufficient for now.
*/
distance -= 8;
BUG_ON(distance > 262143 || distance < -262144);
insn = 0xe8000002 | reassemble_17(distance >> 2);
} else {
insn = INSN_NOP;
}
patch_text(addr, insn);
}
| linux-master | arch/parisc/kernel/jump_label.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Initial setup-routines for HP 9000 based hardware.
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
* Modifications for PA-RISC (C) 1999-2008 Helge Deller <[email protected]>
* Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
* Modifications copyright 2000 Martin K. Petersen <[email protected]>
* Modifications copyright 2000 Philipp Rumpf <[email protected]>
* Modifications copyright 2001 Ryan Bradetich <[email protected]>
*
* Initial PA-RISC Version: 04-23-1999 by Helge Deller
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/topology.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/smp.h>
#include <asm/pdcpat.h>
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
EXPORT_SYMBOL(boot_cpu_data);
#ifdef CONFIG_PA8X00
int _parisc_requires_coherency __ro_after_init;
EXPORT_SYMBOL(_parisc_requires_coherency);
#endif
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
/*
** PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/
/**
* init_percpu_prof - enable/setup per cpu profiling hooks.
* @cpunum: The processor instance.
*
* FIXME: doesn't do much yet...
*/
static void
init_percpu_prof(unsigned long cpunum)
{
}
/**
* processor_probe - Determine if processor driver should claim this device.
* @dev: The device which has been found.
*
* Determine if processor driver should claim this chip (return 0) or not
* (return 1). If so, initialize the chip and tell other partners in crime
* they have work to do.
*/
static int __init processor_probe(struct parisc_device *dev)
{
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
struct pdc_pat_cpu_num cpu_info = { };
#ifdef CONFIG_SMP
if (num_online_cpus() >= nr_cpu_ids) {
printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
return 1;
}
#else
if (boot_cpu_data.cpu_count > 0) {
printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
return 1;
}
#endif
/* logical CPU ID and update global counter
* May get overwritten by PAT code.
*/
cpuid = boot_cpu_data.cpu_count;
txn_addr = dev->hpa.start; /* for legacy PDC */
cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
ulong status;
unsigned long bytecnt;
pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
if (!pa_pdc_cell)
panic("couldn't allocate memory for PDC_PAT_CELL!");
status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
dev->mod_index, PA_VIEW, pa_pdc_cell);
BUG_ON(PDC_OK != status);
/* verify it's the same as what do_pat_inventory() found */
BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);
txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */
kfree(pa_pdc_cell);
/* get the cpu number */
status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
BUG_ON(PDC_OK != status);
pr_info("Logical CPU #%lu is physical cpu #%lu at location "
"0x%lx with hpa %pa\n",
cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
&dev->hpa.start);
#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
* of cpuid is for physical CPUs and we just don't care yet.
* We'll care when we need to query PAT PDC about a CPU *after*
* boot time (i.e. shut down a CPU from an OS perspective).
*/
if (cpu_info.cpu_num >= NR_CPUS) {
printk(KERN_WARNING "IGNORING CPU at %pa,"
" cpu_slot_id > NR_CPUS"
" (%ld > %d)\n",
&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
/* Ignore CPU since it will only crash */
boot_cpu_data.cpu_count--;
return 1;
} else {
cpuid = cpu_info.cpu_num;
}
#endif
}
#endif
p = &per_cpu(cpu_data, cpuid);
boot_cpu_data.cpu_count++;
/* initialize counters - CPU 0 gets it_value set in time_init() */
if (cpuid)
memset(p, 0, sizeof(struct cpuinfo_parisc));
p->dev = dev; /* Save IODC data in case we need it */
p->hpa = dev->hpa.start; /* save CPU hpa */
p->cpuid = cpuid; /* save CPU id */
p->txn_addr = txn_addr; /* save CPU IRQ address */
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
** for boot_cpu by the above memset().
*/
init_percpu_prof(cpuid);
#endif
/*
** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
** OS control. RENDEZVOUS is the default state - see memset() above.
** p->state = STATE_RENDEZVOUS;
*/
#if 0
/* CPU 0 IRQ table is statically allocated/initialized */
if (cpuid) {
struct irqaction actions[];
/*
** itimer and ipi IRQ handlers are statically initialized in
** arch/parisc/kernel/irq.c. ie Don't need to register them.
*/
actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
if (!actions) {
/* not getting its own table, share with monarch */
actions = cpu_irq_actions[0];
}
cpu_irq_actions[cpuid] = actions;
}
#endif
/*
* Bring this CPU up now! (ignore bootstrap cpuid == 0)
*/
#ifdef CONFIG_SMP
if (cpuid) {
set_cpu_present(cpuid, true);
add_cpu(cpuid);
}
#endif
return 0;
}
/**
* collect_boot_cpu_data - Fill the boot_cpu_data structure.
*
* This function collects and stores the generic processor information
* in the boot_cpu_data structure.
*/
void __init collect_boot_cpu_data(void)
{
unsigned long cr16_seed;
char orig_prod_num[64], current_prod_num[64], serial_no[64];
memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
cr16_seed = get_cycles();
add_device_randomness(&cr16_seed, sizeof(cr16_seed));
boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
/* get CPU-Model Information... */
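/* Temporarily alias the pdc model struct as an array of nine unsigned
* longs for printing.
*/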
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
printk(KERN_INFO
"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
add_device_randomness(&boot_cpu_data.pdc.model,
sizeof(boot_cpu_data.pdc.model));
}
#undef p
if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
printk(KERN_INFO "vers %08lx\n",
boot_cpu_data.pdc.versions);
add_device_randomness(&boot_cpu_data.pdc.versions,
sizeof(boot_cpu_data.pdc.versions));
}
if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
(boot_cpu_data.pdc.cpuid >> 5) & 127,
boot_cpu_data.pdc.cpuid & 31,
boot_cpu_data.pdc.cpuid);
add_device_randomness(&boot_cpu_data.pdc.cpuid,
sizeof(boot_cpu_data.pdc.cpuid));
}
if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
printk(KERN_INFO "capabilities 0x%lx\n",
boot_cpu_data.pdc.capabilities);
if (pdc_model_sysmodel(OS_ID_HPUX, boot_cpu_data.pdc.sys_model_name) == PDC_OK)
pr_info("HP-UX model name: %s\n",
boot_cpu_data.pdc.sys_model_name);
serial_no[0] = 0;
if (pdc_model_sysmodel(OS_ID_MPEXL, serial_no) == PDC_OK &&
serial_no[0])
pr_info("MPE/iX model name: %s\n", serial_no);
dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);
boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
#ifdef CONFIG_PA8X00
_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
(boot_cpu_data.cpu_type == mako2);
#endif
if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
current_prod_num[0] ? current_prod_num : "n/a",
orig_prod_num, serial_no);
add_device_randomness(orig_prod_num, strlen(orig_prod_num));
add_device_randomness(current_prod_num, strlen(current_prod_num));
add_device_randomness(serial_no, strlen(serial_no));
}
}
/**
* init_per_cpu - Handle individual processor initializations.
* @cpunum: logical processor number.
*
* This function handles initialization for *every* CPU
* in the system:
*
* o Set "default" CPU width for trap handlers
*
* o Enable FP coprocessor
* REVISIT: this could be done in the "code 22" trap handler.
* (frowand's idea - that way we know which processes need FP
* registers saved on the interrupt stack.)
* NEWS FLASH: wide kernels need FP coprocessor enabled to handle
* formatted printing of %lx for example (double divides I think)
*
* o Enable CPU profiling hooks.
*/
int init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
set_firmware_width();
ret = pdc_coproc_cfg(&coproc_cfg);
if(ret >= 0 && coproc_cfg.ccr_functional) {
mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
/* FWIW, FP rev/model is a more accurate way to determine
** CPU type. CPU rev/model has some ambiguous cases.
*/
per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
if (cpunum == 0)
printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
cpunum, coproc_cfg.revision, coproc_cfg.model);
/*
** store status register to stack (hopefully aligned)
** and clear the T-bit.
*/
asm volatile ("fstd %fr0,8(%sp)");
} else {
printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef CONFIG_64BIT
"Halting Machine - FP required\n"
#endif
, coproc_cfg.ccr_functional);
#ifdef CONFIG_64BIT
mdelay(100); /* previous chars get pushed to console */
panic("FP CoProc not reported");
#endif
}
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
btlb_init_per_cpu();
return ret;
}
/*
* Display CPU info for all CPUs.
*/
int
show_cpuinfo (struct seq_file *m, void *v)
{
unsigned long cpu;
char cpu_name[60], *p;
/* strip PA path from CPU name to not confuse lscpu */
strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
p = strrchr(cpu_name, '[');
if (p)
*(--p) = 0;
for_each_online_cpu(cpu) {
#ifdef CONFIG_SMP
const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
if (0 == cpuinfo->hpa)
continue;
#endif
seq_printf(m, "processor\t: %lu\n"
"cpu family\t: PA-RISC %s\n",
cpu, boot_cpu_data.family_name);
seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
/* cpu MHz */
seq_printf(m, "cpu MHz\t\t: %lu.%06lu\n",
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
seq_printf(m, "physical id\t: %d\n",
topology_physical_package_id(cpu));
seq_printf(m, "siblings\t: %d\n",
cpumask_weight(topology_core_cpumask(cpu)));
seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
#endif
seq_printf(m, "capabilities\t:");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
seq_puts(m, " os32");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
seq_puts(m, " os64");
if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
seq_puts(m, " iopdir_fdc");
switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
case PDC_MODEL_NVA_SUPPORTED:
seq_puts(m, " nva_supported");
break;
case PDC_MODEL_NVA_SLOW:
seq_puts(m, " nva_slow");
break;
case PDC_MODEL_NVA_UNSUPPORTED:
seq_puts(m, " needs_equivalent_aliasing");
break;
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
cpu_name);
seq_printf(m, "hversion\t: 0x%08x\n"
"sversion\t: 0x%08x\n",
boot_cpu_data.hversion,
boot_cpu_data.sversion );
/* print cachesize info */
show_cache_info(m);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
loops_per_jiffy / (500000 / HZ),
loops_per_jiffy / (5000 / HZ) % 100);
seq_printf(m, "software id\t: %ld\n\n",
boot_cpu_data.pdc.model.sw_id);
}
return 0;
}
static const struct parisc_device_id processor_tbl[] __initconst = {
{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
{ 0, }
};
static struct parisc_driver cpu_driver __refdata = {
.name = "CPU",
.id_table = processor_tbl,
.probe = processor_probe
};
/**
* processor_init - Processor initialization procedure.
*
* Register this driver.
*/
void __init processor_init(void)
{
unsigned int cpu;
reset_cpu_topology();
/* reset possible mask. We will mark those which are possible. */
for_each_possible_cpu(cpu)
set_cpu_possible(cpu, false);
register_parisc_driver(&cpu_driver);
}
| linux-master | arch/parisc/kernel/processor.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stack trace management functions
*
* Copyright (C) 2009-2021 Helge Deller <[email protected]>
* based on arch/x86/kernel/stacktrace.c by Ingo Molnar <[email protected]>
* and parisc unwind functions by Randolph Chung <[email protected]>
*
* TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
*/
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <asm/unwind.h>
static void notrace walk_stackframe(struct task_struct *task,
struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *cookie)
{
struct unwind_frame_info info;
unwind_frame_init_task(&info, task, NULL);
while (1) {
if (unwind_once(&info) < 0 || info.ip == 0)
break;
if (__kernel_text_address(info.ip))
if (!fn(cookie, info.ip))
break;
}
}
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
}
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task)
{
walk_stackframe(task, NULL, consume_entry, cookie);
return 1;
}
| linux-master | arch/parisc/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC architecture-specific signal handling support.
*
* Copyright (C) 2000 David Huggins-Daines <[email protected]>
* Copyright (C) 2000 Linuxcare, Inc.
* Copyright (C) 2000-2022 Helge Deller <[email protected]>
* Copyright (C) 2022 John David Anglin <[email protected]>
*
* Based on the ia64, i386, and alpha versions.
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/resume_user_mode.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <asm/ucontext.h>
#include <asm/rt_sigframe.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/vdso.h>
#ifdef CONFIG_COMPAT
#include "signal32.h"
#endif
#define DEBUG_SIG 0
#define DEBUG_SIG_LEVEL 2
#if DEBUG_SIG
#define DBG(LEVEL, ...) \
((DEBUG_SIG_LEVEL >= LEVEL) \
? printk(__VA_ARGS__) : (void) 0)
#else
#define DBG(LEVEL, ...)
#endif
/* gcc will complain if a pointer is cast to an integer of different
* size. If you really need to do this (and we do for an ELF32 user
* application in an ELF64 kernel) then you have to do a cast to an
* integer of the same size first. The A() macro accomplishes
* this. */
#define A(__x) ((unsigned long)(__x))
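/* e.g. storing a 64-bit kernel pointer into a 32-bit compat field:
* compat_uptr_t up = (compat_uptr_t)A(kptr); -- A() first widens the
* pointer to unsigned long so the final truncation is explicit and
* gcc stays quiet.
*/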
/*
* Do a signal return - restore sigcontext.
*/
static long
restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
long err = 0;
err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
err |= __get_user(regs->sar, &sc->sc_sar);
DBG(2, "%s: iaoq is %#lx / %#lx\n",
__func__, regs->iaoq[0], regs->iaoq[1]);
DBG(2, "%s: r28 is %ld\n", __func__, regs->gr[28]);
return err;
}
asmlinkage void
sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
{
struct rt_sigframe __user *frame;
sigset_t set;
unsigned long usp = (regs->gr[30] & ~(0x01UL));
unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
struct compat_rt_sigframe __user * compat_frame;
if (is_compat_task())
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
#endif
current->restart_block.fn = do_no_restart_syscall;
/* Unwind the user stack to get the rt_sigframe structure. */
frame = (struct rt_sigframe __user *)
(usp - sigframe_size);
DBG(2, "%s: frame is %p pid %d\n", __func__, frame, task_pid_nr(current));
regs->orig_r28 = 1; /* no restarts for sigreturn */
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
if (get_compat_sigset(&set, &compat_frame->uc.uc_sigmask))
goto give_sigsegv;
} else
#endif
{
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto give_sigsegv;
}
set_current_blocked(&set);
/* Good thing we saved the old gr[30], eh? */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
DBG(1, "%s: compat_frame->uc.uc_mcontext 0x%p\n",
__func__, &compat_frame->uc.uc_mcontext);
// FIXME: Load upper half from register file
if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs))
goto give_sigsegv;
DBG(1, "%s: usp %#08lx stack 0x%p\n",
__func__, usp, &compat_frame->uc.uc_stack);
if (compat_restore_altstack(&compat_frame->uc.uc_stack))
goto give_sigsegv;
} else
#endif
{
DBG(1, "%s: frame->uc.uc_mcontext 0x%p\n",
__func__, &frame->uc.uc_mcontext);
if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
goto give_sigsegv;
DBG(1, "%s: usp %#08lx stack 0x%p\n",
__func__, usp, &frame->uc.uc_stack);
if (restore_altstack(&frame->uc.uc_stack))
goto give_sigsegv;
}
/* If we are on the syscall path IAOQ will not be restored, and
* if we are on the interrupt path we must not corrupt gr31.
*/
if (in_syscall)
regs->gr[31] = regs->iaoq[0];
return;
give_sigsegv:
DBG(1, "%s: Sending SIGSEGV\n", __func__);
force_sig(SIGSEGV);
return;
}
/*
* Set up a signal frame.
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
/* FIXME: ELF32 and ELF64 have different frame_size values, but since we
don't use the parameter it doesn't matter */
DBG(1, "%s: ka = %#lx, sp = %#lx, frame_size = %zu\n",
__func__, (unsigned long)ka, sp, frame_size);
/* Align alternate stack and reserve 64 bytes for the signal
handler's frame marker. */
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */
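/* (sp + 0x7f) & ~0x3f rounds up to a 64-byte boundary and leaves at
* least 64 bytes of headroom, e.g. 0x1010 -> 0x1080.
*/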
DBG(1, "%s: Returning sp = %#lx\n", __func__, (unsigned long)sp);
return (void __user *) sp; /* Stacks grow up. Fun. */
}
static long
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, long in_syscall)
{
unsigned long flags = 0;
long err = 0;
if (on_sig_stack((unsigned long) sc))
flags |= PARISC_SC_FLAG_ONSTACK;
if (in_syscall) {
flags |= PARISC_SC_FLAG_IN_SYSCALL;
/* regs->iaoq is undefined in the syscall return path */
err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
DBG(1, "%s: iaoq %#lx / %#lx (in syscall)\n",
__func__, regs->gr[31], regs->gr[31]+4);
} else {
err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
DBG(1, "%s: iaoq %#lx / %#lx (not in syscall)\n",
__func__, regs->iaoq[0], regs->iaoq[1]);
}
err |= __put_user(flags, &sc->sc_flags);
err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
err |= __put_user(regs->sar, &sc->sc_sar);
DBG(1, "%s: r28 is %ld\n", __func__, regs->gr[28]);
return err;
}
static long
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
long in_syscall)
{
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
unsigned long start;
int err = 0;
#ifdef CONFIG_64BIT
struct compat_rt_sigframe __user * compat_frame;
#endif
usp = (regs->gr[30] & ~(0x01UL));
sigframe_size = PARISC_RT_SIGFRAME_SIZE;
#ifdef CONFIG_64BIT
if (is_compat_task()) {
/* The gcc alloca implementation leaves garbage in the upper 32 bits of sp */
usp = (compat_uint_t)usp;
sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
}
#endif
frame = get_sigframe(&ksig->ka, usp, sigframe_size);
DBG(1, "%s: frame %p info %p\n", __func__, frame, &ksig->info);
start = (unsigned long) frame;
if (start >= TASK_SIZE_MAX - sigframe_size)
return -EFAULT;
#ifdef CONFIG_64BIT
compat_frame = (struct compat_rt_sigframe __user *)frame;
if (is_compat_task()) {
DBG(1, "%s: frame->info = 0x%p\n", __func__, &compat_frame->info);
err |= copy_siginfo_to_user32(&compat_frame->info, &ksig->info);
err |= __compat_save_altstack( &compat_frame->uc.uc_stack, regs->gr[30]);
DBG(1, "%s: frame->uc = 0x%p\n", __func__, &compat_frame->uc);
DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n",
__func__, &compat_frame->uc.uc_mcontext);
err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
&compat_frame->regs, regs, in_syscall);
err |= put_compat_sigset(&compat_frame->uc.uc_sigmask, set,
sizeof(compat_sigset_t));
} else
#endif
{
DBG(1, "%s: frame->info = 0x%p\n", __func__, &frame->info);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
err |= __save_altstack(&frame->uc.uc_stack, regs->gr[30]);
DBG(1, "%s: frame->uc = 0x%p\n", __func__, &frame->uc);
DBG(1, "%s: frame->uc.uc_mcontext = 0x%p\n",
__func__, &frame->uc.uc_mcontext);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
/* FIXME: Should probably be converted as well for the compat case */
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
}
if (err)
return -EFAULT;
#ifdef CONFIG_64BIT
if (!is_compat_task())
rp = VDSO64_SYMBOL(current, sigtramp_rt);
else
#endif
rp = VDSO32_SYMBOL(current, sigtramp_rt);
if (in_syscall)
rp += 4*4; /* skip 4 instructions and start at ldi 1,%r25 */
haddr = A(ksig->ka.sa.sa_handler);
/* The sa_handler may be a pointer to a function descriptor */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
#endif
if (haddr & PA_PLABEL_FDESC) {
Elf32_Fdesc fdesc;
Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
return -EFAULT;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
}
#ifdef CONFIG_64BIT
} else {
Elf64_Fdesc fdesc;
Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
if (err)
return -EFAULT;
haddr = fdesc.addr;
regs->gr[19] = fdesc.gp;
DBG(1, "%s: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
__func__, haddr, regs->gr[19], in_syscall);
}
#endif
/* The syscall return path will create IAOQ values from r31.
*/
if (in_syscall) {
regs->gr[31] = haddr;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
sigframe_size |= 1; /* XXX ???? */
#endif
} else {
unsigned long psw = USER_PSW;
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT))
psw |= PSW_W;
#endif
/* If we are singlestepping, arrange a trap to be delivered
when we return to userspace. Note the semantics -- we
should trap before the first insn in the handler is
executed. Ref:
http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
*/
if (pa_psw(current)->r) {
pa_psw(current)->r = 0;
psw |= PSW_R;
mtctl(-1, 0);
}
regs->gr[0] = psw;
regs->iaoq[0] = haddr | PRIV_USER;
regs->iaoq[1] = regs->iaoq[0] + 4;
}
regs->gr[2] = rp; /* userland return pointer */
regs->gr[26] = ksig->sig; /* signal number */
#ifdef CONFIG_64BIT
if (is_compat_task()) {
regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
} else
#endif
{
regs->gr[25] = A(&frame->info); /* siginfo pointer */
regs->gr[24] = A(&frame->uc); /* ucontext pointer */
}
DBG(1, "%s: making sigreturn frame: %#lx + %#lx = %#lx\n", __func__,
regs->gr[30], sigframe_size,
regs->gr[30] + sigframe_size);
/* Raise the user stack pointer to make a proper call frame. */
regs->gr[30] = (A(frame) + sigframe_size);
DBG(1, "%s: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
__func__, current->comm, current->pid, frame, regs->gr[30],
regs->iaoq[0], regs->iaoq[1], rp);
return 0;
}
/*
* OK, we're invoking a handler.
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs, long in_syscall)
{
int ret;
sigset_t *oldset = sigmask_to_save();
DBG(1, "%s: sig=%d, ka=%p, info=%p, oldset=%p, regs=%p\n",
__func__, ksig->sig, &ksig->ka, &ksig->info, oldset, regs);
/* Set up the stack frame */
ret = setup_rt_frame(ksig, oldset, regs, in_syscall);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP));
DBG(1, "%s: Exit (success), regs->gr[28] = %ld\n",
__func__, regs->gr[28]);
}
/*
* Check how the syscall number gets loaded into %r20 within
* the delay branch in userspace and adjust as needed.
*/
static void check_syscallno_in_delay_branch(struct pt_regs *regs)
{
u32 opcode, source_reg;
u32 __user *uaddr;
int err;
/* Usually we don't have to restore %r20 (the system call number)
* because it gets loaded in the delay slot of the branch external
* instruction via the ldi instruction.
* In some cases a register-to-register copy instruction might have
* been used instead, in which case we need to copy the syscall
* number into the source register before returning to userspace.
*/
/* A syscall is just a branch, so all we have to do is fiddle the
* return pointer so that the ble instruction gets executed again.
*/
regs->gr[31] -= 8; /* delayed branching */
/* Get assembler opcode of code in delay branch */
uaddr = (u32 __user *) ((regs->gr[31] & ~3) + 4);
err = get_user(opcode, uaddr);
if (err)
return;
/* Check if delay branch uses "ldi int,%r20" */
if ((opcode & 0xffff0000) == 0x34140000)
return; /* everything ok, just return */
/* Check if delay branch uses "nop" */
if (opcode == INSN_NOP)
return;
/* Check if delay branch uses "copy %rX,%r20" */
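/* The mask 0xffe0ffff keeps everything except bits 20..16, the source
* register field, which is extracted below. Note the syscall number is
* written back to the *source* register so the replayed copy reloads
* %r20 with it.
*/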
if ((opcode & 0xffe0ffff) == 0x08000254) {
source_reg = (opcode >> 16) & 31;
regs->gr[source_reg] = regs->gr[20];
return;
}
pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
current->comm, task_pid_nr(current), opcode);
}
static inline void
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
{
if (regs->orig_r28)
return;
regs->orig_r28 = 1; /* no more restarts */
DBG(1, "%s: orig_r28 = %ld pid %d r20 %ld\n",
__func__, regs->orig_r28, task_pid_nr(current), regs->gr[20]);
/* Check the return code */
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
DBG(1, "%s: ERESTARTNOHAND: returning -EINTR\n", __func__);
regs->gr[28] = -EINTR;
break;
case -ERESTARTSYS:
if (!(ka->sa.sa_flags & SA_RESTART)) {
DBG(1, "%s: ERESTARTSYS: putting -EINTR pid %d\n",
__func__, task_pid_nr(current));
regs->gr[28] = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
DBG(1, "%s: %ld\n", __func__, regs->gr[28]);
check_syscallno_in_delay_branch(regs);
break;
}
}
static inline void
insert_restart_trampoline(struct pt_regs *regs)
{
if (regs->orig_r28)
return;
regs->orig_r28 = 1; /* no more restarts */
DBG(2, "%s: gr28 = %ld pid %d\n",
__func__, regs->gr[28], task_pid_nr(current));
switch (regs->gr[28]) {
case -ERESTART_RESTARTBLOCK: {
/* Restart the system call - no handlers present */
unsigned int *usp = (unsigned int *)regs->gr[30];
unsigned long rp;
long err = 0;
/* check that we don't exceed the stack */
if (A(&usp[0]) >= TASK_SIZE_MAX - 5 * sizeof(int))
return;
/* Call trampoline in vdso to restart the syscall
* with __NR_restart_syscall.
* Original return addresses are on stack like this:
*
* 0: <return address (orig r31)>
* 4: <2nd half for 64-bit>
*/
#ifdef CONFIG_64BIT
if (!is_compat_task()) {
err |= put_user(regs->gr[31] >> 32, &usp[0]);
err |= put_user(regs->gr[31] & 0xffffffff, &usp[1]);
rp = VDSO64_SYMBOL(current, restart_syscall);
} else
#endif
{
err |= put_user(regs->gr[31], &usp[0]);
rp = VDSO32_SYMBOL(current, restart_syscall);
}
WARN_ON(err);
regs->gr[31] = rp;
DBG(1, "%s: ERESTART_RESTARTBLOCK\n", __func__);
return;
}
case -EINTR:
/* ok, was handled before and should be returned. */
break;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
DBG(1, "%s: Type %ld\n", __func__, regs->gr[28]);
check_syscallno_in_delay_branch(regs);
return;
default:
break;
}
}
/*
* We need to be able to restore the syscall arguments (r21-r26) to
* restart syscalls. Thus, the syscall path should save them in the
* pt_regs structure (it's okay to do so since they are caller-save
* registers). As noted below, the syscall number gets restored for
* us due to the magic of delayed branching.
*/
static void do_signal(struct pt_regs *regs, long in_syscall)
{
struct ksignal ksig;
int restart_syscall;
bool has_handler;
has_handler = get_signal(&ksig);
restart_syscall = 0;
if (in_syscall)
restart_syscall = 1;
if (has_handler) {
/* Restart a system call if necessary. */
if (restart_syscall)
syscall_restart(regs, &ksig.ka);
handle_signal(&ksig, regs, in_syscall);
DBG(1, "%s: Handled signal pid %d\n",
__func__, task_pid_nr(current));
return;
}
/* Do we need to restart the system call? */
if (restart_syscall)
insert_restart_trampoline(regs);
DBG(1, "%s: Exit (not delivered), regs->gr[28] = %ld orig_r28 = %ld pid %d\n",
__func__, regs->gr[28], regs->orig_r28, task_pid_nr(current));
restore_saved_sigmask();
}
asmlinkage void do_notify_resume(struct pt_regs *regs, long in_syscall)
{
if (test_thread_flag(TIF_SIGPENDING) ||
test_thread_flag(TIF_NOTIFY_SIGNAL))
do_signal(regs, in_syscall);
if (test_thread_flag(TIF_NOTIFY_RESUME))
resume_user_mode_work(regs);
}
| linux-master | arch/parisc/kernel/signal.c |
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2006 Helge Deller <[email protected]> (07-13-1999)
* Copyright (C) 1999 SuSE GmbH Nuernberg
* Copyright (C) 2000 Philipp Rumpf ([email protected])
*
* Cache and TLB management
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
* this.
*/
DEFINE_SPINLOCK(pa_tlb_flush_lock);
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info __ro_after_init;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);
static void cache_flush_local_cpu(void *dummy)
{
if (static_branch_likely(&parisc_has_icache))
flush_instruction_cache_local();
if (static_branch_likely(&parisc_has_dcache))
flush_data_cache_local(NULL);
}
void flush_cache_all_local(void)
{
cache_flush_local_cpu(NULL);
}
void flush_cache_all(void)
{
if (static_branch_likely(&parisc_has_cache))
on_each_cpu(cache_flush_local_cpu, NULL, 1);
}
static inline void flush_data_cache(void)
{
if (static_branch_likely(&parisc_has_dcache))
on_each_cpu(flush_data_cache_local, NULL, 1);
}
/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
void __update_cache(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
struct folio *folio;
unsigned int nr;
/* We don't have pte special. As a result, we can be called with
an invalid pfn and we don't need to flush the kernel dcache page.
This occurs with FireGL card in C8000. */
if (!pfn_valid(pfn))
return;
folio = page_folio(pfn_to_page(pfn));
pfn = folio_pfn(folio);
nr = folio_nr_pages(folio);
if (folio_flush_mapping(folio) &&
test_bit(PG_dcache_dirty, &folio->flags)) {
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
clear_bit(PG_dcache_dirty, &folio->flags);
} else if (parisc_requires_coherency())
while (nr--)
flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}
void
show_cache_info(struct seq_file *m)
{
char buf[32];
seq_printf(m, "I-cache\t\t: %ld KB\n",
cache_info.ic_size/1024 );
if (cache_info.dc_loop != 1)
snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
cache_info.dc_size/1024,
(cache_info.dc_conf.cc_wt ? "WT":"WB"),
(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
((cache_info.dc_loop == 1) ? "direct mapped" : buf),
cache_info.dc_conf.cc_alias
);
seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
cache_info.it_size,
cache_info.dt_size,
cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
);
#ifndef CONFIG_PA20
/* BTLB - Block TLB */
if (btlb_info.max_size==0) {
seq_printf(m, "BTLB\t\t: not supported\n" );
} else {
seq_printf(m,
"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
btlb_info.max_size, (int)4096,
btlb_info.max_size>>8,
btlb_info.fixed_range_info.num_i,
btlb_info.fixed_range_info.num_d,
btlb_info.fixed_range_info.num_comb,
btlb_info.variable_range_info.num_i,
btlb_info.variable_range_info.num_d,
btlb_info.variable_range_info.num_comb
);
}
#endif
}
void __init
parisc_cache_init(void)
{
if (pdc_cache_info(&cache_info) < 0)
panic("parisc_cache_init: pdc_cache_info failed");
#if 0
printk("ic_size %lx dc_size %lx it_size %lx\n",
cache_info.ic_size,
cache_info.dc_size,
cache_info.it_size);
printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.dc_base,
cache_info.dc_stride,
cache_info.dc_count,
cache_info.dc_loop);
printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.dc_conf),
cache_info.dc_conf.cc_alias,
cache_info.dc_conf.cc_block,
cache_info.dc_conf.cc_line,
cache_info.dc_conf.cc_shift);
printk(" wt %d sh %d cst %d hv %d\n",
cache_info.dc_conf.cc_wt,
cache_info.dc_conf.cc_sh,
cache_info.dc_conf.cc_cst,
cache_info.dc_conf.cc_hv);
printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
cache_info.ic_base,
cache_info.ic_stride,
cache_info.ic_count,
cache_info.ic_loop);
printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
cache_info.it_sp_base,
cache_info.it_sp_stride,
cache_info.it_sp_count,
cache_info.it_loop,
cache_info.it_off_base,
cache_info.it_off_stride,
cache_info.it_off_count);
printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
cache_info.dt_sp_base,
cache_info.dt_sp_stride,
cache_info.dt_sp_count,
cache_info.dt_loop,
cache_info.dt_off_base,
cache_info.dt_off_stride,
cache_info.dt_off_count);
printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
*(unsigned long *) (&cache_info.ic_conf),
cache_info.ic_conf.cc_alias,
cache_info.ic_conf.cc_block,
cache_info.ic_conf.cc_line,
cache_info.ic_conf.cc_shift);
printk(" wt %d sh %d cst %d hv %d\n",
cache_info.ic_conf.cc_wt,
cache_info.ic_conf.cc_sh,
cache_info.ic_conf.cc_cst,
cache_info.ic_conf.cc_hv);
printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
cache_info.dt_conf.tc_sh,
cache_info.dt_conf.tc_page,
cache_info.dt_conf.tc_cst,
cache_info.dt_conf.tc_aid,
cache_info.dt_conf.tc_sr);
printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
cache_info.it_conf.tc_sh,
cache_info.it_conf.tc_page,
cache_info.it_conf.tc_cst,
cache_info.it_conf.tc_aid,
cache_info.it_conf.tc_sr);
#endif
split_tlb = 0;
if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
if (cache_info.dt_conf.tc_sh == 2)
printk(KERN_WARNING "Unexpected TLB configuration. "
"Will flush I/D separately (could be optimized).\n");
split_tlb = 1;
}
/* "New and Improved" version from Jim Hull
* (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
* The following CAFL_STRIDE is an optimized version, see
* http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
* http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
*/
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
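/* Hypothetical example: cc_line = 2, cc_block = 1, cc_shift = 2 gives a
* stride of 2 << (3 + 1 + 2) = 128 bytes between cache-flush operations.
*/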
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
}
}
void disable_sr_hashing(void)
{
int srhash_type, retval;
unsigned long space_bits;
switch (boot_cpu_data.cpu_type) {
case pcx: /* We shouldn't get this far. setup.c should prevent it. */
BUG();
return;
case pcxs:
case pcxt:
case pcxt_:
srhash_type = SRHASH_PCXST;
break;
case pcxl:
srhash_type = SRHASH_PCXL;
break;
case pcxl2: /* pcxl2 doesn't support space register hashing */
return;
default: /* Currently all PA2.0 machines use the same ins. sequence */
srhash_type = SRHASH_PA20;
break;
}
disable_sr_hashing_asm(srhash_type);
retval = pdc_spaceid_bits(&space_bits);
/* If this procedure isn't implemented, don't panic. */
if (retval < 0 && retval != PDC_BAD_OPTION)
panic("pdc_spaceid_bits call failed.\n");
if (space_bits != 0)
panic("SpaceID hashing is still on!\n");
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
{
if (!static_branch_likely(&parisc_has_cache))
return;
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
preempt_enable();
}
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
unsigned long pgd_lock;
#endif
vmaddr &= PAGE_MASK;
preempt_disable();
/* Set context for flush */
local_irq_save(flags);
prot = mfctl(8);
space = mfsp(SR_USER);
pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
pgd_lock = mfctl(28);
#endif
switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
local_irq_restore(flags);
flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
flush_tlb_page(vma, vmaddr);
/* Restore previous context */
local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
mtctl(pgd_lock, 28);
#endif
mtctl(pgd, 25);
mtsp(space, SR_USER);
mtctl(prot, 8);
local_irq_restore(flags);
preempt_enable();
}
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
void *kaddr = page_address(page);
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
flush_kernel_icache_page(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
pgd_t *pgd = mm->pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
if (!pgd_none(*pgd)) {
p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
ptep = pte_offset_map(pmd, addr);
}
}
}
return ptep;
}
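/* Only a present, referenced, cacheable page can hold live cache lines;
* everything else may safely be skipped by the flush routines below.
*/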
static inline bool pte_needs_flush(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
struct vm_area_struct *vma;
unsigned long addr, old_addr = 0;
void *kaddr;
unsigned long count = 0;
unsigned long i, nr, flags;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &folio->flags);
return;
}
nr = folio_nr_pages(folio);
kaddr = folio_address(folio);
for (i = 0; i < nr; i++)
flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
if (!mapping)
return;
pgoff = folio->index;
/*
* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
* declared as MAP_PRIVATE or MAP_SHARED), so we only need
* to flush one address here for them all to become coherent
* on machines that support equivalent aliasing
*/
flush_dcache_mmap_lock_irqsave(mapping, flags);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
unsigned long offset = pgoff - vma->vm_pgoff;
unsigned long pfn = folio_pfn(folio);
addr = vma->vm_start;
nr = folio_nr_pages(folio);
if (offset > -nr) {
pfn -= offset;
nr += offset;
} else {
addr += offset * PAGE_SIZE;
}
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
if (parisc_requires_coherency()) {
for (i = 0; i < nr; i++) {
pte_t *ptep = get_ptep(vma->vm_mm,
addr + i * PAGE_SIZE);
if (!ptep)
continue;
if (pte_needs_flush(*ptep))
flush_user_cache_page(vma,
addr + i * PAGE_SIZE);
/* Optimise accesses to the same table? */
pte_unmap(ptep);
}
} else {
/*
* The TLB is the engine of coherence on parisc:
* The CPU is entitled to speculate any page
* with a TLB mapping, so here we kill the
* mapping then flush the page along a special
* flush only alias mapping. This guarantees that
* the page is no-longer in the cache for any
* process and nor may it be speculatively read
* in (until the user or kernel specifically
* accesses it, of course)
*/
for (i = 0; i < nr; i++)
flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
for (i = 0; i < nr; i++)
__flush_cache_page(vma,
addr + i * PAGE_SIZE,
(pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
old_addr, addr, vma->vm_file);
if (nr == folio_nr_pages(folio))
old_addr = addr;
}
}
WARN_ON(++count == 4096);
}
flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;
#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
unsigned long threshold, threshold2;
alltime = mfctl(16);
flush_data_cache();
alltime = mfctl(16) - alltime;
size = (unsigned long)(_end - _text);
rangetime = mfctl(16);
flush_kernel_dcache_range((unsigned long)_text, size);
rangetime = mfctl(16) - rangetime;
printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
alltime, size, rangetime);
threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
pr_info("Calculated flush threshold is %lu KiB\n",
threshold/1024);
/*
* The threshold computed above isn't very reliable. The following
* heuristic works reasonably well on c8000/rp3440.
*/
threshold2 = cache_info.dc_size * num_online_cpus();
parisc_cache_flush_threshold = threshold2;
printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
parisc_cache_flush_threshold/1024);
/* calculate TLB flush threshold */
/* On SMP machines, skip the TLB measure of kernel text which
* has been mapped as huge pages. */
if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
threshold = max(cache_info.it_size, cache_info.dt_size);
threshold *= PAGE_SIZE;
threshold /= num_online_cpus();
goto set_tlb_threshold;
}
size = (unsigned long)_end - (unsigned long)_text;
rangetime = mfctl(16);
flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
rangetime = mfctl(16) - rangetime;
alltime = mfctl(16);
flush_tlb_all();
alltime = mfctl(16) - alltime;
printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
alltime, size, rangetime);
threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
threshold/1024);
set_tlb_threshold:
if (threshold > FLUSH_TLB_THRESHOLD)
parisc_tlb_flush_threshold = threshold;
else
parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;
flush_kernel_dcache_page_asm(addr);
purge_tlb_start(flags);
pdtlb(SR_KERNEL, addr);
purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
static void flush_cache_page_if_present(struct vm_area_struct *vma,
unsigned long vmaddr, unsigned long pfn)
{
bool needs_flush = false;
pte_t *ptep;
/*
* The pte check is racy and sometimes the flush will trigger
* a non-access TLB miss. Hopefully, the page has already been
* flushed.
*/
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pte_unmap(ptep);
}
if (needs_flush)
flush_cache_page(vma, vmaddr, pfn);
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
memcpy(dst, src, len);
}
/* __flush_tlb_range()
*
* returns 1 if all TLBs were flushed.
*/
int __flush_tlb_range(unsigned long sid, unsigned long start,
unsigned long end)
{
unsigned long flags;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
end - start >= parisc_tlb_flush_threshold) {
flush_tlb_all();
return 1;
}
/* Purge TLB entries for small ranges using the pdtlb and
pitlb instructions. These instructions execute locally
but cause a purge request to be broadcast to other TLBs. */
while (start < end) {
purge_tlb_start(flags);
mtsp(sid, SR_TEMP1);
pdtlb(SR_TEMP1, start);
pitlb(SR_TEMP1, start);
purge_tlb_end(flags);
start += PAGE_SIZE;
}
return 0;
}
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
unsigned long addr, pfn;
pte_t *ptep;
for (addr = start; addr < end; addr += PAGE_SIZE) {
bool needs_flush = false;
/*
* The vma can contain pages that aren't present. Although
* the pte search is expensive, we need the pte to find the
* page pfn and to check whether the page should be flushed.
*/
ptep = get_ptep(vma->vm_mm, addr);
if (ptep) {
needs_flush = pte_needs_flush(*ptep);
pfn = pte_pfn(*ptep);
pte_unmap(ptep);
}
if (needs_flush) {
if (parisc_requires_coherency()) {
flush_user_cache_page(vma, addr);
} else {
if (WARN_ON(!pfn_valid(pfn)))
return;
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
}
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long usize = 0;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma) {
if (usize >= parisc_cache_flush_threshold)
break;
usize += vma->vm_end - vma->vm_start;
}
return usize;
}
void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
/*
* Flushing the whole cache on each cpu takes forever on
* rp3440, etc. So, avoid it if the mm isn't too big.
*
* Note that we must flush the entire cache on machines
* with aliasing caches to prevent random segmentation
* faults.
*/
if (!parisc_requires_coherency()
|| mm_total_size(mm) >= parisc_cache_flush_threshold) {
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_all();
flush_cache_all();
return;
}
/* Flush mm */
for_each_vma(vmi, vma)
flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (!parisc_requires_coherency()
|| end - start >= parisc_cache_flush_threshold) {
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
flush_cache_all();
return;
}
flush_cache_pages(vma, start, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (WARN_ON(!pfn_valid(pfn)))
return;
if (parisc_requires_coherency())
flush_user_cache_page(vma, vmaddr);
else
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (!PageAnon(page))
return;
if (parisc_requires_coherency()) {
if (vma->vm_flags & VM_SHARED)
flush_data_cache();
else
flush_user_cache_page(vma, vmaddr);
return;
}
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
return;
}
flush_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
/* Ensure DMA is complete */
asm_syncdma();
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
flush_tlb_kernel_range(start, end);
flush_data_cache();
return;
}
purge_kernel_dcache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
unsigned int, cache)
{
unsigned long start, end;
ASM_EXCEPTIONTABLE_VAR(error);
if (bytes == 0)
return 0;
if (!access_ok((void __user *) addr, bytes))
return -EFAULT;
end = addr + bytes;
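	/*
	 * Each asm loop below walks [addr, end) in cache-line strides
	 * through user space (SR_USER). A fault while flushing is caught
	 * by the exception-table entry and lands in "error" as -EFAULT.
	 */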
if (cache & DCACHE) {
start = addr;
__asm__ __volatile__ (
#ifdef CONFIG_64BIT
"1: cmpb,*<<,n %0,%2,1b\n"
#else
"1: cmpb,<<,n %0,%2,1b\n"
#endif
" fic,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
: "+r" (start), "+r" (error)
: "r" (end), "r" (dcache_stride), "i" (SR_USER));
}
if (cache & ICACHE && error == 0) {
start = addr;
__asm__ __volatile__ (
#ifdef CONFIG_64BIT
"1: cmpb,*<<,n %0,%2,1b\n"
#else
"1: cmpb,<<,n %0,%2,1b\n"
#endif
" fdc,m %3(%4,%0)\n"
"2: sync\n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
: "+r" (start), "+r" (error)
: "r" (end), "r" (icache_stride), "i" (SR_USER));
}
return error;
}
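/*
 * Illustrative only, not part of this file: a userspace JIT would keep
 * freshly written code coherent roughly as sketched below. The macro
 * names come from <asm/cachectl.h> and <asm/unistd.h>; treat the exact
 * constants as assumptions of the sketch.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/cachectl.h>
 *
 *	static int flush_jit_code(void *buf, unsigned long len)
 *	{
 *		// flush D-cache, then invalidate I-cache over the range
 *		return syscall(__NR_cacheflush, (unsigned long)buf, len,
 *			       DCACHE | ICACHE);
 *	}
 */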
| linux-master | arch/parisc/kernel/cache.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC KGDB support
*
* Copyright (c) 2019 Sven Schnelle <[email protected]>
* Copyright (c) 2022 Helge Deller <[email protected]>
*
*/
#include <linux/kgdb.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/processor.h>
#include <asm/patch.h>
#include <asm/cacheflush.h>
const struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = { 0x03, 0xff, 0xa0, 0x1f }
};
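/* The four bytes above encode a PA-RISC "break" instruction; gdb
 * requests it via the remote protocol and kgdb_arch_set_breakpoint()
 * below patches it over the original instruction. */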
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
if (kgdb_handle_exception(1, args->signr, cmd, regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int kgdb_notify(struct notifier_block *self,
unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
.priority = -INT_MAX,
};
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
memset(gr, 0, sizeof(struct parisc_gdb_regs));
memcpy(gr->gpr, regs->gr, sizeof(gr->gpr));
memcpy(gr->fr, regs->fr, sizeof(gr->fr));
gr->sr0 = regs->sr[0];
gr->sr1 = regs->sr[1];
gr->sr2 = regs->sr[2];
gr->sr3 = regs->sr[3];
gr->sr4 = regs->sr[4];
gr->sr5 = regs->sr[5];
gr->sr6 = regs->sr[6];
gr->sr7 = regs->sr[7];
gr->sar = regs->sar;
gr->iir = regs->iir;
gr->isr = regs->isr;
gr->ior = regs->ior;
gr->ipsw = regs->ipsw;
gr->cr27 = regs->cr27;
gr->iaoq_f = regs->iaoq[0];
gr->iasq_f = regs->iasq[0];
gr->iaoq_b = regs->iaoq[1];
gr->iasq_b = regs->iasq[1];
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
memcpy(regs->gr, gr->gpr, sizeof(regs->gr));
memcpy(regs->fr, gr->fr, sizeof(regs->fr));
regs->sr[0] = gr->sr0;
regs->sr[1] = gr->sr1;
regs->sr[2] = gr->sr2;
regs->sr[3] = gr->sr3;
regs->sr[4] = gr->sr4;
regs->sr[5] = gr->sr5;
regs->sr[6] = gr->sr6;
regs->sr[7] = gr->sr7;
regs->sar = gr->sar;
regs->iir = gr->iir;
regs->isr = gr->isr;
regs->ior = gr->ior;
regs->ipsw = gr->ipsw;
regs->cr27 = gr->cr27;
regs->iaoq[0] = gr->iaoq_f;
regs->iasq[0] = gr->iasq_f;
regs->iaoq[1] = gr->iaoq_b;
regs->iasq[1] = gr->iasq_b;
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
unsigned long gr30, iaoq;
gr30 = regs->gr[30];
iaoq = regs->iaoq[0];
regs->gr[30] = regs->ksp;
regs->iaoq[0] = regs->kpc;
pt_regs_to_gdb_regs(gdb_regs, regs);
regs->gr[30] = gr30;
regs->iaoq[0] = iaoq;
}
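/*
 * PA-RISC exposes the pipeline as a two-entry instruction address
 * queue (IAOQ front/back). Advancing the queue by one slot is how we
 * resume past a break instruction that has already trapped.
 */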
static void step_instruction_queue(struct pt_regs *regs)
{
regs->iaoq[0] = regs->iaoq[1];
regs->iaoq[1] += 4;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->iaoq[0] = ip;
regs->iaoq[1] = ip + 4;
}
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int ret = copy_from_kernel_nofault(bpt->saved_instr,
(char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
if (ret)
return ret;
__patch_text((void *)bpt->bpt_addr,
*(unsigned int *)&arch_kgdb_ops.gdb_bpt_instr);
return ret;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
__patch_text((void *)bpt->bpt_addr, *(unsigned int *)&bpt->saved_instr);
return 0;
}
int kgdb_arch_handle_exception(int trap, int signo,
int err_code, char *inbuf, char *outbuf,
struct pt_regs *regs)
{
unsigned long addr;
char *p = inbuf + 1;
switch (inbuf[0]) {
case 'D':
case 'c':
case 'k':
kgdb_contthread = NULL;
kgdb_single_step = 0;
if (kgdb_hex2long(&p, &addr))
kgdb_arch_set_pc(regs, addr);
else if (trap == 9 && regs->iir ==
PARISC_KGDB_COMPILED_BREAK_INSN)
step_instruction_queue(regs);
return 0;
case 's':
kgdb_single_step = 1;
if (kgdb_hex2long(&p, &addr)) {
kgdb_arch_set_pc(regs, addr);
} else if (trap == 9 && regs->iir ==
PARISC_KGDB_COMPILED_BREAK_INSN) {
step_instruction_queue(regs);
mtctl(-1, 0);
} else {
mtctl(0, 0);
}
regs->gr[0] |= PSW_R;
return 0;
}
return -1;
}
| linux-master | arch/parisc/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/parisc/kernel/kprobes.c
*
* PA-RISC kprobes implementation
*
* Copyright (c) 2019 Sven Schnelle <[email protected]>
* Copyright (c) 2022 Helge Deller <[email protected]>
*/
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
if ((unsigned long)p->addr & 3UL)
return -EINVAL;
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
return -ENOMEM;
/*
* Set up new instructions. Second break instruction will
* trigger call of parisc_kprobe_ss_handler().
*/
p->opcode = *p->addr;
p->ainsn.insn[0] = p->opcode;
p->ainsn.insn[1] = PARISC_KPROBES_BREAK_INSN2;
flush_insn_slot(p);
return 0;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
if (!p->ainsn.insn)
return;
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
static void __kprobes setup_singlestep(struct kprobe *p,
struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
kcb->iaoq[0] = regs->iaoq[0];
kcb->iaoq[1] = regs->iaoq[1];
instruction_pointer_set(regs, (unsigned long)p->ainsn.insn);
}
int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe((unsigned long *)regs->iaoq[0]);
if (!p) {
preempt_enable_no_resched();
return 0;
}
if (kprobe_running()) {
/*
* We have reentered the kprobe_handler, since another kprobe
* was hit while within the handler, we save the original
* kprobes and single step on the instruction of the new probe
* without calling any user handlers to avoid recursive
* kprobes.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p);
kprobes_inc_nmissed_count(p);
setup_singlestep(p, kcb, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/* If we have no pre-handler or it returned 0, we continue with
* normal processing. If we have a pre-handler and it returned
* non-zero - which means user handler setup registers to exit
* to another instruction, we must skip the single stepping.
*/
if (!p->pre_handler || !p->pre_handler(p, regs)) {
setup_singlestep(p, kcb, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
} else {
reset_current_kprobe();
preempt_enable_no_resched();
}
return 1;
}
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
struct kprobe *p = kprobe_running();
if (!p)
return 0;
if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
return 0;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return 1;
}
/* for absolute branch instructions we can copy iaoq_b. for relative
* branch instructions we need to calculate the new address based on the
* difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
* modifications because it's based on our ainsn.insn address.
*/
if (p->post_handler)
p->post_handler(p, regs, 0);
switch (regs->iir >> 26) {
case 0x38: /* BE */
case 0x39: /* BE,L */
case 0x3a: /* BV */
case 0x3b: /* BVE */
/* for absolute branches, regs->iaoq[1] has already the right
* address
*/
regs->iaoq[0] = kcb->iaoq[1];
break;
default:
regs->iaoq[0] = kcb->iaoq[1];
regs->iaoq[1] = regs->iaoq[0] + 4;
break;
}
kcb->kprobe_status = KPROBE_HIT_SSDONE;
reset_current_kprobe();
return 1;
}
void __kretprobe_trampoline(void)
{
asm volatile("nop");
asm volatile("nop");
}
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs);
static struct kprobe trampoline_p = {
.pre_handler = trampoline_probe_handler
};
static int __kprobes trampoline_probe_handler(struct kprobe *p,
struct pt_regs *regs)
{
__kretprobe_trampoline_handler(regs, NULL);
return 1;
}
void arch_kretprobe_fixup_return(struct pt_regs *regs,
kprobe_opcode_t *correct_ret_addr)
{
regs->gr[2] = (unsigned long)correct_ret_addr;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];
ri->fp = NULL;
/* Replace the return addr with trampoline addr. */
regs->gr[2] = (unsigned long)trampoline_p.addr;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return p->addr == trampoline_p.addr;
}
int __init arch_init_kprobes(void)
{
trampoline_p.addr = (kprobe_opcode_t *)
dereference_function_descriptor(__kretprobe_trampoline);
return register_kprobe(&trampoline_p);
}
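/*
 * Illustrative only, not part of this file: the arch hooks above are
 * driven by the generic kprobes API. A module would use it roughly as
 * below; the probed symbol and the handler name are made up for the
 * sketch.
 *
 *	#include <linux/kprobes.h>
 *
 *	static int pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS, iaoq_f=%lx\n", p->addr, regs->iaoq[0]);
 *		return 0;	// continue with single-stepping
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= pre,
 *	};
 *
 *	// register_kprobe(&kp) reaches arch_prepare_kprobe() and
 *	// arch_arm_kprobe() above; unregister_kprobe(&kp) undoes it.
 */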
| linux-master | arch/parisc/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
/*
* sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 2000-2001 Hewlett Packard Company
* Copyright (C) 2000 John Marvin
* Copyright (C) 2001 Matthew Wilcox
* Copyright (C) 2014 Helge Deller <[email protected]>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. Based heavily on sys_ia32.c and sys_sparc32.c.
*/
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
int r22, int r21, int r20)
{
printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n",
current->comm, current->pid, r20);
return -ENOSYS;
}
asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
const char __user * pathname)
{
return sys_fanotify_mark(fanotify_fd, flags,
((__u64)mask1 << 32) | mask0,
dfd, pathname);
}
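/*
 * Worked example of the merge above: a compat caller passing
 * mask0 = 0x00000002 (FAN_MODIFY) and mask1 = 0x00000001 hands
 * sys_fanotify_mark() the 64-bit mask 0x0000000100000002.
 */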
| linux-master | arch/parisc/kernel/sys_parisc32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <[email protected]>
** Copyright (C) 1999 David Mosberger-Tang <[email protected]>
** Copyright (C) 2001,2004 Grant Grundler <[email protected]>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...) \
if (lvl >= smp_debug_lvl) \
printk(printargs);
#else
#define smp_debug(lvl, ...) do { } while(0)
#endif /* DEBUG_SMP */
volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
static volatile int cpu_now_booting;
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
IPI_NOP=0,
IPI_RESCHEDULE=1,
IPI_CALL_FUNC,
IPI_CPU_START,
IPI_CPU_STOP,
IPI_CPU_TEST,
#ifdef CONFIG_KGDB
IPI_ENTER_KGDB,
#endif
};
/********** SMP inter processor interrupt and communication routines */
#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
** *May* need this "hook" to register IPI handler
** once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
if (cpu_online(cpuid))
{
switch_to_idle_task(current);
}
return;
}
#endif
/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
/* REVISIT : redirect I/O Interrupts to another CPU? */
/* REVISIT : does PM *know* this CPU isn't available? */
set_cpu_online(smp_processor_id(), false);
local_irq_disable();
__pdc_cpu_rendezvous();
for (;;)
;
}
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
int this_cpu = smp_processor_id();
struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
unsigned long ops;
unsigned long flags;
for (;;) {
spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
spin_lock_irqsave(lock, flags);
ops = p->pending_ipi;
p->pending_ipi = 0;
spin_unlock_irqrestore(lock, flags);
mb(); /* Order bit clearing and data access. */
if (!ops)
break;
while (ops) {
unsigned long which = ffz(~ops);
ops &= ~(1 << which);
switch (which) {
case IPI_NOP:
smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
break;
case IPI_RESCHEDULE:
smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
inc_irq_stat(irq_resched_count);
scheduler_ipi();
break;
case IPI_CALL_FUNC:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
inc_irq_stat(irq_call_count);
generic_smp_call_function_interrupt();
break;
case IPI_CPU_START:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
break;
case IPI_CPU_STOP:
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
halt_processor();
break;
case IPI_CPU_TEST:
smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
break;
#ifdef CONFIG_KGDB
case IPI_ENTER_KGDB:
smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
break;
#endif
default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
return IRQ_NONE;
} /* Switch */
/* before doing more, let in any pending interrupts */
if (ops) {
local_irq_enable();
local_irq_disable();
}
} /* while (ops) */
}
return IRQ_HANDLED;
}
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
spinlock_t *lock = &per_cpu(ipi_lock, cpu);
unsigned long flags;
spin_lock_irqsave(lock, flags);
p->pending_ipi |= 1 << op;
gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
spin_unlock_irqrestore(lock, flags);
}
static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
int cpu;
for_each_cpu(cpu, mask)
ipi_send(cpu, op);
}
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
BUG_ON(dest_cpu == NO_PROC_ID);
ipi_send(dest_cpu, op);
}
static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
int i;
preempt_disable();
for_each_online_cpu(i) {
if (i != smp_processor_id())
send_IPI_single(i, op);
}
preempt_enable();
}
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif
inline void
smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
void
arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
void
smp_send_all_nop(void)
{
send_IPI_allbutself(IPI_NOP);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_IPI_mask(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_IPI_single(cpu, IPI_CALL_FUNC);
}
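/*
 * The two hooks above are how the generic cross-call API reaches this
 * code. A minimal sketch of a (hypothetical) caller elsewhere in the
 * kernel:
 *
 *	static void poke(void *info) { (*(int *)info)++; }
 *	...
 *	int hits = 0;
 *	smp_call_function_single(1, poke, &hits, 1); // run on CPU 1, wait
 */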
/*
* Called by secondaries to update state and initialize CPU registers.
*/
static void
smp_cpu_init(int cpunum)
{
/* Set modes and Enable floating point coprocessor */
init_per_cpu(cpunum);
disable_sr_hashing();
mb();
/* Well, support 2.4 linux scheme as well. */
if (cpu_online(cpunum)) {
extern void machine_halt(void); /* arch/parisc.../process.c */
printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
machine_halt();
}
notify_cpu_starting(cpunum);
set_cpu_online(cpunum, true);
/* Initialise the idle task for this CPU */
mmgrab(&init_mm);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
start_cpu_itimer();
}
/*
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
void smp_callin(unsigned long pdce_proc)
{
int slave_id = cpu_now_booting;
#ifdef CONFIG_64BIT
WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
| PAGE0->mem_pdc) != pdce_proc);
#endif
smp_cpu_init(slave_id);
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
local_irq_enable(); /* Interrupts have been off until now */
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
/* NOTREACHED */
panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*
* Bring one cpu online.
*/
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
#ifdef CONFIG_HOTPLUG_CPU
int i;
/* reset irq statistics for this CPU */
memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
for (i = 0; i < NR_IRQS; i++) {
struct irq_desc *desc = irq_to_desc(i);
if (desc && desc->kstat_irqs)
*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
}
#endif
/* wait until last booting CPU has started. */
while (cpu_now_booting)
;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
cpu_now_booting = cpuid;
/*
** boot strap code needs to know the task address since
** it also contains the process stack.
*/
smp_init_current_idle_task = idle;
mb();
printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
/*
** This gets PDC to release the CPU from a very tight loop.
**
** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
** is executed after receiving the rendezvous signal (an interrupt to
** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
** contents of memory are valid."
*/
gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
mb();
/*
* OK, wait a bit for that CPU to finish staggering about.
* Slave will set a bit when it reaches smp_cpu_init().
* Once the "monarch CPU" sees the bit change, it can move on.
*/
for (timeout = 0; timeout < 10000; timeout++) {
if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
goto alive;
}
udelay(100);
barrier();
}
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
alive:
/* Remember the Slave data */
smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
cpuid, timeout * 100);
return 0;
}
void __init smp_prepare_boot_cpu(void)
{
int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
/* Setup BSP mappings */
printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
set_cpu_online(bootstrap_processor, true);
set_cpu_present(bootstrap_processor, true);
}
/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(ipi_lock, cpu));
init_cpu_present(cpumask_of(0));
}
void __init smp_cpus_done(unsigned int cpu_max)
{
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
if (cpu_online(cpu))
return 0;
if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
return -EIO;
return cpu_online(cpu) ? 0 : -EIO;
}
/*
* __cpu_disable runs on the processor to be shutdown.
*/
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
unsigned int cpu = smp_processor_id();
remove_cpu_topology(cpu);
/*
* Take this CPU offline. Once we clear this, we can't return,
* and we must not schedule until we're ready to give up the cpu.
*/
set_cpu_online(cpu, false);
/* Find a new timesync master */
if (cpu == time_keeper_id) {
time_keeper_id = cpumask_first(cpu_online_mask);
pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
}
disable_percpu_irq(IPI_IRQ);
irq_migrate_all_off_this_cpu();
flush_cache_all_local();
flush_tlb_all_local(NULL);
/* disable all irqs, including timer irq */
local_irq_disable();
/* wait for next timer irq ... */
mdelay(1000/HZ+100);
/* ... and then clear all pending external irqs */
set_eiem(0);
mtctl(~0UL, CR_EIRR);
mfctl(CR_EIRR);
mtctl(0, CR_EIRR);
#endif
return 0;
}
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
pdc_cpu_rendezvous_lock();
}
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
pr_info("CPU%u: is shutting down\n", cpu);
/* set task's state to interruptible sleep */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);
pdc_cpu_rendezvous_unlock();
}
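/*
 * For reference: the hotplug path above is normally driven from
 * userspace, e.g. "echo 0 > /sys/devices/system/cpu/cpu1/online",
 * which ends up in __cpu_disable() on the dying CPU and __cpu_die()
 * on the requesting one.
 */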
| linux-master | arch/parisc/kernel/smp.c |