Dataset columns:
    python_code : string, length 0 to 1.8M
    repo_name   : string, 7 distinct values
    file_path   : string, length 5 to 99
// SPDX-License-Identifier: GPL-2.0
#include <linux/termios_internal.h>

int user_termio_to_kernel_termios(struct ktermios *termios,
				  struct termio __user *termio)
{
	struct termio v;
	bool canon;

	if (copy_from_user(&v, termio, sizeof(struct termio)))
		return -EFAULT;

	/* Only the low 16 bits of each flag word come from the legacy
	   struct termio; the high bits of the kernel state are kept. */
	termios->c_iflag = (0xffff0000 & termios->c_iflag) | v.c_iflag;
	termios->c_oflag = (0xffff0000 & termios->c_oflag) | v.c_oflag;
	termios->c_cflag = (0xffff0000 & termios->c_cflag) | v.c_cflag;
	termios->c_lflag = (0xffff0000 & termios->c_lflag) | v.c_lflag;
	termios->c_line = (0xffff0000 & termios->c_line) | v.c_line;

	canon = v.c_lflag & ICANON;
	termios->c_cc[VINTR] = v.c_cc[_VINTR];
	termios->c_cc[VQUIT] = v.c_cc[_VQUIT];
	termios->c_cc[VERASE] = v.c_cc[_VERASE];
	termios->c_cc[VKILL] = v.c_cc[_VKILL];
	termios->c_cc[VEOL2] = v.c_cc[_VEOL2];
	termios->c_cc[VSWTC] = v.c_cc[_VSWTC];
	termios->c_cc[canon ? VEOF : VMIN] = v.c_cc[_VEOF];
	termios->c_cc[canon ? VEOL : VTIME] = v.c_cc[_VEOL];

	return 0;
}

int kernel_termios_to_user_termio(struct termio __user *termio,
				  struct ktermios *termios)
{
	struct termio v;
	bool canon;

	memset(&v, 0, sizeof(struct termio));
	v.c_iflag = termios->c_iflag;
	v.c_oflag = termios->c_oflag;
	v.c_cflag = termios->c_cflag;
	v.c_lflag = termios->c_lflag;
	v.c_line = termios->c_line;

	canon = v.c_lflag & ICANON;
	v.c_cc[_VINTR] = termios->c_cc[VINTR];
	v.c_cc[_VQUIT] = termios->c_cc[VQUIT];
	v.c_cc[_VERASE] = termios->c_cc[VERASE];
	v.c_cc[_VKILL] = termios->c_cc[VKILL];
	v.c_cc[_VEOF] = termios->c_cc[canon ? VEOF : VMIN];
	v.c_cc[_VEOL] = termios->c_cc[canon ? VEOL : VTIME];
	v.c_cc[_VEOL2] = termios->c_cc[VEOL2];
	v.c_cc[_VSWTC] = termios->c_cc[VSWTC];

	return copy_to_user(termio, &v, sizeof(struct termio));
}
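The two conversion helpers above serve the legacy TCGETA/TCSETA ioctl path, where userspace still passes the 16-bit struct termio rather than the full termios. A minimal userspace sketch of that round trip, assuming a Linux/glibc toolchain that exposes struct termio and the TCGETA/TCSETA constants through <sys/ioctl.h> (this program is an illustration, not part of the kernel file above):

/* Round-trip the legacy termio view of a tty.  The kernel converts to
 * and from its internal ktermios with the two helpers above; only the
 * low 16 bits of each flag word travel through this interface. */
#include <stdio.h>
#include <unistd.h>
#include <termios.h>    /* ICANON */
#include <sys/ioctl.h>  /* struct termio, TCGETA, TCSETA (Linux/glibc) */

int main(void)
{
	struct termio tio;

	if (ioctl(STDIN_FILENO, TCGETA, &tio) < 0) {  /* kernel_termios_to_user_termio() */
		perror("TCGETA");
		return 1;
	}
	printf("c_lflag = %#x, ICANON %s\n", tio.c_lflag,
	       (tio.c_lflag & ICANON) ? "set" : "clear");

	if (ioctl(STDIN_FILENO, TCSETA, &tio) < 0) {  /* user_termio_to_kernel_termios() */
		perror("TCSETA");
		return 1;
	}
	return 0;
}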
linux-master
arch/alpha/kernel/termios.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_miata.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the MIATA (EV56+PYXIS). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/reboot.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void miata_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * I really hate to do this, but the MIATA SRM console ignores the * low 8 bits in the interrupt summary register, and reports the * vector 0x80 *lower* than I expected from the bit numbering in * the documentation. * This was done because the low 8 summary bits really aren't used * for reporting any interrupts (the PCI-ISA bridge, bit 7, isn't * used for this purpose, as PIC interrupts are delivered as the * vectors 0x800-0x8f0). * But I really don't want to change the fixup code for allocation * of IRQs, nor the alpha_irq_mask maintenance stuff, both of which * look nice and clean now. * So, here's this grotty hack... :-( */ if (irq >= 16) irq = irq + 8; handle_irq(irq); } static void __init miata_init_irq(void) { if (alpha_using_srm) alpha_mv.device_interrupt = miata_srm_device_interrupt; #if 0 /* These break on MiataGL so we'll try not to do it at all. */ *(vulp)PYXIS_INT_HILO = 0x000000B2UL; mb(); /* ISA/NMI HI */ *(vulp)PYXIS_RT_COUNT = 0UL; mb(); /* clear count */ #endif init_i8259a_irqs(); /* Not interested in the bogus interrupts (3,10), Fan Fault (0), NMI (1), or EIDE (9). We also disable the risers (4,5), since we don't know how to route the interrupts behind the bridge. */ init_pyxis_irqs(0x63b0000); common_init_isa_dma(); if (request_irq(16 + 2, no_action, 0, "halt-switch", NULL)) pr_err("Failed to register halt-switch interrupt\n"); if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL)) pr_err("Failed to register timer-cascade interrupt\n"); } /* * PCI Fixup configuration. * * Summary @ PYXIS_INT_REQ: * Bit Meaning * 0 Fan Fault * 1 NMI * 2 Halt/Reset switch * 3 none * 4 CID0 (Riser ID) * 5 CID1 (Riser ID) * 6 Interval timer * 7 PCI-ISA Bridge * 8 Ethernet * 9 EIDE (deprecated, ISA 14/15 used) *10 none *11 USB *12 Interrupt Line A from slot 4 *13 Interrupt Line B from slot 4 *14 Interrupt Line C from slot 4 *15 Interrupt Line D from slot 4 *16 Interrupt Line A from slot 5 *17 Interrupt line B from slot 5 *18 Interrupt Line C from slot 5 *19 Interrupt Line D from slot 5 *20 Interrupt Line A from slot 1 *21 Interrupt Line B from slot 1 *22 Interrupt Line C from slot 1 *23 Interrupt Line D from slot 1 *24 Interrupt Line A from slot 2 *25 Interrupt Line B from slot 2 *26 Interrupt Line C from slot 2 *27 Interrupt Line D from slot 2 *27 Interrupt Line A from slot 3 *29 Interrupt Line B from slot 3 *30 Interrupt Line C from slot 3 *31 Interrupt Line D from slot 3 * * The device to slot mapping looks like: * * Slot Device * 3 DC21142 Ethernet * 4 EIDE CMD646 * 5 none * 6 USB * 7 PCI-ISA bridge * 8 PCI-PCI Bridge (SBU Riser) * 9 none * 10 none * 11 PCI on board slot 4 (SBU Riser) * 12 PCI on board slot 5 (SBU Riser) * * These are behind the bridge, so I'm not sure what to do... 
* * 13 PCI on board slot 1 (SBU Riser) * 14 PCI on board slot 2 (SBU Riser) * 15 PCI on board slot 3 (SBU Riser) * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ static int miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[18][5] = { /*INT INTA INTB INTC INTD */ {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ { -1, -1, -1, -1, -1}, /* IdSel 16, none */ { -1, -1, -1, -1, -1}, /* IdSel 17, none */ { -1, -1, -1, -1, -1}, /* IdSel 18, PCI-ISA */ { -1, -1, -1, -1, -1}, /* IdSel 19, PCI-PCI */ { -1, -1, -1, -1, -1}, /* IdSel 20, none */ { -1, -1, -1, -1, -1}, /* IdSel 21, none */ {16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 22, slot 4 */ {16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 23, slot 5 */ /* the next 7 are actually on PCI bus 1, across the bridge */ {16+11, 16+11, 16+11, 16+11, 16+11}, /* IdSel 24, QLISP/GL*/ { -1, -1, -1, -1, -1}, /* IdSel 25, none */ { -1, -1, -1, -1, -1}, /* IdSel 26, none */ { -1, -1, -1, -1, -1}, /* IdSel 27, none */ {16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 28, slot 1 */ {16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 29, slot 2 */ {16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 30, slot 3 */ /* This bridge is on the main bus of the later orig MIATA */ { -1, -1, -1, -1, -1}, /* IdSel 31, PCI-PCI */ }; const long min_idsel = 3, max_idsel = 20, irqs_per_slot = 5; /* the USB function of the 82c693 has it's interrupt connected to the 2nd 8259 controller. So we have to check for it first. */ if((slot == 7) && (PCI_FUNC(dev->devfn) == 3)) { u8 irq=0; struct pci_dev *pdev = pci_get_slot(dev->bus, dev->devfn & ~7); if(pdev == NULL || pci_read_config_byte(pdev, 0x40,&irq) != PCIBIOS_SUCCESSFUL) { pci_dev_put(pdev); return -1; } else { pci_dev_put(pdev); return irq; } } return COMMON_TABLE_LOOKUP; } static u8 miata_swizzle(struct pci_dev *dev, u8 *pinp) { int slot, pin = *pinp; if (dev->bus->number == 0) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge. */ else if ((PCI_SLOT(dev->bus->self->devfn) == 8) || (PCI_SLOT(dev->bus->self->devfn) == 20)) { slot = PCI_SLOT(dev->devfn) + 9; } else { /* Must be a card-based bridge. */ do { if ((PCI_SLOT(dev->bus->self->devfn) == 8) || (PCI_SLOT(dev->bus->self->devfn) == 20)) { slot = PCI_SLOT(dev->devfn) + 9; break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. */ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } static void __init miata_init_pci(void) { cia_init_pci(); SMC669_Init(0); /* it might be a GL (fails harmlessly if not) */ es1888_init(); } static void miata_kill_arch(int mode) { cia_kill_arch(mode); #ifndef ALPHA_RESTORE_SRM_SETUP switch(mode) { case LINUX_REBOOT_CMD_RESTART: /* Who said DEC engineers have no sense of humor? 
;-) */ if (alpha_using_srm) { *(vuip) PYXIS_RESET = 0x0000dead; mb(); } break; case LINUX_REBOOT_CMD_HALT: break; case LINUX_REBOOT_CMD_POWER_OFF: break; } halt(); #endif } /* * The System Vector */ struct alpha_machine_vector miata_mv __initmv = { .vector_name = "Miata", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 48, .device_interrupt = pyxis_device_interrupt, .init_arch = pyxis_init_arch, .init_irq = miata_init_irq, .init_rtc = common_init_rtc, .init_pci = miata_init_pci, .kill_arch = miata_kill_arch, .pci_map_irq = miata_map_irq, .pci_swizzle = miata_swizzle, }; ALIAS_MV(miata)
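miata_map_irq() above returns COMMON_TABLE_LOOKUP, a helper from the Alpha pci_impl.h headers. As a rough, standalone sketch of the lookup it performs (a paraphrase under the assumption that the helper indexes irq_tab by IdSel-relative slot and interrupt pin, returning -1 for unrouted entries; the function name here is made up):

/* Hypothetical equivalent of the table lookup used by miata_map_irq():
 * pick the per-pin IRQ for a given IdSel, or -1 when the slot/pin
 * combination is not routed. */
static long table_lookup(char irq_tab[][5], long min_idsel, long max_idsel,
			 long irqs_per_slot, unsigned char slot,
			 unsigned char pin)
{
	if (slot < min_idsel || slot > max_idsel || pin >= irqs_per_slot)
		return -1;
	return irq_tab[slot - min_idsel][pin];
}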
linux-master
arch/alpha/kernel/sys_miata.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Handle interrupts from the SRM, assuming no additional weirdness.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>

#include "proto.h"
#include "irq_impl.h"

/*
 * Is the palcode SMP safe? In other words: can we call cserve_ena/dis
 * at the same time in multiple CPUs? To be safe I added a spinlock
 * but it can be removed trivially if the palcode is robust against smp.
 */
DEFINE_SPINLOCK(srm_irq_lock);

static inline void
srm_enable_irq(struct irq_data *d)
{
	spin_lock(&srm_irq_lock);
	cserve_ena(d->irq - 16);
	spin_unlock(&srm_irq_lock);
}

static void
srm_disable_irq(struct irq_data *d)
{
	spin_lock(&srm_irq_lock);
	cserve_dis(d->irq - 16);
	spin_unlock(&srm_irq_lock);
}

/* Handle interrupts from the SRM, assuming no additional weirdness.  */
static struct irq_chip srm_irq_type = {
	.name		= "SRM",
	.irq_unmask	= srm_enable_irq,
	.irq_mask	= srm_disable_irq,
	.irq_mask_ack	= srm_disable_irq,
};

void __init
init_srm_irqs(long max, unsigned long ignore_mask)
{
	long i;

	if (NR_IRQS <= 16)
		return;
	for (i = 16; i < max; ++i) {
		if (i < 64 && ((ignore_mask >> i) & 1))
			continue;
		irq_set_chip_and_handler(i, &srm_irq_type, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}

void
srm_device_interrupt(unsigned long vector)
{
	/* SRM reports device interrupts as vector 0x800 + 16 * irq. */
	int irq = (vector - 0x800) >> 4;

	handle_irq(irq);
}
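init_srm_irqs() registers IRQs 16..max-1 with the SRM irq_chip, skipping any IRQ below 64 whose bit is set in ignore_mask. A hypothetical platform init_irq hook (illustrative values only, not taken from any real board file) would call it like this:

/* Sketch: register SRM-managed IRQs 16-47, but skip IRQs 19 and 20,
 * which some other controller hypothetically owns.  Bit i of
 * ignore_mask suppresses registration of IRQ i. */
static void __init example_init_irq(void)
{
	init_srm_irqs(48, (1UL << 19) | (1UL << 20));
}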
linux-master
arch/alpha/kernel/irq_srm.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/osf_sys.c * * Copyright (C) 1995 Linus Torvalds */ /* * This file handles some of the stranger OSF/1 system call interfaces. * Some of the system calls expect a non-C calling standard, others have * special parameter blocks.. */ #include <linux/errno.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/sched/task_stack.h> #include <linux/sched/cputime.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/utsname.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/major.h> #include <linux/stat.h> #include <linux/mman.h> #include <linux/shm.h> #include <linux/poll.h> #include <linux/file.h> #include <linux/types.h> #include <linux/ipc.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/uio.h> #include <linux/vfs.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <asm/fpu.h> #include <asm/io.h> #include <linux/uaccess.h> #include <asm/sysinfo.h> #include <asm/thread_info.h> #include <asm/hwrpb.h> #include <asm/processor.h> /* * Brk needs to return an error. Still support Linux's brk(0) query idiom, * which OSF programs just shouldn't be doing. We're still not quite * identical to OSF as we don't return 0 on success, but doing otherwise * would require changes to libc. Hopefully this is good enough. */ SYSCALL_DEFINE1(osf_brk, unsigned long, brk) { unsigned long retval = sys_brk(brk); if (brk && brk != retval) retval = -ENOMEM; return retval; } /* * This is pure guess-work.. */ SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start, unsigned long, text_len, unsigned long, bss_start, unsigned long, bss_len) { struct mm_struct *mm; mm = current->mm; mm->end_code = bss_start + bss_len; mm->start_brk = bss_start + bss_len; mm->brk = bss_start + bss_len; #if 0 printk("set_program_attributes(%lx %lx %lx %lx)\n", text_start, text_len, bss_start, bss_len); #endif return 0; } /* * OSF/1 directory handling functions... * * The "getdents()" interface is much more sane: the "basep" stuff is * braindamage (it can't really handle filesystems where the directory * offset differences aren't the same as "d_reclen"). 
*/ #define NAME_OFFSET offsetof (struct osf_dirent, d_name) struct osf_dirent { unsigned int d_ino; unsigned short d_reclen; unsigned short d_namlen; char d_name[]; }; struct osf_dirent_callback { struct dir_context ctx; struct osf_dirent __user *dirent; long __user *basep; unsigned int count; int error; }; static bool osf_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct osf_dirent __user *dirent; struct osf_dirent_callback *buf = container_of(ctx, struct osf_dirent_callback, ctx); unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32)); unsigned int d_ino; buf->error = -EINVAL; /* only used if we fail */ if (reclen > buf->count) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return false; } if (buf->basep) { if (put_user(offset, buf->basep)) goto Efault; buf->basep = NULL; } dirent = buf->dirent; if (put_user(d_ino, &dirent->d_ino) || put_user(namlen, &dirent->d_namlen) || put_user(reclen, &dirent->d_reclen) || copy_to_user(dirent->d_name, name, namlen) || put_user(0, dirent->d_name + namlen)) goto Efault; dirent = (void __user *)dirent + reclen; buf->dirent = dirent; buf->count -= reclen; return true; Efault: buf->error = -EFAULT; return false; } SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd, struct osf_dirent __user *, dirent, unsigned int, count, long __user *, basep) { int error; struct fd arg = fdget_pos(fd); struct osf_dirent_callback buf = { .ctx.actor = osf_filldir, .dirent = dirent, .basep = basep, .count = count }; if (!arg.file) return -EBADF; error = iterate_dir(arg.file, &buf.ctx); if (error >= 0) error = buf.error; if (count != buf.count) error = count - buf.count; fdput_pos(arg); return error; } #undef NAME_OFFSET SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, unsigned long, off) { unsigned long ret = -EINVAL; #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", current->comm, flags); #endif if ((off + PAGE_ALIGN(len)) < off) goto out; if (off & ~PAGE_MASK) goto out; ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); out: return ret; } struct osf_stat { int st_dev; int st_pad1; unsigned st_mode; unsigned short st_nlink; short st_nlink_reserved; unsigned st_uid; unsigned st_gid; int st_rdev; int st_ldev; long st_size; int st_pad2; int st_uatime; int st_pad3; int st_umtime; int st_pad4; int st_uctime; int st_pad5; int st_pad6; unsigned st_flags; unsigned st_gen; long st_spare[4]; unsigned st_ino; int st_ino_reserved; int st_atime; int st_atime_reserved; int st_mtime; int st_mtime_reserved; int st_ctime; int st_ctime_reserved; long st_blksize; long st_blocks; }; /* * The OSF/1 statfs structure is much larger, but this should * match the beginning, at least. 
*/ struct osf_statfs { short f_type; short f_flags; int f_fsize; int f_bsize; int f_blocks; int f_bfree; int f_bavail; int f_files; int f_ffree; __kernel_fsid_t f_fsid; }; struct osf_statfs64 { short f_type; short f_flags; int f_pad1; int f_pad2; int f_pad3; int f_pad4; int f_pad5; int f_pad6; int f_pad7; __kernel_fsid_t f_fsid; u_short f_namemax; short f_reserved1; int f_spare[8]; char f_pad8[90]; char f_pad9[90]; long mount_info[10]; u_long f_flags2; long f_spare2[14]; long f_fsize; long f_bsize; long f_blocks; long f_bfree; long f_bavail; long f_files; long f_ffree; }; static int linux_to_osf_stat(struct kstat *lstat, struct osf_stat __user *osf_stat) { struct osf_stat tmp = { 0 }; tmp.st_dev = lstat->dev; tmp.st_mode = lstat->mode; tmp.st_nlink = lstat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), lstat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), lstat->gid); tmp.st_rdev = lstat->rdev; tmp.st_ldev = lstat->rdev; tmp.st_size = lstat->size; tmp.st_uatime = lstat->atime.tv_nsec / 1000; tmp.st_umtime = lstat->mtime.tv_nsec / 1000; tmp.st_uctime = lstat->ctime.tv_nsec / 1000; tmp.st_ino = lstat->ino; tmp.st_atime = lstat->atime.tv_sec; tmp.st_mtime = lstat->mtime.tv_sec; tmp.st_ctime = lstat->ctime.tv_sec; tmp.st_blksize = lstat->blksize; tmp.st_blocks = lstat->blocks; return copy_to_user(osf_stat, &tmp, sizeof(tmp)) ? -EFAULT : 0; } static int linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat, unsigned long bufsiz) { struct osf_statfs tmp_stat; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_flags = 0; /* mount flags */ tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0; } static int linux_to_osf_statfs64(struct kstatfs *linux_stat, struct osf_statfs64 __user *osf_stat, unsigned long bufsiz) { struct osf_statfs64 tmp_stat = { 0 }; tmp_stat.f_type = linux_stat->f_type; tmp_stat.f_fsize = linux_stat->f_frsize; tmp_stat.f_bsize = linux_stat->f_bsize; tmp_stat.f_blocks = linux_stat->f_blocks; tmp_stat.f_bfree = linux_stat->f_bfree; tmp_stat.f_bavail = linux_stat->f_bavail; tmp_stat.f_files = linux_stat->f_files; tmp_stat.f_ffree = linux_stat->f_ffree; tmp_stat.f_fsid = linux_stat->f_fsid; if (bufsiz > sizeof(tmp_stat)) bufsiz = sizeof(tmp_stat); return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? 
-EFAULT : 0; } SYSCALL_DEFINE3(osf_statfs, const char __user *, pathname, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE2(osf_stat, char __user *, name, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_stat(name, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE2(osf_lstat, char __user *, name, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_lstat(name, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE2(osf_fstat, int, fd, struct osf_stat __user *, buf) { struct kstat stat; int error; error = vfs_fstat(fd, &stat); if (error) return error; return linux_to_osf_stat(&stat, buf); } SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd, struct osf_statfs __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_statfs64, char __user *, pathname, struct osf_statfs64 __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = user_statfs(pathname, &linux_stat); if (!error) error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz); return error; } SYSCALL_DEFINE3(osf_fstatfs64, unsigned long, fd, struct osf_statfs64 __user *, buffer, unsigned long, bufsiz) { struct kstatfs linux_stat; int error = fd_statfs(fd, &linux_stat); if (!error) error = linux_to_osf_statfs64(&linux_stat, buffer, bufsiz); return error; } /* * Uhh.. OSF/1 mount parameters aren't exactly obvious.. * * Although to be frank, neither are the native Linux/i386 ones.. */ struct ufs_args { char __user *devname; int flags; uid_t exroot; }; struct cdfs_args { char __user *devname; int flags; uid_t exroot; /* This has lots more here, which Linux handles with the option block but I'm too lazy to do the translation into ASCII. */ }; struct procfs_args { char __user *devname; int flags; uid_t exroot; }; /* * We can't actually handle ufs yet, so we translate UFS mounts to * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS * layout is so braindead it's a major headache doing it. * * Just how long ago was it written? OTOH our UFS driver may be still * unhappy with OSF UFS. 
[CHECKME] */ static int osf_ufs_mount(const char __user *dirname, struct ufs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "ext2", flags, NULL); putname(devname); out: return retval; } static int osf_cdfs_mount(const char __user *dirname, struct cdfs_args __user *args, int flags) { int retval; struct cdfs_args tmp; struct filename *devname; retval = -EFAULT; if (copy_from_user(&tmp, args, sizeof(tmp))) goto out; devname = getname(tmp.devname); retval = PTR_ERR(devname); if (IS_ERR(devname)) goto out; retval = do_mount(devname->name, dirname, "iso9660", flags, NULL); putname(devname); out: return retval; } static int osf_procfs_mount(const char __user *dirname, struct procfs_args __user *args, int flags) { struct procfs_args tmp; if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; return do_mount("", dirname, "proc", flags, NULL); } SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path, int, flag, void __user *, data) { int retval; switch (typenr) { case 1: retval = osf_ufs_mount(path, data, flag); break; case 6: retval = osf_cdfs_mount(path, data, flag); break; case 9: retval = osf_procfs_mount(path, data, flag); break; default: retval = -EINVAL; printk_ratelimited("osf_mount(%ld, %x)\n", typenr, flag); } return retval; } SYSCALL_DEFINE1(osf_utsname, char __user *, name) { char tmp[5 * 32]; down_read(&uts_sem); memcpy(tmp + 0 * 32, utsname()->sysname, 32); memcpy(tmp + 1 * 32, utsname()->nodename, 32); memcpy(tmp + 2 * 32, utsname()->release, 32); memcpy(tmp + 3 * 32, utsname()->version, 32); memcpy(tmp + 4 * 32, utsname()->machine, 32); up_read(&uts_sem); if (copy_to_user(name, tmp, sizeof(tmp))) return -EFAULT; return 0; } SYSCALL_DEFINE0(getpagesize) { return PAGE_SIZE; } SYSCALL_DEFINE0(getdtablesize) { return sysctl_nr_open; } /* * For compatibility with OSF/1 only. Use utsname(2) instead. */ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen) { int len; char *kname; char tmp[32]; if (namelen < 0 || namelen > 32) namelen = 32; down_read(&uts_sem); kname = utsname()->domainname; len = strnlen(kname, namelen); len = min(len + 1, namelen); memcpy(tmp, kname, len); up_read(&uts_sem); if (copy_to_user(name, tmp, len)) return -EFAULT; return 0; } /* * The following stuff should move into a header file should it ever * be labeled "officially supported." Right now, there is just enough * support to avoid applications (such as tar) printing error * messages. The attributes are not really implemented. 
*/ /* * Values for Property list entry flag */ #define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry by default */ #define PLE_FLAG_MASK 0x1 /* Valid flag values */ #define PLE_FLAG_ALL -1 /* All flag value */ struct proplistname_args { unsigned int pl_mask; unsigned int pl_numnames; char **pl_names; }; union pl_args { struct setargs { char __user *path; long follow; long nbytes; char __user *buf; } set; struct fsetargs { long fd; long nbytes; char __user *buf; } fset; struct getargs { char __user *path; long follow; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } get; struct fgetargs { long fd; struct proplistname_args __user *name_args; long nbytes; char __user *buf; int __user *min_buf_size; } fget; struct delargs { char __user *path; long follow; struct proplistname_args __user *name_args; } del; struct fdelargs { long fd; struct proplistname_args __user *name_args; } fdel; }; enum pl_code { PL_SET = 1, PL_FSET = 2, PL_GET = 3, PL_FGET = 4, PL_DEL = 5, PL_FDEL = 6 }; SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code, union pl_args __user *, args) { long error; int __user *min_buf_size_ptr; switch (code) { case PL_SET: if (get_user(error, &args->set.nbytes)) error = -EFAULT; break; case PL_FSET: if (get_user(error, &args->fset.nbytes)) error = -EFAULT; break; case PL_GET: error = get_user(min_buf_size_ptr, &args->get.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_FGET: error = get_user(min_buf_size_ptr, &args->fget.min_buf_size); if (error) break; error = put_user(0, min_buf_size_ptr); break; case PL_DEL: case PL_FDEL: error = 0; break; default: error = -EOPNOTSUPP; break; } return error; } SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss, struct sigstack __user *, uoss) { unsigned long usp = rdusp(); unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; unsigned long oss_os = on_sig_stack(usp); int error; if (uss) { void __user *ss_sp; error = -EFAULT; if (get_user(ss_sp, &uss->ss_sp)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ error = -EPERM; if (current->sas_ss_sp && on_sig_stack(usp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. 
*/ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } if (uoss) { error = -EFAULT; if (put_user(oss_sp, &uoss->ss_sp) || put_user(oss_os, &uoss->ss_onstack)) goto out; } error = 0; out: return error; } SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count) { const char *sysinfo_table[] = { utsname()->sysname, utsname()->nodename, utsname()->release, utsname()->version, utsname()->machine, "alpha", /* instruction set architecture */ "dummy", /* hardware serial number */ "dummy", /* hardware manufacturer */ "dummy", /* secure RPC domain */ }; unsigned long offset; const char *res; long len; char tmp[__NEW_UTS_LEN + 1]; offset = command-1; if (offset >= ARRAY_SIZE(sysinfo_table)) { /* Digital UNIX has a few unpublished interfaces here */ printk("sysinfo(%d)", command); return -EINVAL; } down_read(&uts_sem); res = sysinfo_table[offset]; len = strlen(res)+1; if ((unsigned long)len > (unsigned long)count) len = count; memcpy(tmp, res, len); up_read(&uts_sem); if (copy_to_user(buf, tmp, len)) return -EFAULT; return 0; } SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { unsigned long w; struct percpu_struct *cpu; switch (op) { case GSI_IEEE_FP_CONTROL: /* Return current software fp control & status bits. */ /* Note that DU doesn't verify available space here. */ w = current_thread_info()->ieee_state & IEEE_SW_MASK; w = swcr_update_status(w, rdfpcr()); if (put_user(w, (unsigned long __user *) buffer)) return -EFAULT; return 0; case GSI_IEEE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; w = current_thread_info()->status & UAC_BITMASK; if (put_user(w, (unsigned int __user *)buffer)) return -EFAULT; return 1; case GSI_PROC_TYPE: if (nbytes < sizeof(unsigned long)) return -EINVAL; cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); w = cpu->type; if (put_user(w, (unsigned long __user*)buffer)) return -EFAULT; return 1; case GSI_GET_HWRPB: if (nbytes > sizeof(*hwrpb)) return -EINVAL; if (copy_to_user(buffer, hwrpb, nbytes) != 0) return -EFAULT; return 1; default: break; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, unsigned long, nbytes, int __user *, start, void __user *, arg) { switch (op) { case SSI_IEEE_FP_CONTROL: { unsigned long swcr, fpcr; unsigned int *state; /* * Alpha Architecture Handbook 4.7.7.3: * To be fully IEEE compiant, we must track the current IEEE * exception state in software, because spurious bits can be * set in the trap shadow of a software-complete insn. */ if (get_user(swcr, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; /* Update software trap enable bits. */ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); /* Update the real fpcr. */ fpcr = rdfpcr() & FPCR_DYN_MASK; fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); return 0; } case SSI_IEEE_RAISE_EXCEPTION: { unsigned long exc, swcr, fpcr, fex; unsigned int *state; if (get_user(exc, (unsigned long __user *)buffer)) return -EFAULT; state = &current_thread_info()->ieee_state; exc &= IEEE_STATUS_MASK; /* Update software trap enable bits. */ swcr = (*state & IEEE_SW_MASK) | exc; *state |= exc; /* Update the real fpcr. 
*/ fpcr = rdfpcr(); fpcr |= ieee_swcr_to_fpcr(swcr); wrfpcr(fpcr); /* If any exceptions set by this call, and are unmasked, send a signal. Old exceptions are not signaled. */ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; if (fex) { int si_code = FPE_FLTUNK; if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES; if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND; if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF; if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV; if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV; send_sig_fault_trapno(SIGFPE, si_code, (void __user *)NULL, /* FIXME */ 0, current); } return 0; } case SSI_IEEE_STATE_AT_SIGNAL: case SSI_IEEE_IGNORE_STATE_AT_SIGNAL: /* * Not sure anybody will ever use this weird stuff. These * ops can be used (under OSF/1) to set the fpcr that should * be used when a signal handler starts executing. */ break; case SSI_NVPAIRS: { unsigned __user *p = buffer; unsigned i; for (i = 0, p = buffer; i < nbytes; ++i, p += 2) { unsigned v, w, status; if (get_user(v, p) || get_user(w, p + 1)) return -EFAULT; switch (v) { case SSIN_UACPROC: w &= UAC_BITMASK; status = current_thread_info()->status; status = (status & ~UAC_BITMASK) | w; current_thread_info()->status = status; break; default: return -EOPNOTSUPP; } } return 0; } case SSI_LMF: return 0; default: break; } return -EOPNOTSUPP; } /* Translations due to the fact that OSF's time_t is an int. Which affects all sorts of things, like timeval and itimerval. */ extern struct timezone sys_tz; struct timeval32 { int tv_sec, tv_usec; }; struct itimerval32 { struct timeval32 it_interval; struct timeval32 it_value; }; static inline long get_tv32(struct timespec64 *o, struct timeval32 __user *i) { struct timeval32 tv; if (copy_from_user(&tv, i, sizeof(struct timeval32))) return -EFAULT; o->tv_sec = tv.tv_sec; o->tv_nsec = tv.tv_usec * NSEC_PER_USEC; return 0; } static inline long put_tv32(struct timeval32 __user *o, struct timespec64 *i) { return copy_to_user(o, &(struct timeval32){ .tv_sec = i->tv_sec, .tv_usec = i->tv_nsec / NSEC_PER_USEC}, sizeof(struct timeval32)); } static inline long put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i) { return copy_to_user(o, &(struct timeval32){ .tv_sec = i->tv_sec, .tv_usec = i->tv_usec}, sizeof(struct timeval32)); } static inline void jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value) { value->tv_usec = (jiffies % HZ) * (1000000L / HZ); value->tv_sec = jiffies / HZ; } SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timespec64 kts; ktime_get_real_ts64(&kts); if (put_tv32(tv, &kts)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec64 kts; struct timezone ktz; if (tv) { if (get_tv32(&kts, tv)) return -EFAULT; } if (tz) { if (copy_from_user(&ktz, tz, sizeof(*tz))) return -EFAULT; } return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL); } SYSCALL_DEFINE2(osf_utimes, const char __user *, filename, struct timeval32 __user *, tvs) { struct timespec64 tv[2]; if (tvs) { if (get_tv32(&tv[0], &tvs[0]) || get_tv32(&tv[1], &tvs[1])) return -EFAULT; if (tv[0].tv_nsec < 0 || tv[0].tv_nsec >= 1000000000 || tv[1].tv_nsec < 0 || tv[1].tv_nsec >= 1000000000) return -EINVAL; } return do_utimes(AT_FDCWD, filename, tvs ? 
tv : NULL, 0); } SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval32 __user *, tvp) { struct timespec64 end_time, *to = NULL; if (tvp) { struct timespec64 tv; to = &end_time; if (get_tv32(&tv, tvp)) return -EFAULT; if (tv.tv_sec < 0 || tv.tv_nsec < 0) return -EINVAL; if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec)) return -EINVAL; } /* OSF does not copy back the remaining time. */ return core_sys_select(n, inp, outp, exp, to); } struct rusage32 { struct timeval32 ru_utime; /* user time used */ struct timeval32 ru_stime; /* system time used */ long ru_maxrss; /* maximum resident set size */ long ru_ixrss; /* integral shared memory size */ long ru_idrss; /* integral unshared data size */ long ru_isrss; /* integral unshared stack size */ long ru_minflt; /* page reclaims */ long ru_majflt; /* page faults */ long ru_nswap; /* swaps */ long ru_inblock; /* block input operations */ long ru_oublock; /* block output operations */ long ru_msgsnd; /* messages sent */ long ru_msgrcv; /* messages received */ long ru_nsignals; /* signals received */ long ru_nvcsw; /* voluntary context switches */ long ru_nivcsw; /* involuntary " */ }; SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru) { struct rusage32 r; u64 utime, stime; unsigned long utime_jiffies, stime_jiffies; if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) return -EINVAL; memset(&r, 0, sizeof(r)); switch (who) { case RUSAGE_SELF: task_cputime(current, &utime, &stime); utime_jiffies = nsecs_to_jiffies(utime); stime_jiffies = nsecs_to_jiffies(stime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->min_flt; r.ru_majflt = current->maj_flt; break; case RUSAGE_CHILDREN: utime_jiffies = nsecs_to_jiffies(current->signal->cutime); stime_jiffies = nsecs_to_jiffies(current->signal->cstime); jiffies_to_timeval32(utime_jiffies, &r.ru_utime); jiffies_to_timeval32(stime_jiffies, &r.ru_stime); r.ru_minflt = current->signal->cmin_flt; r.ru_majflt = current->signal->cmaj_flt; break; } return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; } SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, struct rusage32 __user *, ur) { struct rusage r; long err = kernel_wait4(pid, ustatus, options, &r); if (err <= 0) return err; if (!ur) return err; if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime)) return -EFAULT; if (put_tv_to_tv32(&ur->ru_stime, &r.ru_stime)) return -EFAULT; if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss, sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss))) return -EFAULT; return err; } /* * I don't know what the parameters are: the first one * seems to be a timeval pointer, and I suspect the second * one is the time remaining.. Ho humm.. No documentation. 
*/ SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep, struct timeval32 __user *, remain) { struct timespec64 tmp; unsigned long ticks; if (get_tv32(&tmp, sleep)) goto fault; ticks = timespec64_to_jiffies(&tmp); ticks = schedule_timeout_interruptible(ticks); if (remain) { jiffies_to_timespec64(ticks, &tmp); if (put_tv32(remain, &tmp)) goto fault; } return 0; fault: return -EFAULT; } struct timex32 { unsigned int modes; /* mode selector */ long offset; /* time offset (usec) */ long freq; /* frequency offset (scaled ppm) */ long maxerror; /* maximum error (usec) */ long esterror; /* estimated error (usec) */ int status; /* clock command/status */ long constant; /* pll time constant */ long precision; /* clock precision (usec) (read only) */ long tolerance; /* clock frequency tolerance (ppm) * (read only) */ struct timeval32 time; /* (read only) */ long tick; /* (modified) usecs between clock ticks */ long ppsfreq; /* pps frequency (scaled ppm) (ro) */ long jitter; /* pps jitter (us) (ro) */ int shift; /* interval duration (s) (shift) (ro) */ long stabil; /* pps stability (scaled ppm) (ro) */ long jitcnt; /* jitter limit exceeded (ro) */ long calcnt; /* calibration intervals (ro) */ long errcnt; /* calibration errors (ro) */ long stbcnt; /* stability limit exceeded (ro) */ int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; int :32; }; SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { struct __kernel_timex txc; int ret; /* copy relevant bits of struct timex. */ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) return -EFAULT; ret = do_adjtimex(&txc); if (ret < 0) return ret; /* copy back to timex32 */ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || (put_user(txc.time.tv_sec, &txc_p->time.tv_sec)) || (put_user(txc.time.tv_usec, &txc_p->time.tv_usec))) return -EFAULT; return ret; } /* Get an address range which is currently unmapped. Similar to the generic version except that we know how to honor ADDR_LIMIT_32BIT. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, unsigned long limit) { struct vm_unmapped_area_info info; info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = limit; info.align_mask = 0; info.align_offset = 0; return vm_unmapped_area(&info); } unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long limit; /* "32 bit" actually means 31 bit, since pointers sign extend. */ if (current->personality & ADDR_LIMIT_32BIT) limit = 0x80000000; else limit = TASK_SIZE; if (len > limit) return -ENOMEM; if (flags & MAP_FIXED) return addr; /* First, see if the given suggestion fits. The OSF/1 loader (/sbin/loader) relies on us returning an address larger than the requested if one exists, which is a terribly broken way to program. That said, I can see the use in being able to suggest not merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; } /* Next, try allocating at TASK_UNMAPPED_BASE. 
*/ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), len, limit); if (addr != (unsigned long) -ENOMEM) return addr; /* Finally, try allocating in low memory. */ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); return addr; } SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) { int prio = sys_getpriority(which, who); if (prio >= 0) { /* Return value is the unbiased priority, i.e. 20 - prio. This does result in negative return values, so signal no error */ force_successful_syscall_return(); prio = 20 - prio; } return prio; } SYSCALL_DEFINE0(getxuid) { current_pt_regs()->r20 = sys_geteuid(); return sys_getuid(); } SYSCALL_DEFINE0(getxgid) { current_pt_regs()->r20 = sys_getegid(); return sys_getgid(); } SYSCALL_DEFINE0(getxpid) { current_pt_regs()->r20 = sys_getppid(); return sys_getpid(); } SYSCALL_DEFINE0(alpha_pipe) { int fd[2]; int res = do_pipe_flags(fd, 0); if (!res) { /* The return values are in $0 and $20. */ current_pt_regs()->r20 = fd[1]; res = fd[0]; } return res; } SYSCALL_DEFINE1(sethae, unsigned long, val) { current_pt_regs()->hae = val; return 0; }
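Several of the syscalls above funnel time values through struct timeval32, because OSF/1's time_t is a 32-bit int. A self-contained userspace sketch of the narrowing that put_tv32() performs (written here only to illustrate the arithmetic; the helper name is made up):

#include <stdio.h>
#include <stdint.h>

struct timeval32 { int tv_sec, tv_usec; };

/* Mirror of put_tv32(): 64-bit seconds/nanoseconds narrowed to the
 * 32-bit seconds/microseconds layout OSF/1 binaries expect. */
static struct timeval32 to_tv32(int64_t sec, long nsec)
{
	struct timeval32 tv;

	tv.tv_sec = (int)sec;            /* truncates; overflows in 2038 */
	tv.tv_usec = (int)(nsec / 1000); /* NSEC_PER_USEC == 1000 */
	return tv;
}

int main(void)
{
	struct timeval32 tv = to_tv32(1700000000, 123456789);

	printf("%d.%06d\n", tv.tv_sec, tv.tv_usec);
	return 0;
}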
linux-master
arch/alpha/kernel/osf_sys.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_sio.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code for all boards that route the PCI interrupts through the SIO * PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB), * Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/screen_info.h> #include <asm/compiler.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_apecs.h> #include <asm/core_lca.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #include "pc873xx.h" #if defined(ALPHA_RESTORE_SRM_SETUP) /* Save LCA configuration data as the console had it set up. */ struct { unsigned int orig_route_tab; /* for SAVE/RESTORE */ } saved_config __attribute((common)); #endif static void __init sio_init_irq(void) { if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; init_i8259a_irqs(); common_init_isa_dma(); } static inline void __init alphabook1_init_arch(void) { /* The AlphaBook1 has LCD video fixed at 800x600, 37 rows and 100 cols. */ screen_info.orig_y = 37; screen_info.orig_video_cols = 100; screen_info.orig_video_lines = 37; lca_init_arch(); } /* * sio_route_tab selects irq routing in PCI/ISA bridge so that: * PIRQ0 -> irq 15 * PIRQ1 -> irq 9 * PIRQ2 -> irq 10 * PIRQ3 -> irq 11 * * This probably ought to be configurable via MILO. For * example, sound boards seem to like using IRQ 9. * * This is NOT how we should do it. PIRQ0-X should have * their own IRQs, the way intel uses the IO-APIC IRQs. */ static void __init sio_pci_route(void) { unsigned int orig_route_tab; /* First, ALWAYS read and print the original setting. */ pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, &orig_route_tab); printk("%s: PIRQ original 0x%x new 0x%x\n", __func__, orig_route_tab, alpha_mv.sys.sio.route_tab); #if defined(ALPHA_RESTORE_SRM_SETUP) saved_config.orig_route_tab = orig_route_tab; #endif /* Now override with desired setting. */ pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, alpha_mv.sys.sio.route_tab); } static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev) { if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) && (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA)) return false; return true; } static unsigned int __init sio_collect_irq_levels(void) { unsigned int level_bits = 0; struct pci_dev *dev = NULL; /* Iterate through the devices, collecting IRQ levels. */ for_each_pci_dev(dev) { if (!sio_pci_dev_irq_needs_level(dev)) continue; if (dev->irq) level_bits |= (1 << dev->irq); } return level_bits; } static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset) { unsigned int old_level_bits; /* * Now, make all PCI interrupts level sensitive. Notice: * these registers must be accessed byte-wise. inw()/outw() * don't work. * * Make sure to turn off any level bits set for IRQs 9,10,11,15, * so that the only bits getting set are for devices actually found. * Note that we do preserve the remainder of the bits, which we hope * will be set correctly by ARC/SRM. 
* * Note: we at least preserve any level-set bits on AlphaBook1 */ old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8); if (reset) old_level_bits &= 0x71ff; level_bits |= old_level_bits; outb((level_bits >> 0) & 0xff, 0x4d0); outb((level_bits >> 8) & 0xff, 0x4d1); } static inline void sio_fixup_irq_levels(unsigned int level_bits) { __sio_fixup_irq_levels(level_bits, true); } static inline int noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* * The Noname board has 5 PCI slots with each of the 4 * interrupt pins routed to different pins on the PCI/ISA * bridge (PIRQ0-PIRQ3). The table below is based on * information available at: * * http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt * * I have no information on the Avanti interrupt routing, but * the routing seems to be identical to the Noname except * that the Avanti has an additional slot whose routing I'm * unsure of. * * pirq_tab[0] is a fake entry to deal with old PCI boards * that have the interrupt pin number hardwired to 0 (meaning * that they use the default INTA line, if they are interrupt * driven at all). */ static char irq_tab[][5] = { /*INT A B C D */ { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ { 2, 2, -1, -1, -1}, /* idsel 8 (Hack: slot closest ISA) */ {-1, -1, -1, -1, -1}, /* idsel 9 (unused) */ {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */ { 0, 0, 2, 1, 0}, /* idsel 11 KN25_PCI_SLOT0 */ { 1, 1, 0, 2, 1}, /* idsel 12 KN25_PCI_SLOT1 */ { 2, 2, 1, 0, 2}, /* idsel 13 KN25_PCI_SLOT2 */ { 0, 0, 0, 0, 0}, /* idsel 14 AS255 TULIP */ }; const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP, tmp; tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); irq = irq >= 0 ? tmp : -1; /* Fixup IRQ level if an actual IRQ mapping is detected */ if (sio_pci_dev_irq_needs_level(dev) && irq >= 0) __sio_fixup_irq_levels(1 << irq, false); return irq; } static inline int p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[][5] = { /*INT A B C D */ { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ { 1, 1, 2, 3, 0}, /* idsel 8 (slot A) */ { 2, 2, 3, 0, 1}, /* idsel 9 (slot B) */ {-1, -1, -1, -1, -1}, /* idsel 10 (unused) */ {-1, -1, -1, -1, -1}, /* idsel 11 (unused) */ { 3, 3, -1, -1, -1}, /* idsel 12 (CMD0646) */ }; const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5; int irq = COMMON_TABLE_LOOKUP, tmp; tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq); return irq >= 0 ? tmp : -1; } static inline void __init noname_init_pci(void) { common_init_pci(); sio_pci_route(); sio_fixup_irq_levels(sio_collect_irq_levels()); if (pc873xx_probe() == -1) { printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); } else { printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", pc873xx_get_model(), pc873xx_get_base()); /* Enabling things in the Super IO chip doesn't actually * configure and enable things, the legacy drivers still * need to do the actual configuration and enabling. * This only unblocks them. */ #if !defined(CONFIG_ALPHA_AVANTI) /* Don't bother on the Avanti family. * None of them had on-board IDE. */ pc873xx_enable_ide(); #endif pc873xx_enable_epp19(); } } static inline void __init alphabook1_init_pci(void) { struct pci_dev *dev; unsigned char orig, config; common_init_pci(); sio_pci_route(); /* * On the AlphaBook1, the PCMCIA chip (Cirrus 6729) * is sensitive to PCI bus bursts, so we must DISABLE * burst mode for the NCR 8xx SCSI... 
:-( * * Note that the NCR810 SCSI driver must preserve the * setting of the bit in order for this to work. At the * moment (2.0.29), ncr53c8xx.c does NOT do this, but * 53c7,8xx.c DOES. */ dev = NULL; while ((dev = pci_get_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) { if (dev->device == PCI_DEVICE_ID_NCR_53C810 || dev->device == PCI_DEVICE_ID_NCR_53C815 || dev->device == PCI_DEVICE_ID_NCR_53C820 || dev->device == PCI_DEVICE_ID_NCR_53C825) { unsigned long io_port; unsigned char ctest4; io_port = dev->resource[0].start; ctest4 = inb(io_port+0x21); if (!(ctest4 & 0x80)) { printk("AlphaBook1 NCR init: setting" " burst disable\n"); outb(ctest4 | 0x80, io_port+0x21); } } } /* Do not set *ANY* level triggers for AlphaBook1. */ sio_fixup_irq_levels(0); /* Make sure that register PR1 indicates 1Mb mem */ outb(0x0f, 0x3ce); orig = inb(0x3cf); /* read PR5 */ outb(0x0f, 0x3ce); outb(0x05, 0x3cf); /* unlock PR0-4 */ outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */ if ((config & 0xc0) != 0xc0) { printk("AlphaBook1 VGA init: setting 1Mb memory\n"); config |= 0xc0; outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */ } outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */ } void sio_kill_arch(int mode) { #if defined(ALPHA_RESTORE_SRM_SETUP) /* Since we cannot read the PCI DMA Window CSRs, we * cannot restore them here. * * However, we CAN read the PIRQ route register, so restore it * now... */ pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60, saved_config.orig_route_tab); #endif } /* * The System Vectors */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1) struct alpha_machine_vector alphabook1_mv __initmv = { .vector_name = "AlphaBook1", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = isa_device_interrupt, .init_arch = alphabook1_init_arch, .init_irq = sio_init_irq, .init_rtc = common_init_rtc, .init_pci = alphabook1_init_pci, .kill_arch = sio_kill_arch, .pci_map_irq = noname_map_irq, .pci_swizzle = common_swizzle, .sys = { .sio = { /* NCR810 SCSI is 14, PCMCIA controller is 15. 
*/ .route_tab = 0x0e0f0a0a, }} }; ALIAS_MV(alphabook1) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI) struct alpha_machine_vector avanti_mv __initmv = { .vector_name = "Avanti", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = isa_device_interrupt, .init_arch = apecs_init_arch, .init_irq = sio_init_irq, .init_rtc = common_init_rtc, .init_pci = noname_init_pci, .kill_arch = sio_kill_arch, .pci_map_irq = noname_map_irq, .pci_swizzle = common_swizzle, .sys = { .sio = { .route_tab = 0x0b0a050f, /* leave 14 for IDE, 9 for SND */ }} }; ALIAS_MV(avanti) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME) struct alpha_machine_vector noname_mv __initmv = { .vector_name = "Noname", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = srm_device_interrupt, .init_arch = lca_init_arch, .init_irq = sio_init_irq, .init_rtc = common_init_rtc, .init_pci = noname_init_pci, .kill_arch = sio_kill_arch, .pci_map_irq = noname_map_irq, .pci_swizzle = common_swizzle, .sys = { .sio = { /* For UDB, the only available PCI slot must not map to IRQ 9, since that's the builtin MSS sound chip. That PCI slot will map to PIRQ1 (for INTA at least), so we give it IRQ 15 instead. Unfortunately we have to do this for NONAME as well, since they are co-indicated when the platform type "Noname" is selected... :-( */ .route_tab = 0x0b0a0f0d, }} }; ALIAS_MV(noname) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K) struct alpha_machine_vector p2k_mv __initmv = { .vector_name = "Platform2000", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = srm_device_interrupt, .init_arch = lca_init_arch, .init_irq = sio_init_irq, .init_rtc = common_init_rtc, .init_pci = noname_init_pci, .kill_arch = sio_kill_arch, .pci_map_irq = p2k_map_irq, .pci_swizzle = common_swizzle, .sys = { .sio = { .route_tab = 0x0b0a090f, }} }; ALIAS_MV(p2k) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL) struct alpha_machine_vector xl_mv __initmv = { .vector_name = "XL", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = apecs_machine_check, .max_isa_dma_address = ALPHA_XL_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = XL_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = isa_device_interrupt, .init_arch = apecs_init_arch, .init_irq = sio_init_irq, .init_rtc = common_init_rtc, .init_pci = noname_init_pci, .kill_arch = sio_kill_arch, .pci_map_irq = noname_map_irq, .pci_swizzle = common_swizzle, .sys = { .sio = { .route_tab = 0x0b0a090f, }} }; ALIAS_MV(xl) #endif
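noname_map_irq() and p2k_map_irq() above turn a PIRQ index into an ISA IRQ with __kernel_extbl, the Alpha EXTBL byte-extract instruction. A portable sketch of that extraction, decoding the Platform2000 route_tab value from the machine vector below it (the helper name here is an invention for illustration):

/* Extract byte <n> of a 64-bit word, as EXTBL does on Alpha. */
static inline unsigned long extract_byte(unsigned long value, unsigned long n)
{
	return (value >> (n * 8)) & 0xff;
}

/* route_tab 0x0b0a090f decodes as:
 *   PIRQ0 -> IRQ 15, PIRQ1 -> IRQ 9, PIRQ2 -> IRQ 10, PIRQ3 -> IRQ 11,
 * matching the routing described in the sio_route_tab comment above. */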
linux-master
arch/alpha/kernel/sys_sio.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_sable.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the Sable, Sable-Gamma, and Lynx systems. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_t2.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" DEFINE_SPINLOCK(sable_lynx_irq_lock); typedef struct irq_swizzle_struct { char irq_to_mask[64]; char mask_to_irq[64]; /* Note mask bit is true for DISABLED irqs. */ unsigned long shadow_mask; void (*update_irq_hw)(unsigned long bit, unsigned long mask); void (*ack_irq_hw)(unsigned long bit); } irq_swizzle_t; static irq_swizzle_t *sable_lynx_irq_swizzle; static void sable_lynx_init_irq(int nr_of_irqs); #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) /***********************************************************************/ /* * For SABLE, which is really baroque, we manage 40 IRQ's, but the * hardware really only supports 24, not via normal ISA PIC, * but cascaded custom 8259's, etc. * 0-7 (char at 536) * 8-15 (char at 53a) * 16-23 (char at 53c) * * Summary Registers (536/53a/53c): * * Bit Meaning Kernel IRQ *------------------------------------------ * 0 PCI slot 0 34 * 1 NCR810 (builtin) 33 * 2 TULIP (builtin) 32 * 3 mouse 12 * 4 PCI slot 1 35 * 5 PCI slot 2 36 * 6 keyboard 1 * 7 floppy 6 * 8 COM2 3 * 9 parallel port 7 *10 EISA irq 3 - *11 EISA irq 4 - *12 EISA irq 5 5 *13 EISA irq 6 - *14 EISA irq 7 - *15 COM1 4 *16 EISA irq 9 9 *17 EISA irq 10 10 *18 EISA irq 11 11 *19 EISA irq 12 - *20 EISA irq 13 - *21 EISA irq 14 14 *22 NC 15 *23 IIC - */ static void sable_update_irq_hw(unsigned long bit, unsigned long mask) { int port = 0x537; if (bit >= 16) { port = 0x53d; mask >>= 16; } else if (bit >= 8) { port = 0x53b; mask >>= 8; } outb(mask, port); } static void sable_ack_irq_hw(unsigned long bit) { int port, val1, val2; if (bit >= 16) { port = 0x53c; val1 = 0xE0 | (bit - 16); val2 = 0xE0 | 4; } else if (bit >= 8) { port = 0x53a; val1 = 0xE0 | (bit - 8); val2 = 0xE0 | 3; } else { port = 0x536; val1 = 0xE0 | (bit - 0); val2 = 0xE0 | 1; } outb(val1, port); /* ack the slave */ outb(val2, 0x534); /* ack the master */ } static irq_swizzle_t sable_irq_swizzle = { { -1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */ -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */ 2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1 /* */ }, { 34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1, /* */ -1, -1, -1, -1, -1, -1, -1, -1 /* */ }, -1, sable_update_irq_hw, sable_ack_irq_hw }; static void __init sable_init_irq(void) { outb(-1, 0x537); /* slave 0 */ outb(-1, 0x53b); /* slave 1 */ outb(-1, 0x53d); /* slave 2 */ outb(0x44, 0x535); /* enable cascades in master */ sable_lynx_irq_swizzle = &sable_irq_swizzle; 
sable_lynx_init_irq(40); } /* * PCI Fixup configuration for ALPHA SABLE (2100). * * The device to slot mapping looks like: * * Slot Device * 0 TULIP * 1 SCSI * 2 PCI-EISA bridge * 3 none * 4 none * 5 none * 6 PCI on board slot 0 * 7 PCI on board slot 1 * 8 PCI on board slot 2 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ /* * NOTE: the IRQ assignments below are arbitrary, but need to be consistent * with the values in the irq swizzling tables above. */ static int sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[9][5] = { /*INT INTA INTB INTC INTD */ { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ { -1, -1, -1, -1, -1}, /* IdSel 2, SIO */ { -1, -1, -1, -1, -1}, /* IdSel 3, none */ { -1, -1, -1, -1, -1}, /* IdSel 4, none */ { -1, -1, -1, -1, -1}, /* IdSel 5, none */ { 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */ { 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */ { 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */ }; long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) /***********************************************************************/ /* LYNX hardware specifics */ /* * For LYNX, which is also baroque, we manage 64 IRQs, via a custom IC. * * Bit Meaning Kernel IRQ *------------------------------------------ * 0 * 1 * 2 * 3 mouse 12 * 4 * 5 * 6 keyboard 1 * 7 floppy 6 * 8 COM2 3 * 9 parallel port 7 *10 EISA irq 3 - *11 EISA irq 4 - *12 EISA irq 5 5 *13 EISA irq 6 - *14 EISA irq 7 - *15 COM1 4 *16 EISA irq 9 9 *17 EISA irq 10 10 *18 EISA irq 11 11 *19 EISA irq 12 - *20 *21 EISA irq 14 14 *22 EISA irq 15 15 *23 IIC - *24 VGA (builtin) - *25 *26 *27 *28 NCR810 (builtin) 28 *29 *30 *31 *32 PCI 0 slot 4 A primary bus 32 *33 PCI 0 slot 4 B primary bus 33 *34 PCI 0 slot 4 C primary bus 34 *35 PCI 0 slot 4 D primary bus *36 PCI 0 slot 5 A primary bus *37 PCI 0 slot 5 B primary bus *38 PCI 0 slot 5 C primary bus *39 PCI 0 slot 5 D primary bus *40 PCI 0 slot 6 A primary bus *41 PCI 0 slot 6 B primary bus *42 PCI 0 slot 6 C primary bus *43 PCI 0 slot 6 D primary bus *44 PCI 0 slot 7 A primary bus *45 PCI 0 slot 7 B primary bus *46 PCI 0 slot 7 C primary bus *47 PCI 0 slot 7 D primary bus *48 PCI 0 slot 0 A secondary bus *49 PCI 0 slot 0 B secondary bus *50 PCI 0 slot 0 C secondary bus *51 PCI 0 slot 0 D secondary bus *52 PCI 0 slot 1 A secondary bus *53 PCI 0 slot 1 B secondary bus *54 PCI 0 slot 1 C secondary bus *55 PCI 0 slot 1 D secondary bus *56 PCI 0 slot 2 A secondary bus *57 PCI 0 slot 2 B secondary bus *58 PCI 0 slot 2 C secondary bus *59 PCI 0 slot 2 D secondary bus *60 PCI 0 slot 3 A secondary bus *61 PCI 0 slot 3 B secondary bus *62 PCI 0 slot 3 C secondary bus *63 PCI 0 slot 3 D secondary bus */ static void lynx_update_irq_hw(unsigned long bit, unsigned long mask) { /* * Write the AIR register on the T3/T4 with the * address of the IC mask register (offset 0x40) */ *(vulp)T2_AIR = 0x40; mb(); *(vulp)T2_AIR; /* re-read to force write */ mb(); *(vulp)T2_DIR = mask; mb(); mb(); } static void lynx_ack_irq_hw(unsigned long bit) { *(vulp)T2_VAR = (u_long) bit; mb(); mb(); } static irq_swizzle_t lynx_irq_swizzle = { { /* irq_to_mask */ -1, 6, -1, 8, 15, 12, 7, 9, 
/* pseudo PIC 0-7 */ -1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */ -1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */ -1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ }, { /* mask_to_irq */ -1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */ 3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */ 9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */ -1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */ 32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */ 40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */ 48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */ 56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */ }, -1, lynx_update_irq_hw, lynx_ack_irq_hw }; static void __init lynx_init_irq(void) { sable_lynx_irq_swizzle = &lynx_irq_swizzle; sable_lynx_init_irq(64); } /* * PCI Fixup configuration for ALPHA LYNX (2100A) * * The device to slot mapping looks like: * * Slot Device * 0 none * 1 none * 2 PCI-EISA bridge * 3 PCI-PCI bridge * 4 NCR 810 (Demi-Lynx only) * 5 none * 6 PCI on board slot 4 * 7 PCI on board slot 5 * 8 PCI on board slot 6 * 9 PCI on board slot 7 * * And behind the PPB we have: * * 11 PCI on board slot 0 * 12 PCI on board slot 1 * 13 PCI on board slot 2 * 14 PCI on board slot 3 */ /* * NOTE: the IRQ assignments below are arbitrary, but need to be consistent * with the values in the irq swizzling tables above. */ static int lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[19][5] = { /*INT INTA INTB INTC INTD */ { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ { 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */ { -1, -1, -1, -1, -1}, /* IdSel 16, none */ { 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */ { 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */ { 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */ { 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */ { -1, -1, -1, -1, -1}, /* IdSel 22, none */ /* The following are actually behind the PPB. */ { -1, -1, -1, -1, -1}, /* IdSel 16 none */ { 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */ { -1, -1, -1, -1, -1}, /* IdSel 18 none */ { -1, -1, -1, -1, -1}, /* IdSel 19 none */ { -1, -1, -1, -1, -1}, /* IdSel 20 none */ { -1, -1, -1, -1, -1}, /* IdSel 21 none */ { 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */ { 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */ { 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */ { 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */ }; const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static u8 lynx_swizzle(struct pci_dev *dev, u8 *pinp) { int slot, pin = *pinp; if (dev->bus->number == 0) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge */ else if (PCI_SLOT(dev->bus->self->devfn) == 3) { slot = PCI_SLOT(dev->devfn) + 11; } else { /* Must be a card-based bridge. */ do { if (PCI_SLOT(dev->bus->self->devfn) == 3) { slot = PCI_SLOT(dev->devfn) + 11; break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. 
*/ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } #endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */ /***********************************************************************/ /* GENERIC irq routines */ static inline void sable_lynx_enable_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } static void sable_lynx_disable_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); spin_unlock(&sable_lynx_irq_lock); #if 0 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n", __func__, mask, bit, irq); #endif } static void sable_lynx_mask_and_ack_irq(struct irq_data *d) { unsigned long bit, mask; bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq]; spin_lock(&sable_lynx_irq_lock); mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; sable_lynx_irq_swizzle->update_irq_hw(bit, mask); sable_lynx_irq_swizzle->ack_irq_hw(bit); spin_unlock(&sable_lynx_irq_lock); } static struct irq_chip sable_lynx_irq_type = { .name = "SABLE/LYNX", .irq_unmask = sable_lynx_enable_irq, .irq_mask = sable_lynx_disable_irq, .irq_mask_ack = sable_lynx_mask_and_ack_irq, }; static void sable_lynx_srm_device_interrupt(unsigned long vector) { /* Note that the vector reported by the SRM PALcode corresponds to the interrupt mask bits, but we have to manage via the so-called legacy IRQs for many common devices. */ int bit, irq; bit = (vector - 0x800) >> 4; irq = sable_lynx_irq_swizzle->mask_to_irq[bit]; #if 0 printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n", __func__, vector, bit, irq); #endif handle_irq(irq); } static void __init sable_lynx_init_irq(int nr_of_irqs) { long i; for (i = 0; i < nr_of_irqs; ++i) { irq_set_chip_and_handler(i, &sable_lynx_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); } static void __init sable_lynx_init_pci(void) { common_init_pci(); } /*****************************************************************/ /* * The System Vectors * * In order that T2_HAE_ADDRESS should be a constant, we play * these games with GAMMA_BIAS. 
*/ #if defined(CONFIG_ALPHA_GENERIC) || \ (defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA)) #undef GAMMA_BIAS #define GAMMA_BIAS 0 struct alpha_machine_vector sable_mv __initmv = { .vector_name = "Sable", DO_EV4_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 40, .device_interrupt = sable_lynx_srm_device_interrupt, .init_arch = t2_init_arch, .init_irq = sable_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = sable_map_irq, .pci_swizzle = common_swizzle, .sys = { .t2 = { .gamma_bias = 0 } } }; ALIAS_MV(sable) #endif /* GENERIC || (SABLE && !GAMMA) */ #if defined(CONFIG_ALPHA_GENERIC) || \ (defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA)) #undef GAMMA_BIAS #define GAMMA_BIAS _GAMMA_BIAS struct alpha_machine_vector sable_gamma_mv __initmv = { .vector_name = "Sable-Gamma", DO_EV5_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 40, .device_interrupt = sable_lynx_srm_device_interrupt, .init_arch = t2_init_arch, .init_irq = sable_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = sable_map_irq, .pci_swizzle = common_swizzle, .sys = { .t2 = { .gamma_bias = _GAMMA_BIAS } } }; ALIAS_MV(sable_gamma) #endif /* GENERIC || (SABLE && GAMMA) */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) #undef GAMMA_BIAS #define GAMMA_BIAS _GAMMA_BIAS struct alpha_machine_vector lynx_mv __initmv = { .vector_name = "Lynx", DO_EV4_MMU, DO_DEFAULT_RTC, DO_T2_IO, .machine_check = t2_machine_check, .max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS, .min_io_address = EISA_DEFAULT_IO_BASE, .min_mem_address = T2_DEFAULT_MEM_BASE, .nr_irqs = 64, .device_interrupt = sable_lynx_srm_device_interrupt, .init_arch = t2_init_arch, .init_irq = lynx_init_irq, .init_rtc = common_init_rtc, .init_pci = sable_lynx_init_pci, .kill_arch = t2_kill_arch, .pci_map_irq = lynx_map_irq, .pci_swizzle = lynx_swizzle, .sys = { .t2 = { .gamma_bias = _GAMMA_BIAS } } }; ALIAS_MV(lynx) #endif /* GENERIC || LYNX */
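/*
 * A minimal, standalone sketch (not part of the kernel build) of the
 * vector -> mask-bit -> kernel-IRQ translation that
 * sable_lynx_srm_device_interrupt() performs above.  The eight-entry table
 * simply mirrors the first row of lynx_irq_swizzle.mask_to_irq; everything
 * named demo_* is illustrative only.
 */
#include <stdio.h>

static const int demo_mask_to_irq[8] = { -1, -1, -1, 12, -1, -1, 1, 6 };

static int demo_vector_to_irq(unsigned long vector)
{
	/* SRM reports one vector per mask bit: 0x800, 0x810, 0x820, ... */
	int bit = (int)((vector - 0x800) >> 4);

	if (bit < 0 || bit >= 8)
		return -1;
	return demo_mask_to_irq[bit];
}

int main(void)
{
	/* Vector 0x830 is mask bit 3, which the table maps to IRQ 12 (mouse). */
	printf("vector 0x830 -> irq %d\n", demo_vector_to_irq(0x830));
	return 0;
}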
linux-master
arch/alpha/kernel/sys_sable.c
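/*
 * Standalone sketch (not kernel code) of the INTx rotation that the bridge
 * walk in lynx_swizzle() above relies on.  The formula is the standard PCI
 * swizzle used by pci_swizzle_interrupt_pin(); the slot and pin values in
 * main() are made up for illustration.
 */
#include <stdio.h>

/* Rotate the interrupt pin (1=INTA..4=INTD) by the device's slot number. */
static int demo_swizzle_pin(int slot, int pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* A device in slot 2 of a secondary bus asserting INTA (pin 1)
	   appears to its parent bridge as INTC after one swizzle step. */
	int pin = demo_swizzle_pin(2, 1);

	printf("INT%c\n", 'A' + pin - 1);
	return 0;
}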
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/setup.c * * Copyright (C) 1995 Linus Torvalds */ /* 2.3.x bootmem, 1999 Andrea Arcangeli <[email protected]> */ /* * Bootup setup stuff. */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/screen_info.h> #include <linux/delay.h> #include <linux/mc146818rtc.h> #include <linux/console.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/panic_notifier.h> #include <linux/platform_device.h> #include <linux/memblock.h> #include <linux/pci.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/initrd.h> #include <linux/eisa.h> #include <linux/pfn.h> #ifdef CONFIG_MAGIC_SYSRQ #include <linux/sysrq.h> #include <linux/reboot.h> #endif #include <linux/notifier.h> #include <asm/setup.h> #include <asm/io.h> #include <linux/log2.h> #include <linux/export.h> static int alpha_panic_event(struct notifier_block *, unsigned long, void *); static struct notifier_block alpha_panic_block = { alpha_panic_event, NULL, INT_MAX /* try to do it first */ }; #include <linux/uaccess.h> #include <asm/hwrpb.h> #include <asm/dma.h> #include <asm/mmu_context.h> #include <asm/console.h> #include "proto.h" #include "pci_impl.h" struct hwrpb_struct *hwrpb; EXPORT_SYMBOL(hwrpb); unsigned long srm_hae; int alpha_l1i_cacheshape; int alpha_l1d_cacheshape; int alpha_l2_cacheshape; int alpha_l3_cacheshape; #ifdef CONFIG_VERBOSE_MCHECK /* 0=minimum, 1=verbose, 2=all */ /* These can be overridden via the command line, ie "verbose_mcheck=2") */ unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON; #endif /* Which processor we booted from. */ int boot_cpuid; /* * Using SRM callbacks for initial console output. This works from * setup_arch() time through the end of time_init(), as those places * are under our (Alpha) control. * "srmcons" specified in the boot command arguments allows us to * see kernel messages during the period of time before the true * console device is "registered" during console_init(). * As of this version (2.5.59), console_init() will call * disable_early_printk() as the last action before initializing * the console drivers. That's the last possible time srmcons can be * unregistered without interfering with console behavior. * * By default, OFF; set it with a bootcommand arg of "srmcons" or * "console=srm". The meaning of these two args is: * "srmcons" - early callback prints * "console=srm" - full callback based console, including early prints */ int srmcons_output = 0; /* Enforce a memory size limit; useful for testing. By default, none. */ unsigned long mem_size_limit = 0; /* Set AGP GART window size (0 means disabled). 
*/ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE; #ifdef CONFIG_ALPHA_GENERIC struct alpha_machine_vector alpha_mv; EXPORT_SYMBOL(alpha_mv); #endif #ifndef alpha_using_srm int alpha_using_srm; EXPORT_SYMBOL(alpha_using_srm); #endif #ifndef alpha_using_qemu int alpha_using_qemu; #endif static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long, unsigned long); static struct alpha_machine_vector *get_sysvec_byname(const char *); static void get_sysnames(unsigned long, unsigned long, unsigned long, char **, char **); static void determine_cpu_caches (unsigned int); static char __initdata command_line[COMMAND_LINE_SIZE]; /* * The format of "screen_info" is strange, and due to early * i386-setup code. This is just enough to make the console * code think we're on a VGA color display. */ struct screen_info screen_info = { .orig_x = 0, .orig_y = 25, .orig_video_cols = 80, .orig_video_lines = 25, .orig_video_isVGA = 1, .orig_video_points = 16 }; EXPORT_SYMBOL(screen_info); /* * The direct map I/O window, if any. This should be the same * for all busses, since it's used by virt_to_bus. */ unsigned long __direct_map_base; unsigned long __direct_map_size; EXPORT_SYMBOL(__direct_map_base); EXPORT_SYMBOL(__direct_map_size); /* * Declare all of the machine vectors. */ /* GCC 2.7.2 (on alpha at least) is lame. It does not support either __attribute__((weak)) or #pragma weak. Bypass it and talk directly to the assembler. */ #define WEAK(X) \ extern struct alpha_machine_vector X; \ asm(".weak "#X) WEAK(alcor_mv); WEAK(alphabook1_mv); WEAK(avanti_mv); WEAK(cabriolet_mv); WEAK(clipper_mv); WEAK(dp264_mv); WEAK(eb164_mv); WEAK(eb64p_mv); WEAK(eb66_mv); WEAK(eb66p_mv); WEAK(eiger_mv); WEAK(jensen_mv); WEAK(lx164_mv); WEAK(lynx_mv); WEAK(marvel_ev7_mv); WEAK(miata_mv); WEAK(mikasa_mv); WEAK(mikasa_primo_mv); WEAK(monet_mv); WEAK(nautilus_mv); WEAK(noname_mv); WEAK(noritake_mv); WEAK(noritake_primo_mv); WEAK(p2k_mv); WEAK(pc164_mv); WEAK(privateer_mv); WEAK(rawhide_mv); WEAK(ruffian_mv); WEAK(rx164_mv); WEAK(sable_mv); WEAK(sable_gamma_mv); WEAK(shark_mv); WEAK(sx164_mv); WEAK(takara_mv); WEAK(titan_mv); WEAK(webbrick_mv); WEAK(wildfire_mv); WEAK(xl_mv); WEAK(xlt_mv); #undef WEAK /* * I/O resources inherited from PeeCees. Except for perhaps the * turbochannel alphas, everyone has these on some sort of SuperIO chip. * * ??? If this becomes less standard, move the struct out into the * machine vector. */ static void __init reserve_std_resources(void) { static struct resource standard_io_resources[] = { { .name = "rtc", .start = -1, .end = -1 }, { .name = "dma1", .start = 0x00, .end = 0x1f }, { .name = "pic1", .start = 0x20, .end = 0x3f }, { .name = "timer", .start = 0x40, .end = 0x5f }, { .name = "keyboard", .start = 0x60, .end = 0x6f }, { .name = "dma page reg", .start = 0x80, .end = 0x8f }, { .name = "pic2", .start = 0xa0, .end = 0xbf }, { .name = "dma2", .start = 0xc0, .end = 0xdf }, }; struct resource *io = &ioport_resource; size_t i; if (hose_head) { struct pci_controller *hose; for (hose = hose_head; hose; hose = hose->next) if (hose->index == 0) { io = hose->io_space; break; } } /* Fix up for the Jensen's queer RTC placement. 
*/ standard_io_resources[0].start = RTC_PORT(0); standard_io_resources[0].end = RTC_PORT(0) + 0x0f; for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) request_resource(io, standard_io_resources+i); } #define PFN_MAX PFN_DOWN(0x80000000) #define for_each_mem_cluster(memdesc, _cluster, i) \ for ((_cluster) = (memdesc)->cluster, (i) = 0; \ (i) < (memdesc)->numclusters; (i)++, (_cluster)++) static unsigned long __init get_mem_size_limit(char *s) { unsigned long end = 0; char *from = s; end = simple_strtoul(from, &from, 0); if ( *from == 'K' || *from == 'k' ) { end = end << 10; from++; } else if ( *from == 'M' || *from == 'm' ) { end = end << 20; from++; } else if ( *from == 'G' || *from == 'g' ) { end = end << 30; from++; } return end >> PAGE_SHIFT; /* Return the PFN of the limit. */ } #ifdef CONFIG_BLK_DEV_INITRD void * __init move_initrd(unsigned long mem_limit) { void *start; unsigned long size; size = initrd_end - initrd_start; start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE); if (!start || __pa(start) + size > mem_limit) { initrd_start = initrd_end = 0; return NULL; } memmove(start, (void *)initrd_start, size); initrd_start = (unsigned long)start; initrd_end = initrd_start + size; printk("initrd moved to %p\n", start); return start; } #endif static void __init setup_memory(void *kernel_end) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; unsigned long kernel_size; unsigned long i; /* Find free clusters, and init and free the bootmem accordingly. */ memdesc = (struct memdesc_struct *) (hwrpb->mddt_offset + (unsigned long) hwrpb); for_each_mem_cluster(memdesc, cluster, i) { unsigned long end; printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n", i, cluster->usage, cluster->start_pfn, cluster->start_pfn + cluster->numpages); end = cluster->start_pfn + cluster->numpages; if (end > max_low_pfn) max_low_pfn = end; memblock_add(PFN_PHYS(cluster->start_pfn), cluster->numpages << PAGE_SHIFT); /* Bit 0 is console/PALcode reserved. Bit 1 is non-volatile memory -- we might want to mark this for later. */ if (cluster->usage & 3) memblock_reserve(PFN_PHYS(cluster->start_pfn), cluster->numpages << PAGE_SHIFT); } /* * Except for the NUMA systems (wildfire, marvel) all of the * Alpha systems we run on support 32GB of memory or less. * Since the NUMA systems introduce large holes in memory addressing, * we can get into a situation where there is not enough contiguous * memory for the memory map. * * Limit memory to the first 32GB to limit the NUMA systems to * memory on their first node (wildfire) or 2 (marvel) to avoid * not being able to produce the memory map. In order to access * all of the memory on the NUMA systems, build with discontiguous * memory support. * * If the user specified a memory limit, let that memory limit stand. */ if (!mem_size_limit) mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT; if (mem_size_limit && max_low_pfn >= mem_size_limit) { printk("setup: forcing memory size to %ldK (from %ldK).\n", mem_size_limit << (PAGE_SHIFT - 10), max_low_pfn << (PAGE_SHIFT - 10)); max_low_pfn = mem_size_limit; } /* Reserve the kernel memory. 
*/ kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS; memblock_reserve(KERNEL_START_PHYS, kernel_size); #ifdef CONFIG_BLK_DEV_INITRD initrd_start = INITRD_START; if (initrd_start) { initrd_end = initrd_start+INITRD_SIZE; printk("Initial ramdisk at: 0x%p (%lu bytes)\n", (void *) initrd_start, INITRD_SIZE); if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) { if (!move_initrd(PFN_PHYS(max_low_pfn))) printk("initrd extends beyond end of memory " "(0x%08lx > 0x%p)\ndisabling initrd\n", initrd_end, phys_to_virt(PFN_PHYS(max_low_pfn))); } else { memblock_reserve(virt_to_phys((void *)initrd_start), INITRD_SIZE); } } #endif /* CONFIG_BLK_DEV_INITRD */ } int page_is_ram(unsigned long pfn) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; unsigned long i; memdesc = (struct memdesc_struct *) (hwrpb->mddt_offset + (unsigned long) hwrpb); for_each_mem_cluster(memdesc, cluster, i) { if (pfn >= cluster->start_pfn && pfn < cluster->start_pfn + cluster->numpages) { return (cluster->usage & 3) ? 0 : 1; } } return 0; } static int __init register_cpus(void) { int i; for_each_possible_cpu(i) { struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) return -ENOMEM; register_cpu(p, i); } return 0; } arch_initcall(register_cpus); #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_reboot_handler(u8 unused) { machine_halt(); } static const struct sysrq_key_op srm_sysrq_reboot_op = { .handler = sysrq_reboot_handler, .help_msg = "reboot(b)", .action_msg = "Resetting", .enable_mask = SYSRQ_ENABLE_BOOT, }; #endif void __init setup_arch(char **cmdline_p) { extern char _end[]; struct alpha_machine_vector *vec = NULL; struct percpu_struct *cpu; char *type_name, *var_name, *p; void *kernel_end = _end; /* end of kernel */ char *args = command_line; hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr); boot_cpuid = hard_smp_processor_id(); /* * Pre-process the system type to make sure it will be valid. * * This may restore real CABRIO and EB66+ family names, ie * EB64+ and EB66. * * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series) * and AS1200 (DIGITAL Server 5000 series) have the type as * the negative of the real one. */ if ((long)hwrpb->sys_type < 0) { hwrpb->sys_type = -((long)hwrpb->sys_type); hwrpb_update_checksum(hwrpb); } /* Register a call for panic conditions. */ atomic_notifier_chain_register(&panic_notifier_list, &alpha_panic_block); #ifndef alpha_using_srm /* Assume that we've booted from SRM if we haven't booted from MILO. Detect the later by looking for "MILO" in the system serial nr. */ alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO"); #endif #ifndef alpha_using_qemu /* Similarly, look for QEMU. */ alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0; #endif /* If we are using SRM, we want to allow callbacks as early as possible, so do this NOW, and then they should work immediately thereafter. */ kernel_end = callback_init(kernel_end); /* * Locate the command line. */ /* Hack for Jensen... since we're restricted to 8 or 16 chars for boot flags depending on the boot mode, we need some shorthand. This should do for installation. */ if (strcmp(COMMAND_LINE, "INSTALL") == 0) { strscpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof(command_line)); } else { strscpy(command_line, COMMAND_LINE, sizeof(command_line)); } strcpy(boot_command_line, command_line); *cmdline_p = command_line; /* * Process command-line arguments. 
*/ while ((p = strsep(&args, " \t")) != NULL) { if (!*p) continue; if (strncmp(p, "alpha_mv=", 9) == 0) { vec = get_sysvec_byname(p+9); continue; } if (strncmp(p, "cycle=", 6) == 0) { est_cycle_freq = simple_strtol(p+6, NULL, 0); continue; } if (strncmp(p, "mem=", 4) == 0) { mem_size_limit = get_mem_size_limit(p+4); continue; } if (strncmp(p, "srmcons", 7) == 0) { srmcons_output |= 1; continue; } if (strncmp(p, "console=srm", 11) == 0) { srmcons_output |= 2; continue; } if (strncmp(p, "gartsize=", 9) == 0) { alpha_agpgart_size = get_mem_size_limit(p+9) << PAGE_SHIFT; continue; } #ifdef CONFIG_VERBOSE_MCHECK if (strncmp(p, "verbose_mcheck=", 15) == 0) { alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0); continue; } #endif } /* Replace the command line, now that we've killed it with strsep. */ strcpy(command_line, boot_command_line); /* If we want SRM console printk echoing early, do it now. */ if (alpha_using_srm && srmcons_output) { register_srm_console(); /* * If "console=srm" was specified, clear the srmcons_output * flag now so that time.c won't unregister_srm_console */ if (srmcons_output & 2) srmcons_output = 0; } #ifdef CONFIG_MAGIC_SYSRQ /* If we're using SRM, make sysrq-b halt back to the prom, not auto-reboot. */ if (alpha_using_srm) { unregister_sysrq_key('b', __sysrq_reboot_op); register_sysrq_key('b', &srm_sysrq_reboot_op); } #endif /* * Identify and reconfigure for the current system. */ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, cpu->type, &type_name, &var_name); if (*var_name == '0') var_name = ""; if (!vec) { vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation, cpu->type); } if (!vec) { panic("Unsupported system type: %s%s%s (%ld %ld)\n", type_name, (*var_name ? " variation " : ""), var_name, hwrpb->sys_type, hwrpb->sys_variation); } if (vec != &alpha_mv) { alpha_mv = *vec; } printk("Booting " #ifdef CONFIG_ALPHA_GENERIC "GENERIC " #endif "on %s%s%s using machine vector %s from %s\n", type_name, (*var_name ? " variation " : ""), var_name, alpha_mv.vector_name, (alpha_using_srm ? "SRM" : "MILO")); printk("Major Options: " #ifdef CONFIG_SMP "SMP " #endif #ifdef CONFIG_ALPHA_EV56 "EV56 " #endif #ifdef CONFIG_ALPHA_EV67 "EV67 " #endif #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS "LEGACY_START " #endif #ifdef CONFIG_VERBOSE_MCHECK "VERBOSE_MCHECK " #endif #ifdef CONFIG_DEBUG_SPINLOCK "DEBUG_SPINLOCK " #endif #ifdef CONFIG_MAGIC_SYSRQ "MAGIC_SYSRQ " #endif "\n"); printk("Command line: %s\n", command_line); /* * Sync up the HAE. * Save the SRM's current value for restoration. */ srm_hae = *alpha_mv.hae_register; __set_hae(alpha_mv.hae_cache); /* Reset enable correctable error reports. */ wrmces(0x7); /* Find our memory. */ setup_memory(kernel_end); memblock_set_bottom_up(true); sparse_init(); /* First guess at cpu cache sizes. Do this before init_arch. */ determine_cpu_caches(cpu->type); /* Initialize the machine. Usually has to do with setting up DMA windows and the like. */ if (alpha_mv.init_arch) alpha_mv.init_arch(); /* Reserve standard resources. */ reserve_std_resources(); /* * Give us a default console. TGA users will see nothing until * chr_dev_init is called, rather late in the boot sequence. */ #ifdef CONFIG_VT #if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; #endif #endif /* Default root filesystem to sda2. */ ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); #ifdef CONFIG_EISA /* FIXME: only set this when we actually have EISA in this box? 
*/ EISA_bus = 1; #endif /* * Check ASN in HWRPB for validity, report if bad. * FIXME: how was this failing? Should we trust it instead, * and copy the value into alpha_mv.max_asn? */ if (hwrpb->max_asn != MAX_ASN) { printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn); } /* * Identify the flock of penguins. */ #ifdef CONFIG_SMP setup_smp(); #endif paging_init(); } static char sys_unknown[] = "Unknown"; static char systype_names[][16] = { "0", "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen", "Pelican", "Morgan", "Sable", "Medulla", "Noname", "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind", "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1", "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake", "Cortex", "29", "Miata", "XXM", "Takara", "Yukon", "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel" }; static char unofficial_names[][8] = {"100", "Ruffian"}; static char api_names[][16] = {"200", "Nautilus"}; static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"}; static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4}; static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"}; static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2}; static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"}; static int eb64p_indices[] = {0,0,1,2}; static char eb66_names[][8] = {"EB66", "EB66+"}; static int eb66_indices[] = {0,0,1}; static char marvel_names[][16] = { "Marvel/EV7" }; static int marvel_indices[] = { 0 }; static char rawhide_names[][16] = { "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci" }; static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4}; static char titan_names[][16] = { "DEFAULT", "Privateer", "Falcon", "Granite" }; static int titan_indices[] = {0,1,2,2,3}; static char tsunami_names[][16] = { "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper", "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne", "Flying Clipper", "Shark" }; static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12}; static struct alpha_machine_vector * __init get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu) { static struct alpha_machine_vector *systype_vecs[] __initdata = { NULL, /* 0 */ NULL, /* ADU */ NULL, /* Cobra */ NULL, /* Ruby */ NULL, /* Flamingo */ NULL, /* Mannequin */ &jensen_mv, NULL, /* Pelican */ NULL, /* Morgan */ NULL, /* Sable -- see below. */ NULL, /* Medulla */ &noname_mv, NULL, /* Turbolaser */ &avanti_mv, NULL, /* Mustang */ NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */ NULL, /* Tradewind */ NULL, /* Mikasa -- see below. */ NULL, /* EB64 */ NULL, /* EB66 -- see variation. */ NULL, /* EB64+ -- see variation. */ &alphabook1_mv, &rawhide_mv, NULL, /* K2 */ &lynx_mv, /* Lynx */ &xl_mv, NULL, /* EB164 -- see variation. */ NULL, /* Noritake -- see below. */ NULL, /* Cortex */ NULL, /* 29 */ &miata_mv, NULL, /* XXM */ &takara_mv, NULL, /* Yukon */ NULL, /* Tsunami -- see variation. 
*/ &wildfire_mv, /* Wildfire */ NULL, /* CUSCO */ &eiger_mv, /* Eiger */ NULL, /* Titan */ NULL, /* Marvel */ }; static struct alpha_machine_vector *unofficial_vecs[] __initdata = { NULL, /* 100 */ &ruffian_mv, }; static struct alpha_machine_vector *api_vecs[] __initdata = { NULL, /* 200 */ &nautilus_mv, }; static struct alpha_machine_vector *alcor_vecs[] __initdata = { &alcor_mv, &xlt_mv, &xlt_mv }; static struct alpha_machine_vector *eb164_vecs[] __initdata = { &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv }; static struct alpha_machine_vector *eb64p_vecs[] __initdata = { &eb64p_mv, &cabriolet_mv, &cabriolet_mv /* AlphaPCI64 */ }; static struct alpha_machine_vector *eb66_vecs[] __initdata = { &eb66_mv, &eb66p_mv }; static struct alpha_machine_vector *marvel_vecs[] __initdata = { &marvel_ev7_mv, }; static struct alpha_machine_vector *titan_vecs[] __initdata = { &titan_mv, /* default */ &privateer_mv, /* privateer */ &titan_mv, /* falcon */ &privateer_mv, /* granite */ }; static struct alpha_machine_vector *tsunami_vecs[] __initdata = { NULL, &dp264_mv, /* dp264 */ &dp264_mv, /* warhol */ &dp264_mv, /* windjammer */ &monet_mv, /* monet */ &clipper_mv, /* clipper */ &dp264_mv, /* goldrush */ &webbrick_mv, /* webbrick */ &dp264_mv, /* catamaran */ NULL, /* brisbane? */ NULL, /* melbourne? */ NULL, /* flying clipper? */ &shark_mv, /* shark */ }; /* ??? Do we need to distinguish between Rawhides? */ struct alpha_machine_vector *vec; /* Search the system tables first... */ vec = NULL; if (type < ARRAY_SIZE(systype_vecs)) { vec = systype_vecs[type]; } else if ((type > ST_API_BIAS) && (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) { vec = api_vecs[type - ST_API_BIAS]; } else if ((type > ST_UNOFFICIAL_BIAS) && (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) { vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS]; } /* If we've not found one, try for a variation. */ if (!vec) { /* Member ID is a bit-field. */ unsigned long member = (variation >> 10) & 0x3f; cpu &= 0xffffffff; /* make it usable */ switch (type) { case ST_DEC_ALCOR: if (member < ARRAY_SIZE(alcor_indices)) vec = alcor_vecs[alcor_indices[member]]; break; case ST_DEC_EB164: if (member < ARRAY_SIZE(eb164_indices)) vec = eb164_vecs[eb164_indices[member]]; /* PC164 may show as EB164 variation with EV56 CPU, but, since no true EB164 had anything but EV5... 
*/ if (vec == &eb164_mv && cpu == EV56_CPU) vec = &pc164_mv; break; case ST_DEC_EB64P: if (member < ARRAY_SIZE(eb64p_indices)) vec = eb64p_vecs[eb64p_indices[member]]; break; case ST_DEC_EB66: if (member < ARRAY_SIZE(eb66_indices)) vec = eb66_vecs[eb66_indices[member]]; break; case ST_DEC_MARVEL: if (member < ARRAY_SIZE(marvel_indices)) vec = marvel_vecs[marvel_indices[member]]; break; case ST_DEC_TITAN: vec = titan_vecs[0]; /* default */ if (member < ARRAY_SIZE(titan_indices)) vec = titan_vecs[titan_indices[member]]; break; case ST_DEC_TSUNAMI: if (member < ARRAY_SIZE(tsunami_indices)) vec = tsunami_vecs[tsunami_indices[member]]; break; case ST_DEC_1000: if (cpu == EV5_CPU || cpu == EV56_CPU) vec = &mikasa_primo_mv; else vec = &mikasa_mv; break; case ST_DEC_NORITAKE: if (cpu == EV5_CPU || cpu == EV56_CPU) vec = &noritake_primo_mv; else vec = &noritake_mv; break; case ST_DEC_2100_A500: if (cpu == EV5_CPU || cpu == EV56_CPU) vec = &sable_gamma_mv; else vec = &sable_mv; break; } } return vec; } static struct alpha_machine_vector * __init get_sysvec_byname(const char *name) { static struct alpha_machine_vector *all_vecs[] __initdata = { &alcor_mv, &alphabook1_mv, &avanti_mv, &cabriolet_mv, &clipper_mv, &dp264_mv, &eb164_mv, &eb64p_mv, &eb66_mv, &eb66p_mv, &eiger_mv, &jensen_mv, &lx164_mv, &lynx_mv, &miata_mv, &mikasa_mv, &mikasa_primo_mv, &monet_mv, &nautilus_mv, &noname_mv, &noritake_mv, &noritake_primo_mv, &p2k_mv, &pc164_mv, &privateer_mv, &rawhide_mv, &ruffian_mv, &rx164_mv, &sable_mv, &sable_gamma_mv, &shark_mv, &sx164_mv, &takara_mv, &webbrick_mv, &wildfire_mv, &xl_mv, &xlt_mv }; size_t i; for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) { struct alpha_machine_vector *mv = all_vecs[i]; if (strcasecmp(mv->vector_name, name) == 0) return mv; } return NULL; } static void get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu, char **type_name, char **variation_name) { unsigned long member; /* If not in the tables, make it UNKNOWN, else set type name to family */ if (type < ARRAY_SIZE(systype_names)) { *type_name = systype_names[type]; } else if ((type > ST_API_BIAS) && (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) { *type_name = api_names[type - ST_API_BIAS]; } else if ((type > ST_UNOFFICIAL_BIAS) && (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) { *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS]; } else { *type_name = sys_unknown; *variation_name = sys_unknown; return; } /* Set variation to "0"; if variation is zero, done. */ *variation_name = systype_names[0]; if (variation == 0) { return; } member = (variation >> 10) & 0x3f; /* member ID is a bit-field */ cpu &= 0xffffffff; /* make it usable */ switch (type) { /* select by family */ default: /* default to variation "0" for now */ break; case ST_DEC_EB164: if (member >= ARRAY_SIZE(eb164_indices)) break; *variation_name = eb164_names[eb164_indices[member]]; /* PC164 may show as EB164 variation, but with EV56 CPU, so, since no true EB164 had anything but EV5... 
*/ if (eb164_indices[member] == 0 && cpu == EV56_CPU) *variation_name = eb164_names[1]; /* make it PC164 */ break; case ST_DEC_ALCOR: if (member < ARRAY_SIZE(alcor_indices)) *variation_name = alcor_names[alcor_indices[member]]; break; case ST_DEC_EB64P: if (member < ARRAY_SIZE(eb64p_indices)) *variation_name = eb64p_names[eb64p_indices[member]]; break; case ST_DEC_EB66: if (member < ARRAY_SIZE(eb66_indices)) *variation_name = eb66_names[eb66_indices[member]]; break; case ST_DEC_MARVEL: if (member < ARRAY_SIZE(marvel_indices)) *variation_name = marvel_names[marvel_indices[member]]; break; case ST_DEC_RAWHIDE: if (member < ARRAY_SIZE(rawhide_indices)) *variation_name = rawhide_names[rawhide_indices[member]]; break; case ST_DEC_TITAN: *variation_name = titan_names[0]; /* default */ if (member < ARRAY_SIZE(titan_indices)) *variation_name = titan_names[titan_indices[member]]; break; case ST_DEC_TSUNAMI: if (member < ARRAY_SIZE(tsunami_indices)) *variation_name = tsunami_names[tsunami_indices[member]]; break; } } /* * A change was made to the HWRPB via an ECO and the following code * tracks a part of the ECO. In HWRPB versions less than 5, the ECO * was not implemented in the console firmware. If it's revision 5 or * greater we can get the name of the platform as an ASCII string from * the HWRPB. That's what this function does. It checks the revision * level and if the string is in the HWRPB it returns the address of * the string--a pointer to the name of the platform. * * Returns: * - Pointer to a ASCII string if it's in the HWRPB * - Pointer to a blank string if the data is not in the HWRPB. */ static char * platform_string(void) { struct dsr_struct *dsr; static char unk_system_string[] = "N/A"; /* Go to the console for the string pointer. * If the rpb_vers is not 5 or greater the rpb * is old and does not have this data in it. */ if (hwrpb->revision < 5) return (unk_system_string); else { /* The Dynamic System Recognition struct * has the system platform name starting * after the character count of the string. 
*/ dsr = ((struct dsr_struct *) ((char *)hwrpb + hwrpb->dsr_offset)); return ((char *)dsr + (dsr->sysname_off + sizeof(long))); } } static int get_nr_processors(struct percpu_struct *cpubase, unsigned long num) { struct percpu_struct *cpu; unsigned long i; int count = 0; for (i = 0; i < num; i++) { cpu = (struct percpu_struct *) ((char *)cpubase + i*hwrpb->processor_size); if ((cpu->flags & 0x1cc) == 0x1cc) count++; } return count; } static void show_cache_size (struct seq_file *f, const char *which, int shape) { if (shape == -1) seq_printf (f, "%s\t\t: n/a\n", which); else if (shape == 0) seq_printf (f, "%s\t\t: unknown\n", which); else seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n", which, shape >> 10, shape & 15, 1 << ((shape >> 4) & 15)); } static int show_cpuinfo(struct seq_file *f, void *slot) { extern struct unaligned_stat { unsigned long count, va, pc; } unaligned[2]; static char cpu_names[][8] = { "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56", "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL", "EV68CX", "EV7", "EV79", "EV69" }; struct percpu_struct *cpu = slot; unsigned int cpu_index; char *cpu_name; char *systype_name; char *sysvariation_name; int nr_processors; unsigned long timer_freq; cpu_index = (unsigned) (cpu->type - 1); cpu_name = "Unknown"; if (cpu_index < ARRAY_SIZE(cpu_names)) cpu_name = cpu_names[cpu_index]; get_sysnames(hwrpb->sys_type, hwrpb->sys_variation, cpu->type, &systype_name, &sysvariation_name); nr_processors = get_nr_processors(cpu, hwrpb->nr_processors); #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 timer_freq = (100UL * hwrpb->intr_freq) / 4096; #else timer_freq = 100UL * CONFIG_HZ; #endif seq_printf(f, "cpu\t\t\t: Alpha\n" "cpu model\t\t: %s\n" "cpu variation\t\t: %ld\n" "cpu revision\t\t: %ld\n" "cpu serial number\t: %s\n" "system type\t\t: %s\n" "system variation\t: %s\n" "system revision\t\t: %ld\n" "system serial number\t: %s\n" "cycle frequency [Hz]\t: %lu %s\n" "timer frequency [Hz]\t: %lu.%02lu\n" "page size [bytes]\t: %ld\n" "phys. address bits\t: %ld\n" "max. addr. space #\t: %ld\n" "BogoMIPS\t\t: %lu.%02lu\n" "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n" "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n" "platform string\t\t: %s\n" "cpus detected\t\t: %d\n", cpu_name, cpu->variation, cpu->revision, (char*)cpu->serial_no, systype_name, sysvariation_name, hwrpb->sys_revision, (char*)hwrpb->ssn, est_cycle_freq ? : hwrpb->cycle_freq, est_cycle_freq ? "est." : "", timer_freq / 100, timer_freq % 100, hwrpb->pagesize, hwrpb->pa_bits, hwrpb->max_asn, loops_per_jiffy / (500000/HZ), (loops_per_jiffy / (5000/HZ)) % 100, unaligned[0].count, unaligned[0].pc, unaligned[0].va, unaligned[1].count, unaligned[1].pc, unaligned[1].va, platform_string(), nr_processors); #ifdef CONFIG_SMP seq_printf(f, "cpus active\t\t: %u\n" "cpu active mask\t\t: %016lx\n", num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]); #endif show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape); show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape); show_cache_size (f, "L2 cache", alpha_l2_cacheshape); show_cache_size (f, "L3 cache", alpha_l3_cacheshape); return 0; } static int __init read_mem_block(int *addr, int stride, int size) { long nloads = size / stride, cnt, tmp; __asm__ __volatile__( " rpcc %0\n" "1: ldl %3,0(%2)\n" " subq %1,1,%1\n" /* Next two XORs introduce an explicit data dependency between consecutive loads in the loop, which will give us true load latency. 
*/ " xor %3,%2,%2\n" " xor %3,%2,%2\n" " addq %2,%4,%2\n" " bne %1,1b\n" " rpcc %3\n" " subl %3,%0,%0\n" : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp) : "r" (stride), "1" (nloads), "2" (addr)); return cnt / (size / stride); } #define CSHAPE(totalsize, linesize, assoc) \ ((totalsize & ~0xff) | (linesize << 4) | assoc) /* ??? EV5 supports up to 64M, but did the systems with more than 16M of BCACHE ever exist? */ #define MAX_BCACHE_SIZE 16*1024*1024 /* Note that the offchip caches are direct mapped on all Alphas. */ static int __init external_cache_probe(int minsize, int width) { int cycles, prev_cycles = 1000000; int stride = 1 << width; long size = minsize, maxsize = MAX_BCACHE_SIZE * 2; if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT) maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT); /* Get the first block cached. */ read_mem_block(__va(0), stride, size); while (size < maxsize) { /* Get an average load latency in cycles. */ cycles = read_mem_block(__va(0), stride, size); if (cycles > prev_cycles * 2) { /* Fine, we exceed the cache. */ printk("%ldK Bcache detected; load hit latency %d " "cycles, load miss latency %d cycles\n", size >> 11, prev_cycles, cycles); return CSHAPE(size >> 1, width, 1); } /* Try to get the next block cached. */ read_mem_block(__va(size), stride, size); prev_cycles = cycles; size <<= 1; } return -1; /* No BCACHE found. */ } static void __init determine_cpu_caches (unsigned int cpu_type) { int L1I, L1D, L2, L3; switch (cpu_type) { case EV4_CPU: case EV45_CPU: { if (cpu_type == EV4_CPU) L1I = CSHAPE(8*1024, 5, 1); else L1I = CSHAPE(16*1024, 5, 1); L1D = L1I; L3 = -1; /* BIU_CTL is a write-only Abox register. PALcode has a shadow copy, and may be available from some versions of the CSERVE PALcall. If we can get it, then unsigned long biu_ctl, size; size = 128*1024 * (1 << ((biu_ctl >> 28) & 7)); L2 = CSHAPE (size, 5, 1); Unfortunately, we can't rely on that. */ L2 = external_cache_probe(128*1024, 5); break; } case LCA4_CPU: { unsigned long car, size; L1I = L1D = CSHAPE(8*1024, 5, 1); L3 = -1; car = *(vuip) phys_to_virt (0x120000078UL); size = 64*1024 * (1 << ((car >> 5) & 7)); /* No typo -- 8 byte cacheline size. Whodathunk. */ L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1); break; } case EV5_CPU: case EV56_CPU: { unsigned long sc_ctl, width; L1I = L1D = CSHAPE(8*1024, 5, 1); /* Check the line size of the Scache. */ sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL); width = sc_ctl & 0x1000 ? 6 : 5; L2 = CSHAPE (96*1024, width, 3); /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode has a shadow copy, and may be available from some versions of the CSERVE PALcall. If we can get it, then unsigned long bc_control, bc_config, size; size = 1024*1024 * (1 << ((bc_config & 7) - 1)); L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1); Unfortunately, we can't rely on that. */ L3 = external_cache_probe(1024*1024, width); break; } case PCA56_CPU: case PCA57_CPU: { if (cpu_type == PCA56_CPU) { L1I = CSHAPE(16*1024, 6, 1); L1D = CSHAPE(8*1024, 5, 1); } else { L1I = CSHAPE(32*1024, 6, 2); L1D = CSHAPE(16*1024, 5, 1); } L3 = -1; #if 0 unsigned long cbox_config, size; cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); L2 = ((cbox_config >> 31) & 1 ? 
CSHAPE (size, 6, 1) : -1); #else L2 = external_cache_probe(512*1024, 6); #endif break; } case EV6_CPU: case EV67_CPU: case EV68CB_CPU: case EV68AL_CPU: case EV68CX_CPU: case EV69_CPU: L1I = L1D = CSHAPE(64*1024, 6, 2); L2 = external_cache_probe(1024*1024, 6); L3 = -1; break; case EV7_CPU: case EV79_CPU: L1I = L1D = CSHAPE(64*1024, 6, 2); L2 = CSHAPE(7*1024*1024/4, 6, 7); L3 = -1; break; default: /* Nothing known about this cpu type. */ L1I = L1D = L2 = L3 = 0; break; } alpha_l1i_cacheshape = L1I; alpha_l1d_cacheshape = L1D; alpha_l2_cacheshape = L2; alpha_l3_cacheshape = L3; } /* * We show only CPU #0 info. */ static void * c_start(struct seq_file *f, loff_t *pos) { return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset; } static void * c_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; return NULL; } static void c_stop(struct seq_file *f, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo, }; static int alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { #if 1 /* FIXME FIXME FIXME */ /* If we are using SRM and serial console, just hard halt here. */ if (alpha_using_srm && srmcons_output) __halt(); #endif return NOTIFY_DONE; } static __init int add_pcspkr(void) { struct platform_device *pd; int ret; pd = platform_device_alloc("pcspkr", -1); if (!pd) return -ENOMEM; ret = platform_device_add(pd); if (ret) platform_device_put(pd); return ret; } device_initcall(add_pcspkr);
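/*
 * Standalone sketch (not part of the kernel build) of the CSHAPE()
 * encoding used by determine_cpu_caches() above and decoded by
 * show_cache_size().  The example value is the EV5 96K, 3-way,
 * 32-byte-line Scache; DEMO_CSHAPE mirrors the kernel macro.
 */
#include <stdio.h>

#define DEMO_CSHAPE(totalsize, linesize_log2, assoc) \
	(((totalsize) & ~0xff) | ((linesize_log2) << 4) | (assoc))

int main(void)
{
	int shape = DEMO_CSHAPE(96 * 1024, 5, 3);

	/* Same unpacking as show_cache_size(): size in KB, ways, line bytes. */
	printf("%dK, %d-way, %db line\n",
	       shape >> 10, shape & 15, 1 << ((shape >> 4) & 15));
	return 0;
}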
linux-master
arch/alpha/kernel/setup.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/err_common.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting Alpha systems */ #include <linux/init.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include "err_impl.h" #include "proto.h" /* * err_print_prefix -- error handling print routines should prefix * all prints with this */ char *err_print_prefix = KERN_NOTICE; /* * Generic */ void mchk_dump_mem(void *data, size_t length, char **annotation) { unsigned long *ldata = data; size_t i; for (i = 0; (i * sizeof(*ldata)) < length; i++) { if (annotation && !annotation[i]) annotation = NULL; printk("%s %08x: %016lx %s\n", err_print_prefix, (unsigned)(i * sizeof(*ldata)), ldata[i], annotation ? annotation[i] : ""); } } void mchk_dump_logout_frame(struct el_common *mchk_header) { printk("%s -- Frame Header --\n" " Frame Size: %d (0x%x) bytes\n" " Flags: %s%s\n" " MCHK Code: 0x%x\n" " Frame Rev: %d\n" " Proc Offset: 0x%08x\n" " Sys Offset: 0x%08x\n" " -- Processor Region --\n", err_print_prefix, mchk_header->size, mchk_header->size, mchk_header->retry ? "RETRY " : "", mchk_header->err2 ? "SECOND_ERR " : "", mchk_header->code, mchk_header->frame_rev, mchk_header->proc_offset, mchk_header->sys_offset); mchk_dump_mem((void *) ((unsigned long)mchk_header + mchk_header->proc_offset), mchk_header->sys_offset - mchk_header->proc_offset, NULL); printk("%s -- System Region --\n", err_print_prefix); mchk_dump_mem((void *) ((unsigned long)mchk_header + mchk_header->sys_offset), mchk_header->size - mchk_header->sys_offset, NULL); printk("%s -- End of Frame --\n", err_print_prefix); } /* * Console Data Log */ /* Data */ static struct el_subpacket_handler *subpacket_handler_list = NULL; static struct el_subpacket_annotation *subpacket_annotation_list = NULL; static struct el_subpacket * el_process_header_subpacket(struct el_subpacket *header) { union el_timestamp timestamp; char *name = "UNKNOWN EVENT"; int packet_count = 0; int length = 0; if (header->class != EL_CLASS__HEADER) { printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } switch(header->type) { case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME: name = "SYSTEM ERROR"; length = header->by_type.sys_err.frame_length; packet_count = header->by_type.sys_err.frame_packet_count; timestamp.as_int = 0; break; case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME: name = "SYSTEM EVENT"; length = header->by_type.sys_event.frame_length; packet_count = header->by_type.sys_event.frame_packet_count; timestamp = header->by_type.sys_event.timestamp; break; case EL_TYPE__HEADER__HALT_FRAME: name = "ERROR HALT"; length = header->by_type.err_halt.frame_length; packet_count = header->by_type.err_halt.frame_packet_count; timestamp = header->by_type.err_halt.timestamp; break; case EL_TYPE__HEADER__LOGOUT_FRAME: name = "LOGOUT FRAME"; length = header->by_type.logout_header.frame_length; packet_count = 1; timestamp.as_int = 0; break; default: /* Unknown */ printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } printk("%s*** %s:\n" " CLASS %d, TYPE %d\n", err_print_prefix, name, header->class, header->type); el_print_timestamp(&timestamp); /* * Process the subpackets */ el_process_subpackets(header, packet_count); /* return the next header */ header = (struct el_subpacket *) ((unsigned long)header + header->length + 
length); return header; } static struct el_subpacket * el_process_subpacket_reg(struct el_subpacket *header) { struct el_subpacket *next = NULL; struct el_subpacket_handler *h = subpacket_handler_list; for (; h && h->class != header->class; h = h->next); if (h) next = h->handler(header); return next; } void el_print_timestamp(union el_timestamp *timestamp) { if (timestamp->as_int) printk("%s TIMESTAMP: %d/%d/%02d %d:%02d:%0d\n", err_print_prefix, timestamp->b.month, timestamp->b.day, timestamp->b.year, timestamp->b.hour, timestamp->b.minute, timestamp->b.second); } void el_process_subpackets(struct el_subpacket *header, int packet_count) { struct el_subpacket *subpacket; int i; subpacket = (struct el_subpacket *) ((unsigned long)header + header->length); for (i = 0; subpacket && i < packet_count; i++) { printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i); subpacket = el_process_subpacket(subpacket); } } struct el_subpacket * el_process_subpacket(struct el_subpacket *header) { struct el_subpacket *next = NULL; switch(header->class) { case EL_CLASS__TERMINATION: /* Termination packet, there are no more */ break; case EL_CLASS__HEADER: next = el_process_header_subpacket(header); break; default: if (NULL == (next = el_process_subpacket_reg(header))) { printk("%s** Unexpected header CLASS %d TYPE %d" " -- aborting.\n", err_print_prefix, header->class, header->type); } break; } return next; } void el_annotate_subpacket(struct el_subpacket *header) { struct el_subpacket_annotation *a; char **annotation = NULL; for (a = subpacket_annotation_list; a; a = a->next) { if (a->class == header->class && a->type == header->type && a->revision == header->revision) { /* * We found the annotation */ annotation = a->annotation; printk("%s %s\n", err_print_prefix, a->description); break; } } mchk_dump_mem(header, header->length, annotation); } static void __init cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu) { struct el_subpacket *header = (struct el_subpacket *) (IDENT_ADDR | pcpu->console_data_log_pa); int err; printk("%s******* CONSOLE DATA LOG FOR CPU %d. 
*******\n" "*** Error(s) were logged on a previous boot\n", err_print_prefix, cpu); for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++) header = el_process_subpacket(header); /* let the console know it's ok to clear the error(s) at restart */ pcpu->console_data_log_pa = 0; printk("%s*** %d total error(s) logged\n" "**** END OF CONSOLE DATA LOG FOR CPU %d ****\n", err_print_prefix, err, cpu); } void __init cdl_check_console_data_log(void) { struct percpu_struct *pcpu; unsigned long cpu; for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) { pcpu = (struct percpu_struct *) ((unsigned long)hwrpb + hwrpb->processor_offset + cpu * hwrpb->processor_size); if (pcpu->console_data_log_pa) cdl_process_console_data_log(cpu, pcpu); } } int __init cdl_register_subpacket_annotation(struct el_subpacket_annotation *new) { struct el_subpacket_annotation *a = subpacket_annotation_list; if (a == NULL) subpacket_annotation_list = new; else { for (; a->next != NULL; a = a->next) { if ((a->class == new->class && a->type == new->type) || a == new) { printk("Attempted to re-register " "subpacket annotation\n"); return -EINVAL; } } a->next = new; } new->next = NULL; return 0; } int __init cdl_register_subpacket_handler(struct el_subpacket_handler *new) { struct el_subpacket_handler *h = subpacket_handler_list; if (h == NULL) subpacket_handler_list = new; else { for (; h->next != NULL; h = h->next) { if (h->class == new->class || h == new) { printk("Attempted to re-register " "subpacket handler\n"); return -EINVAL; } } h->next = new; } new->next = NULL; return 0; }
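/*
 * Standalone sketch (not kernel code) of the append-unless-duplicate list
 * handling used by cdl_register_subpacket_handler() above, with a
 * simplified node type; all demo_* names are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_handler {
	int class;
	struct demo_handler *next;
};

static struct demo_handler *demo_list;

static int demo_register(struct demo_handler *new)
{
	struct demo_handler *h = demo_list;

	if (h == NULL)
		demo_list = new;
	else {
		/* Walk toward the tail, refusing a class that is already
		   present; like the kernel routine, the current tail itself
		   is not rechecked before appending. */
		for (; h->next != NULL; h = h->next)
			if (h->class == new->class || h == new)
				return -1;
		h->next = new;
	}
	new->next = NULL;
	return 0;
}

int main(void)
{
	struct demo_handler a = { .class = 1 }, b = { .class = 2 }, c = { .class = 1 };

	printf("%d\n", demo_register(&a));	/* 0: first handler for class 1 */
	printf("%d\n", demo_register(&b));	/* 0: class 2 is new */
	printf("%d\n", demo_register(&c));	/* -1: class 1 already registered */
	return 0;
}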
linux-master
arch/alpha/kernel/err_common.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_takara.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the TAKARA. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #include "pc873xx.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; static inline void takara_update_irq_hw(unsigned long irq, unsigned long mask) { int regaddr; mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); outl(mask & 0xffff0000UL, regaddr); } static inline void takara_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); takara_update_irq_hw(irq, mask); } static void takara_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); takara_update_irq_hw(irq, mask); } static struct irq_chip takara_irq_type = { .name = "TAKARA", .irq_unmask = takara_enable_irq, .irq_mask = takara_disable_irq, .irq_mask_ack = takara_disable_irq, }; static void takara_device_interrupt(unsigned long vector) { unsigned intstatus; /* * The PALcode will have passed us vectors 0x800 or 0x810, * which are fairly arbitrary values and serve only to tell * us whether an interrupt has come in on IRQ0 or IRQ1. If * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's * probably ISA, but PCI interrupts can come through IRQ0 * as well if the interrupt controller isn't in accelerated * mode. * * OTOH, the accelerator thing doesn't seem to be working * overly well, so what we'll do instead is try directly * examining the Master Interrupt Register to see if it's a * PCI interrupt, and if _not_ then we'll pass it on to the * ISA handler. */ intstatus = inw(0x500) & 15; if (intstatus) { /* * This is a PCI interrupt. Check each bit and * despatch an interrupt if it's set. */ if (intstatus & 8) handle_irq(16+3); if (intstatus & 4) handle_irq(16+2); if (intstatus & 2) handle_irq(16+1); if (intstatus & 1) handle_irq(16+0); } else { isa_device_interrupt (vector); } } static void takara_srm_device_interrupt(unsigned long vector) { int irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init takara_init_irq(void) { long i; init_i8259a_irqs(); if (alpha_using_srm) { alpha_mv.device_interrupt = takara_srm_device_interrupt; } else { unsigned int ctlreg = inl(0x500); /* Return to non-accelerated mode. */ ctlreg &= ~0x8000; outl(ctlreg, 0x500); /* Enable the PCI interrupt register. */ ctlreg = 0x05107c00; outl(ctlreg, 0x500); } for (i = 16; i < 128; i += 16) takara_update_irq_hw(i, -1); for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &takara_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); } /* * The Takara has PCI devices 1, 2, and 3 configured to slots 20, * 19, and 18 respectively, in the default configuration. They can * also be jumpered to slots 8, 7, and 6 respectively, which is fun * because the SIO ISA bridge can also be slot 7. 
However, the SIO
 * doesn't explicitly generate PCI-type interrupts, so we can
 * assign it whatever the hell IRQ we like and it doesn't matter.
 */

static int
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] = {
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
		{   -1,   -1,   -1,   -1,   -1},   /* slot  9 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 10 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 11 == nothing */
		/* These are behind the bridges. */
		{   12,   12,   13,   14,   15},   /* slot 12 == nothing */
		{    8,    8,    9,   10,   11},   /* slot 13 == nothing */
		{    4,    4,    5,    6,    7},   /* slot 14 == nothing */
		{    0,    0,    1,    2,    3},   /* slot 15 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 16 == nothing */
		{64+ 0, 64+0, 64+1, 64+2, 64+3},   /* slot 17= device 4 */
		{48+ 0, 48+0, 48+1, 48+2, 48+3},   /* slot 18= device 3 */
		{32+ 0, 32+0, 32+1, 32+2, 32+3},   /* slot 19= device 2 */
		{16+ 0, 16+0, 16+1, 16+2, 16+3},   /* slot 20= device 1 */
	};
	const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP;
	if (irq >= 0 && irq < 16) {
		/* Guess that we are behind a bridge. */
		unsigned int busslot = PCI_SLOT(dev->bus->self->devfn);
		irq += irq_tab[busslot-min_idsel][0];
	}
	return irq;
}

static int __init
takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
		{   -1,   -1,   -1,   -1,   -1},   /* slot  9 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 10 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 11 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 12 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 13 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 14 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 15 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 16 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 17 == nothing */
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot 18 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot 19 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot 20 == device 1 */
	};
	const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}

static u8
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int slot = PCI_SLOT(dev->devfn);
	int pin = *pinp;
	unsigned int ctlreg = inl(0x500);
	unsigned int busslot;

	if (!dev->bus->self)
		return slot;

	busslot = PCI_SLOT(dev->bus->self->devfn);
	/* Check for built-in bridges. */
	if (dev->bus->number != 0
	    && busslot > 16
	    && ((1<<(36-busslot)) & ctlreg)) {
		if (pin == 1)
			pin += (20 - busslot);
		else {
			printk(KERN_WARNING "takara_swizzle: can only "
			       "handle cards with INTA IRQ pin.\n");
		}
	} else {
		/* Must be a card-based bridge.
*/ printk(KERN_WARNING "takara_swizzle: cannot handle " "card-bridge behind builtin bridge yet.\n"); } *pinp = pin; return slot; } static void __init takara_init_pci(void) { if (alpha_using_srm) alpha_mv.pci_map_irq = takara_map_irq_srm; cia_init_pci(); if (pc873xx_probe() == -1) { printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); } else { printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", pc873xx_get_model(), pc873xx_get_base()); pc873xx_enable_ide(); } } /* * The System Vector */ struct alpha_machine_vector takara_mv __initmv = { .vector_name = "Takara", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 128, .device_interrupt = takara_device_interrupt, .init_arch = cia_init_arch, .init_irq = takara_init_irq, .init_rtc = common_init_rtc, .init_pci = takara_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = takara_map_irq, .pci_swizzle = takara_swizzle, }; ALIAS_MV(takara)
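/*
 * Standalone sketch (not part of the kernel build) reproducing the
 * register/lane arithmetic of takara_update_irq_hw() above, so the mapping
 * from a kernel IRQ to one of the four mask registers at I/O ports
 * 0x510/0x514/0x518/0x51c can be checked by hand; no I/O is performed.
 */
#include <stdio.h>

static void demo_takara_lane(unsigned long irq, unsigned long mask)
{
	int regaddr;

	/* Same expressions as takara_update_irq_hw(), minus the outl(). */
	mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
	regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
	printf("irq %3lu -> port 0x%x, value 0x%08lx\n",
	       irq, regaddr, mask & 0xffff0000UL);
}

int main(void)
{
	/* IRQ 70 lives in the upper cached mask word as bit 6 ... */
	demo_takara_lane(70, 1UL << (70 & 63));
	/* ... and IRQ 20 in the lower word as bit 20. */
	demo_takara_lane(20, 1UL << (20 & 63));
	return 0;
}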
linux-master
arch/alpha/kernel/sys_takara.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_dp264.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996, 1999 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Modified by Christopher C. Chimelis, 2001 to * add support for the addition of Shark to the * Tsunami family. * * Code supporting the DP264 (EV6+TSUNAMI). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_tsunami.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static unsigned long cached_irq_mask; /* dp264 boards handle at max four CPUs */ static unsigned long cpu_irq_affinity[4] = { 0UL, 0UL, 0UL, 0UL }; DEFINE_SPINLOCK(dp264_irq_lock); static void tsunami_update_irq_hw(unsigned long mask) { register tsunami_cchip *cchip = TSUNAMI_cchip; unsigned long isa_enable = 1UL << 55; register int bcpu = boot_cpuid; #ifdef CONFIG_SMP volatile unsigned long *dim0, *dim1, *dim2, *dim3; unsigned long mask0, mask1, mask2, mask3, dummy; mask &= ~isa_enable; mask0 = mask & cpu_irq_affinity[0]; mask1 = mask & cpu_irq_affinity[1]; mask2 = mask & cpu_irq_affinity[2]; mask3 = mask & cpu_irq_affinity[3]; if (bcpu == 0) mask0 |= isa_enable; else if (bcpu == 1) mask1 |= isa_enable; else if (bcpu == 2) mask2 |= isa_enable; else mask3 |= isa_enable; dim0 = &cchip->dim0.csr; dim1 = &cchip->dim1.csr; dim2 = &cchip->dim2.csr; dim3 = &cchip->dim3.csr; if (!cpu_possible(0)) dim0 = &dummy; if (!cpu_possible(1)) dim1 = &dummy; if (!cpu_possible(2)) dim2 = &dummy; if (!cpu_possible(3)) dim3 = &dummy; *dim0 = mask0; *dim1 = mask1; *dim2 = mask2; *dim3 = mask3; mb(); *dim0; *dim1; *dim2; *dim3; #else volatile unsigned long *dimB; if (bcpu == 0) dimB = &cchip->dim0.csr; else if (bcpu == 1) dimB = &cchip->dim1.csr; else if (bcpu == 2) dimB = &cchip->dim2.csr; else dimB = &cchip->dim3.csr; *dimB = mask | isa_enable; mb(); *dimB; #endif } static void dp264_enable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); cached_irq_mask |= 1UL << d->irq; tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void dp264_disable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); cached_irq_mask &= ~(1UL << d->irq); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void clipper_enable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); cached_irq_mask |= 1UL << (d->irq - 16); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void clipper_disable_irq(struct irq_data *d) { spin_lock(&dp264_irq_lock); cached_irq_mask &= ~(1UL << (d->irq - 16)); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); } static void cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) { int cpu; for (cpu = 0; cpu < 4; cpu++) { unsigned long aff = cpu_irq_affinity[cpu]; if (cpumask_test_cpu(cpu, &affinity)) aff |= 1UL << irq; else aff &= ~(1UL << irq); cpu_irq_affinity[cpu] = aff; } } static int dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { spin_lock(&dp264_irq_lock); cpu_set_irq_affinity(d->irq, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); return 0; } static int 
clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { spin_lock(&dp264_irq_lock); cpu_set_irq_affinity(d->irq - 16, *affinity); tsunami_update_irq_hw(cached_irq_mask); spin_unlock(&dp264_irq_lock); return 0; } static struct irq_chip dp264_irq_type = { .name = "DP264", .irq_unmask = dp264_enable_irq, .irq_mask = dp264_disable_irq, .irq_mask_ack = dp264_disable_irq, .irq_set_affinity = dp264_set_affinity, }; static struct irq_chip clipper_irq_type = { .name = "CLIPPER", .irq_unmask = clipper_enable_irq, .irq_mask = clipper_disable_irq, .irq_mask_ack = clipper_disable_irq, .irq_set_affinity = clipper_set_affinity, }; static void dp264_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary register of TSUNAMI */ pld = TSUNAMI_cchip->dir0.csr; /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 55) isa_device_interrupt(vector); else handle_irq(16 + i); } } static void dp264_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * The SRM console reports PCI interrupts with a vector calculated by: * * 0x900 + (0x10 * DRIR-bit) * * So bit 16 shows up as IRQ 32, etc. * * On DP264/BRICK/MONET, we adjust it down by 16 because at least * that many of the low order bits of the DRIR are not used, and * so we don't count them. */ if (irq >= 32) irq -= 16; handle_irq(irq); } static void clipper_srm_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * The SRM console reports PCI interrupts with a vector calculated by: * * 0x900 + (0x10 * DRIR-bit) * * So bit 16 shows up as IRQ 32, etc. * * CLIPPER uses bits 8-47 for PCI interrupts, so we do not need * to scale down the vector reported, we just use it. * * Eg IRQ 24 is DRIR bit 8, etc, etc */ handle_irq(irq); } static void __init init_tsunami_irqs(struct irq_chip * ops, int imin, int imax) { long i; for (i = imin; i <= imax; ++i) { irq_set_chip_and_handler(i, ops, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } static void __init dp264_init_irq(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = dp264_srm_device_interrupt; tsunami_update_irq_hw(0); init_i8259a_irqs(); init_tsunami_irqs(&dp264_irq_type, 16, 47); } static void __init clipper_init_irq(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = clipper_srm_device_interrupt; tsunami_update_irq_hw(0); init_i8259a_irqs(); init_tsunami_irqs(&clipper_irq_type, 24, 63); } /* * PCI Fixup configuration. 
* * Summary @ TSUNAMI_CSR_DIM0: * Bit Meaning * 0-17 Unused *18 Interrupt SCSI B (Adaptec 7895 builtin) *19 Interrupt SCSI A (Adaptec 7895 builtin) *20 Interrupt Line D from slot 2 PCI0 *21 Interrupt Line C from slot 2 PCI0 *22 Interrupt Line B from slot 2 PCI0 *23 Interrupt Line A from slot 2 PCI0 *24 Interrupt Line D from slot 1 PCI0 *25 Interrupt Line C from slot 1 PCI0 *26 Interrupt Line B from slot 1 PCI0 *27 Interrupt Line A from slot 1 PCI0 *28 Interrupt Line D from slot 0 PCI0 *29 Interrupt Line C from slot 0 PCI0 *30 Interrupt Line B from slot 0 PCI0 *31 Interrupt Line A from slot 0 PCI0 * *32 Interrupt Line D from slot 3 PCI1 *33 Interrupt Line C from slot 3 PCI1 *34 Interrupt Line B from slot 3 PCI1 *35 Interrupt Line A from slot 3 PCI1 *36 Interrupt Line D from slot 2 PCI1 *37 Interrupt Line C from slot 2 PCI1 *38 Interrupt Line B from slot 2 PCI1 *39 Interrupt Line A from slot 2 PCI1 *40 Interrupt Line D from slot 1 PCI1 *41 Interrupt Line C from slot 1 PCI1 *42 Interrupt Line B from slot 1 PCI1 *43 Interrupt Line A from slot 1 PCI1 *44 Interrupt Line D from slot 0 PCI1 *45 Interrupt Line C from slot 0 PCI1 *46 Interrupt Line B from slot 0 PCI1 *47 Interrupt Line A from slot 0 PCI1 *48-52 Unused *53 PCI0 NMI (from Cypress) *54 PCI0 SMI INT (from Cypress) *55 PCI0 ISA Interrupt (from Cypress) *56-60 Unused *61 PCI1 Bus Error *62 PCI0 Bus Error *63 Reserved * * IdSel * 5 Cypress Bridge I/O * 6 SCSI Adaptec builtin * 7 64 bit PCI option slot 0 (all busses) * 8 64 bit PCI option slot 1 (all busses) * 9 64 bit PCI option slot 2 (all busses) * 10 64 bit PCI option slot 3 (not bus 0) */ static int isa_irq_fixup(const struct pci_dev *dev, int irq) { u8 irq8; if (irq > 0) return irq; /* This interrupt is routed via ISA bridge, so we'll just have to trust whatever value the console might have assigned. 
*/ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq8); return irq8 & 0xf; } static int dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[6][5] = { /*INT INTA INTB INTC INTD */ { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ { 16+15, 16+15, 16+14, 16+13, 16+12}, /* IdSel 7 slot 0 */ { 16+11, 16+11, 16+10, 16+ 9, 16+ 8}, /* IdSel 8 slot 1 */ { 16+ 7, 16+ 7, 16+ 6, 16+ 5, 16+ 4}, /* IdSel 9 slot 2 */ { 16+ 3, 16+ 3, 16+ 2, 16+ 1, 16+ 0} /* IdSel 10 slot 3 */ }; const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5; struct pci_controller *hose = dev->sysdata; int irq = COMMON_TABLE_LOOKUP; if (irq > 0) irq += 16 * hose->index; return isa_irq_fixup(dev, irq); } static int monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[13][5] = { /*INT INTA INTB INTC INTD */ { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ { -1, -1, -1, -1, -1}, /* IdSel 5 unused */ { 47, 47, 47, 47, 47}, /* IdSel 6 SCSI PCI1 */ { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ { -1, -1, -1, -1, -1}, /* IdSel 8 P2P PCI1 */ #if 1 { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/ { 24, 24, 25, 26, 27}, /* IdSel 15 slot 5 PCI2*/ #else { -1, -1, -1, -1, -1}, /* IdSel 9 unused */ { -1, -1, -1, -1, -1}, /* IdSel 10 unused */ #endif { 40, 40, 41, 42, 43}, /* IdSel 11 slot 1 PCI0*/ { 36, 36, 37, 38, 39}, /* IdSel 12 slot 2 PCI0*/ { 32, 32, 33, 34, 35}, /* IdSel 13 slot 3 PCI0*/ { 28, 28, 29, 30, 31}, /* IdSel 14 slot 4 PCI2*/ { 24, 24, 25, 26, 27} /* IdSel 15 slot 5 PCI2*/ }; const long min_idsel = 3, max_idsel = 15, irqs_per_slot = 5; return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); } static u8 monet_swizzle(struct pci_dev *dev, u8 *pinp) { struct pci_controller *hose = dev->sysdata; int slot, pin = *pinp; if (!dev->bus->parent) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge on hose 1. */ else if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) { slot = PCI_SLOT(dev->devfn); } else { /* Must be a card-based bridge. */ do { /* Check for built-in bridge on hose 1. */ if (hose->index == 1 && PCI_SLOT(dev->bus->self->devfn) == 8) { slot = PCI_SLOT(dev->devfn); break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. 
*/ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } static int webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[13][5] = { /*INT INTA INTB INTC INTD */ { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ { 29, 29, 29, 29, 29}, /* IdSel 9 21143 #1 */ { -1, -1, -1, -1, -1}, /* IdSel 10 unused */ { 30, 30, 30, 30, 30}, /* IdSel 11 21143 #2 */ { -1, -1, -1, -1, -1}, /* IdSel 12 unused */ { -1, -1, -1, -1, -1}, /* IdSel 13 unused */ { 35, 35, 34, 33, 32}, /* IdSel 14 slot 0 */ { 39, 39, 38, 37, 36}, /* IdSel 15 slot 1 */ { 43, 43, 42, 41, 40}, /* IdSel 16 slot 2 */ { 47, 47, 46, 45, 44}, /* IdSel 17 slot 3 */ }; const long min_idsel = 7, max_idsel = 17, irqs_per_slot = 5; return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); } static int clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[7][5] = { /*INT INTA INTB INTC INTD */ { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ { 16+16, 16+16, 16+17, 16+18, 16+19}, /* IdSel 3 slot 3 */ { 16+20, 16+20, 16+21, 16+22, 16+23}, /* IdSel 4 slot 4 */ { 16+24, 16+24, 16+25, 16+26, 16+27}, /* IdSel 5 slot 5 */ { 16+28, 16+28, 16+29, 16+30, 16+31}, /* IdSel 6 slot 6 */ { -1, -1, -1, -1, -1} /* IdSel 7 ISA Bridge */ }; const long min_idsel = 1, max_idsel = 7, irqs_per_slot = 5; struct pci_controller *hose = dev->sysdata; int irq = COMMON_TABLE_LOOKUP; if (irq > 0) irq += 16 * hose->index; return isa_irq_fixup(dev, irq); } static void __init dp264_init_pci(void) { common_init_pci(); SMC669_Init(0); locate_and_init_vga(NULL); } static void __init monet_init_pci(void) { common_init_pci(); SMC669_Init(1); es1888_init(); locate_and_init_vga(NULL); } static void __init clipper_init_pci(void) { common_init_pci(); locate_and_init_vga(NULL); } static void __init webbrick_init_arch(void) { tsunami_init_arch(); /* Tsunami caches 4 PTEs at a time; DS10 has only 1 hose. 
*/ hose_head->sg_isa->align_entry = 4; hose_head->sg_pci->align_entry = 4; } /* * The System Vectors */ struct alpha_machine_vector dp264_mv __initmv = { .vector_name = "DP264", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 64, .device_interrupt = dp264_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = dp264_init_irq, .init_rtc = common_init_rtc, .init_pci = dp264_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = dp264_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(dp264) struct alpha_machine_vector monet_mv __initmv = { .vector_name = "Monet", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 64, .device_interrupt = dp264_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = dp264_init_irq, .init_rtc = common_init_rtc, .init_pci = monet_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = monet_map_irq, .pci_swizzle = monet_swizzle, }; struct alpha_machine_vector webbrick_mv __initmv = { .vector_name = "Webbrick", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 64, .device_interrupt = dp264_device_interrupt, .init_arch = webbrick_init_arch, .init_irq = dp264_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = webbrick_map_irq, .pci_swizzle = common_swizzle, }; struct alpha_machine_vector clipper_mv __initmv = { .vector_name = "Clipper", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 64, .device_interrupt = dp264_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = clipper_init_irq, .init_rtc = common_init_rtc, .init_pci = clipper_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = clipper_map_irq, .pci_swizzle = common_swizzle, }; /* Sharks strongly resemble Clipper, at least as far * as interrupt routing, etc, so we're using the * same functions as Clipper does */ struct alpha_machine_vector shark_mv __initmv = { .vector_name = "Shark", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 64, .device_interrupt = dp264_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = clipper_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = clipper_map_irq, .pci_swizzle = common_swizzle, }; /* No alpha_mv alias for webbrick/monet/clipper, since we compile them in unconditionally with DP264; setup_arch knows how to cope. */
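/*
 * Illustrative sketch, not part of the upstream file: the SRM console
 * delivers PCI interrupts as vectors of the form 0x900 + 0x10 * DRIR-bit,
 * which dp264_srm_device_interrupt() and clipper_srm_device_interrupt()
 * decode above.  The stand-alone helper below only restates that
 * arithmetic; its name and the adjust_dp264 flag are illustrative, not
 * kernel API.
 */
static inline int example_srm_vector_to_irq(unsigned long vector,
					    int adjust_dp264)
{
	int irq = (vector - 0x800) >> 4;

	/* DP264/BRICK/MONET: the low 16 DRIR bits are unused, so shift down.
	   e.g. DRIR bit 16 -> vector 0xa00 -> irq 32 -> 16 on DP264. */
	if (adjust_dp264 && irq >= 32)
		irq -= 16;
	return irq;
}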
linux-master
arch/alpha/kernel/sys_dp264.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/console.c * * Architecture-specific specific support for VGA device on * non-0 I/O hose */ #include <linux/pci.h> #include <linux/init.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/vt.h> #include <asm/vga.h> #include <asm/machvec.h> #include "pci_impl.h" #ifdef CONFIG_VGA_HOSE struct pci_controller *pci_vga_hose; static struct resource alpha_vga = { .name = "alpha-vga+", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; static struct pci_controller * __init default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2) { if (h2->index < h1->index) return h2; return h1; } void __init locate_and_init_vga(void *(*sel_func)(void *, void *)) { struct pci_controller *hose = NULL; struct pci_dev *dev = NULL; /* Default the select function */ if (!sel_func) sel_func = (void *)default_vga_hose_select; /* Find the console VGA device */ for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) { if (!hose) hose = dev->sysdata; else hose = sel_func(hose, dev->sysdata); } /* Did we already initialize the correct one? Is there one? */ if (!hose || (conswitchp == &vga_con && pci_vga_hose == hose)) return; /* Create a new VGA ioport resource WRT the hose it is on. */ alpha_vga.start += hose->io_space->start; alpha_vga.end += hose->io_space->start; request_resource(hose->io_space, &alpha_vga); /* Set the VGA hose and init the new console. */ pci_vga_hose = hose; console_lock(); do_take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1); console_unlock(); } void __init find_console_vga_hose(void) { u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset); if (pu64[7] == 3) { /* TERM_TYPE == graphics */ struct pci_controller *hose; int h = (pu64[30] >> 24) & 0xff; /* console hose # */ /* * Our hose numbering DOES match the console's, so find * the right one... */ for (hose = hose_head; hose; hose = hose->next) { if (hose->index == h) break; } if (hose) { printk("Console graphics on hose %d\n", h); pci_vga_hose = hose; } } } #endif
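/*
 * Illustrative sketch, not part of the upstream file: find_console_vga_hose()
 * above reads the HWRPB console terminal block (CTB) as an array of 64-bit
 * words, where word 7 holds TERM_TYPE (3 == graphics) and bits 31:24 of
 * word 30 hold the console hose number.  The helper below only restates that
 * field extraction; its name is illustrative.
 */
static inline int example_ctb_console_hose(const unsigned long *ctb)
{
	if (ctb[7] != 3)	/* not a graphics console */
		return -1;
	return (ctb[30] >> 24) & 0xff;
}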
linux-master
arch/alpha/kernel/console.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/srmcons.c * * Callback based driver for SRM Console console device. * (TTY driver and console driver) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <asm/console.h> #include <linux/uaccess.h> static DEFINE_SPINLOCK(srmcons_callback_lock); static int srm_is_registered_console = 0; /* * The TTY driver */ #define MAX_SRM_CONSOLE_DEVICES 1 /* only support 1 console device */ struct srmcons_private { struct tty_port port; struct timer_list timer; } srmcons_singleton; typedef union _srmcons_result { struct { unsigned long c :61; unsigned long status :3; } bits; long as_long; } srmcons_result; /* called with callback_lock held */ static int srmcons_do_receive_chars(struct tty_port *port) { srmcons_result result; int count = 0, loops = 0; do { result.as_long = callback_getc(0); if (result.bits.status < 2) { tty_insert_flip_char(port, (char)result.bits.c, 0); count++; } } while((result.bits.status & 1) && (++loops < 10)); if (count) tty_flip_buffer_push(port); return count; } static void srmcons_receive_chars(struct timer_list *t) { struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer); struct tty_port *port = &srmconsp->port; unsigned long flags; int incr = 10; local_irq_save(flags); if (spin_trylock(&srmcons_callback_lock)) { if (!srmcons_do_receive_chars(port)) incr = 100; spin_unlock(&srmcons_callback_lock); } spin_lock(&port->lock); if (port->tty) mod_timer(&srmconsp->timer, jiffies + incr); spin_unlock(&port->lock); local_irq_restore(flags); } /* called with callback_lock held */ static int srmcons_do_write(struct tty_port *port, const char *buf, int count) { static char str_cr[1] = "\r"; long c, remaining = count; srmcons_result result; char *cur; int need_cr; for (cur = (char *)buf; remaining > 0; ) { need_cr = 0; /* * Break it up into reasonable size chunks to allow a chance * for input to get in */ for (c = 0; c < min_t(long, 128L, remaining) && !need_cr; c++) if (cur[c] == '\n') need_cr = 1; while (c > 0) { result.as_long = callback_puts(0, cur, c); c -= result.bits.c; remaining -= result.bits.c; cur += result.bits.c; /* * Check for pending input iff a tty port was provided */ if (port) srmcons_do_receive_chars(port); } while (need_cr) { result.as_long = callback_puts(0, str_cr, 1); if (result.bits.c > 0) need_cr = 0; } } return count; } static ssize_t srmcons_write(struct tty_struct *tty, const u8 *buf, size_t count) { unsigned long flags; spin_lock_irqsave(&srmcons_callback_lock, flags); srmcons_do_write(tty->port, (const char *) buf, count); spin_unlock_irqrestore(&srmcons_callback_lock, flags); return count; } static unsigned int srmcons_write_room(struct tty_struct *tty) { return 512; } static int srmcons_open(struct tty_struct *tty, struct file *filp) { struct srmcons_private *srmconsp = &srmcons_singleton; struct tty_port *port = &srmconsp->port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (!port->tty) { tty->driver_data = srmconsp; tty->port = port; port->tty = tty; /* XXX proper refcounting */ mod_timer(&srmconsp->timer, jiffies + 10); } spin_unlock_irqrestore(&port->lock, flags); return 0; } static void srmcons_close(struct tty_struct *tty, struct file *filp) { struct srmcons_private *srmconsp = tty->driver_data; struct tty_port *port = 
&srmconsp->port; unsigned long flags; spin_lock_irqsave(&port->lock, flags); if (tty->count == 1) { port->tty = NULL; del_timer(&srmconsp->timer); } spin_unlock_irqrestore(&port->lock, flags); } static struct tty_driver *srmcons_driver; static const struct tty_operations srmcons_ops = { .open = srmcons_open, .close = srmcons_close, .write = srmcons_write, .write_room = srmcons_write_room, }; static int __init srmcons_init(void) { timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0); if (srm_is_registered_console) { struct tty_driver *driver; int err; driver = tty_alloc_driver(MAX_SRM_CONSOLE_DEVICES, 0); if (IS_ERR(driver)) return PTR_ERR(driver); tty_port_init(&srmcons_singleton.port); driver->driver_name = "srm"; driver->name = "srm"; driver->major = 0; /* dynamic */ driver->minor_start = 0; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_SYSCONS; driver->init_termios = tty_std_termios; tty_set_operations(driver, &srmcons_ops); tty_port_link_device(&srmcons_singleton.port, driver, 0); err = tty_register_driver(driver); if (err) { tty_driver_kref_put(driver); tty_port_destroy(&srmcons_singleton.port); return err; } srmcons_driver = driver; } return -ENODEV; } device_initcall(srmcons_init); /* * The console driver */ static void srm_console_write(struct console *co, const char *s, unsigned count) { unsigned long flags; spin_lock_irqsave(&srmcons_callback_lock, flags); srmcons_do_write(NULL, s, count); spin_unlock_irqrestore(&srmcons_callback_lock, flags); } static struct tty_driver * srm_console_device(struct console *co, int *index) { *index = co->index; return srmcons_driver; } static int srm_console_setup(struct console *co, char *options) { return 0; } static struct console srmcons = { .name = "srm", .write = srm_console_write, .device = srm_console_device, .setup = srm_console_setup, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; void __init register_srm_console(void) { if (!srm_is_registered_console) { callback_open_console(); register_console(&srmcons); srm_is_registered_console = 1; } } void __init unregister_srm_console(void) { if (srm_is_registered_console) { callback_close_console(); unregister_console(&srmcons); srm_is_registered_console = 0; } }
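/*
 * Illustrative sketch, not part of the upstream file: srmcons_result packs
 * the raw long returned by the SRM callbacks into a 61-bit payload (the
 * character from callback_getc(), or the byte count from callback_puts())
 * plus a 3-bit status in the top bits.  Assuming the little-endian bitfield
 * layout used on Alpha, the same decode can be written with shifts and
 * masks as below; the helper names are illustrative only.
 */
static inline unsigned long example_srm_result_payload(long raw)
{
	return (unsigned long)raw & ((1UL << 61) - 1);
}

static inline unsigned long example_srm_result_status(long raw)
{
	return (unsigned long)raw >> 61;	/* < 2 means a char was read */
}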
linux-master
arch/alpha/kernel/srmcons.c
#include <linux/init.h> #include <linux/types.h> #include <linux/audit.h> #include <asm/unistd.h> static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U }; static unsigned read_class[] = { #include <asm-generic/audit_read.h> ~0U }; static unsigned write_class[] = { #include <asm-generic/audit_write.h> ~0U }; static unsigned chattr_class[] = { #include <asm-generic/audit_change_attr.h> ~0U }; static unsigned signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; int audit_classify_arch(int arch) { return 0; } int audit_classify_syscall(int abi, unsigned syscall) { switch(syscall) { case __NR_open: return AUDITSC_OPEN; case __NR_openat: return AUDITSC_OPENAT; case __NR_execve: return AUDITSC_EXECVE; case __NR_openat2: return AUDITSC_OPENAT2; default: return AUDITSC_NATIVE; } } static int __init audit_classes_init(void) { audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } __initcall(audit_classes_init);
linux-master
arch/alpha/kernel/audit.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/time.c * * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds * * This file contains the clocksource time handling. * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * 1997-01-09 Adrian Sun * use interval timer if CONFIG_RTC=y * 1997-10-29 John Bowman ([email protected]) * fixed tick loss calculation in timer_interrupt * (round system clock to nearest tick instead of truncating) * fixed algorithm in time_init for getting time from CMOS clock * 1999-04-16 Thorsten Kranzkowski ([email protected]) * fixed algorithm in do_gettimeofday() for calculating the precise time * from processor cycle counter (now taking lost_ticks into account) * 2003-06-03 R. Scott Bailey <[email protected]> * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM */ #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/bcd.h> #include <linux/profile.h> #include <linux/irq_work.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/hwrpb.h> #include <linux/mc146818rtc.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include "proto.h" #include "irq_impl.h" DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); unsigned long est_cycle_freq; #ifdef CONFIG_IRQ_WORK DEFINE_PER_CPU(u8, irq_work_pending); #define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) #define test_irq_work_pending() __this_cpu_read(irq_work_pending) #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) void arch_irq_work_raise(void) { set_irq_work_pending_flag(); } #else /* CONFIG_IRQ_WORK */ #define test_irq_work_pending() 0 #define clear_irq_work_pending() #endif /* CONFIG_IRQ_WORK */ static inline __u32 rpcc(void) { return __builtin_alpha_rpcc(); } /* * The RTC as a clock_event_device primitive. */ static DEFINE_PER_CPU(struct clock_event_device, cpu_ce); irqreturn_t rtc_timer_interrupt(int irq, void *dev) { int cpu = smp_processor_id(); struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); /* Don't run the hook for UNUSED or SHUTDOWN. */ if (likely(clockevent_state_periodic(ce))) ce->event_handler(ce); if (test_irq_work_pending()) { clear_irq_work_pending(); irq_work_run(); } return IRQ_HANDLED; } static int rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) { /* This hook is for oneshot mode, which we don't support. */ return -EINVAL; } static void __init init_rtc_clockevent(void) { int cpu = smp_processor_id(); struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); *ce = (struct clock_event_device){ .name = "rtc", .features = CLOCK_EVT_FEAT_PERIODIC, .rating = 100, .cpumask = cpumask_of(cpu), .set_next_event = rtc_ce_set_next_event, }; clockevents_config_and_register(ce, CONFIG_HZ, 0, 0); } /* * The QEMU clock as a clocksource primitive. */ static u64 qemu_cs_read(struct clocksource *cs) { return qemu_get_vmtime(); } static struct clocksource qemu_cs = { .name = "qemu", .rating = 400, .read = qemu_cs_read, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, .max_idle_ns = LONG_MAX }; /* * The QEMU alarm as a clock_event_device primitive. 
*/ static int qemu_ce_shutdown(struct clock_event_device *ce) { /* The mode member of CE is updated for us in generic code. Just make sure that the event is disabled. */ qemu_set_alarm_abs(0); return 0; } static int qemu_ce_set_next_event(unsigned long evt, struct clock_event_device *ce) { qemu_set_alarm_rel(evt); return 0; } static irqreturn_t qemu_timer_interrupt(int irq, void *dev) { int cpu = smp_processor_id(); struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); ce->event_handler(ce); return IRQ_HANDLED; } static void __init init_qemu_clockevent(void) { int cpu = smp_processor_id(); struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); *ce = (struct clock_event_device){ .name = "qemu", .features = CLOCK_EVT_FEAT_ONESHOT, .rating = 400, .cpumask = cpumask_of(cpu), .set_state_shutdown = qemu_ce_shutdown, .set_state_oneshot = qemu_ce_shutdown, .tick_resume = qemu_ce_shutdown, .set_next_event = qemu_ce_set_next_event, }; clockevents_config_and_register(ce, NSEC_PER_SEC, 1000, LONG_MAX); } void __init common_init_rtc(void) { unsigned char x, sel = 0; /* Reset periodic interrupt frequency. */ #if CONFIG_HZ == 1024 || CONFIG_HZ == 1200 x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f; /* Test includes known working values on various platforms where 0x26 is wrong; we refuse to change those. */ if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) { sel = RTC_REF_CLCK_32KHZ + 6; } #elif CONFIG_HZ == 256 || CONFIG_HZ == 128 || CONFIG_HZ == 64 || CONFIG_HZ == 32 sel = RTC_REF_CLCK_32KHZ + __builtin_ffs(32768 / CONFIG_HZ); #else # error "Unknown HZ from arch/alpha/Kconfig" #endif if (sel) { printk(KERN_INFO "Setting RTC_FREQ to %d Hz (%x)\n", CONFIG_HZ, sel); CMOS_WRITE(sel, RTC_FREQ_SELECT); } /* Turn on periodic interrupts. */ x = CMOS_READ(RTC_CONTROL); if (!(x & RTC_PIE)) { printk("Turning on RTC interrupts.\n"); x |= RTC_PIE; x &= ~(RTC_AIE | RTC_UIE); CMOS_WRITE(x, RTC_CONTROL); } (void) CMOS_READ(RTC_INTR_FLAGS); outb(0x36, 0x43); /* pit counter 0: system timer */ outb(0x00, 0x40); outb(0x00, 0x40); outb(0xb6, 0x43); /* pit counter 2: speaker */ outb(0x31, 0x42); outb(0x13, 0x42); init_rtc_irq(NULL); } #ifndef CONFIG_ALPHA_WTINT /* * The RPCC as a clocksource primitive. * * While we have free-running timecounters running on all CPUs, and we make * a half-hearted attempt in init_rtc_rpcc_info to sync the timecounter * with the wall clock, that initialization isn't kept up-to-date across * different time counters in SMP mode. Therefore we can only use this * method when there's only one CPU enabled. * * When using the WTINT PALcall, the RPCC may shift to a lower frequency, * or stop altogether, while waiting for the interrupt. Therefore we cannot * use this method when WTINT is in use. */ static u64 read_rpcc(struct clocksource *cs) { return rpcc(); } static struct clocksource clocksource_rpcc = { .name = "rpcc", .rating = 300, .read = read_rpcc, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS }; #endif /* ALPHA_WTINT */ /* Validate a computed cycle counter result against the known bounds for the given processor core. There's too much brokenness in the way of timing hardware for any one method to work everywhere. :-( Return 0 if the result cannot be trusted, otherwise return the argument. 
*/ static unsigned long __init validate_cc_value(unsigned long cc) { static struct bounds { unsigned int min, max; } cpu_hz[] __initdata = { [EV3_CPU] = { 50000000, 200000000 }, /* guess */ [EV4_CPU] = { 100000000, 300000000 }, [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ [EV45_CPU] = { 200000000, 300000000 }, [EV5_CPU] = { 250000000, 433000000 }, [EV56_CPU] = { 333000000, 667000000 }, [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ [EV6_CPU] = { 466000000, 600000000 }, [EV67_CPU] = { 600000000, 750000000 }, [EV68AL_CPU] = { 750000000, 940000000 }, [EV68CB_CPU] = { 1000000000, 1333333333 }, /* None of the following are shipping as of 2001-11-01. */ [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ }; /* Allow for some drift in the crystal. 10MHz is more than enough. */ const unsigned int deviation = 10000000; struct percpu_struct *cpu; unsigned int index; cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); index = cpu->type & 0xffffffff; /* If index out of bounds, no way to validate. */ if (index >= ARRAY_SIZE(cpu_hz)) return cc; /* If index contains no data, no way to validate. */ if (cpu_hz[index].max == 0) return cc; if (cc < cpu_hz[index].min - deviation || cc > cpu_hz[index].max + deviation) return 0; return cc; } /* * Calibrate CPU clock using legacy 8254 timer/counter. Stolen from * arch/i386/time.c. */ #define CALIBRATE_LATCH 0xffff #define TIMEOUT_COUNT 0x100000 static unsigned long __init calibrate_cc_with_pit(void) { int cc, count = 0; /* Set the Gate high, disable speaker */ outb((inb(0x61) & ~0x02) | 0x01, 0x61); /* * Now let's take care of CTC channel 2 * * Set the Gate high, program CTC channel 2 for mode 0, * (interrupt on terminal count mode), binary count, * load 5 * LATCH count, (LSB and MSB) to begin countdown. */ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ cc = rpcc(); do { count++; } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT); cc = rpcc() - cc; /* Error: ECTCNEVERSET or ECPUTOOFAST. */ if (count <= 1 || count == TIMEOUT_COUNT) return 0; return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1); } /* The Linux interpretation of the CMOS clock register contents: When the Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the second which has precisely just started. Let's hope other operating systems interpret the RTC the same way. */ static unsigned long __init rpcc_after_update_in_progress(void) { do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)); do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP); return rpcc(); } void __init time_init(void) { unsigned int cc1, cc2; unsigned long cycle_freq, tolerance; long diff; if (alpha_using_qemu) { clocksource_register_hz(&qemu_cs, NSEC_PER_SEC); init_qemu_clockevent(); init_rtc_irq(qemu_timer_interrupt); return; } /* Calibrate CPU clock -- attempt #1. */ if (!est_cycle_freq) est_cycle_freq = validate_cc_value(calibrate_cc_with_pit()); cc1 = rpcc(); /* Calibrate CPU clock -- attempt #2. 
*/ if (!est_cycle_freq) { cc1 = rpcc_after_update_in_progress(); cc2 = rpcc_after_update_in_progress(); est_cycle_freq = validate_cc_value(cc2 - cc1); cc1 = cc2; } cycle_freq = hwrpb->cycle_freq; if (est_cycle_freq) { /* If the given value is within 250 PPM of what we calculated, accept it. Otherwise, use what we found. */ tolerance = cycle_freq / 4000; diff = cycle_freq - est_cycle_freq; if (diff < 0) diff = -diff; if ((unsigned long)diff > tolerance) { cycle_freq = est_cycle_freq; printk("HWRPB cycle frequency bogus. " "Estimated %lu Hz\n", cycle_freq); } else { est_cycle_freq = 0; } } else if (! validate_cc_value (cycle_freq)) { printk("HWRPB cycle frequency bogus, " "and unable to estimate a proper value!\n"); } /* See above for restrictions on using clocksource_rpcc. */ #ifndef CONFIG_ALPHA_WTINT if (hwrpb->nr_processors == 1) clocksource_register_hz(&clocksource_rpcc, cycle_freq); #endif /* Startup the timer source. */ alpha_mv.init_rtc(); init_rtc_clockevent(); } /* Initialize the clock_event_device for secondary cpus. */ #ifdef CONFIG_SMP void __init init_clockevent(void) { if (alpha_using_qemu) init_qemu_clockevent(); else init_rtc_clockevent(); } #endif
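/*
 * Illustrative sketch, not part of the upstream file: time_init() above
 * accepts the HWRPB cycle frequency only if it agrees with the measured
 * value to within 250 PPM, i.e. within cycle_freq / 4000.  The helper below
 * restates that check; its name is illustrative.  For example, a 500 MHz
 * HWRPB value tolerates a deviation of up to 125 kHz.
 */
static inline int example_within_250_ppm(unsigned long hwrpb_freq,
					 unsigned long measured_freq)
{
	unsigned long tolerance = hwrpb_freq / 4000;	/* 250 PPM */
	long diff = hwrpb_freq - measured_freq;

	if (diff < 0)
		diff = -diff;
	return (unsigned long)diff <= tolerance;
}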
linux-master
arch/alpha/kernel/time.c
// SPDX-License-Identifier: GPL-2.0 /* * Hardware performance events for the Alpha. * * We implement HW counts on the EV67 and subsequent CPUs only. * * (C) 2010 Michael J. Cree * * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and * ARM code, which are copyright by their respective authors. */ #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/kernel.h> #include <linux/kdebug.h> #include <linux/mutex.h> #include <linux/init.h> #include <asm/hwrpb.h> #include <linux/atomic.h> #include <asm/irq.h> #include <asm/irq_regs.h> #include <asm/pal.h> #include <asm/wrperfmon.h> #include <asm/hw_irq.h> /* The maximum number of PMCs on any Alpha CPU whatsoever. */ #define MAX_HWEVENTS 3 #define PMC_NO_INDEX -1 /* For tracking PMCs and the hw events they monitor on each CPU. */ struct cpu_hw_events { int enabled; /* Number of events scheduled; also number entries valid in arrays below. */ int n_events; /* Number events added since last hw_perf_disable(). */ int n_added; /* Events currently scheduled. */ struct perf_event *event[MAX_HWEVENTS]; /* Event type of each scheduled event. */ unsigned long evtype[MAX_HWEVENTS]; /* Current index of each scheduled event; if not yet determined * contains PMC_NO_INDEX. */ int current_idx[MAX_HWEVENTS]; /* The active PMCs' config for easy use with wrperfmon(). */ unsigned long config; /* The active counters' indices for easy use with wrperfmon(). */ unsigned long idx_mask; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); /* * A structure to hold the description of the PMCs available on a particular * type of Alpha CPU. */ struct alpha_pmu_t { /* Mapping of the perf system hw event types to indigenous event types */ const int *event_map; /* The number of entries in the event_map */ int max_events; /* The number of PMCs on this Alpha */ int num_pmcs; /* * All PMC counters reside in the IBOX register PCTR. This is the * LSB of the counter. */ int pmc_count_shift[MAX_HWEVENTS]; /* * The mask that isolates the PMC bits when the LSB of the counter * is shifted to bit 0. */ unsigned long pmc_count_mask[MAX_HWEVENTS]; /* The maximum period the PMC can count. */ unsigned long pmc_max_period[MAX_HWEVENTS]; /* * The maximum value that may be written to the counter due to * hardware restrictions is pmc_max_period - pmc_left. */ long pmc_left[3]; /* Subroutine for allocation of PMCs. Enforces constraints. */ int (*check_constraints)(struct perf_event **, unsigned long *, int); /* Subroutine for checking validity of a raw event for this PMU. */ int (*raw_event_valid)(u64 config); }; /* * The Alpha CPU PMU description currently in operation. This is set during * the boot process to the specific CPU of the machine. */ static const struct alpha_pmu_t *alpha_pmu; #define HW_OP_UNSUPPORTED -1 /* * The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs * follow. Since they are identical we refer to them collectively as the * EV67 henceforth. */ /* * EV67 PMC event types * * There is no one-to-one mapping of the possible hw event types to the * actual codes that are used to program the PMCs hence we introduce our * own hw event type identifiers. 
*/ enum ev67_pmc_event_type { EV67_CYCLES = 1, EV67_INSTRUCTIONS, EV67_BCACHEMISS, EV67_MBOXREPLAY, EV67_LAST_ET }; #define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES) /* Mapping of the hw event types to the perf tool interface */ static const int ev67_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS, [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, [PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS, }; struct ev67_mapping_t { int config; int idx; }; /* * The mapping used for one event only - these must be in same order as enum * ev67_pmc_event_type definition. */ static const struct ev67_mapping_t ev67_mapping[] = { {EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */ {EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */ {EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */ {EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */ }; /* * Check that a group of events can be simultaneously scheduled on to the * EV67 PMU. Also allocate counter indices and config. */ static int ev67_check_constraints(struct perf_event **event, unsigned long *evtype, int n_ev) { int idx0; unsigned long config; idx0 = ev67_mapping[evtype[0]-1].idx; config = ev67_mapping[evtype[0]-1].config; if (n_ev == 1) goto success; BUG_ON(n_ev != 2); if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) { /* MBOX replay traps must be on PMC 1 */ idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0; /* Only cycles can accompany MBOX replay traps */ if (evtype[idx0] == EV67_CYCLES) { config = EV67_PCTR_CYCLES_MBOX; goto success; } } if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) { /* Bcache misses must be on PMC 1 */ idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0; /* Only instructions can accompany Bcache misses */ if (evtype[idx0] == EV67_INSTRUCTIONS) { config = EV67_PCTR_INSTR_BCACHEMISS; goto success; } } if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) { /* Instructions must be on PMC 0 */ idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1; /* By this point only cycles can accompany instructions */ if (evtype[idx0^1] == EV67_CYCLES) { config = EV67_PCTR_INSTR_CYCLES; goto success; } } /* Otherwise, darn it, there is a conflict. */ return -1; success: event[0]->hw.idx = idx0; event[0]->hw.config_base = config; if (n_ev == 2) { event[1]->hw.idx = idx0 ^ 1; event[1]->hw.config_base = config; } return 0; } static int ev67_raw_event_valid(u64 config) { return config >= EV67_CYCLES && config < EV67_LAST_ET; }; static const struct alpha_pmu_t ev67_pmu = { .event_map = ev67_perfmon_event_map, .max_events = ARRAY_SIZE(ev67_perfmon_event_map), .num_pmcs = 2, .pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0}, .pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0}, .pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0}, .pmc_left = {16, 4, 0}, .check_constraints = ev67_check_constraints, .raw_event_valid = ev67_raw_event_valid, }; /* * Helper routines to ensure that we read/write only the correct PMC bits * when calling the wrperfmon PALcall. 
*/ static inline void alpha_write_pmc(int idx, unsigned long val) { val &= alpha_pmu->pmc_count_mask[idx]; val <<= alpha_pmu->pmc_count_shift[idx]; val |= (1<<idx); wrperfmon(PERFMON_CMD_WRITE, val); } static inline unsigned long alpha_read_pmc(int idx) { unsigned long val; val = wrperfmon(PERFMON_CMD_READ, 0); val >>= alpha_pmu->pmc_count_shift[idx]; val &= alpha_pmu->pmc_count_mask[idx]; return val; } /* Set a new period to sample over */ static int alpha_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { long left = local64_read(&hwc->period_left); long period = hwc->sample_period; int ret = 0; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } /* * Hardware restrictions require that the counters must not be * written with values that are too close to the maximum period. */ if (unlikely(left < alpha_pmu->pmc_left[idx])) left = alpha_pmu->pmc_left[idx]; if (left > (long)alpha_pmu->pmc_max_period[idx]) left = alpha_pmu->pmc_max_period[idx]; local64_set(&hwc->prev_count, (unsigned long)(-left)); alpha_write_pmc(idx, (unsigned long)(-left)); perf_event_update_userpage(event); return ret; } /* * Calculates the count (the 'delta') since the last time the PMC was read. * * As the PMCs' full period can easily be exceeded within the perf system * sampling period we cannot use any high order bits as a guard bit in the * PMCs to detect overflow as is done by other architectures. The code here * calculates the delta on the basis that there is no overflow when ovf is * zero. The value passed via ovf by the interrupt handler corrects for * overflow. * * This can be racey on rare occasions -- a call to this routine can occur * with an overflowed counter just before the PMI service routine is called. * The check for delta negative hopefully always rectifies this situation. */ static unsigned long alpha_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx, long ovf) { long prev_raw_count, new_raw_count; long delta; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = alpha_read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf; /* It is possible on very rare occasions that the PMC has overflowed * but the interrupt is yet to come. Detect and fix this situation. */ if (unlikely(delta < 0)) { delta += alpha_pmu->pmc_max_period[idx] + 1; } local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); return new_raw_count; } /* * Collect all HW events into the array event[]. */ static int collect_events(struct perf_event *group, int max_count, struct perf_event *event[], unsigned long *evtype, int *current_idx) { struct perf_event *pe; int n = 0; if (!is_software_event(group)) { if (n >= max_count) return -1; event[n] = group; evtype[n] = group->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } for_each_sibling_event(pe, group) { if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; event[n] = pe; evtype[n] = pe->hw.event_base; current_idx[n++] = PMC_NO_INDEX; } } return n; } /* * Check that a group of events can be simultaneously scheduled on to the PMU. 
*/ static int alpha_check_constraints(struct perf_event **events, unsigned long *evtypes, int n_ev) { /* No HW events is possible from hw_perf_group_sched_in(). */ if (n_ev == 0) return 0; if (n_ev > alpha_pmu->num_pmcs) return -1; return alpha_pmu->check_constraints(events, evtypes, n_ev); } /* * If new events have been scheduled then update cpuc with the new * configuration. This may involve shifting cycle counts from one PMC to * another. */ static void maybe_change_configuration(struct cpu_hw_events *cpuc) { int j; if (cpuc->n_added == 0) return; /* Find counters that are moving to another PMC and update */ for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; if (cpuc->current_idx[j] != PMC_NO_INDEX && cpuc->current_idx[j] != pe->hw.idx) { alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0); cpuc->current_idx[j] = PMC_NO_INDEX; } } /* Assign to counters all unassigned events. */ cpuc->idx_mask = 0; for (j = 0; j < cpuc->n_events; j++) { struct perf_event *pe = cpuc->event[j]; struct hw_perf_event *hwc = &pe->hw; int idx = hwc->idx; if (cpuc->current_idx[j] == PMC_NO_INDEX) { alpha_perf_event_set_period(pe, hwc, idx); cpuc->current_idx[j] = idx; } if (!(hwc->state & PERF_HES_STOPPED)) cpuc->idx_mask |= (1<<cpuc->current_idx[j]); } cpuc->config = cpuc->event[0]->hw.config_base; } /* Schedule perf HW event on to PMU. * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. */ static int alpha_pmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; int n0; int ret; unsigned long irq_flags; /* * The Sparc code has the IRQ disable first followed by the perf * disable, however this can lead to an overflowed counter with the * PMI disabled on rare occasions. The alpha_perf_event_update() * routine should detect this situation by noting a negative delta, * nevertheless we disable the PMCs first to enable a potential * final PMI to occur before we disable interrupts. */ perf_pmu_disable(event->pmu); local_irq_save(irq_flags); /* Default to error to be returned */ ret = -EAGAIN; /* Insert event on to PMU and if successful modify ret to valid return */ n0 = cpuc->n_events; if (n0 < alpha_pmu->num_pmcs) { cpuc->event[n0] = event; cpuc->evtype[n0] = event->hw.event_base; cpuc->current_idx[n0] = PMC_NO_INDEX; if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) { cpuc->n_events++; cpuc->n_added++; ret = 0; } } hwc->state = PERF_HES_UPTODATE; if (!(flags & PERF_EF_START)) hwc->state |= PERF_HES_STOPPED; local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); return ret; } /* Disable performance monitoring unit * - this function is called from outside this module via the pmu struct * returned from perf event initialisation. */ static void alpha_pmu_del(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); struct hw_perf_event *hwc = &event->hw; unsigned long irq_flags; int j; perf_pmu_disable(event->pmu); local_irq_save(irq_flags); for (j = 0; j < cpuc->n_events; j++) { if (event == cpuc->event[j]) { int idx = cpuc->current_idx[j]; /* Shift remaining entries down into the existing * slot. */ while (++j < cpuc->n_events) { cpuc->event[j - 1] = cpuc->event[j]; cpuc->evtype[j - 1] = cpuc->evtype[j]; cpuc->current_idx[j - 1] = cpuc->current_idx[j]; } /* Absorb the final count and turn off the event. 
*/ alpha_perf_event_update(event, hwc, idx, 0); perf_event_update_userpage(event); cpuc->idx_mask &= ~(1UL<<idx); cpuc->n_events--; break; } } local_irq_restore(irq_flags); perf_pmu_enable(event->pmu); } static void alpha_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; alpha_perf_event_update(event, hwc, hwc->idx, 0); } static void alpha_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!(hwc->state & PERF_HES_STOPPED)) { cpuc->idx_mask &= ~(1UL<<hwc->idx); hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { alpha_perf_event_update(event, hwc, hwc->idx, 0); hwc->state |= PERF_HES_UPTODATE; } if (cpuc->enabled) wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx)); } static void alpha_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) return; if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); alpha_perf_event_set_period(event, hwc, hwc->idx); } hwc->state = 0; cpuc->idx_mask |= 1UL<<hwc->idx; if (cpuc->enabled) wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx)); } /* * Check that CPU performance counters are supported. * - currently support EV67 and later CPUs. * - actually some later revisions of the EV6 have the same PMC model as the * EV67 but we don't do sufficiently deep CPU detection to detect them. * Bad luck to the very few people who might have one, I guess. */ static int supported_cpu(void) { struct percpu_struct *cpu; unsigned long cputype; /* Get cpu type from HW */ cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); cputype = cpu->type & 0xffffffff; /* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */ return (cputype >= EV67_CPU) && (cputype <= EV69_CPU); } static void hw_perf_event_destroy(struct perf_event *event) { /* Nothing to be done! */ return; } static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; struct perf_event *evts[MAX_HWEVENTS]; unsigned long evtypes[MAX_HWEVENTS]; int idx_rubbish_bin[MAX_HWEVENTS]; int ev; int n; /* We only support a limited range of HARDWARE event types with one * only programmable via a RAW event type. */ if (attr->type == PERF_TYPE_HARDWARE) { if (attr->config >= alpha_pmu->max_events) return -EINVAL; ev = alpha_pmu->event_map[attr->config]; } else if (attr->type == PERF_TYPE_HW_CACHE) { return -EOPNOTSUPP; } else if (attr->type == PERF_TYPE_RAW) { if (!alpha_pmu->raw_event_valid(attr->config)) return -EINVAL; ev = attr->config; } else { return -EOPNOTSUPP; } if (ev < 0) { return ev; } /* * We place the event type in event_base here and leave calculation * of the codes to programme the PMU for alpha_pmu_enable() because * it is only then we will know what HW events are actually * scheduled on to the PMU. At that point the code to programme the * PMU is put into config_base and the PMC to use is placed into * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that * it is yet to be determined. */ hwc->event_base = ev; /* Collect events in a group together suitable for calling * alpha_check_constraints() to verify that the group as a whole can * be scheduled on to the PMU. 
*/ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, alpha_pmu->num_pmcs - 1, evts, evtypes, idx_rubbish_bin); if (n < 0) return -EINVAL; } evtypes[n] = hwc->event_base; evts[n] = event; if (alpha_check_constraints(evts, evtypes, n + 1)) return -EINVAL; /* Indicate that PMU config and idx are yet to be determined. */ hwc->config_base = 0; hwc->idx = PMC_NO_INDEX; event->destroy = hw_perf_event_destroy; /* * Most architectures reserve the PMU for their use at this point. * As there is no existing mechanism to arbitrate usage and there * appears to be no other user of the Alpha PMU we just assume * that we can just use it, hence a NO-OP here. * * Maybe an alpha_reserve_pmu() routine should be implemented but is * anything else ever going to use it? */ if (!hwc->sample_period) { hwc->sample_period = alpha_pmu->pmc_max_period[0]; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } return 0; } /* * Main entry point to initialise a HW performance event. */ static int alpha_pmu_event_init(struct perf_event *event) { /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; switch (event->attr.type) { case PERF_TYPE_RAW: case PERF_TYPE_HARDWARE: case PERF_TYPE_HW_CACHE: break; default: return -ENOENT; } if (!alpha_pmu) return -ENODEV; /* Do the real initialisation work. */ return __hw_perf_event_init(event); } /* * Main entry point - enable HW performance counters. */ static void alpha_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (cpuc->enabled) return; cpuc->enabled = 1; barrier(); if (cpuc->n_events > 0) { /* Update cpuc with information from any new scheduled events. */ maybe_change_configuration(cpuc); /* Start counting the desired events. */ wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE); wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); } } /* * Main entry point - disable HW performance counters. */ static void alpha_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); if (!cpuc->enabled) return; cpuc->enabled = 0; cpuc->n_added = 0; wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); } static struct pmu pmu = { .pmu_enable = alpha_pmu_enable, .pmu_disable = alpha_pmu_disable, .event_init = alpha_pmu_event_init, .add = alpha_pmu_add, .del = alpha_pmu_del, .start = alpha_pmu_start, .stop = alpha_pmu_stop, .read = alpha_pmu_read, .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; /* * Main entry point - don't know when this is called but it * obviously dumps debug info. */ void perf_event_print_debug(void) { unsigned long flags; unsigned long pcr; int pcr0, pcr1; int cpu; if (!supported_cpu()) return; local_irq_save(flags); cpu = smp_processor_id(); pcr = wrperfmon(PERFMON_CMD_READ, 0); pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0]; pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1]; pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1); local_irq_restore(flags); } /* * Performance Monitoring Interrupt Service Routine called when a PMC * overflows. The PMC that overflowed is passed in la_ptr. 
*/ static void alpha_perf_event_irq_handler(unsigned long la_ptr, struct pt_regs *regs) { struct cpu_hw_events *cpuc; struct perf_sample_data data; struct perf_event *event; struct hw_perf_event *hwc; int idx, j; __this_cpu_inc(irq_pmi_count); cpuc = this_cpu_ptr(&cpu_hw_events); /* Completely counting through the PMC's period to trigger a new PMC * overflow interrupt while in this interrupt routine is utterly * disastrous! The EV6 and EV67 counters are sufficiently large to * prevent this but to be really sure disable the PMCs. */ wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask); /* la_ptr is the counter that overflowed. */ if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) { /* This should never occur! */ irq_err_count++; pr_warn("PMI: silly index %ld\n", la_ptr); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } idx = la_ptr; for (j = 0; j < cpuc->n_events; j++) { if (cpuc->current_idx[j] == idx) break; } if (unlikely(j == cpuc->n_events)) { /* This can occur if the event is disabled right on a PMC overflow. */ wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } event = cpuc->event[j]; if (unlikely(!event)) { /* This should never occur! */ irq_err_count++; pr_warn("PMI: No event at index %d!\n", idx); wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } hwc = &event->hw; alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1); perf_sample_data_init(&data, 0, hwc->last_period); if (alpha_perf_event_set_period(event, hwc, idx)) { if (perf_event_overflow(event, &data, regs)) { /* Interrupts coming too quickly; "throttle" the * counter, i.e., disable it for a little while. */ alpha_pmu_stop(event, 0); } } wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask); return; } /* * Init call to initialise performance events at kernel startup. */ int __init init_hw_perf_events(void) { pr_info("Performance events: "); if (!supported_cpu()) { pr_cont("No support for your CPU.\n"); return 0; } pr_cont("Supported CPU type!\n"); /* Override performance counter IRQ vector */ perf_irq = alpha_perf_event_irq_handler; /* And set up PMU specification */ alpha_pmu = &ev67_pmu; perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); return 0; } early_initcall(init_hw_perf_events);
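/*
 * Illustrative sketch, not part of the upstream file: alpha_perf_event_update()
 * above computes the per-sample delta from two raw PMC reads with no spare
 * guard bit to flag wraparound, folding a not-yet-serviced overflow back into
 * range when the delta goes negative.  Assuming an EV67-style 20-bit counter
 * (max_period = (1UL << 20) - 1), the same arithmetic can be written as the
 * stand-alone helper below; the name is illustrative.
 */
static inline long example_pmc_delta(long prev_count, long new_count, long ovf,
				     unsigned long count_mask,
				     unsigned long max_period)
{
	long delta = (new_count - (prev_count & count_mask)) + ovf;

	/* A wrap whose PMI has not arrived yet shows up as a negative
	   delta; add one full counter period to correct it. */
	if (delta < 0)
		delta += max_period + 1;
	return delta;
}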
linux-master
arch/alpha/kernel/perf_event.c
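The Alpha PMU driver above is reached from user space through the perf_event_open() syscall. The stand-alone sketch below is not part of the kernel tree; it is a minimal illustration, modelled on the perf_event_open(2) man page example, of a request that alpha_pmu_event_init() would accept (PERF_TYPE_HARDWARE, no exclude bits, since the pmu above advertises PERF_PMU_CAP_NO_EXCLUDE). The spin loop and printed label are placeholder assumptions.

/* Hedged illustration: counts CPU cycles via perf_event_open(2); the
 * PERF_TYPE_HARDWARE request is routed to __hw_perf_event_init() above. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	volatile unsigned long spin;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;		/* start stopped, enable explicitly */

	/* pid = 0, cpu = -1: count for this thread on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (spin = 0; spin < 1000000; spin++)
		;			/* placeholder workload */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		perror("read");
	printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}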
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_lca.c * * Written by David Mosberger ([email protected]) with some code * taken from Dave Rusling's ([email protected]) 32-bit * bios code. * * Code common to all LCA core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_lca.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/tty.h> #include <asm/ptrace.h> #include <asm/irq_regs.h> #include <asm/smp.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ /* * Machine check reasons. Defined according to PALcode sources * (osf.h and platform.h). */ #define MCHK_K_TPERR 0x0080 #define MCHK_K_TCPERR 0x0082 #define MCHK_K_HERR 0x0084 #define MCHK_K_ECC_C 0x0086 #define MCHK_K_ECC_NC 0x0088 #define MCHK_K_UNKNOWN 0x008A #define MCHK_K_CACKSOFT 0x008C #define MCHK_K_BUGCHECK 0x008E #define MCHK_K_OS_BUGCHECK 0x0090 #define MCHK_K_DCPERR 0x0092 #define MCHK_K_ICPERR 0x0094 /* * Platform-specific machine-check reasons: */ #define MCHK_K_SIO_SERR 0x204 /* all platforms so far */ #define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */ #define MCHK_K_DCSR 0x208 /* all but Noname */ /* * Given a bus, device, and function number, compute resulting * configuration space address and setup the LCA_IOC_CONF register * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Type 0: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:11 Device select bit. * 10:8 Function number * 7:2 Register number * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr) { unsigned long addr; u8 bus = pbus->number; if (bus == 0) { int device = device_fn >> 3; int func = device_fn & 0x7; /* Type 0 configuration cycle. */ if (device > 12) { return -1; } *(vulp)LCA_IOC_CONF = 0; addr = (1 << (11 + device)) | (func << 8) | where; } else { /* Type 1 configuration cycle. */ *(vulp)LCA_IOC_CONF = 1; addr = (bus << 16) | (device_fn << 8) | where; } *pci_addr = addr; return 0; } static unsigned int conf_read(unsigned long addr) { unsigned long flags, code, stat0; unsigned int value; local_irq_save(flags); /* Reset status register to avoid losing errors. */ stat0 = *(vulp)LCA_IOC_STAT0; *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Access configuration space. 
*/ value = *(vuip)addr; draina(); stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); if (code != 1) { printk("lca.c:conf_read: got stat0=%lx\n", stat0); } /* Reset error status. */ *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Reset machine check. */ wrmces(0x7); value = 0xffffffff; } local_irq_restore(flags); return value; } static void conf_write(unsigned long addr, unsigned int value) { unsigned long flags, code, stat0; local_irq_save(flags); /* avoid getting hit by machine check */ /* Reset status register to avoid losing errors. */ stat0 = *(vulp)LCA_IOC_STAT0; *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Access configuration space. */ *(vuip)addr = value; draina(); stat0 = *(vulp)LCA_IOC_STAT0; if (stat0 & LCA_IOC_STAT0_ERR) { code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT) & LCA_IOC_STAT0_CODE_MASK); if (code != 1) { printk("lca.c:conf_write: got stat0=%lx\n", stat0); } /* Reset error status. */ *(vulp)LCA_IOC_STAT0 = stat0; mb(); /* Reset machine check. */ wrmces(0x7); } local_irq_restore(flags); } static int lca_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr, pci_addr; long mask; int shift; if (mk_conf_addr(bus, devfn, where, &pci_addr)) return PCIBIOS_DEVICE_NOT_FOUND; shift = (where & 3) * 8; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + LCA_CONF; *value = conf_read(addr) >> (shift); return PCIBIOS_SUCCESSFUL; } static int lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr, pci_addr; long mask; if (mk_conf_addr(bus, devfn, where, &pci_addr)) return PCIBIOS_DEVICE_NOT_FOUND; mask = (size - 1) * 8; addr = (pci_addr << 5) + mask + LCA_CONF; conf_write(addr, value << ((where & 3) * 8)); return PCIBIOS_SUCCESSFUL; } struct pci_ops lca_pci_ops = { .read = lca_read_config, .write = lca_write_config, }; void lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { wmb(); *(vulp)LCA_IOC_TBIA = 0; mb(); } void __init lca_init_arch(void) { struct pci_controller *hose; /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR; hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR; hose->sparse_io_base = LCA_IO - IDENT_ADDR; hose->dense_io_base = 0; /* * Set up the PCI to main memory translation windows. * * Mimic the SRM settings for the direct-map window. * Window 0 is scatter-gather 8MB at 8MB (for isa). * Window 1 is direct access 1GB at 1GB. * * Note that we do not try to save any of the DMA window CSRs * before setting them, since we cannot read those CSRs on LCA. */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); hose->sg_pci = NULL; __direct_map_base = 0x40000000; __direct_map_size = 0x40000000; *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32); *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000; *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes); *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32); *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000; *(vulp)LCA_IOC_T_BASE1 = 0; *(vulp)LCA_IOC_TB_ENA = 0x80; lca_pci_tbi(hose, 0, -1); /* * Disable PCI parity for now. The NCR53c810 chip has * troubles meeting the PCI spec which results in * data parity errors. 
*/ *(vulp)LCA_IOC_PAR_DIS = 1UL<<5; /* * Finally, set up for restoring the correct HAE if using SRM. * Again, since we cannot read many of the CSRs on the LCA, * one of which happens to be the HAE, we save the value that * the SRM will expect... */ if (alpha_using_srm) srm_hae = 0x80000000UL; } /* * Constants used during machine-check handling. I suppose these * could be moved into lca.h but I don't see much reason why anybody * else would want to use them. */ #define ESR_EAV (1UL<< 0) /* error address valid */ #define ESR_CEE (1UL<< 1) /* correctable error */ #define ESR_UEE (1UL<< 2) /* uncorrectable error */ #define ESR_WRE (1UL<< 3) /* write-error */ #define ESR_SOR (1UL<< 4) /* error source */ #define ESR_CTE (1UL<< 7) /* cache-tag error */ #define ESR_MSE (1UL<< 9) /* multiple soft errors */ #define ESR_MHE (1UL<<10) /* multiple hard errors */ #define ESR_NXM (1UL<<12) /* non-existent memory */ #define IOC_ERR ( 1<<4) /* ioc logs an error */ #define IOC_CMD_SHIFT 0 #define IOC_CMD (0xf<<IOC_CMD_SHIFT) #define IOC_CODE_SHIFT 8 #define IOC_CODE (0xf<<IOC_CODE_SHIFT) #define IOC_LOST ( 1<<5) #define IOC_P_NBR ((__u32) ~((1<<13) - 1)) static void mem_error(unsigned long esr, unsigned long ear) { printk(" %s %s error to %s occurred at address %x\n", ((esr & ESR_CEE) ? "Correctable" : (esr & ESR_UEE) ? "Uncorrectable" : "A"), (esr & ESR_WRE) ? "write" : "read", (esr & ESR_SOR) ? "memory" : "b-cache", (unsigned) (ear & 0x1ffffff8)); if (esr & ESR_CTE) { printk(" A b-cache tag parity error was detected.\n"); } if (esr & ESR_MSE) { printk(" Several other correctable errors occurred.\n"); } if (esr & ESR_MHE) { printk(" Several other uncorrectable errors occurred.\n"); } if (esr & ESR_NXM) { printk(" Attempted to access non-existent memory.\n"); } } static void ioc_error(__u32 stat0, __u32 stat1) { static const char * const pci_cmd[] = { "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write", "Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd3", "Rsvd4", "Configuration Read", "Configuration Write", "Memory Read Multiple", "Dual Address", "Memory Read Line", "Memory Write and Invalidate" }; static const char * const err_name[] = { "exceeded retry limit", "no device", "bad data parity", "target abort", "bad address parity", "page table read error", "invalid page", "data error" }; unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT; unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT; printk(" %s initiated PCI %s cycle to address %x" " failed due to %s.\n", code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]); if (code == 5 || code == 6) { printk(" (Error occurred at PCI memory address %x.)\n", (stat0 & ~IOC_P_NBR)); } if (stat0 & IOC_LOST) { printk(" Other PCI errors occurred simultaneously.\n"); } } void lca_machine_check(unsigned long vector, unsigned long la_ptr) { const char * reason; union el_lca el; el.c = (struct el_common *) la_ptr; wrmces(rdmces()); /* reset machine check pending flag */ printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n", vector, get_irq_regs()->pc, (unsigned int) el.c->code); /* * The first quadword after the common header always seems to * be the machine check reason---don't know why this isn't * part of the common header instead. In the case of a long * logout frame, the upper 32 bits is the machine check * revision level, which we ignore for now. 
*/ switch ((unsigned int) el.c->code) { case MCHK_K_TPERR: reason = "tag parity error"; break; case MCHK_K_TCPERR: reason = "tag control parity error"; break; case MCHK_K_HERR: reason = "access to non-existent memory"; break; case MCHK_K_ECC_C: reason = "correctable ECC error"; break; case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break; case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break; case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break; case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break; case MCHK_K_DCPERR: reason = "d-cache parity error"; break; case MCHK_K_ICPERR: reason = "i-cache parity error"; break; case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break; case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break; case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break; case MCHK_K_UNKNOWN: default: reason = "unknown"; break; } switch (el.c->size) { case sizeof(struct el_lca_mcheck_short): printk(KERN_CRIT " Reason: %s (short frame%s, dc_stat=%#lx):\n", reason, el.c->retry ? ", retryable" : "", el.s->dc_stat); if (el.s->esr & ESR_EAV) { mem_error(el.s->esr, el.s->ear); } if (el.s->ioc_stat0 & IOC_ERR) { ioc_error(el.s->ioc_stat0, el.s->ioc_stat1); } break; case sizeof(struct el_lca_mcheck_long): printk(KERN_CRIT " Reason: %s (long frame%s):\n", reason, el.c->retry ? ", retryable" : ""); printk(KERN_CRIT " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n", el.l->pt[0], el.l->exc_addr, el.l->dc_stat); printk(KERN_CRIT " car: %#lx\n", el.l->car); if (el.l->esr & ESR_EAV) { mem_error(el.l->esr, el.l->ear); } if (el.l->ioc_stat0 & IOC_ERR) { ioc_error(el.l->ioc_stat0, el.l->ioc_stat1); } break; default: printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size); } /* Dump the logout area to give all info. */ #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { unsigned long * ptr = (unsigned long *) la_ptr; long i; for (i = 0; i < el.c->size / sizeof(long); i += 2) { printk(KERN_CRIT " +%8lx %016lx %016lx\n", i*sizeof(long), ptr[i], ptr[i+1]); } } #endif /* CONFIG_VERBOSE_MCHECK */ } /* * The following routines are needed to support the SPEED changing * necessary to successfully manage the thermal problem on the AlphaBook1. */ void lca_clock_print(void) { long pmr_reg; pmr_reg = LCA_READ_PMR; printk("Status of clock control:\n"); printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg)); printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg)); printk("\tInterrupt override is %s\n", (pmr_reg & LCA_PMR_INTO) ? "on" : "off"); printk("\tDMA override is %s\n", (pmr_reg & LCA_PMR_DMAO) ? "on" : "off"); } int lca_get_clock(void) { long pmr_reg; pmr_reg = LCA_READ_PMR; return(LCA_GET_PRIMARY(pmr_reg)); } void lca_clock_fiddle(int divisor) { long pmr_reg; pmr_reg = LCA_READ_PMR; LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor); /* lca_norm_clock = divisor; */ LCA_WRITE_PMR(pmr_reg); mb(); }
linux-master
arch/alpha/kernel/core_lca.c
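The block comment ahead of mk_conf_addr() in core_lca.c describes how Type 0 and Type 1 configuration addresses are laid out. The stand-alone sketch below re-derives just that bit layout so it can be compiled and inspected in isolation; the helper name and example values are made up, and it deliberately omits the LCA_IOC_CONF write and the final << 5 shift into sparse space that lca_read_config()/lca_write_config() perform.

/* Hedged illustration of the Type 0 / Type 1 encoding documented above
 * mk_conf_addr(); not kernel code, just the address arithmetic. */
#include <stdio.h>

static unsigned long lca_conf_addr_sketch(unsigned int bus, unsigned int devfn,
					  unsigned int where)
{
	unsigned int device = devfn >> 3;
	unsigned int func = devfn & 0x7;

	if (bus == 0) {
		/* Type 0: one-hot device select in bits 31:11, function
		 * in 10:8, register in 7:2.  mk_conf_addr() rejects
		 * devices above 12. */
		if (device > 12)
			return ~0UL;
		return (1UL << (11 + device)) | (func << 8) | where;
	}

	/* Type 1: bus in 23:16, devfn in 15:8, register in 7:2. */
	return ((unsigned long)bus << 16) | (devfn << 8) | where;
}

int main(void)
{
	/* Bus 0, device 3, function 0, config register 0x10. */
	printf("type 0: 0x%lx\n", lca_conf_addr_sketch(0, 3 << 3, 0x10));
	/* Bus 1, device 4, function 1, config register 0x04. */
	printf("type 1: 0x%lx\n", lca_conf_addr_sketch(1, (4 << 3) | 1, 0x04));
	return 0;
}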
/* * SMC 37C669 initialization code */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <asm/hwrpb.h> #include <asm/io.h> #if 0 # define DBG_DEVS(args) printk args #else # define DBG_DEVS(args) #endif #define KB 1024 #define MB (1024*KB) #define GB (1024*MB) #define SMC_DEBUG 0 /* File: smcc669_def.h * * Copyright (C) 1997 by * Digital Equipment Corporation, Maynard, Massachusetts. * All rights reserved. * * This software is furnished under a license and may be used and copied * only in accordance of the terms of such license and with the * inclusion of the above copyright notice. This software or any other * copies thereof may not be provided or otherwise made available to any * other person. No title to and ownership of the software is hereby * transferred. * * The information in this software is subject to change without notice * and should not be construed as a commitment by Digital Equipment * Corporation. * * Digital assumes no responsibility for the use or reliability of its * software on equipment which is not supplied by Digital. * * * Abstract: * * This file contains header definitions for the SMC37c669 * Super I/O controller. * * Author: * * Eric Rasmussen * * Modification History: * * er 28-Jan-1997 Initial Entry */ #ifndef __SMC37c669_H #define __SMC37c669_H /* ** Macros for handling device IRQs ** ** The mask acts as a flag used in mapping actual ISA IRQs (0 - 15) ** to device IRQs (A - H). */ #define SMC37c669_DEVICE_IRQ_MASK 0x80000000 #define SMC37c669_DEVICE_IRQ( __i ) \ ((SMC37c669_DEVICE_IRQ_MASK) | (__i)) #define SMC37c669_IS_DEVICE_IRQ(__i) \ (((__i) & (SMC37c669_DEVICE_IRQ_MASK)) == (SMC37c669_DEVICE_IRQ_MASK)) #define SMC37c669_RAW_DEVICE_IRQ(__i) \ ((__i) & ~(SMC37c669_DEVICE_IRQ_MASK)) /* ** Macros for handling device DRQs ** ** The mask acts as a flag used in mapping actual ISA DMA ** channels to device DMA channels (A - C). 
*/ #define SMC37c669_DEVICE_DRQ_MASK 0x80000000 #define SMC37c669_DEVICE_DRQ(__d) \ ((SMC37c669_DEVICE_DRQ_MASK) | (__d)) #define SMC37c669_IS_DEVICE_DRQ(__d) \ (((__d) & (SMC37c669_DEVICE_DRQ_MASK)) == (SMC37c669_DEVICE_DRQ_MASK)) #define SMC37c669_RAW_DEVICE_DRQ(__d) \ ((__d) & ~(SMC37c669_DEVICE_DRQ_MASK)) #define SMC37c669_DEVICE_ID 0x3 /* ** SMC37c669 Device Function Definitions */ #define SERIAL_0 0 #define SERIAL_1 1 #define PARALLEL_0 2 #define FLOPPY_0 3 #define IDE_0 4 #define NUM_FUNCS 5 /* ** Default Device Function Mappings */ #define COM1_BASE 0x3F8 #define COM1_IRQ 4 #define COM2_BASE 0x2F8 #define COM2_IRQ 3 #define PARP_BASE 0x3BC #define PARP_IRQ 7 #define PARP_DRQ 3 #define FDC_BASE 0x3F0 #define FDC_IRQ 6 #define FDC_DRQ 2 /* ** Configuration On/Off Key Definitions */ #define SMC37c669_CONFIG_ON_KEY 0x55 #define SMC37c669_CONFIG_OFF_KEY 0xAA /* ** SMC 37c669 Device IRQs */ #define SMC37c669_DEVICE_IRQ_A ( SMC37c669_DEVICE_IRQ( 0x01 ) ) #define SMC37c669_DEVICE_IRQ_B ( SMC37c669_DEVICE_IRQ( 0x02 ) ) #define SMC37c669_DEVICE_IRQ_C ( SMC37c669_DEVICE_IRQ( 0x03 ) ) #define SMC37c669_DEVICE_IRQ_D ( SMC37c669_DEVICE_IRQ( 0x04 ) ) #define SMC37c669_DEVICE_IRQ_E ( SMC37c669_DEVICE_IRQ( 0x05 ) ) #define SMC37c669_DEVICE_IRQ_F ( SMC37c669_DEVICE_IRQ( 0x06 ) ) /* SMC37c669_DEVICE_IRQ_G *** RESERVED ***/ #define SMC37c669_DEVICE_IRQ_H ( SMC37c669_DEVICE_IRQ( 0x08 ) ) /* ** SMC 37c669 Device DMA Channel Definitions */ #define SMC37c669_DEVICE_DRQ_A ( SMC37c669_DEVICE_DRQ( 0x01 ) ) #define SMC37c669_DEVICE_DRQ_B ( SMC37c669_DEVICE_DRQ( 0x02 ) ) #define SMC37c669_DEVICE_DRQ_C ( SMC37c669_DEVICE_DRQ( 0x03 ) ) /* ** Configuration Register Index Definitions */ #define SMC37c669_CR00_INDEX 0x00 #define SMC37c669_CR01_INDEX 0x01 #define SMC37c669_CR02_INDEX 0x02 #define SMC37c669_CR03_INDEX 0x03 #define SMC37c669_CR04_INDEX 0x04 #define SMC37c669_CR05_INDEX 0x05 #define SMC37c669_CR06_INDEX 0x06 #define SMC37c669_CR07_INDEX 0x07 #define SMC37c669_CR08_INDEX 0x08 #define SMC37c669_CR09_INDEX 0x09 #define SMC37c669_CR0A_INDEX 0x0A #define SMC37c669_CR0B_INDEX 0x0B #define SMC37c669_CR0C_INDEX 0x0C #define SMC37c669_CR0D_INDEX 0x0D #define SMC37c669_CR0E_INDEX 0x0E #define SMC37c669_CR0F_INDEX 0x0F #define SMC37c669_CR10_INDEX 0x10 #define SMC37c669_CR11_INDEX 0x11 #define SMC37c669_CR12_INDEX 0x12 #define SMC37c669_CR13_INDEX 0x13 #define SMC37c669_CR14_INDEX 0x14 #define SMC37c669_CR15_INDEX 0x15 #define SMC37c669_CR16_INDEX 0x16 #define SMC37c669_CR17_INDEX 0x17 #define SMC37c669_CR18_INDEX 0x18 #define SMC37c669_CR19_INDEX 0x19 #define SMC37c669_CR1A_INDEX 0x1A #define SMC37c669_CR1B_INDEX 0x1B #define SMC37c669_CR1C_INDEX 0x1C #define SMC37c669_CR1D_INDEX 0x1D #define SMC37c669_CR1E_INDEX 0x1E #define SMC37c669_CR1F_INDEX 0x1F #define SMC37c669_CR20_INDEX 0x20 #define SMC37c669_CR21_INDEX 0x21 #define SMC37c669_CR22_INDEX 0x22 #define SMC37c669_CR23_INDEX 0x23 #define SMC37c669_CR24_INDEX 0x24 #define SMC37c669_CR25_INDEX 0x25 #define SMC37c669_CR26_INDEX 0x26 #define SMC37c669_CR27_INDEX 0x27 #define SMC37c669_CR28_INDEX 0x28 #define SMC37c669_CR29_INDEX 0x29 /* ** Configuration Register Alias Definitions */ #define SMC37c669_DEVICE_ID_INDEX SMC37c669_CR0D_INDEX #define SMC37c669_DEVICE_REVISION_INDEX SMC37c669_CR0E_INDEX #define SMC37c669_FDC_BASE_ADDRESS_INDEX SMC37c669_CR20_INDEX #define SMC37c669_IDE_BASE_ADDRESS_INDEX SMC37c669_CR21_INDEX #define SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX SMC37c669_CR22_INDEX #define SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX SMC37c669_CR23_INDEX #define 
SMC37c669_SERIAL0_BASE_ADDRESS_INDEX SMC37c669_CR24_INDEX #define SMC37c669_SERIAL1_BASE_ADDRESS_INDEX SMC37c669_CR25_INDEX #define SMC37c669_PARALLEL_FDC_DRQ_INDEX SMC37c669_CR26_INDEX #define SMC37c669_PARALLEL_FDC_IRQ_INDEX SMC37c669_CR27_INDEX #define SMC37c669_SERIAL_IRQ_INDEX SMC37c669_CR28_INDEX /* ** Configuration Register Definitions ** ** The INDEX (write only) and DATA (read/write) ports are effective ** only when the chip is in the Configuration State. */ typedef struct _SMC37c669_CONFIG_REGS { unsigned char index_port; unsigned char data_port; } SMC37c669_CONFIG_REGS; /* ** CR00 - default value 0x28 ** ** IDE_EN (CR00<1:0>): ** 0x - 30ua pull-ups on nIDEEN, nHDCS0, NHDCS1 ** 11 - IRQ_H available as IRQ output, ** IRRX2, IRTX2 available as alternate IR pins ** 10 - nIDEEN, nHDCS0, nHDCS1 used to control IDE ** ** VALID (CR00<7>): ** A high level on this software controlled bit can ** be used to indicate that a valid configuration ** cycle has occurred. The control software must ** take care to set this bit at the appropriate times. ** Set to zero after power up. This bit has no ** effect on any other hardware in the chip. ** */ typedef union _SMC37c669_CR00 { unsigned char as_uchar; struct { unsigned ide_en : 2; /* See note above */ unsigned reserved1 : 1; /* RAZ */ unsigned fdc_pwr : 1; /* 1 = supply power to FDC */ unsigned reserved2 : 3; /* Read as 010b */ unsigned valid : 1; /* See note above */ } by_field; } SMC37c669_CR00; /* ** CR01 - default value 0x9C */ typedef union _SMC37c669_CR01 { unsigned char as_uchar; struct { unsigned reserved1 : 2; /* RAZ */ unsigned ppt_pwr : 1; /* 1 = supply power to PPT */ unsigned ppt_mode : 1; /* 1 = Printer mode, 0 = EPP */ unsigned reserved2 : 1; /* Read as 1 */ unsigned reserved3 : 2; /* RAZ */ unsigned lock_crx: 1; /* Lock CR00 - CR18 */ } by_field; } SMC37c669_CR01; /* ** CR02 - default value 0x88 */ typedef union _SMC37c669_CR02 { unsigned char as_uchar; struct { unsigned reserved1 : 3; /* RAZ */ unsigned uart1_pwr : 1; /* 1 = supply power to UART1 */ unsigned reserved2 : 3; /* RAZ */ unsigned uart2_pwr : 1; /* 1 = supply power to UART2 */ } by_field; } SMC37c669_CR02; /* ** CR03 - default value 0x78 ** ** CR03<7> CR03<2> Pin 94 ** ------- ------- ------ ** 0 X DRV2 (input) ** 1 0 ADRX ** 1 1 IRQ_B ** ** CR03<6> CR03<5> Op Mode ** ------- ------- ------- ** 0 0 Model 30 ** 0 1 PS/2 ** 1 0 Reserved ** 1 1 AT Mode */ typedef union _SMC37c669_CR03 { unsigned char as_uchar; struct { unsigned pwrgd_gamecs : 1; /* 1 = PWRGD, 0 = GAMECS */ unsigned fdc_mode2 : 1; /* 1 = Enhanced Mode 2 */ unsigned pin94_0 : 1; /* See note above */ unsigned reserved1 : 1; /* RAZ */ unsigned drvden : 1; /* 1 = high, 0 - output */ unsigned op_mode : 2; /* See note above */ unsigned pin94_1 : 1; /* See note above */ } by_field; } SMC37c669_CR03; /* ** CR04 - default value 0x00 ** ** PP_EXT_MODE: ** If CR01<PP_MODE> = 0 and PP_EXT_MODE = ** 00 - Standard and Bidirectional ** 01 - EPP mode and SPP ** 10 - ECP mode ** In this mode, 2 drives can be supported ** directly, 3 or 4 drives must use external ** 4 drive support. SPP can be selected ** through the ECR register of ECP as mode 000. ** 11 - ECP mode and EPP mode ** In this mode, 2 drives can be supported ** directly, 3 or 4 drives must use external ** 4 drive support. SPP can be selected ** through the ECR register of ECP as mode 000. ** In this mode, EPP can be selected through ** the ECR register of ECP as mode 100. 
** ** PP_FDC: ** 00 - Normal ** 01 - PPFD1 ** 10 - PPFD2 ** 11 - Reserved ** ** MIDI1: ** Serial Clock Select: ** A low level on this bit disables MIDI support, ** clock = divide by 13. A high level on this ** bit enables MIDI support, clock = divide by 12. ** ** MIDI operates at 31.25 Kbps which can be derived ** from 125 KHz (24 MHz / 12 = 2 MHz, 2 MHz / 16 = 125 KHz) ** ** ALT_IO: ** 0 - Use pins IRRX, IRTX ** 1 - Use pins IRRX2, IRTX2 ** ** If this bit is set, the IR receive and transmit ** functions will not be available on pins 25 and 26 ** unless CR00<IDE_EN> = 11. */ typedef union _SMC37c669_CR04 { unsigned char as_uchar; struct { unsigned ppt_ext_mode : 2; /* See note above */ unsigned ppt_fdc : 2; /* See note above */ unsigned midi1 : 1; /* See note above */ unsigned midi2 : 1; /* See note above */ unsigned epp_type : 1; /* 0 = EPP 1.9, 1 = EPP 1.7 */ unsigned alt_io : 1; /* See note above */ } by_field; } SMC37c669_CR04; /* ** CR05 - default value 0x00 ** ** DEN_SEL: ** 00 - Densel output normal ** 01 - Reserved ** 10 - Densel output 1 ** 11 - Densel output 0 ** */ typedef union _SMC37c669_CR05 { unsigned char as_uchar; struct { unsigned reserved1 : 2; /* RAZ */ unsigned fdc_dma_mode : 1; /* 0 = burst, 1 = non-burst */ unsigned den_sel : 2; /* See note above */ unsigned swap_drv : 1; /* Swap the FDC motor selects */ unsigned extx4 : 1; /* 0 = 2 drive, 1 = external 4 drive decode */ unsigned reserved2 : 1; /* RAZ */ } by_field; } SMC37c669_CR05; /* ** CR06 - default value 0xFF */ typedef union _SMC37c669_CR06 { unsigned char as_uchar; struct { unsigned floppy_a : 2; /* Type of floppy drive A */ unsigned floppy_b : 2; /* Type of floppy drive B */ unsigned floppy_c : 2; /* Type of floppy drive C */ unsigned floppy_d : 2; /* Type of floppy drive D */ } by_field; } SMC37c669_CR06; /* ** CR07 - default value 0x00 ** ** Auto Power Management CR07<7:4>: ** 0 - Auto Powerdown disabled (default) ** 1 - Auto Powerdown enabled ** ** This bit is reset to the default state by POR or ** a hardware reset. 
** */ typedef union _SMC37c669_CR07 { unsigned char as_uchar; struct { unsigned floppy_boot : 2; /* 0 = A:, 1 = B: */ unsigned reserved1 : 2; /* RAZ */ unsigned ppt_en : 1; /* See note above */ unsigned uart1_en : 1; /* See note above */ unsigned uart2_en : 1; /* See note above */ unsigned fdc_en : 1; /* See note above */ } by_field; } SMC37c669_CR07; /* ** CR08 - default value 0x00 */ typedef union _SMC37c669_CR08 { unsigned char as_uchar; struct { unsigned zero : 4; /* 0 */ unsigned addrx7_4 : 4; /* ADR<7:3> for ADRx decode */ } by_field; } SMC37c669_CR08; /* ** CR09 - default value 0x00 ** ** ADRx_CONFIG: ** 00 - ADRx disabled ** 01 - 1 byte decode A<3:0> = 0000b ** 10 - 8 byte block decode A<3:0> = 0XXXb ** 11 - 16 byte block decode A<3:0> = XXXXb ** */ typedef union _SMC37c669_CR09 { unsigned char as_uchar; struct { unsigned adra8 : 3; /* ADR<10:8> for ADRx decode */ unsigned reserved1 : 3; unsigned adrx_config : 2; /* See note above */ } by_field; } SMC37c669_CR09; /* ** CR0A - default value 0x00 */ typedef union _SMC37c669_CR0A { unsigned char as_uchar; struct { unsigned ecp_fifo_threshold : 4; unsigned reserved1 : 4; } by_field; } SMC37c669_CR0A; /* ** CR0B - default value 0x00 */ typedef union _SMC37c669_CR0B { unsigned char as_uchar; struct { unsigned fdd0_drtx : 2; /* FDD0 Data Rate Table */ unsigned fdd1_drtx : 2; /* FDD1 Data Rate Table */ unsigned fdd2_drtx : 2; /* FDD2 Data Rate Table */ unsigned fdd3_drtx : 2; /* FDD3 Data Rate Table */ } by_field; } SMC37c669_CR0B; /* ** CR0C - default value 0x00 ** ** UART2_MODE: ** 000 - Standard (default) ** 001 - IrDA (HPSIR) ** 010 - Amplitude Shift Keyed IR @500 KHz ** 011 - Reserved ** 1xx - Reserved ** */ typedef union _SMC37c669_CR0C { unsigned char as_uchar; struct { unsigned uart2_rcv_polarity : 1; /* 1 = invert RX */ unsigned uart2_xmit_polarity : 1; /* 1 = invert TX */ unsigned uart2_duplex : 1; /* 1 = full, 0 = half */ unsigned uart2_mode : 3; /* See note above */ unsigned uart1_speed : 1; /* 1 = high speed enabled */ unsigned uart2_speed : 1; /* 1 = high speed enabled */ } by_field; } SMC37c669_CR0C; /* ** CR0D - default value 0x03 ** ** Device ID Register - read only */ typedef union _SMC37c669_CR0D { unsigned char as_uchar; struct { unsigned device_id : 8; /* Returns 0x3 in this field */ } by_field; } SMC37c669_CR0D; /* ** CR0E - default value 0x02 ** ** Device Revision Register - read only */ typedef union _SMC37c669_CR0E { unsigned char as_uchar; struct { unsigned device_rev : 8; /* Returns 0x2 in this field */ } by_field; } SMC37c669_CR0E; /* ** CR0F - default value 0x00 */ typedef union _SMC37c669_CR0F { unsigned char as_uchar; struct { unsigned test0 : 1; /* Reserved - set to 0 */ unsigned test1 : 1; /* Reserved - set to 0 */ unsigned test2 : 1; /* Reserved - set to 0 */ unsigned test3 : 1; /* Reserved - set t0 0 */ unsigned test4 : 1; /* Reserved - set to 0 */ unsigned test5 : 1; /* Reserved - set t0 0 */ unsigned test6 : 1; /* Reserved - set t0 0 */ unsigned test7 : 1; /* Reserved - set to 0 */ } by_field; } SMC37c669_CR0F; /* ** CR10 - default value 0x00 */ typedef union _SMC37c669_CR10 { unsigned char as_uchar; struct { unsigned reserved1 : 3; /* RAZ */ unsigned pll_gain : 1; /* 1 = 3V, 2 = 5V operation */ unsigned pll_stop : 1; /* 1 = stop PLLs */ unsigned ace_stop : 1; /* 1 = stop UART clocks */ unsigned pll_clock_ctrl : 1; /* 0 = 14.318 MHz, 1 = 24 MHz */ unsigned ir_test : 1; /* Enable IR test mode */ } by_field; } SMC37c669_CR10; /* ** CR11 - default value 0x00 */ typedef union _SMC37c669_CR11 { unsigned char 
as_uchar; struct { unsigned ir_loopback : 1; /* Internal IR loop back */ unsigned test_10ms : 1; /* Test 10ms autopowerdown FDC timeout */ unsigned reserved1 : 6; /* RAZ */ } by_field; } SMC37c669_CR11; /* ** CR12 - CR1D are reserved registers */ /* ** CR1E - default value 0x80 ** ** GAMECS: ** 00 - GAMECS disabled ** 01 - 1 byte decode ADR<3:0> = 0001b ** 10 - 8 byte block decode ADR<3:0> = 0XXXb ** 11 - 16 byte block decode ADR<3:0> = XXXXb ** */ typedef union _SMC37c66_CR1E { unsigned char as_uchar; struct { unsigned gamecs_config: 2; /* See note above */ unsigned gamecs_addr9_4 : 6; /* GAMECS Addr<9:4> */ } by_field; } SMC37c669_CR1E; /* ** CR1F - default value 0x00 ** ** DT0 DT1 DRVDEN0 DRVDEN1 Drive Type ** --- --- ------- ------- ---------- ** 0 0 DENSEL DRATE0 4/2/1 MB 3.5" ** 2/1 MB 5.25" ** 2/1.6/1 MB 3.5" (3-mode) ** 0 1 DRATE1 DRATE0 ** 1 0 nDENSEL DRATE0 PS/2 ** 1 1 DRATE0 DRATE1 ** ** Note: DENSEL, DRATE1, and DRATE0 map onto two output ** pins - DRVDEN0 and DRVDEN1. ** */ typedef union _SMC37c669_CR1F { unsigned char as_uchar; struct { unsigned fdd0_drive_type : 2; /* FDD0 drive type */ unsigned fdd1_drive_type : 2; /* FDD1 drive type */ unsigned fdd2_drive_type : 2; /* FDD2 drive type */ unsigned fdd3_drive_type : 2; /* FDD3 drive type */ } by_field; } SMC37c669_CR1F; /* ** CR20 - default value 0x3C ** ** FDC Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0XXXb to access. ** */ typedef union _SMC37c669_CR20 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* FDC Addr<9:4> */ } by_field; } SMC37c669_CR20; /* ** CR21 - default value 0x3C ** ** IDE Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0XXXb to access. ** */ typedef union _SMC37c669_CR21 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* IDE Addr<9:4> */ } by_field; } SMC37c669_CR21; /* ** CR22 - default value 0x3D ** ** IDE Alternate Status Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<3:0> = 0110b to access. ** */ typedef union _SMC37c669_CR22 { unsigned char as_uchar; struct { unsigned zero : 2; /* 0 */ unsigned addr9_4 : 6; /* IDE Alt Status Addr<9:4> */ } by_field; } SMC37c669_CR22; /* ** CR23 - default value 0x00 ** ** Parallel Port Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0 to access. ** - If EPP is enabled, A<2:0> = XXXb to access. ** If EPP is NOT enabled, A<1:0> = XXb to access ** */ typedef union _SMC37c669_CR23 { unsigned char as_uchar; struct { unsigned addr9_2 : 8; /* Parallel Port Addr<9:2> */ } by_field; } SMC37c669_CR23; /* ** CR24 - default value 0x00 ** ** UART1 Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<2:0> = XXXb to access. ** */ typedef union _SMC37c669_CR24 { unsigned char as_uchar; struct { unsigned zero : 1; /* 0 */ unsigned addr9_3 : 7; /* UART1 Addr<9:3> */ } by_field; } SMC37c669_CR24; /* ** CR25 - default value 0x00 ** ** UART2 Base Address Register ** - To disable this decode set Addr<9:8> = 0 ** - A<10> = 0, A<2:0> = XXXb to access. 
** */ typedef union _SMC37c669_CR25 { unsigned char as_uchar; struct { unsigned zero : 1; /* 0 */ unsigned addr9_3 : 7; /* UART2 Addr<9:3> */ } by_field; } SMC37c669_CR25; /* ** CR26 - default value 0x00 ** ** Parallel Port / FDC DMA Select Register ** ** D3 - D0 DMA ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 DMA_A ** 0010 DMA_B ** 0011 DMA_C ** */ typedef union _SMC37c669_CR26 { unsigned char as_uchar; struct { unsigned ppt_drq : 4; /* See note above */ unsigned fdc_drq : 4; /* See note above */ } by_field; } SMC37c669_CR26; /* ** CR27 - default value 0x00 ** ** Parallel Port / FDC IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** ** Any unselected IRQ REQ is in tristate ** */ typedef union _SMC37c669_CR27 { unsigned char as_uchar; struct { unsigned ppt_irq : 4; /* See note above */ unsigned fdc_irq : 4; /* See note above */ } by_field; } SMC37c669_CR27; /* ** CR28 - default value 0x00 ** ** UART IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** 1111 share with UART1 (only for UART2) ** ** Any unselected IRQ REQ is in tristate ** ** To share an IRQ between UART1 and UART2, set ** UART1 to use the desired IRQ and set UART2 to ** 0xF to enable sharing mechanism. ** */ typedef union _SMC37c669_CR28 { unsigned char as_uchar; struct { unsigned uart2_irq : 4; /* See note above */ unsigned uart1_irq : 4; /* See note above */ } by_field; } SMC37c669_CR28; /* ** CR29 - default value 0x00 ** ** IRQIN IRQ Select Register ** ** D3 - D0 IRQ ** D7 - D4 Selected ** ------- -------- ** 0000 None ** 0001 IRQ_A ** 0010 IRQ_B ** 0011 IRQ_C ** 0100 IRQ_D ** 0101 IRQ_E ** 0110 IRQ_F ** 0111 Reserved ** 1000 IRQ_H ** ** Any unselected IRQ REQ is in tristate ** */ typedef union _SMC37c669_CR29 { unsigned char as_uchar; struct { unsigned irqin_irq : 4; /* See note above */ unsigned reserved1 : 4; /* RAZ */ } by_field; } SMC37c669_CR29; /* ** Aliases of Configuration Register formats (should match ** the set of index aliases). ** ** Note that CR24 and CR25 have the same format and are the ** base address registers for UART1 and UART2. Because of ** this we only define 1 alias here - for CR24 - as the serial ** base address register. ** ** Note that CR21 and CR22 have the same format and are the ** base address and alternate status address registers for ** the IDE controller. Because of this we only define 1 alias ** here - for CR21 - as the IDE address register. 
** */ typedef SMC37c669_CR0D SMC37c669_DEVICE_ID_REGISTER; typedef SMC37c669_CR0E SMC37c669_DEVICE_REVISION_REGISTER; typedef SMC37c669_CR20 SMC37c669_FDC_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR21 SMC37c669_IDE_ADDRESS_REGISTER; typedef SMC37c669_CR23 SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR24 SMC37c669_SERIAL_BASE_ADDRESS_REGISTER; typedef SMC37c669_CR26 SMC37c669_PARALLEL_FDC_DRQ_REGISTER; typedef SMC37c669_CR27 SMC37c669_PARALLEL_FDC_IRQ_REGISTER; typedef SMC37c669_CR28 SMC37c669_SERIAL_IRQ_REGISTER; /* ** ISA/Device IRQ Translation Table Entry Definition */ typedef struct _SMC37c669_IRQ_TRANSLATION_ENTRY { int device_irq; int isa_irq; } SMC37c669_IRQ_TRANSLATION_ENTRY; /* ** ISA/Device DMA Translation Table Entry Definition */ typedef struct _SMC37c669_DRQ_TRANSLATION_ENTRY { int device_drq; int isa_drq; } SMC37c669_DRQ_TRANSLATION_ENTRY; /* ** External Interface Function Prototype Declarations */ SMC37c669_CONFIG_REGS *SMC37c669_detect( int ); unsigned int SMC37c669_enable_device( unsigned int func ); unsigned int SMC37c669_disable_device( unsigned int func ); unsigned int SMC37c669_configure_device( unsigned int func, int port, int irq, int drq ); void SMC37c669_display_device_info( void ); #endif /* __SMC37c669_H */ /* file: smcc669.c * * Copyright (C) 1997 by * Digital Equipment Corporation, Maynard, Massachusetts. * All rights reserved. * * This software is furnished under a license and may be used and copied * only in accordance of the terms of such license and with the * inclusion of the above copyright notice. This software or any other * copies thereof may not be provided or otherwise made available to any * other person. No title to and ownership of the software is hereby * transferred. * * The information in this software is subject to change without notice * and should not be construed as a commitment by digital equipment * corporation. * * Digital assumes no responsibility for the use or reliability of its * software on equipment which is not supplied by digital. */ /* *++ * FACILITY: * * Alpha SRM Console Firmware * * MODULE DESCRIPTION: * * SMC37c669 Super I/O controller configuration routines. * * AUTHORS: * * Eric Rasmussen * * CREATION DATE: * * 28-Jan-1997 * * MODIFICATION HISTORY: * * er 01-May-1997 Fixed pointer conversion errors in * SMC37c669_get_device_config(). * er 28-Jan-1997 Initial version. * *-- */ #ifndef TRUE #define TRUE 1 #endif #ifndef FALSE #define FALSE 0 #endif #define wb( _x_, _y_ ) outb( _y_, (unsigned int)((unsigned long)_x_) ) #define rb( _x_ ) inb( (unsigned int)((unsigned long)_x_) ) /* ** Local storage for device configuration information. ** ** Since the SMC37c669 does not provide an explicit ** mechanism for enabling/disabling individual device ** functions, other than unmapping the device, local ** storage for device configuration information is ** allocated here for use in implementing our own ** function enable/disable scheme. */ static struct DEVICE_CONFIG { unsigned int port1; unsigned int port2; int irq; int drq; } local_config [NUM_FUNCS]; /* ** List of all possible addresses for the Super I/O chip */ static unsigned long SMC37c669_Addresses[] __initdata = { 0x3F0UL, /* Primary address */ 0x370UL, /* Secondary address */ 0UL /* End of list */ }; /* ** Global Pointer to the Super I/O device */ static SMC37c669_CONFIG_REGS *SMC37c669 __initdata = NULL; /* ** IRQ Translation Table ** ** The IRQ translation table is a list of SMC37c669 device ** and standard ISA IRQs. 
** */ static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_table __initdata; /* ** The following definition is for the default IRQ ** translation table. */ static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_default_irq_table[] __initdata = { { SMC37c669_DEVICE_IRQ_A, -1 }, { SMC37c669_DEVICE_IRQ_B, -1 }, { SMC37c669_DEVICE_IRQ_C, 7 }, { SMC37c669_DEVICE_IRQ_D, 6 }, { SMC37c669_DEVICE_IRQ_E, 4 }, { SMC37c669_DEVICE_IRQ_F, 3 }, { SMC37c669_DEVICE_IRQ_H, -1 }, { -1, -1 } /* End of table */ }; /* ** The following definition is for the MONET (XP1000) IRQ ** translation table. */ static SMC37c669_IRQ_TRANSLATION_ENTRY SMC37c669_monet_irq_table[] __initdata = { { SMC37c669_DEVICE_IRQ_A, -1 }, { SMC37c669_DEVICE_IRQ_B, -1 }, { SMC37c669_DEVICE_IRQ_C, 6 }, { SMC37c669_DEVICE_IRQ_D, 7 }, { SMC37c669_DEVICE_IRQ_E, 4 }, { SMC37c669_DEVICE_IRQ_F, 3 }, { SMC37c669_DEVICE_IRQ_H, -1 }, { -1, -1 } /* End of table */ }; static SMC37c669_IRQ_TRANSLATION_ENTRY *SMC37c669_irq_tables[] __initdata = { SMC37c669_default_irq_table, SMC37c669_monet_irq_table }; /* ** DRQ Translation Table ** ** The DRQ translation table is a list of SMC37c669 device and ** ISA DMA channels. ** */ static SMC37c669_DRQ_TRANSLATION_ENTRY *SMC37c669_drq_table __initdata; /* ** The following definition is the default DRQ ** translation table. */ static SMC37c669_DRQ_TRANSLATION_ENTRY SMC37c669_default_drq_table[] __initdata = { { SMC37c669_DEVICE_DRQ_A, 2 }, { SMC37c669_DEVICE_DRQ_B, 3 }, { SMC37c669_DEVICE_DRQ_C, -1 }, { -1, -1 } /* End of table */ }; /* ** Local Function Prototype Declarations */ static unsigned int SMC37c669_is_device_enabled( unsigned int func ); #if 0 static unsigned int SMC37c669_get_device_config( unsigned int func, int *port, int *irq, int *drq ); #endif static void SMC37c669_config_mode( unsigned int enable ); static unsigned char SMC37c669_read_config( unsigned char index ); static void SMC37c669_write_config( unsigned char index, unsigned char data ); static void SMC37c669_init_local_config( void ); static struct DEVICE_CONFIG *SMC37c669_get_config( unsigned int func ); static int SMC37c669_xlate_irq( int irq ); static int SMC37c669_xlate_drq( int drq ); static __cacheline_aligned DEFINE_SPINLOCK(smc_lock); /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function detects the presence of an SMC37c669 Super I/O ** controller. ** ** FORMAL PARAMETERS: ** ** None ** ** RETURN VALUE: ** ** Returns a pointer to the device if found, otherwise, ** the NULL pointer is returned. ** ** SIDE EFFECTS: ** ** None ** **-- */ SMC37c669_CONFIG_REGS * __init SMC37c669_detect( int index ) { int i; SMC37c669_DEVICE_ID_REGISTER id; for ( i = 0; SMC37c669_Addresses[i] != 0; i++ ) { /* ** Initialize the device pointer even though we don't yet know if ** the controller is at this address. The support functions access ** the controller through this device pointer so we need to set it ** even when we are looking ... */ SMC37c669 = ( SMC37c669_CONFIG_REGS * )SMC37c669_Addresses[i]; /* ** Enter configuration mode */ SMC37c669_config_mode( TRUE ); /* ** Read the device id */ id.as_uchar = SMC37c669_read_config( SMC37c669_DEVICE_ID_INDEX ); /* ** Exit configuration mode */ SMC37c669_config_mode( FALSE ); /* ** Does the device id match? If so, assume we have found an ** SMC37c669 controller at this address. */ if ( id.by_field.device_id == SMC37c669_DEVICE_ID ) { /* ** Initialize the IRQ and DRQ translation tables. 
*/ SMC37c669_irq_table = SMC37c669_irq_tables[ index ]; SMC37c669_drq_table = SMC37c669_default_drq_table; /* ** erfix ** ** If the platform can't use the IRQ and DRQ defaults set up in this ** file, it should call a platform-specific external routine at this ** point to reset the IRQ and DRQ translation table pointers to point ** at the appropriate tables for the platform. If the defaults are ** acceptable, then the external routine should do nothing. */ /* ** Put the chip back into configuration mode */ SMC37c669_config_mode( TRUE ); /* ** Initialize local storage for configuration information */ SMC37c669_init_local_config( ); /* ** Exit configuration mode */ SMC37c669_config_mode( FALSE ); /* ** SMC37c669 controller found, break out of search loop */ break; } else { /* ** Otherwise, we did not find an SMC37c669 controller at this ** address so set the device pointer to NULL. */ SMC37c669 = NULL; } } return SMC37c669; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function enables an SMC37c669 device function. ** ** FORMAL PARAMETERS: ** ** func: ** Which device function to enable ** ** RETURN VALUE: ** ** Returns TRUE is the device function was enabled, otherwise, FALSE ** ** SIDE EFFECTS: ** ** {@description or none@} ** ** DESIGN: ** ** Enabling a device function in the SMC37c669 controller involves ** setting all of its mappings (port, irq, drq ...). A local ** "shadow" copy of the device configuration is kept so we can ** just set each mapping to what the local copy says. ** ** This function ALWAYS updates the local shadow configuration of ** the device function being enabled, even if the device is always ** enabled. To avoid replication of code, functions such as ** configure_device set up the local copy and then call this ** function to the update the real device. 
** **-- */ unsigned int __init SMC37c669_enable_device ( unsigned int func ) { unsigned int ret_val = FALSE; /* ** Put the device into configuration mode */ SMC37c669_config_mode( TRUE ); switch ( func ) { case SERIAL_0: { SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_SERIAL_IRQ_REGISTER irq; /* ** Enable the serial 1 IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); irq.by_field.uart1_irq = SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) ); SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); /* ** Enable the serial 1 port base address mapping */ base_addr.as_uchar = 0; base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3; SMC37c669_write_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case SERIAL_1: { SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_SERIAL_IRQ_REGISTER irq; /* ** Enable the serial 2 IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); irq.by_field.uart2_irq = SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) ); SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); /* ** Enable the serial 2 port base address mapping */ base_addr.as_uchar = 0; base_addr.by_field.addr9_3 = local_config[ func ].port1 >> 3; SMC37c669_write_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case PARALLEL_0: { SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; /* ** Enable the parallel port DMA channel mapping */ drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); drq.by_field.ppt_drq = SMC37c669_RAW_DEVICE_DRQ( SMC37c669_xlate_drq( local_config[ func ].drq ) ); SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar ); /* ** Enable the parallel port IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); irq.by_field.ppt_irq = SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) ); SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar ); /* ** Enable the parallel port base address mapping */ base_addr.as_uchar = 0; base_addr.by_field.addr9_2 = local_config[ func ].port1 >> 2; SMC37c669_write_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case FLOPPY_0: { SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr; SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; /* ** Enable the floppy controller DMA channel mapping */ drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); drq.by_field.fdc_drq = SMC37c669_RAW_DEVICE_DRQ( SMC37c669_xlate_drq( local_config[ func ].drq ) ); SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar ); /* ** Enable the floppy controller IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); irq.by_field.fdc_irq = SMC37c669_RAW_DEVICE_IRQ( SMC37c669_xlate_irq( local_config[ func ].irq ) ); SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar ); /* ** Enable the floppy controller base address mapping */ base_addr.as_uchar = 0; base_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4; SMC37c669_write_config( SMC37c669_FDC_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case IDE_0: { SMC37c669_IDE_ADDRESS_REGISTER ide_addr; /* ** Enable the IDE alternate 
status base address mapping */ ide_addr.as_uchar = 0; ide_addr.by_field.addr9_4 = local_config[ func ].port2 >> 4; SMC37c669_write_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, ide_addr.as_uchar ); /* ** Enable the IDE controller base address mapping */ ide_addr.as_uchar = 0; ide_addr.by_field.addr9_4 = local_config[ func ].port1 >> 4; SMC37c669_write_config( SMC37c669_IDE_BASE_ADDRESS_INDEX, ide_addr.as_uchar ); ret_val = TRUE; break; } } /* ** Exit configuration mode and return */ SMC37c669_config_mode( FALSE ); return ret_val; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function disables a device function within the ** SMC37c669 Super I/O controller. ** ** FORMAL PARAMETERS: ** ** func: ** Which function to disable ** ** RETURN VALUE: ** ** Return TRUE if the device function was disabled, otherwise, FALSE ** ** SIDE EFFECTS: ** ** {@description or none@} ** ** DESIGN: ** ** Disabling a function in the SMC37c669 device involves ** disabling all the function's mappings (port, irq, drq ...). ** A shadow copy of the device configuration is maintained ** in local storage so we won't worry aboving saving the ** current configuration information. ** **-- */ unsigned int __init SMC37c669_disable_device ( unsigned int func ) { unsigned int ret_val = FALSE; /* ** Put the device into configuration mode */ SMC37c669_config_mode( TRUE ); switch ( func ) { case SERIAL_0: { SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_SERIAL_IRQ_REGISTER irq; /* ** Disable the serial 1 IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); irq.by_field.uart1_irq = 0; SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); /* ** Disable the serial 1 port base address mapping */ base_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case SERIAL_1: { SMC37c669_SERIAL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_SERIAL_IRQ_REGISTER irq; /* ** Disable the serial 2 IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); irq.by_field.uart2_irq = 0; SMC37c669_write_config( SMC37c669_SERIAL_IRQ_INDEX, irq.as_uchar ); /* ** Disable the serial 2 port base address mapping */ base_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case PARALLEL_0: { SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER base_addr; SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; /* ** Disable the parallel port DMA channel mapping */ drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); drq.by_field.ppt_drq = 0; SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar ); /* ** Disable the parallel port IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); irq.by_field.ppt_irq = 0; SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar ); /* ** Disable the parallel port base address mapping */ base_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case FLOPPY_0: { SMC37c669_FDC_BASE_ADDRESS_REGISTER base_addr; SMC37c669_PARALLEL_FDC_IRQ_REGISTER irq; SMC37c669_PARALLEL_FDC_DRQ_REGISTER drq; /* ** Disable the floppy controller DMA channel mapping */ drq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); drq.by_field.fdc_drq = 0; SMC37c669_write_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX, drq.as_uchar ); /* ** Disable 
the floppy controller IRQ mapping */ irq.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); irq.by_field.fdc_irq = 0; SMC37c669_write_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX, irq.as_uchar ); /* ** Disable the floppy controller base address mapping */ base_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_FDC_BASE_ADDRESS_INDEX, base_addr.as_uchar ); ret_val = TRUE; break; } case IDE_0: { SMC37c669_IDE_ADDRESS_REGISTER ide_addr; /* ** Disable the IDE alternate status base address mapping */ ide_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX, ide_addr.as_uchar ); /* ** Disable the IDE controller base address mapping */ ide_addr.as_uchar = 0; SMC37c669_write_config( SMC37c669_IDE_BASE_ADDRESS_INDEX, ide_addr.as_uchar ); ret_val = TRUE; break; } } /* ** Exit configuration mode and return */ SMC37c669_config_mode( FALSE ); return ret_val; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function configures a device function within the ** SMC37c669 Super I/O controller. ** ** FORMAL PARAMETERS: ** ** func: ** Which device function ** ** port: ** I/O port for the function to use ** ** irq: ** IRQ for the device function to use ** ** drq: ** DMA channel for the device function to use ** ** RETURN VALUE: ** ** Returns TRUE if the device function was configured, ** otherwise, FALSE. ** ** SIDE EFFECTS: ** ** {@description or none@} ** ** DESIGN: ** ** If this function returns TRUE, the local shadow copy of ** the configuration is also updated. If the device function ** is currently disabled, only the local shadow copy is ** updated and the actual device function will be updated ** if/when it is enabled. ** **-- */ unsigned int __init SMC37c669_configure_device ( unsigned int func, int port, int irq, int drq ) { struct DEVICE_CONFIG *cp; /* ** Check for a valid configuration */ if ( ( cp = SMC37c669_get_config ( func ) ) != NULL ) { /* ** Configuration is valid, update the local shadow copy */ if ( ( drq & ~0xFF ) == 0 ) { cp->drq = drq; } if ( ( irq & ~0xFF ) == 0 ) { cp->irq = irq; } if ( ( port & ~0xFFFF ) == 0 ) { cp->port1 = port; } /* ** If the device function is enabled, update the actual ** device configuration. */ if ( SMC37c669_is_device_enabled( func ) ) { SMC37c669_enable_device( func ); } return TRUE; } return FALSE; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function determines whether a device function ** within the SMC37c669 controller is enabled. ** ** FORMAL PARAMETERS: ** ** func: ** Which device function ** ** RETURN VALUE: ** ** Returns TRUE if the device function is enabled, otherwise, FALSE ** ** SIDE EFFECTS: ** ** {@description or none@} ** ** DESIGN: ** ** To check whether a device is enabled we will only look at ** the port base address mapping. According to the SMC37c669 ** specification, all of the port base address mappings are ** disabled if the addr<9:8> (bits <7:6> of the register) are ** zero. 
** **-- */ static unsigned int __init SMC37c669_is_device_enabled ( unsigned int func ) { unsigned char base_addr = 0; unsigned int dev_ok = FALSE; unsigned int ret_val = FALSE; /* ** Enter configuration mode */ SMC37c669_config_mode( TRUE ); switch ( func ) { case SERIAL_0: base_addr = SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX ); dev_ok = TRUE; break; case SERIAL_1: base_addr = SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX ); dev_ok = TRUE; break; case PARALLEL_0: base_addr = SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX ); dev_ok = TRUE; break; case FLOPPY_0: base_addr = SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX ); dev_ok = TRUE; break; case IDE_0: base_addr = SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX ); dev_ok = TRUE; break; } /* ** If we have a valid device, check base_addr<7:6> to see if the ** device is enabled (mapped). */ if ( ( dev_ok ) && ( ( base_addr & 0xC0 ) != 0 ) ) { /* ** The mapping is not disabled, so assume that the function is ** enabled. */ ret_val = TRUE; } /* ** Exit configuration mode */ SMC37c669_config_mode( FALSE ); return ret_val; } #if 0 /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function retrieves the configuration information of a ** device function within the SMC37c699 Super I/O controller. ** ** FORMAL PARAMETERS: ** ** func: ** Which device function ** ** port: ** I/O port returned ** ** irq: ** IRQ returned ** ** drq: ** DMA channel returned ** ** RETURN VALUE: ** ** Returns TRUE if the device configuration was successfully ** retrieved, otherwise, FALSE. ** ** SIDE EFFECTS: ** ** The data pointed to by the port, irq, and drq parameters ** my be modified even if the configuration is not successfully ** retrieved. ** ** DESIGN: ** ** The device configuration is fetched from the local shadow ** copy. Any unused parameters will be set to -1. Any ** parameter which is not desired can specify the NULL ** pointer. ** **-- */ static unsigned int __init SMC37c669_get_device_config ( unsigned int func, int *port, int *irq, int *drq ) { struct DEVICE_CONFIG *cp; unsigned int ret_val = FALSE; /* ** Check for a valid device configuration */ if ( ( cp = SMC37c669_get_config( func ) ) != NULL ) { if ( drq != NULL ) { *drq = cp->drq; ret_val = TRUE; } if ( irq != NULL ) { *irq = cp->irq; ret_val = TRUE; } if ( port != NULL ) { *port = cp->port1; ret_val = TRUE; } } return ret_val; } #endif /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function displays the current state of the SMC37c699 ** Super I/O controller's device functions. 
** ** FORMAL PARAMETERS: ** ** None ** ** RETURN VALUE: ** ** None ** ** SIDE EFFECTS: ** ** None ** **-- */ void __init SMC37c669_display_device_info ( void ) { if ( SMC37c669_is_device_enabled( SERIAL_0 ) ) { printk( " Serial 0: Enabled [ Port 0x%x, IRQ %d ]\n", local_config[ SERIAL_0 ].port1, local_config[ SERIAL_0 ].irq ); } else { printk( " Serial 0: Disabled\n" ); } if ( SMC37c669_is_device_enabled( SERIAL_1 ) ) { printk( " Serial 1: Enabled [ Port 0x%x, IRQ %d ]\n", local_config[ SERIAL_1 ].port1, local_config[ SERIAL_1 ].irq ); } else { printk( " Serial 1: Disabled\n" ); } if ( SMC37c669_is_device_enabled( PARALLEL_0 ) ) { printk( " Parallel: Enabled [ Port 0x%x, IRQ %d/%d ]\n", local_config[ PARALLEL_0 ].port1, local_config[ PARALLEL_0 ].irq, local_config[ PARALLEL_0 ].drq ); } else { printk( " Parallel: Disabled\n" ); } if ( SMC37c669_is_device_enabled( FLOPPY_0 ) ) { printk( " Floppy Ctrl: Enabled [ Port 0x%x, IRQ %d/%d ]\n", local_config[ FLOPPY_0 ].port1, local_config[ FLOPPY_0 ].irq, local_config[ FLOPPY_0 ].drq ); } else { printk( " Floppy Ctrl: Disabled\n" ); } if ( SMC37c669_is_device_enabled( IDE_0 ) ) { printk( " IDE 0: Enabled [ Port 0x%x, IRQ %d ]\n", local_config[ IDE_0 ].port1, local_config[ IDE_0 ].irq ); } else { printk( " IDE 0: Disabled\n" ); } } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function puts the SMC37c669 Super I/O controller into, ** and takes it out of, configuration mode. ** ** FORMAL PARAMETERS: ** ** enable: ** TRUE to enter configuration mode, FALSE to exit. ** ** RETURN VALUE: ** ** None ** ** SIDE EFFECTS: ** ** The SMC37c669 controller may be left in configuration mode. ** **-- */ static void __init SMC37c669_config_mode( unsigned int enable ) { if ( enable ) { /* ** To enter configuration mode, two writes in succession to the index ** port are required. If a write to another address or port occurs ** between these two writes, the chip does not enter configuration ** mode. Therefore, a spinlock is placed around the two writes to ** guarantee that they complete uninterrupted. */ spin_lock(&smc_lock); wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY ); spin_unlock(&smc_lock); } else { wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY ); } } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function reads an SMC37c669 Super I/O controller ** configuration register. This function assumes that the ** device is already in configuration mode. ** ** FORMAL PARAMETERS: ** ** index: ** Index value of configuration register to read ** ** RETURN VALUE: ** ** Data read from configuration register ** ** SIDE EFFECTS: ** ** None ** **-- */ static unsigned char __init SMC37c669_read_config( unsigned char index ) { wb(&SMC37c669->index_port, index); return rb(&SMC37c669->data_port); } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function writes an SMC37c669 Super I/O controller ** configuration register. This function assumes that the ** device is already in configuration mode. ** ** FORMAL PARAMETERS: ** ** index: ** Index of configuration register to write ** ** data: ** Data to be written ** ** RETURN VALUE: ** ** None ** ** SIDE EFFECTS: ** ** None ** **-- */ static void __init SMC37c669_write_config( unsigned char index, unsigned char data ) { wb( &SMC37c669->index_port, index ); wb( &SMC37c669->data_port, data ); } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function initializes the local device ** configuration storage. 
This function assumes ** that the device is already in configuration ** mode. ** ** FORMAL PARAMETERS: ** ** None ** ** RETURN VALUE: ** ** None ** ** SIDE EFFECTS: ** ** Local storage for device configuration information ** is initialized. ** **-- */ static void __init SMC37c669_init_local_config ( void ) { SMC37c669_SERIAL_BASE_ADDRESS_REGISTER uart_base; SMC37c669_SERIAL_IRQ_REGISTER uart_irqs; SMC37c669_PARALLEL_BASE_ADDRESS_REGISTER ppt_base; SMC37c669_PARALLEL_FDC_IRQ_REGISTER ppt_fdc_irqs; SMC37c669_PARALLEL_FDC_DRQ_REGISTER ppt_fdc_drqs; SMC37c669_FDC_BASE_ADDRESS_REGISTER fdc_base; SMC37c669_IDE_ADDRESS_REGISTER ide_base; SMC37c669_IDE_ADDRESS_REGISTER ide_alt; /* ** Get serial port 1 base address */ uart_base.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL0_BASE_ADDRESS_INDEX ); /* ** Get IRQs for serial ports 1 & 2 */ uart_irqs.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL_IRQ_INDEX ); /* ** Store local configuration information for serial port 1 */ local_config[SERIAL_0].port1 = uart_base.by_field.addr9_3 << 3; local_config[SERIAL_0].irq = SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart1_irq ) ); /* ** Get serial port 2 base address */ uart_base.as_uchar = SMC37c669_read_config( SMC37c669_SERIAL1_BASE_ADDRESS_INDEX ); /* ** Store local configuration information for serial port 2 */ local_config[SERIAL_1].port1 = uart_base.by_field.addr9_3 << 3; local_config[SERIAL_1].irq = SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( uart_irqs.by_field.uart2_irq ) ); /* ** Get parallel port base address */ ppt_base.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL0_BASE_ADDRESS_INDEX ); /* ** Get IRQs for parallel port and floppy controller */ ppt_fdc_irqs.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_IRQ_INDEX ); /* ** Get DRQs for parallel port and floppy controller */ ppt_fdc_drqs.as_uchar = SMC37c669_read_config( SMC37c669_PARALLEL_FDC_DRQ_INDEX ); /* ** Store local configuration information for parallel port */ local_config[PARALLEL_0].port1 = ppt_base.by_field.addr9_2 << 2; local_config[PARALLEL_0].irq = SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.ppt_irq ) ); local_config[PARALLEL_0].drq = SMC37c669_xlate_drq( SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.ppt_drq ) ); /* ** Get floppy controller base address */ fdc_base.as_uchar = SMC37c669_read_config( SMC37c669_FDC_BASE_ADDRESS_INDEX ); /* ** Store local configuration information for floppy controller */ local_config[FLOPPY_0].port1 = fdc_base.by_field.addr9_4 << 4; local_config[FLOPPY_0].irq = SMC37c669_xlate_irq( SMC37c669_DEVICE_IRQ( ppt_fdc_irqs.by_field.fdc_irq ) ); local_config[FLOPPY_0].drq = SMC37c669_xlate_drq( SMC37c669_DEVICE_DRQ( ppt_fdc_drqs.by_field.fdc_drq ) ); /* ** Get IDE controller base address */ ide_base.as_uchar = SMC37c669_read_config( SMC37c669_IDE_BASE_ADDRESS_INDEX ); /* ** Get IDE alternate status base address */ ide_alt.as_uchar = SMC37c669_read_config( SMC37c669_IDE_ALTERNATE_ADDRESS_INDEX ); /* ** Store local configuration information for IDE controller */ local_config[IDE_0].port1 = ide_base.by_field.addr9_4 << 4; local_config[IDE_0].port2 = ide_alt.by_field.addr9_4 << 4; local_config[IDE_0].irq = 14; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function returns a pointer to the local shadow ** configuration of the requested device function. ** ** FORMAL PARAMETERS: ** ** func: ** Which device function ** ** RETURN VALUE: ** ** Returns a pointer to the DEVICE_CONFIG structure for the ** requested function, otherwise, NULL. 
** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static struct DEVICE_CONFIG * __init SMC37c669_get_config( unsigned int func ) { struct DEVICE_CONFIG *cp = NULL; switch ( func ) { case SERIAL_0: cp = &local_config[ SERIAL_0 ]; break; case SERIAL_1: cp = &local_config[ SERIAL_1 ]; break; case PARALLEL_0: cp = &local_config[ PARALLEL_0 ]; break; case FLOPPY_0: cp = &local_config[ FLOPPY_0 ]; break; case IDE_0: cp = &local_config[ IDE_0 ]; break; } return cp; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function translates IRQs back and forth between ISA ** IRQs and SMC37c669 device IRQs. ** ** FORMAL PARAMETERS: ** ** irq: ** The IRQ to translate ** ** RETURN VALUE: ** ** Returns the translated IRQ, otherwise, returns -1. ** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static int __init SMC37c669_xlate_irq ( int irq ) { int i, translated_irq = -1; if ( SMC37c669_IS_DEVICE_IRQ( irq ) ) { /* ** We are translating a device IRQ to an ISA IRQ */ for ( i = 0; ( SMC37c669_irq_table[i].device_irq != -1 ) || ( SMC37c669_irq_table[i].isa_irq != -1 ); i++ ) { if ( irq == SMC37c669_irq_table[i].device_irq ) { translated_irq = SMC37c669_irq_table[i].isa_irq; break; } } } else { /* ** We are translating an ISA IRQ to a device IRQ */ for ( i = 0; ( SMC37c669_irq_table[i].isa_irq != -1 ) || ( SMC37c669_irq_table[i].device_irq != -1 ); i++ ) { if ( irq == SMC37c669_irq_table[i].isa_irq ) { translated_irq = SMC37c669_irq_table[i].device_irq; break; } } } return translated_irq; } /* **++ ** FUNCTIONAL DESCRIPTION: ** ** This function translates DMA channels back and forth between ** ISA DMA channels and SMC37c669 device DMA channels. ** ** FORMAL PARAMETERS: ** ** drq: ** The DMA channel to translate ** ** RETURN VALUE: ** ** Returns the translated DMA channel, otherwise, returns -1 ** ** SIDE EFFECTS: ** ** {@description or none@} ** **-- */ static int __init SMC37c669_xlate_drq ( int drq ) { int i, translated_drq = -1; if ( SMC37c669_IS_DEVICE_DRQ( drq ) ) { /* ** We are translating a device DMA channel to an ISA DMA channel */ for ( i = 0; ( SMC37c669_drq_table[i].device_drq != -1 ) || ( SMC37c669_drq_table[i].isa_drq != -1 ); i++ ) { if ( drq == SMC37c669_drq_table[i].device_drq ) { translated_drq = SMC37c669_drq_table[i].isa_drq; break; } } } else { /* ** We are translating an ISA DMA channel to a device DMA channel */ for ( i = 0; ( SMC37c669_drq_table[i].isa_drq != -1 ) || ( SMC37c669_drq_table[i].device_drq != -1 ); i++ ) { if ( drq == SMC37c669_drq_table[i].isa_drq ) { translated_drq = SMC37c669_drq_table[i].device_drq; break; } } } return translated_drq; } #if 0 int __init smcc669_init ( void ) { struct INODE *ip; allocinode( smc_ddb.name, 1, &ip ); ip->dva = &smc_ddb; ip->attr = ATTR$M_WRITE | ATTR$M_READ; ip->len[0] = 0x30; ip->misc = 0; INODE_UNLOCK( ip ); return msg_success; } int __init smcc669_open( struct FILE *fp, char *info, char *next, char *mode ) { struct INODE *ip; /* ** Allow multiple readers but only one writer. 
ip->misc keeps track ** of the number of writers */ ip = fp->ip; INODE_LOCK( ip ); if ( fp->mode & ATTR$M_WRITE ) { if ( ip->misc ) { INODE_UNLOCK( ip ); return msg_failure; /* too many writers */ } ip->misc++; } /* ** Treat the information field as a byte offset */ *fp->offset = xtoi( info ); INODE_UNLOCK( ip ); return msg_success; } int __init smcc669_close( struct FILE *fp ) { struct INODE *ip; ip = fp->ip; if ( fp->mode & ATTR$M_WRITE ) { INODE_LOCK( ip ); ip->misc--; INODE_UNLOCK( ip ); } return msg_success; } int __init smcc669_read( struct FILE *fp, int size, int number, unsigned char *buf ) { int i; int length; int nbytes; struct INODE *ip; /* ** Always access a byte at a time */ ip = fp->ip; length = size * number; nbytes = 0; SMC37c669_config_mode( TRUE ); for ( i = 0; i < length; i++ ) { if ( !inrange( *fp->offset, 0, ip->len[0] ) ) break; *buf++ = SMC37c669_read_config( *fp->offset ); *fp->offset += 1; nbytes++; } SMC37c669_config_mode( FALSE ); return nbytes; } int __init smcc669_write( struct FILE *fp, int size, int number, unsigned char *buf ) { int i; int length; int nbytes; struct INODE *ip; /* ** Always access a byte at a time */ ip = fp->ip; length = size * number; nbytes = 0; SMC37c669_config_mode( TRUE ); for ( i = 0; i < length; i++ ) { if ( !inrange( *fp->offset, 0, ip->len[0] ) ) break; SMC37c669_write_config( *fp->offset, *buf ); *fp->offset += 1; buf++; nbytes++; } SMC37c669_config_mode( FALSE ); return nbytes; } #endif void __init SMC37c669_dump_registers(void) { int i; for (i = 0; i <= 0x29; i++) printk("-- CR%02x : %02x\n", i, SMC37c669_read_config(i)); } /*+ * ============================================================================ * = SMC_init - SMC37c669 Super I/O controller initialization = * ============================================================================ * * OVERVIEW: * * This routine configures and enables device functions on the * SMC37c669 Super I/O controller. * * FORM OF CALL: * * SMC_init( ); * * RETURNS: * * Nothing * * ARGUMENTS: * * None * * SIDE EFFECTS: * * None * */ void __init SMC669_Init ( int index ) { SMC37c669_CONFIG_REGS *SMC_base; unsigned long flags; local_irq_save(flags); if ( ( SMC_base = SMC37c669_detect( index ) ) != NULL ) { #if SMC_DEBUG SMC37c669_config_mode( TRUE ); SMC37c669_dump_registers( ); SMC37c669_config_mode( FALSE ); SMC37c669_display_device_info( ); #endif SMC37c669_disable_device( SERIAL_0 ); SMC37c669_configure_device( SERIAL_0, COM1_BASE, COM1_IRQ, -1 ); SMC37c669_enable_device( SERIAL_0 ); SMC37c669_disable_device( SERIAL_1 ); SMC37c669_configure_device( SERIAL_1, COM2_BASE, COM2_IRQ, -1 ); SMC37c669_enable_device( SERIAL_1 ); SMC37c669_disable_device( PARALLEL_0 ); SMC37c669_configure_device( PARALLEL_0, PARP_BASE, PARP_IRQ, PARP_DRQ ); SMC37c669_enable_device( PARALLEL_0 ); SMC37c669_disable_device( FLOPPY_0 ); SMC37c669_configure_device( FLOPPY_0, FDC_BASE, FDC_IRQ, FDC_DRQ ); SMC37c669_enable_device( FLOPPY_0 ); /* Wake up sometimes forgotten floppy, especially on DP264. */ outb(0xc, 0x3f2); SMC37c669_disable_device( IDE_0 ); #if SMC_DEBUG SMC37c669_config_mode( TRUE ); SMC37c669_dump_registers( ); SMC37c669_config_mode( FALSE ); SMC37c669_display_device_info( ); #endif local_irq_restore(flags); printk( "SMC37c669 Super I/O Controller found @ 0x%p\n", SMC_base ); } else { local_irq_restore(flags); #if SMC_DEBUG printk( "No SMC37c669 Super I/O Controller found\n" ); #endif } }
linux-master
arch/alpha/kernel/smc37c669.c
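A note on the IRQ/DRQ translation pattern in smc37c669.c above: the driver maps between chip-internal interrupt codes and ISA IRQ numbers by walking small sentinel-terminated tables (SMC37c669_irq_table / SMC37c669_drq_table). The user-space sketch below shows that lookup pattern with invented table entries; the real encodings live elsewhere in the driver and are not reproduced here.

/* Standalone sketch of the table-driven IRQ translation used by
 * SMC37c669_xlate_irq(): walk a sentinel-terminated table and map a
 * device IRQ code to an ISA IRQ.  The table contents below are
 * illustrative placeholders, not the chip's real encoding. */
#include <stdio.h>

struct irq_xlate { int device_irq; int isa_irq; };

static const struct irq_xlate irq_table[] = {
        { 1, 3 },       /* hypothetical: device code 1 -> ISA IRQ 3 */
        { 2, 4 },
        { 3, 5 },
        { -1, -1 },     /* sentinel, as in the kernel driver's tables */
};

static int xlate_device_to_isa(int device_irq)
{
        int i;

        for (i = 0; irq_table[i].device_irq != -1; i++)
                if (irq_table[i].device_irq == device_irq)
                        return irq_table[i].isa_irq;
        return -1;      /* no translation found */
}

int main(void)
{
        printf("device irq 2 -> isa irq %d\n", xlate_device_to_isa(2));
        return 0;
}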
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/gct.c */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <asm/hwrpb.h> #include <asm/gct.h> int gct6_find_nodes(gct6_node *node, gct6_search_struct *search) { gct6_search_struct *wanted; int status = 0; /* First check the magic number. */ if (node->magic != GCT_NODE_MAGIC) { printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n"); return -EINVAL; } /* Check against the search struct. */ for (wanted = search; wanted && (wanted->type | wanted->subtype); wanted++) { if (node->type != wanted->type) continue; if (node->subtype != wanted->subtype) continue; /* Found it -- call out. */ if (wanted->callout) wanted->callout(node); } /* Now walk the tree, siblings first. */ if (node->next) status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search); /* Then the children. */ if (node->child) status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search); return status; }
linux-master
arch/alpha/kernel/gct.c
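gct6_find_nodes() above walks the configuration tree recursively: check the node against the search list, invoke the callout on a match, then recurse over the sibling chain and the child chain. A simplified stand-alone sketch of that traversal with a cut-down node type:

/* Minimal sketch of the sibling/child tree walk in gct6_find_nodes().
 * The node layout here is a stand-in for the real gct6_node. */
#include <stdio.h>
#include <stddef.h>

struct node {
        int type, subtype;
        struct node *next;      /* sibling */
        struct node *child;
};

static void walk(struct node *n, int type, int subtype,
                 void (*callout)(struct node *))
{
        if (!n)
                return;
        if (n->type == type && n->subtype == subtype)
                callout(n);
        walk(n->next, type, subtype, callout);  /* siblings first */
        walk(n->child, type, subtype, callout); /* then children */
}

static void hit(struct node *n)
{
        printf("matched node %p\n", (void *)n);
}

int main(void)
{
        struct node leaf = { 2, 7, NULL, NULL };
        struct node root = { 1, 0, NULL, &leaf };

        walk(&root, 2, 7, hit);
        return 0;
}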
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_wildfire.c * * Wildfire support. * * Copyright (C) 2000 Andrea Arcangeli <[email protected]> SuSE */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_wildfire.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)]; DEFINE_SPINLOCK(wildfire_irq_lock); static int doing_init_irq_hw = 0; static void wildfire_update_irq_hw(unsigned int irq) { int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1); int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1); wildfire_pca *pca; volatile unsigned long * enable0; if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) { if (!doing_init_irq_hw) { printk(KERN_ERR "wildfire_update_irq_hw:" " got irq %d for non-existent PCA %d" " on QBB %d.\n", irq, pcano, qbbno); } return; } pca = WILDFIRE_pca(qbbno, pcano); enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? */ *enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano]; mb(); *enable0; } static void __init wildfire_init_irq_hw(void) { #if 0 register wildfire_pca * pca = WILDFIRE_pca(0, 0); volatile unsigned long * enable0, * enable1, * enable2, *enable3; volatile unsigned long * target0, * target1, * target2, *target3; enable0 = (unsigned long *) &pca->pca_int[0].enable; enable1 = (unsigned long *) &pca->pca_int[1].enable; enable2 = (unsigned long *) &pca->pca_int[2].enable; enable3 = (unsigned long *) &pca->pca_int[3].enable; target0 = (unsigned long *) &pca->pca_int[0].target; target1 = (unsigned long *) &pca->pca_int[1].target; target2 = (unsigned long *) &pca->pca_int[2].target; target3 = (unsigned long *) &pca->pca_int[3].target; *enable0 = *enable1 = *enable2 = *enable3 = 0; *target0 = (1UL<<8) | WILDFIRE_QBB(0); *target1 = *target2 = *target3 = 0; mb(); *enable0; *enable1; *enable2; *enable3; *target0; *target1; *target2; *target3; #else int i; doing_init_irq_hw = 1; /* Need to update only once for every possible PCA. 
*/ for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA) wildfire_update_irq_hw(i); doing_init_irq_hw = 0; #endif } static void wildfire_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; if (irq < 16) i8259a_enable_irq(d); spin_lock(&wildfire_irq_lock); set_bit(irq, &cached_irq_mask); wildfire_update_irq_hw(irq); spin_unlock(&wildfire_irq_lock); } static void wildfire_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; if (irq < 16) i8259a_disable_irq(d); spin_lock(&wildfire_irq_lock); clear_bit(irq, &cached_irq_mask); wildfire_update_irq_hw(irq); spin_unlock(&wildfire_irq_lock); } static void wildfire_mask_and_ack_irq(struct irq_data *d) { unsigned int irq = d->irq; if (irq < 16) i8259a_mask_and_ack_irq(d); spin_lock(&wildfire_irq_lock); clear_bit(irq, &cached_irq_mask); wildfire_update_irq_hw(irq); spin_unlock(&wildfire_irq_lock); } static struct irq_chip wildfire_irq_type = { .name = "WILDFIRE", .irq_unmask = wildfire_enable_irq, .irq_mask = wildfire_disable_irq, .irq_mask_ack = wildfire_mask_and_ack_irq, }; static void __init wildfire_init_irq_per_pca(int qbbno, int pcano) { int i, irq_bias; irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) + pcano * WILDFIRE_IRQ_PER_PCA; #if 0 unsigned long io_bias; /* Only need the following for first PCI bus per PCA. */ io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; outb(0, DMA1_RESET_REG + io_bias); outb(0, DMA2_RESET_REG + io_bias); outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); outb(0, DMA2_MASK_REG + io_bias); #endif #if 0 /* ??? Not sure how to do this, yet... */ init_i8259a_irqs(); /* ??? */ #endif for (i = 0; i < 16; ++i) { if (i == 2) continue; irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, handle_level_irq); irq_set_status_flags(i + irq_bias, IRQ_LEVEL); } irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type, handle_level_irq); irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); for (i = 40; i < 64; ++i) { irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type, handle_level_irq); irq_set_status_flags(i + irq_bias, IRQ_LEVEL); } if (request_irq(32 + irq_bias, no_action, 0, "isa_enable", NULL)) pr_err("Failed to register isa_enable interrupt\n"); } static void __init wildfire_init_irq(void) { int qbbno, pcano; #if 1 wildfire_init_irq_hw(); init_i8259a_irqs(); #endif for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) { if (WILDFIRE_QBB_EXISTS(qbbno)) { for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) { if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) { wildfire_init_irq_per_pca(qbbno, pcano); } } } } } static void wildfire_device_interrupt(unsigned long vector) { int irq; irq = (vector - 0x800) >> 4; /* * bits 10-8: source QBB ID * bits 7-6: PCA * bits 5-0: irq in PCA */ handle_irq(irq); return; } /* * PCI Fixup configuration. 
* * Summary per PCA (2 PCI or HIPPI buses): * * Bit Meaning * 0-15 ISA * *32 ISA summary *33 SMI *34 NMI *36 builtin QLogic SCSI (or slot 0 if no IO module) *40 Interrupt Line A from slot 2 PCI0 *41 Interrupt Line B from slot 2 PCI0 *42 Interrupt Line C from slot 2 PCI0 *43 Interrupt Line D from slot 2 PCI0 *44 Interrupt Line A from slot 3 PCI0 *45 Interrupt Line B from slot 3 PCI0 *46 Interrupt Line C from slot 3 PCI0 *47 Interrupt Line D from slot 3 PCI0 * *48 Interrupt Line A from slot 4 PCI1 *49 Interrupt Line B from slot 4 PCI1 *50 Interrupt Line C from slot 4 PCI1 *51 Interrupt Line D from slot 4 PCI1 *52 Interrupt Line A from slot 5 PCI1 *53 Interrupt Line B from slot 5 PCI1 *54 Interrupt Line C from slot 5 PCI1 *55 Interrupt Line D from slot 5 PCI1 *56 Interrupt Line A from slot 6 PCI1 *57 Interrupt Line B from slot 6 PCI1 *58 Interrupt Line C from slot 6 PCI1 *50 Interrupt Line D from slot 6 PCI1 *60 Interrupt Line A from slot 7 PCI1 *61 Interrupt Line B from slot 7 PCI1 *62 Interrupt Line C from slot 7 PCI1 *63 Interrupt Line D from slot 7 PCI1 * * * IdSel * 0 Cypress Bridge I/O (ISA summary interrupt) * 1 64 bit PCI 0 option slot 1 (SCSI QLogic builtin) * 2 64 bit PCI 0 option slot 2 * 3 64 bit PCI 0 option slot 3 * 4 64 bit PCI 1 option slot 4 * 5 64 bit PCI 1 option slot 5 * 6 64 bit PCI 1 option slot 6 * 7 64 bit PCI 1 option slot 7 */ static int wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[8][5] = { /*INT INTA INTB INTC INTD */ { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */ { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */ { 40, 40, 40+1, 40+2, 40+3}, /* IdSel 2 PCI 0 slot 2 */ { 44, 44, 44+1, 44+2, 44+3}, /* IdSel 3 PCI 0 slot 3 */ { 48, 48, 48+1, 48+2, 48+3}, /* IdSel 4 PCI 1 slot 4 */ { 52, 52, 52+1, 52+2, 52+3}, /* IdSel 5 PCI 1 slot 5 */ { 56, 56, 56+1, 56+2, 56+3}, /* IdSel 6 PCI 1 slot 6 */ { 60, 60, 60+1, 60+2, 60+3}, /* IdSel 7 PCI 1 slot 7 */ }; long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5; struct pci_controller *hose = dev->sysdata; int irq = COMMON_TABLE_LOOKUP; if (irq > 0) { int qbbno = hose->index >> 3; int pcano = (hose->index >> 1) & 3; irq += (qbbno << 8) + (pcano << 6); } return irq; } /* * The System Vectors */ struct alpha_machine_vector wildfire_mv __initmv = { .vector_name = "WILDFIRE", DO_EV6_MMU, DO_DEFAULT_RTC, DO_WILDFIRE_IO, .machine_check = wildfire_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .nr_irqs = WILDFIRE_NR_IRQS, .device_interrupt = wildfire_device_interrupt, .init_arch = wildfire_init_arch, .init_irq = wildfire_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = wildfire_kill_arch, .pci_map_irq = wildfire_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(wildfire)
linux-master
arch/alpha/kernel/sys_wildfire.c
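sys_wildfire.c decodes the SRM interrupt vector into a per-PCA interrupt plus QBB and PCA identifiers, per the bit layout in the wildfire_device_interrupt() comment. The sketch below unpacks those fields for a made-up vector, assuming 8 QBBs and 4 PCAs per QBB (matching the 3-bit and 2-bit fields in that comment):

/* Sketch of the Wildfire vector decode: the SRM vector 0x800 + (irq << 4)
 * packs QBB, PCA and per-PCA interrupt number into the irq value.
 * The sample vector is invented. */
#include <stdio.h>

int main(void)
{
        unsigned long vector = 0x800 + (0x152UL << 4);  /* hypothetical */
        unsigned int irq = (vector - 0x800) >> 4;
        unsigned int qbb = (irq >> 8) & 0x7;    /* bits 10-8: source QBB */
        unsigned int pca = (irq >> 6) & 0x3;    /* bits 7-6: PCA within QBB */
        unsigned int pin = irq & 0x3f;          /* bits 5-0: irq in PCA */

        printf("irq %u = QBB %u, PCA %u, pin %u\n", irq, qbb, pca, pin);
        return 0;
}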
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_rx164.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the RX164 (PCA56+POLARIS). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_polaris.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static unsigned long cached_irq_mask; static inline void rx164_update_irq_hw(unsigned long mask) { volatile unsigned int *irq_mask; irq_mask = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x74); *irq_mask = mask; mb(); *irq_mask; } static inline void rx164_enable_irq(struct irq_data *d) { rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void rx164_disable_irq(struct irq_data *d) { rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static struct irq_chip rx164_irq_type = { .name = "RX164", .irq_unmask = rx164_enable_irq, .irq_mask = rx164_disable_irq, .irq_mask_ack = rx164_disable_irq, }; static void rx164_device_interrupt(unsigned long vector) { unsigned long pld; volatile unsigned int *dirr; long i; /* Read the interrupt summary register. On Polaris, this is the DIRR register in PCI config space (offset 0x84). */ dirr = (void *)(POLARIS_DENSE_CONFIG_BASE + 0x84); pld = *dirr; /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 20) { isa_no_iack_sc_device_interrupt(vector); } else { handle_irq(16+i); } } } static void __init rx164_init_irq(void) { long i; rx164_update_irq_hw(0); for (i = 16; i < 40; ++i) { irq_set_chip_and_handler(i, &rx164_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); common_init_isa_dma(); if (request_irq(16 + 20, no_action, 0, "isa-cascade", NULL)) pr_err("Failed to register isa-cascade interrupt\n"); } /* * The RX164 changed its interrupt routing between pass1 and pass2... 
* * PASS1: * * Slot IDSEL INTA INTB INTC INTD * 0 6 5 10 15 20 * 1 7 4 9 14 19 * 2 5 3 8 13 18 * 3 9 2 7 12 17 * 4 10 1 6 11 16 * * PASS2: * Slot IDSEL INTA INTB INTC INTD * 0 5 1 7 12 17 * 1 6 2 8 13 18 * 2 8 3 9 14 19 * 3 9 4 10 15 20 * 4 10 5 11 16 6 * */ /* * IdSel * 5 32 bit PCI option slot 0 * 6 64 bit PCI option slot 1 * 7 PCI-ISA bridge * 7 64 bit PCI option slot 2 * 9 32 bit PCI option slot 3 * 10 PCI-PCI bridge * */ static int rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if 0 static char irq_tab_pass1[6][5] __initdata = { /*INT INTA INTB INTC INTD */ { 16+3, 16+3, 16+8, 16+13, 16+18}, /* IdSel 5, slot 2 */ { 16+5, 16+5, 16+10, 16+15, 16+20}, /* IdSel 6, slot 0 */ { 16+4, 16+4, 16+9, 16+14, 16+19}, /* IdSel 7, slot 1 */ { -1, -1, -1, -1, -1}, /* IdSel 8, PCI/ISA bridge */ { 16+2, 16+2, 16+7, 16+12, 16+17}, /* IdSel 9, slot 3 */ { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */ }; #else static char irq_tab[6][5] = { /*INT INTA INTB INTC INTD */ { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */ { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */ { -1, -1, -1, -1, -1}, /* IdSel 7, PCI/ISA bridge */ { 16+2, 16+2, 16+8, 16+13, 16+18}, /* IdSel 8, slot 2 */ { 16+3, 16+3, 16+9, 16+14, 16+19}, /* IdSel 9, slot 3 */ { 16+4, 16+4, 16+10, 16+15, 16+5}, /* IdSel 10, PCI-PCI */ }; #endif const long min_idsel = 5, max_idsel = 10, irqs_per_slot = 5; /* JRP - Need to figure out how to distinguish pass1 from pass2, and use the correct table. */ return COMMON_TABLE_LOOKUP; } /* * The System Vector */ struct alpha_machine_vector rx164_mv __initmv = { .vector_name = "RX164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_POLARIS_IO, .machine_check = polaris_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .nr_irqs = 40, .device_interrupt = rx164_device_interrupt, .init_arch = polaris_init_arch, .init_irq = rx164_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = NULL, .pci_map_irq = rx164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(rx164)
linux-master
arch/alpha/kernel/sys_rx164.c
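rx164_device_interrupt() above drains the Polaris interrupt summary register by repeatedly taking the lowest set bit and dispatching it. A standalone sketch of that loop, with a simple lowest-set-bit helper standing in for the kernel's ffz(~x) and an invented pending mask:

/* Sketch of the summary-register dispatch loop: for every bit set in
 * the pending mask, find the lowest set bit, clear it, and dispatch
 * that interrupt. */
#include <stdio.h>

static int lowest_set_bit(unsigned long x)
{
        int i = 0;

        /* caller guarantees x != 0 */
        while (!(x & 1)) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned long pld = 0x10011;    /* hypothetical pending bits */

        while (pld) {
                int i = lowest_set_bit(pld);

                pld &= pld - 1;         /* clear lowest set bit */
                printf("dispatch irq %d\n", 16 + i);
        }
        return 0;
}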
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_eiger.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996, 1999 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * Copyright (C) 1999 Iain Grant * * Code supporting the EIGER (EV6+TSUNAMI). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_tsunami.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note that this interrupt code is identical to TAKARA. */ /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; static inline void eiger_update_irq_hw(unsigned long irq, unsigned long mask) { int regaddr; mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30)); regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c); outl(mask & 0xffff0000UL, regaddr); } static inline void eiger_enable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); eiger_update_irq_hw(irq, mask); } static void eiger_disable_irq(struct irq_data *d) { unsigned int irq = d->irq; unsigned long mask; mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); eiger_update_irq_hw(irq, mask); } static struct irq_chip eiger_irq_type = { .name = "EIGER", .irq_unmask = eiger_enable_irq, .irq_mask = eiger_disable_irq, .irq_mask_ack = eiger_disable_irq, }; static void eiger_device_interrupt(unsigned long vector) { unsigned intstatus; /* * The PALcode will have passed us vectors 0x800 or 0x810, * which are fairly arbitrary values and serve only to tell * us whether an interrupt has come in on IRQ0 or IRQ1. If * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's * probably ISA, but PCI interrupts can come through IRQ0 * as well if the interrupt controller isn't in accelerated * mode. * * OTOH, the accelerator thing doesn't seem to be working * overly well, so what we'll do instead is try directly * examining the Master Interrupt Register to see if it's a * PCI interrupt, and if _not_ then we'll pass it on to the * ISA handler. */ intstatus = inw(0x500) & 15; if (intstatus) { /* * This is a PCI interrupt. Check each bit and * despatch an interrupt if it's set. */ if (intstatus & 8) handle_irq(16+3); if (intstatus & 4) handle_irq(16+2); if (intstatus & 2) handle_irq(16+1); if (intstatus & 1) handle_irq(16+0); } else { isa_device_interrupt(vector); } } static void eiger_srm_device_interrupt(unsigned long vector) { int irq = (vector - 0x800) >> 4; handle_irq(irq); } static void __init eiger_init_irq(void) { long i; outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = eiger_srm_device_interrupt; for (i = 16; i < 128; i += 16) eiger_update_irq_hw(i, -1); init_i8259a_irqs(); for (i = 16; i < 128; ++i) { irq_set_chip_and_handler(i, &eiger_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } static int eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { u8 irq_orig; /* The SRM console has already calculated out the IRQ value's for option cards. As this works lets just read in the value already set and change it to a useable value by Linux. 
All the IRQ values generated by the console are greater than 90, so we subtract 80 because it is (90 - allocated ISA IRQ's). */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_orig); return irq_orig - 0x80; } static u8 eiger_swizzle(struct pci_dev *dev, u8 *pinp) { struct pci_controller *hose = dev->sysdata; int slot, pin = *pinp; int bridge_count = 0; /* Find the number of backplane bridges. */ int backplane = inw(0x502) & 0x0f; switch (backplane) { case 0x00: bridge_count = 0; break; /* No bridges */ case 0x01: bridge_count = 1; break; /* 1 */ case 0x03: bridge_count = 2; break; /* 2 */ case 0x07: bridge_count = 3; break; /* 3 */ case 0x0f: bridge_count = 4; break; /* 4 */ } slot = PCI_SLOT(dev->devfn); while (dev->bus->self) { /* Check for built-in bridges on hose 0. */ if (hose->index == 0 && (PCI_SLOT(dev->bus->self->devfn) > 20 - bridge_count)) { slot = PCI_SLOT(dev->devfn); break; } /* Must be a card-based bridge. */ pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; } *pinp = pin; return slot; } /* * The System Vectors */ struct alpha_machine_vector eiger_mv __initmv = { .vector_name = "Eiger", DO_EV6_MMU, DO_DEFAULT_RTC, DO_TSUNAMI_IO, .machine_check = tsunami_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = TSUNAMI_DAC_OFFSET, .nr_irqs = 128, .device_interrupt = eiger_device_interrupt, .init_arch = tsunami_init_arch, .init_irq = eiger_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = tsunami_kill_arch, .pci_map_irq = eiger_map_irq, .pci_swizzle = eiger_swizzle, }; ALIAS_MV(eiger)
linux-master
arch/alpha/kernel/sys_eiger.c
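eiger_swizzle() walks up the chain of PCI-PCI bridges, rotating the interrupt pin at each card-based bridge before using the top-level slot for the interrupt lookup. The sketch below applies the standard pin rotation, ((pin - 1 + slot) % 4) + 1 with pins numbered 1..4, across a couple of hypothetical bridge slots:

/* Sketch of the bridge swizzle: each time a device sits behind a
 * card-based PCI-PCI bridge, its INTx pin is rotated by the bridge's
 * slot number before moving up one level. */
#include <stdio.h>

static int swizzle_pin(int slot, int pin)
{
        return ((pin - 1 + slot) % 4) + 1;
}

int main(void)
{
        int pin = 2;                    /* INTB on the device */
        int slots_up[] = { 3, 1 };      /* hypothetical bridge slots */
        int i;

        for (i = 0; i < 2; i++) {
                pin = swizzle_pin(slots_up[i], pin);
                printf("after bridge in slot %d: INT%c\n",
                       slots_up[i], 'A' + pin - 1);
        }
        return 0;
}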
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/signal.c * * Copyright (C) 1995 Linus Torvalds * * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> #include <linux/syscalls.h> #include <linux/resume_user_mode.h> #include <linux/uaccess.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include "proto.h" #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) asmlinkage void ret_from_sys_call(void); /* * The OSF/1 sigprocmask calling sequence is different from the * C sigprocmask() sequence.. */ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) { sigset_t oldmask; sigset_t mask; unsigned long res; siginitset(&mask, newmask & _BLOCKABLE); res = sigprocmask(how, &mask, &oldmask); if (!res) { force_successful_syscall_return(); res = oldmask.sig[0]; } return res; } SYSCALL_DEFINE3(osf_sigaction, int, sig, const struct osf_sigaction __user *, act, struct osf_sigaction __user *, oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); new_ka.ka_restorer = NULL; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (!access_ok(oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, size_t, sigsetsize, void __user *, restorer) { struct k_sigaction new_ka, old_ka; int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (act) { new_ka.ka_restorer = restorer; if (copy_from_user(&new_ka.sa, act, sizeof(*act))) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) return -EFAULT; } return ret; } /* * Do a signal return; undo the signal stack. */ #if _NSIG_WORDS > 1 # error "Non SA_SIGINFO frame needs rearranging" #endif struct sigframe { struct sigcontext sc; unsigned int retcode[3]; }; struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned int retcode[3]; }; /* If this changes, userland unwinders that Know Things about our signal frame will break. Do not undertake lightly. It also implies an ABI change wrt the size of siginfo_t, which may cause some pain. */ extern char compile_time_assert [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 
1 : -1]; #define INSN_MOV_R30_R16 0x47fe0410 #define INSN_LDI_R0 0x201f0000 #define INSN_CALLSYS 0x00000083 static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { unsigned long usp; struct switch_stack *sw = (struct switch_stack *)regs - 1; long err = __get_user(regs->pc, &sc->sc_pc); current->restart_block.fn = do_no_restart_syscall; current_thread_info()->status |= TS_SAVED_FP | TS_RESTORE_FP; sw->r26 = (unsigned long) ret_from_sys_call; err |= __get_user(regs->r0, sc->sc_regs+0); err |= __get_user(regs->r1, sc->sc_regs+1); err |= __get_user(regs->r2, sc->sc_regs+2); err |= __get_user(regs->r3, sc->sc_regs+3); err |= __get_user(regs->r4, sc->sc_regs+4); err |= __get_user(regs->r5, sc->sc_regs+5); err |= __get_user(regs->r6, sc->sc_regs+6); err |= __get_user(regs->r7, sc->sc_regs+7); err |= __get_user(regs->r8, sc->sc_regs+8); err |= __get_user(sw->r9, sc->sc_regs+9); err |= __get_user(sw->r10, sc->sc_regs+10); err |= __get_user(sw->r11, sc->sc_regs+11); err |= __get_user(sw->r12, sc->sc_regs+12); err |= __get_user(sw->r13, sc->sc_regs+13); err |= __get_user(sw->r14, sc->sc_regs+14); err |= __get_user(sw->r15, sc->sc_regs+15); err |= __get_user(regs->r16, sc->sc_regs+16); err |= __get_user(regs->r17, sc->sc_regs+17); err |= __get_user(regs->r18, sc->sc_regs+18); err |= __get_user(regs->r19, sc->sc_regs+19); err |= __get_user(regs->r20, sc->sc_regs+20); err |= __get_user(regs->r21, sc->sc_regs+21); err |= __get_user(regs->r22, sc->sc_regs+22); err |= __get_user(regs->r23, sc->sc_regs+23); err |= __get_user(regs->r24, sc->sc_regs+24); err |= __get_user(regs->r25, sc->sc_regs+25); err |= __get_user(regs->r26, sc->sc_regs+26); err |= __get_user(regs->r27, sc->sc_regs+27); err |= __get_user(regs->r28, sc->sc_regs+28); err |= __get_user(regs->gp, sc->sc_regs+29); err |= __get_user(usp, sc->sc_regs+30); wrusp(usp); err |= __copy_from_user(current_thread_info()->fp, sc->sc_fpregs, 31 * 8); err |= __get_user(current_thread_info()->fp[31], &sc->sc_fpcr); return err; } /* Note that this syscall is also used by setcontext(3) to install a given sigcontext. This because it's impossible to set *all* registers and transfer control from userland. */ asmlinkage void do_sigreturn(struct sigcontext __user *sc) { struct pt_regs *regs = current_pt_regs(); sigset_t set; /* Verify that it's a good sigcontext before using it */ if (!access_ok(sc, sizeof(*sc))) goto give_sigsegv; if (__get_user(set.sig[0], &sc->sc_mask)) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(sc, regs)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, current); } return; give_sigsegv: force_sig(SIGSEGV); } asmlinkage void do_rt_sigreturn(struct rt_sigframe __user *frame) { struct pt_regs *regs = current_pt_regs(); sigset_t set; /* Verify that it's a good ucontext_t before using it */ if (!access_ok(&frame->uc, sizeof(frame->uc))) goto give_sigsegv; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, current); } return; give_sigsegv: force_sig(SIGSEGV); } /* * Set up a signal frame. 
*/ static inline void __user * get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) { return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul); } static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, unsigned long sp) { struct switch_stack *sw = (struct switch_stack *)regs - 1; long err = 0; err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); err |= __put_user(mask, &sc->sc_mask); err |= __put_user(regs->pc, &sc->sc_pc); err |= __put_user(8, &sc->sc_ps); err |= __put_user(regs->r0 , sc->sc_regs+0); err |= __put_user(regs->r1 , sc->sc_regs+1); err |= __put_user(regs->r2 , sc->sc_regs+2); err |= __put_user(regs->r3 , sc->sc_regs+3); err |= __put_user(regs->r4 , sc->sc_regs+4); err |= __put_user(regs->r5 , sc->sc_regs+5); err |= __put_user(regs->r6 , sc->sc_regs+6); err |= __put_user(regs->r7 , sc->sc_regs+7); err |= __put_user(regs->r8 , sc->sc_regs+8); err |= __put_user(sw->r9 , sc->sc_regs+9); err |= __put_user(sw->r10 , sc->sc_regs+10); err |= __put_user(sw->r11 , sc->sc_regs+11); err |= __put_user(sw->r12 , sc->sc_regs+12); err |= __put_user(sw->r13 , sc->sc_regs+13); err |= __put_user(sw->r14 , sc->sc_regs+14); err |= __put_user(sw->r15 , sc->sc_regs+15); err |= __put_user(regs->r16, sc->sc_regs+16); err |= __put_user(regs->r17, sc->sc_regs+17); err |= __put_user(regs->r18, sc->sc_regs+18); err |= __put_user(regs->r19, sc->sc_regs+19); err |= __put_user(regs->r20, sc->sc_regs+20); err |= __put_user(regs->r21, sc->sc_regs+21); err |= __put_user(regs->r22, sc->sc_regs+22); err |= __put_user(regs->r23, sc->sc_regs+23); err |= __put_user(regs->r24, sc->sc_regs+24); err |= __put_user(regs->r25, sc->sc_regs+25); err |= __put_user(regs->r26, sc->sc_regs+26); err |= __put_user(regs->r27, sc->sc_regs+27); err |= __put_user(regs->r28, sc->sc_regs+28); err |= __put_user(regs->gp , sc->sc_regs+29); err |= __put_user(sp, sc->sc_regs+30); err |= __put_user(0, sc->sc_regs+31); err |= __copy_to_user(sc->sc_fpregs, current_thread_info()->fp, 31 * 8); err |= __put_user(0, sc->sc_fpregs+31); err |= __put_user(current_thread_info()->fp[31], &sc->sc_fpcr); err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2); return err; } static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { unsigned long oldsp, r26, err = 0; struct sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp); if (err) return -EFAULT; /* Set up to return from userspace. If provided, use a stub already in userspace. */ r26 = (unsigned long) ksig->ka.ka_restorer; if (!r26) { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } /* Check that everything was written properly. 
*/ if (err) return err; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ regs->r17 = 0; /* a1: exception code */ regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { unsigned long oldsp, r26, err = 0; struct rt_sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); if (!access_ok(frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask); err |= __save_altstack(&frame->uc.uc_stack, oldsp); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], oldsp); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up to return from userspace. If provided, use a stub already in userspace. */ r26 = (unsigned long) ksig->ka.ka_restorer; if (!r26) { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } if (err) return -EFAULT; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; } /* * OK, we're invoking a handler. */ static inline void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; if (ksig->ka.sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(ksig, oldset, regs); else ret = setup_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, 0); } static inline void syscall_restart(unsigned long r0, unsigned long r19, struct pt_regs *regs, struct k_sigaction *ka) { switch (regs->r0) { case ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { case ERESTARTNOHAND: regs->r0 = EINTR; break; } fallthrough; case ERESTARTNOINTR: regs->r0 = r0; /* reset v0 and a3 and replay syscall */ regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: regs->r0 = EINTR; break; } } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. * * "r0" and "r19" are the registers we need to restore for system call * restart. 
"r0" is also used as an indicator whether we can restart at * all (if we get here from anything but a syscall return, it will be 0) */ static void do_signal(struct pt_regs *regs, unsigned long r0, unsigned long r19) { unsigned long single_stepping = ptrace_cancel_bpt(current); struct ksignal ksig; /* This lets the debugger run, ... */ if (get_signal(&ksig)) { /* ... so re-check the single stepping. */ single_stepping |= ptrace_cancel_bpt(current); /* Whee! Actually deliver the signal. */ if (r0) syscall_restart(r0, r19, regs, &ksig.ka); handle_signal(&ksig, regs); } else { single_stepping |= ptrace_cancel_bpt(current); if (r0) { switch (regs->r0) { case ERESTARTNOHAND: case ERESTARTSYS: case ERESTARTNOINTR: /* Reset v0 and a3 and replay syscall. */ regs->r0 = r0; regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: /* Set v0 to the restart_syscall and replay */ regs->r0 = __NR_restart_syscall; regs->pc -= 4; break; } } restore_saved_sigmask(); } if (single_stepping) ptrace_set_bpt(current); /* re-set breakpoint */ } void do_work_pending(struct pt_regs *regs, unsigned long thread_flags, unsigned long r0, unsigned long r19) { do { if (thread_flags & _TIF_NEED_RESCHED) { schedule(); } else { local_irq_enable(); if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { preempt_disable(); save_fpu(); preempt_enable(); do_signal(regs, r0, r19); r0 = 0; } else { resume_user_mode_work(regs); } } local_irq_disable(); thread_flags = read_thread_flags(); } while (thread_flags & _TIF_WORK_MASK); }
linux-master
arch/alpha/kernel/signal.c
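get_sigframe() in signal.c places the signal frame just below the chosen stack pointer and rounds it down to a 32-byte boundary. A tiny sketch of that arithmetic with example numbers:

/* Sketch of the frame placement: carve the frame out below the user
 * stack pointer and align it down with "& -32ul".  The stack address
 * and frame size below are arbitrary example values. */
#include <stdio.h>

int main(void)
{
        unsigned long sp = 0x7fffe9f4UL;        /* hypothetical user SP */
        unsigned long frame_size = 0x130;       /* e.g. a sigframe-sized object */
        unsigned long frame = (sp - frame_size) & -32UL;

        printf("sp=%#lx frame=%#lx (32-byte aligned: %s)\n",
               sp, frame, (frame & 31) ? "no" : "yes");
        return 0;
}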
#include <asm/hwrpb.h> #include <linux/device.h> #ifdef CONFIG_SYSFS static int cpu_is_ev6_or_later(void) { struct percpu_struct *cpu; unsigned long cputype; cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset); cputype = cpu->type & 0xffffffff; /* Include all of EV6, EV67, EV68, EV7, EV79 and EV69. */ return (cputype == EV6_CPU) || ((cputype >= EV67_CPU) && (cputype <= EV69_CPU)); } ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { if (cpu_is_ev6_or_later()) return sprintf(buf, "Vulnerable\n"); else return sprintf(buf, "Not affected\n"); } ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { if (cpu_is_ev6_or_later()) return sprintf(buf, "Vulnerable\n"); else return sprintf(buf, "Not affected\n"); } ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { if (cpu_is_ev6_or_later()) return sprintf(buf, "Vulnerable\n"); else return sprintf(buf, "Not affected\n"); } #endif
linux-master
arch/alpha/kernel/bugs.c
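The checks in bugs.c classify a CPU by its HWRPB type field: EV6 itself, or anything in the EV67..EV69 range, is reported as vulnerable. A minimal sketch of that classification, using placeholder constants in place of the real EV6_CPU/EV67_CPU/EV69_CPU values from <asm/hwrpb.h>:

/* Sketch of the vulnerability classification: mask the per-CPU type
 * down to 32 bits and test it against the EV6-or-later range.
 * The CPU-type constants here are illustrative only. */
#include <stdio.h>

#define EV6_CPU  7      /* placeholder values, not the real ones */
#define EV67_CPU 11
#define EV69_CPU 14

static int is_ev6_or_later(unsigned long type)
{
        unsigned long cputype = type & 0xffffffff;

        return cputype == EV6_CPU ||
               (cputype >= EV67_CPU && cputype <= EV69_CPU);
}

int main(void)
{
        printf("type 7 -> %s\n", is_ev6_or_later(7) ? "Vulnerable" : "Not affected");
        printf("type 2 -> %s\n", is_ev6_or_later(2) ? "Vulnerable" : "Not affected");
        return 0;
}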
// SPDX-License-Identifier: GPL-2.0-only /* * srm_env.c - Access to SRM environment * variables through linux' procfs * * (C) 2001,2002,2006 by Jan-Benedict Glaw <[email protected]> * * This driver is a modified version of Erik Mouw's example proc * interface, so: thank you, Erik! He can be reached via email at * <[email protected]>. It is based on an idea * provided by DEC^WCompaq^WIntel's "Jumpstart" CD. They * included a patch like this as well. Thanks for idea! */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/console.h> #include <linux/uaccess.h> #include <asm/machvec.h> #define BASE_DIR "srm_environment" /* Subdir in /proc/ */ #define NAMED_DIR "named_variables" /* Subdir for known variables */ #define NUMBERED_DIR "numbered_variables" /* Subdir for all variables */ #define VERSION "0.0.6" /* Module version */ #define NAME "srm_env" /* Module name */ MODULE_AUTHOR("Jan-Benedict Glaw <[email protected]>"); MODULE_DESCRIPTION("Accessing Alpha SRM environment through procfs interface"); MODULE_LICENSE("GPL"); typedef struct _srm_env { char *name; unsigned long id; } srm_env_t; static struct proc_dir_entry *base_dir; static struct proc_dir_entry *named_dir; static struct proc_dir_entry *numbered_dir; static srm_env_t srm_named_entries[] = { { "auto_action", ENV_AUTO_ACTION }, { "boot_dev", ENV_BOOT_DEV }, { "bootdef_dev", ENV_BOOTDEF_DEV }, { "booted_dev", ENV_BOOTED_DEV }, { "boot_file", ENV_BOOT_FILE }, { "booted_file", ENV_BOOTED_FILE }, { "boot_osflags", ENV_BOOT_OSFLAGS }, { "booted_osflags", ENV_BOOTED_OSFLAGS }, { "boot_reset", ENV_BOOT_RESET }, { "dump_dev", ENV_DUMP_DEV }, { "enable_audit", ENV_ENABLE_AUDIT }, { "license", ENV_LICENSE }, { "char_set", ENV_CHAR_SET }, { "language", ENV_LANGUAGE }, { "tty_dev", ENV_TTY_DEV }, { NULL, 0 }, }; static int srm_env_proc_show(struct seq_file *m, void *v) { unsigned long ret; unsigned long id = (unsigned long)m->private; char *page; page = (char *)__get_free_page(GFP_USER); if (!page) return -ENOMEM; ret = callback_getenv(id, page, PAGE_SIZE); if ((ret >> 61) == 0) { seq_write(m, page, ret); ret = 0; } else ret = -EFAULT; free_page((unsigned long)page); return ret; } static int srm_env_proc_open(struct inode *inode, struct file *file) { return single_open(file, srm_env_proc_show, pde_data(inode)); } static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { int res; unsigned long id = (unsigned long)pde_data(file_inode(file)); char *buf = (char *) __get_free_page(GFP_USER); unsigned long ret1, ret2; if (!buf) return -ENOMEM; res = -EINVAL; if (count >= PAGE_SIZE) goto out; res = -EFAULT; if (copy_from_user(buf, buffer, count)) goto out; buf[count] = '\0'; ret1 = callback_setenv(id, buf, count); if ((ret1 >> 61) == 0) { do ret2 = callback_save_env(); while((ret2 >> 61) == 1); res = (int) ret1; } out: free_page((unsigned long)buf); return res; } static const struct proc_ops srm_env_proc_ops = { .proc_open = srm_env_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, .proc_write = srm_env_proc_write, }; static int __init srm_env_init(void) { srm_env_t *entry; unsigned long var_num; /* * Check system */ if (!alpha_using_srm) { printk(KERN_INFO "%s: This Alpha system doesn't " "know about SRM (or you've booted " "SRM->MILO->Linux, which gets " "misdetected)...\n", __func__); return -ENODEV; } /* * Create base directory */ base_dir = 
proc_mkdir(BASE_DIR, NULL); if (!base_dir) { printk(KERN_ERR "Couldn't create base dir /proc/%s\n", BASE_DIR); return -ENOMEM; } /* * Create per-name subdirectory */ named_dir = proc_mkdir(NAMED_DIR, base_dir); if (!named_dir) { printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n", BASE_DIR, NAMED_DIR); goto cleanup; } /* * Create per-number subdirectory */ numbered_dir = proc_mkdir(NUMBERED_DIR, base_dir); if (!numbered_dir) { printk(KERN_ERR "Couldn't create dir /proc/%s/%s\n", BASE_DIR, NUMBERED_DIR); goto cleanup; } /* * Create all named nodes */ entry = srm_named_entries; while (entry->name && entry->id) { if (!proc_create_data(entry->name, 0644, named_dir, &srm_env_proc_ops, (void *)entry->id)) goto cleanup; entry++; } /* * Create all numbered nodes */ for (var_num = 0; var_num <= 255; var_num++) { char name[4]; sprintf(name, "%ld", var_num); if (!proc_create_data(name, 0644, numbered_dir, &srm_env_proc_ops, (void *)var_num)) goto cleanup; } printk(KERN_INFO "%s: version %s loaded successfully\n", NAME, VERSION); return 0; cleanup: remove_proc_subtree(BASE_DIR, NULL); return -ENOMEM; } static void __exit srm_env_exit(void) { remove_proc_subtree(BASE_DIR, NULL); printk(KERN_INFO "%s: unloaded successfully\n", NAME); } module_init(srm_env_init); module_exit(srm_env_exit);
linux-master
arch/alpha/kernel/srm_env.c
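srm_env.c relies on the SRM console callbacks returning a status in the top three bits of the result and a byte count in the low bits, hence the (ret >> 61) == 0 tests above. A small sketch of that decode, with a fabricated return value:

/* Sketch of the SRM callback status convention used by srm_env.c:
 * status in bits 63:61 (0 = ok), byte count in the low bits. */
#include <stdio.h>

static int srm_status(unsigned long long ret)
{
        return ret >> 61;               /* 0 = ok, non-zero = error/retry */
}

static unsigned long long srm_length(unsigned long long ret)
{
        return ret & ((1ULL << 61) - 1);        /* low bits: bytes transferred */
}

int main(void)
{
        unsigned long long ret = 42;    /* hypothetical: ok, 42 bytes */

        if (srm_status(ret) == 0)
                printf("ok, %llu bytes\n", srm_length(ret));
        else
                printf("failed, status %d\n", srm_status(ret));
        return 0;
}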
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_nautilus.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1998 Richard Henderson * Copyright (C) 1999 Alpha Processor, Inc., * (David Daniel, Stig Telfer, Soohoon Lee) * * Code supporting NAUTILUS systems. * * * NAUTILUS has the following I/O features: * * a) Driven by AMD 751 aka IRONGATE (northbridge): * 4 PCI slots * 1 AGP slot * * b) Driven by ALI M1543C (southbridge) * 2 ISA slots * 2 IDE connectors * 1 dual drive capable FDD controller * 2 serial ports * 1 ECP/EPP/SP parallel port * 2 USB ports */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/memblock.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_irongate.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "err_impl.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init nautilus_init_irq(void) { if (alpha_using_srm) { alpha_mv.device_interrupt = srm_device_interrupt; } init_i8259a_irqs(); common_init_isa_dma(); } static int nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { /* Preserve the IRQ set up by the console. */ u8 irq; /* UP1500: AGP INTA is actually routed to IRQ 5, not IRQ 10 as console reports. Check the device id of AGP bridge to distinguish UP1500 from UP1000/1100. Note: 'pin' is 2 due to bridge swizzle. */ if (slot == 1 && pin == 2 && dev->bus->self && dev->bus->self->device == 0x700f) return 5; pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); return irq; } void nautilus_kill_arch(int mode) { struct pci_bus *bus = pci_isa_hose->bus; u32 pmuport; int off; switch (mode) { case LINUX_REBOOT_CMD_RESTART: if (! alpha_using_srm) { u8 t8; pci_bus_read_config_byte(bus, 0x38, 0x43, &t8); pci_bus_write_config_byte(bus, 0x38, 0x43, t8 | 0x80); outb(1, 0x92); outb(0, 0x92); /* NOTREACHED */ } break; case LINUX_REBOOT_CMD_POWER_OFF: /* Assume M1543C */ off = 0x2000; /* SLP_TYPE = 0, SLP_EN = 1 */ pci_bus_read_config_dword(bus, 0x88, 0x10, &pmuport); if (!pmuport) { /* M1535D/D+ */ off = 0x3400; /* SLP_TYPE = 5, SLP_EN = 1 */ pci_bus_read_config_dword(bus, 0x88, 0xe0, &pmuport); } pmuport &= 0xfffe; outw(0xffff, pmuport); /* Clear pending events. */ outw(off, pmuport + 4); /* NOTREACHED */ break; } } /* Perform analysis of a machine check that arrived from the system (NMI) */ static void naut_sys_machine_check(unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { printk("PC %lx RA %lx\n", regs->pc, regs->r26); irongate_pci_clr_err(); } /* Machine checks can come from two sources - those on the CPU and those in the system. They are analysed separately but all starts here. */ void nautilus_machine_check(unsigned long vector, unsigned long la_ptr) { char *mchk_class; /* Now for some analysis. Machine checks fall into two classes -- those picked up by the system, and those picked up by the CPU. Add to that the two levels of severity - correctable or not. */ if (vector == SCB_Q_SYSMCHK && ((IRONGATE0->dramms & 0x300) == 0x300)) { unsigned long nmi_ctl; /* Clear ALI NMI */ nmi_ctl = inb(0x61); nmi_ctl |= 0x0c; outb(nmi_ctl, 0x61); nmi_ctl &= ~0x0c; outb(nmi_ctl, 0x61); /* Write again clears error bits. */ IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; mb(); IRONGATE0->stat_cmd; /* Write again clears error bits. 
*/ IRONGATE0->dramms = IRONGATE0->dramms; mb(); IRONGATE0->dramms; draina(); wrmces(0x7); mb(); return; } if (vector == SCB_Q_SYSERR) mchk_class = "Correctable"; else if (vector == SCB_Q_SYSMCHK) mchk_class = "Fatal"; else { ev6_machine_check(vector, la_ptr); return; } printk(KERN_CRIT "NAUTILUS Machine check 0x%lx " "[%s System Machine Check (NMI)]\n", vector, mchk_class); naut_sys_machine_check(vector, la_ptr, get_irq_regs()); /* Tell the PALcode to clear the machine check */ draina(); wrmces(0x7); mb(); } extern void pcibios_claim_one_bus(struct pci_bus *); static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, }; static struct resource busn_resource = { .name = "PCI busn", .start = 0, .end = 255, .flags = IORESOURCE_BUS, }; void __init nautilus_init_pci(void) { struct pci_controller *hose = hose_head; struct pci_host_bridge *bridge; struct pci_bus *bus; unsigned long bus_align, bus_size, pci_mem; unsigned long memtop = max_low_pfn << PAGE_SHIFT; bridge = pci_alloc_host_bridge(0); if (!bridge) return; /* Use default IO. */ pci_add_resource(&bridge->windows, &ioport_resource); /* Irongate PCI memory aperture, calculate required size before setting it up. */ pci_add_resource(&bridge->windows, &irongate_mem); pci_add_resource(&bridge->windows, &busn_resource); bridge->dev.parent = NULL; bridge->sysdata = hose; bridge->busnr = 0; bridge->ops = alpha_mv.pci_ops; bridge->swizzle_irq = alpha_mv.pci_swizzle; bridge->map_irq = alpha_mv.pci_map_irq; bridge->size_windows = 1; /* Scan our single hose. */ if (pci_scan_root_bus_bridge(bridge)) { pci_free_host_bridge(bridge); return; } bus = hose->bus = bridge->bus; pcibios_claim_one_bus(bus); pci_bus_size_bridges(bus); /* Now we've got the size and alignment of PCI memory resources stored in irongate_mem. Set up the PCI memory range: limit is hardwired to 0xffffffff, base must be aligned to 16Mb. */ bus_align = irongate_mem.start; bus_size = irongate_mem.end + 1 - bus_align; if (bus_align < 0x1000000UL) bus_align = 0x1000000UL; pci_mem = (0x100000000UL - bus_size) & -bus_align; irongate_mem.start = pci_mem; irongate_mem.end = 0xffffffffUL; /* Register our newly calculated PCI memory window in the resource tree. */ if (request_resource(&iomem_resource, &irongate_mem) < 0) printk(KERN_ERR "Failed to request MEM on hose 0\n"); printk(KERN_INFO "Irongate pci_mem %pR\n", &irongate_mem); if (pci_mem < memtop) memtop = pci_mem; if (memtop > alpha_mv.min_mem_address) { free_reserved_area(__va(alpha_mv.min_mem_address), __va(memtop), -1, NULL); printk(KERN_INFO "nautilus_init_pci: %ldk freed\n", (memtop - alpha_mv.min_mem_address) >> 10); } if ((IRONGATE0->dev_vendor >> 16) > 0x7006) /* Albacore? */ IRONGATE0->pci_mem = pci_mem; pci_bus_assign_resources(bus); pci_bus_add_devices(bus); } /* * The System Vectors */ struct alpha_machine_vector nautilus_mv __initmv = { .vector_name = "Nautilus", DO_EV6_MMU, DO_DEFAULT_RTC, DO_IRONGATE_IO, .machine_check = nautilus_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = IRONGATE_DEFAULT_MEM_BASE, .nr_irqs = 16, .device_interrupt = isa_device_interrupt, .init_arch = irongate_init_arch, .init_irq = nautilus_init_irq, .init_rtc = common_init_rtc, .init_pci = nautilus_init_pci, .kill_arch = nautilus_kill_arch, .pci_map_irq = nautilus_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(nautilus)
linux-master
arch/alpha/kernel/sys_nautilus.c
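nautilus_init_pci() sizes the Irongate PCI memory aperture and then pushes it as high as possible below 4GB, aligned to at least 16MB. The sketch below repeats that arithmetic with example sizes (using unsigned long long so it also behaves on 32-bit hosts):

/* Sketch of the aperture arithmetic: place a PCI memory window of
 * bus_size bytes just below 4GB, rounded down to bus_align.
 * The sizes are example numbers, not taken from real hardware. */
#include <stdio.h>

int main(void)
{
        unsigned long long bus_align = 0x400000;        /* hypothetical: 4MB */
        unsigned long long bus_size = 0x6000000;        /* hypothetical: 96MB */
        unsigned long long pci_mem;

        if (bus_align < 0x1000000ULL)                   /* enforce 16MB minimum */
                bus_align = 0x1000000ULL;

        pci_mem = (0x100000000ULL - bus_size) & -bus_align;
        printf("window: %#llx - 0xffffffff (base aligned to %#llx)\n",
               pci_mem, bus_align);
        return 0;
}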
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_jensen.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the Jensen. */ #define __EXTERN_INLINE #include <asm/io.h> #include <asm/jensen.h> #undef __EXTERN_INLINE #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* * Jensen is special: the vector is 0x8X0 for EISA interrupt X, and * 0x9X0 for the local motherboard interrupts. * * Note especially that those local interrupts CANNOT be masked, * which causes much of the pain below... * * 0x660 - NMI * * 0x800 - IRQ0 interval timer (not used, as we use the RTC timer) * 0x810 - IRQ1 line printer (duh..) * 0x860 - IRQ6 floppy disk * * 0x900 - COM1 * 0x920 - COM2 * 0x980 - keyboard * 0x990 - mouse * * PCI-based systems are more sane: they don't have the local * interrupts at all, and have only normal PCI interrupts from * devices. Happily it's easy enough to do a sane mapping from the * Jensen. * * Note that this means that we may have to do a hardware * "local_op" to a different interrupt than we report to the rest of the * world. */ static void jensen_local_enable(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ if (d->irq == 7) i8259a_enable_irq(d); } static void jensen_local_disable(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ if (d->irq == 7) i8259a_disable_irq(d); } static void jensen_local_mask_ack(struct irq_data *d) { /* the parport is really hw IRQ 1, silly Jensen. */ if (d->irq == 7) i8259a_mask_and_ack_irq(d); } static struct irq_chip jensen_local_irq_type = { .name = "LOCAL", .irq_unmask = jensen_local_enable, .irq_mask = jensen_local_disable, .irq_mask_ack = jensen_local_mask_ack, }; static void jensen_device_interrupt(unsigned long vector) { int irq; switch (vector) { case 0x660: printk("Whee.. NMI received. Probable hardware error\n"); printk("61=%02x, 461=%02x\n", inb(0x61), inb(0x461)); return; /* local device interrupts: */ case 0x900: irq = 4; break; /* com1 -> irq 4 */ case 0x920: irq = 3; break; /* com2 -> irq 3 */ case 0x980: irq = 1; break; /* kbd -> irq 1 */ case 0x990: irq = 9; break; /* mouse -> irq 9 */ default: if (vector > 0x900) { printk("Unknown local interrupt %lx\n", vector); return; } irq = (vector - 0x800) >> 4; if (irq == 1) irq = 7; break; } /* If there is no handler yet... */ if (!irq_has_action(irq)) { /* If it is a local interrupt that cannot be masked... */ if (vector >= 0x900) { /* Clear keyboard/mouse state */ inb(0x64); inb(0x60); /* Reset serial ports */ inb(0x3fa); inb(0x2fa); outb(0x0c, 0x3fc); outb(0x0c, 0x2fc); /* Clear NMI */ outb(0,0x61); outb(0,0x461); } } #if 0 /* A useful bit of code to find out if an interrupt is going wild. 
*/ { static unsigned int last_msg = 0, last_cc = 0; static int last_irq = -1, count = 0; unsigned int cc; __asm __volatile("rpcc %0" : "=r"(cc)); ++count; #define JENSEN_CYCLES_PER_SEC (150000000) if (cc - last_msg > ((JENSEN_CYCLES_PER_SEC) * 3) || irq != last_irq) { printk(KERN_CRIT " irq %d count %d cc %u @ %lx\n", irq, count, cc-last_cc, get_irq_regs()->pc); count = 0; last_msg = cc; last_irq = irq; } last_cc = cc; } #endif handle_irq(irq); } static void __init jensen_init_irq(void) { init_i8259a_irqs(); irq_set_chip_and_handler(1, &jensen_local_irq_type, handle_level_irq); irq_set_chip_and_handler(4, &jensen_local_irq_type, handle_level_irq); irq_set_chip_and_handler(3, &jensen_local_irq_type, handle_level_irq); irq_set_chip_and_handler(7, &jensen_local_irq_type, handle_level_irq); irq_set_chip_and_handler(9, &jensen_local_irq_type, handle_level_irq); common_init_isa_dma(); } static void __init jensen_init_arch(void) { struct pci_controller *hose; #ifdef CONFIG_PCI static struct pci_dev fake_isa_bridge = { .dma_mask = 0xffffffffUL, }; isa_bridge = &fake_isa_bridge; #endif /* Create a hose so that we can report i/o base addresses to userland. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = EISA_MEM - IDENT_ADDR; hose->dense_mem_base = 0; hose->sparse_io_base = EISA_IO - IDENT_ADDR; hose->dense_io_base = 0; hose->sg_isa = hose->sg_pci = NULL; __direct_map_base = 0; __direct_map_size = 0xffffffff; } static void jensen_machine_check(unsigned long vector, unsigned long la) { printk(KERN_CRIT "Machine check\n"); } /* * The System Vector */ struct alpha_machine_vector jensen_mv __initmv = { .vector_name = "Jensen", DO_EV4_MMU, IO_LITE(JENSEN,jensen), .machine_check = jensen_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .rtc_port = 0x170, .nr_irqs = 16, .device_interrupt = jensen_device_interrupt, .init_arch = jensen_init_arch, .init_irq = jensen_init_irq, .init_rtc = common_init_rtc, .init_pci = NULL, .kill_arch = NULL, }; ALIAS_MV(jensen)
linux-master
arch/alpha/kernel/sys_jensen.c
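The vector-to-IRQ mapping described in the comment block of sys_jensen.c above reduces to a small decision tree. This is a standalone illustration (not kernel code) of the same mapping that jensen_device_interrupt() walks, printed for a few sample vectors:

#include <stdio.h>

static int jensen_vector_to_irq(unsigned long vector)
{
	int irq;

	switch (vector) {
	case 0x660: return -1;	/* NMI, not a normal IRQ */
	case 0x900: return 4;	/* COM1                  */
	case 0x920: return 3;	/* COM2                  */
	case 0x980: return 1;	/* keyboard              */
	case 0x990: return 9;	/* mouse                 */
	}
	if (vector > 0x900)
		return -1;			/* unknown local interrupt     */
	irq = (int)((vector - 0x800) >> 4);	/* EISA: vector 0x8X0 -> IRQ X */
	return irq == 1 ? 7 : irq;		/* parport is reported as 7    */
}

int main(void)
{
	unsigned long v[] = { 0x900, 0x980, 0x810, 0x860 };
	for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("vector 0x%lx -> irq %d\n", v[i], jensen_vector_to_irq(v[i]));
	return 0;
}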
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_irongate.c * * Based on code written by David A. Rusling ([email protected]). * * Copyright (C) 1999 Alpha Processor, Inc., * (David Daniel, Stig Telfer, Soohoon Lee) * * Code common to all IRONGATE core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_irongate.h> #undef __EXTERN_INLINE #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include "proto.h" #include "pci_impl.h" /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif igcsr32 *IronECC; /* * Given a bus, device, and function number, compute resulting * configuration space address accordingly. It is therefore not safe * to have concurrent invocations to configuration space access * routines, but there really shouldn't be any need for this. * * addr[31:24] reserved * addr[23:16] bus number (8 bits = 128 possible buses) * addr[15:11] Device number (5 bits) * addr[10: 8] function number * addr[ 7: 2] register number * * For IRONGATE: * if (bus = addr[23:16]) == 0 * then * type 0 config cycle: * addr_on_pci[31:11] = id selection for device = addr[15:11] * addr_on_pci[10: 2] = addr[10: 2] ??? * addr_on_pci[ 1: 0] = 00 * else * type 1 config cycle (pass on with no decoding): * addr_on_pci[31:24] = 0 * addr_on_pci[23: 2] = addr[23: 2] * addr_on_pci[ 1: 0] = 01 * fi * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. 
*/ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= IRONGATE_CONF; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops irongate_pci_ops = { .read = irongate_read_config, .write = irongate_write_config, }; int irongate_pci_clr_err(void) { unsigned int nmi_ctl=0; unsigned int IRONGATE_jd; again: IRONGATE_jd = IRONGATE0->stat_cmd; printk("Iron stat_cmd %x\n", IRONGATE_jd); IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */ mb(); IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */ IRONGATE_jd = *IronECC; printk("Iron ECC %x\n", IRONGATE_jd); *IronECC = IRONGATE_jd; /* write again clears error bits */ mb(); IRONGATE_jd = *IronECC; /* re-read to force write */ /* Clear ALI NMI */ nmi_ctl = inb(0x61); nmi_ctl |= 0x0c; outb(nmi_ctl, 0x61); nmi_ctl &= ~0x0c; outb(nmi_ctl, 0x61); IRONGATE_jd = *IronECC; if (IRONGATE_jd & 0x300) goto again; return 0; } #define IRONGATE_3GB 0xc0000000UL /* On Albacore (aka UP1500) with 4Gb of RAM we have to reserve some memory for PCI. At this point we just reserve memory above 3Gb. Most of this memory will be freed after PCI setup is done. */ static void __init albacore_init_arch(void) { unsigned long memtop = max_low_pfn << PAGE_SHIFT; unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL; struct percpu_struct *cpu; int pal_rev, pal_var; cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset); pal_rev = cpu->pal_revision & 0xffff; pal_var = (cpu->pal_revision >> 16) & 0xff; /* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up the CPU incorrectly (leave speculative stores enabled), which causes memory corruption under certain conditions. Issue a warning for such consoles. */ if (alpha_using_srm && (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2))) printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 " "or later\n"); if (pci_mem > IRONGATE_3GB) pci_mem = IRONGATE_3GB; IRONGATE0->pci_mem = pci_mem; alpha_mv.min_mem_address = pci_mem; if (memtop > pci_mem) { #ifdef CONFIG_BLK_DEV_INITRD extern unsigned long initrd_start, initrd_end; extern void *move_initrd(unsigned long); /* Move the initrd out of the way. 
*/ if (initrd_end && __pa(initrd_end) > pci_mem) { unsigned long size; size = initrd_end - initrd_start; memblock_free((void *)initrd_start, PAGE_ALIGN(size)); if (!move_initrd(pci_mem)) printk("irongate_init_arch: initrd too big " "(%ldK)\ndisabling initrd\n", size / 1024); } #endif memblock_reserve(pci_mem, memtop - pci_mem); printk("irongate_init_arch: temporarily reserving " "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1); } } static void __init irongate_setup_agp(void) { /* Disable the GART window. AGPGART doesn't work due to yet unresolved memory coherency issues... */ IRONGATE0->agpva = IRONGATE0->agpva & ~0xf; alpha_agpgart_size = 0; } void __init irongate_init_arch(void) { struct pci_controller *hose; int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */ IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms; irongate_pci_clr_err(); if (amd761) albacore_init_arch(); irongate_setup_agp(); /* * Create our single hose. */ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; /* This is for userland consumption. For some reason, the 40-bit PIO bias that we use in the kernel through KSEG didn't work for the page table based user mappings. So make sure we get the 43-bit PIO bias. */ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL; hose->dense_io_base = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL; hose->sg_isa = hose->sg_pci = NULL; __direct_map_base = 0; __direct_map_size = 0xffffffff; } /* * IO map and AGP support */ #include <linux/vmalloc.h> #include <linux/agp_backend.h> #include <linux/agpgart.h> #include <linux/export.h> #define GET_PAGE_DIR_OFF(addr) (addr >> 22) #define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr)) #define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12) #define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)]) void __iomem * irongate_ioremap(unsigned long addr, unsigned long size) { struct vm_struct *area; unsigned long vaddr; unsigned long baddr, last; u32 *mmio_regs, *gatt_pages, *cur_gatt, pte; unsigned long gart_bus_addr; if (!alpha_agpgart_size) return (void __iomem *)(addr + IRONGATE_MEM); gart_bus_addr = (unsigned long)IRONGATE0->bar0 & PCI_BASE_ADDRESS_MEM_MASK; /* * Check for within the AGP aperture... */ do { /* * Check the AGP area */ if (addr >= gart_bus_addr && addr + size - 1 < gart_bus_addr + alpha_agpgart_size) break; /* * Not found - assume legacy ioremap */ return (void __iomem *)(addr + IRONGATE_MEM); } while(0); mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 & PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM); gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */ /* * Adjust the limits (mappings must be page aligned) */ if (addr & ~PAGE_MASK) { printk("AGP ioremap failed... 
addr not page aligned (0x%lx)\n", addr); return (void __iomem *)(addr + IRONGATE_MEM); } last = addr + size - 1; size = PAGE_ALIGN(last) - addr; #if 0 printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size); printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr); printk("irongate_ioremap: gart_aper_size 0x%lx\n", gart_aper_size); printk("irongate_ioremap: mmio_regs %p\n", mmio_regs); printk("irongate_ioremap: gatt_pages %p\n", gatt_pages); for(baddr = addr; baddr <= last; baddr += PAGE_SIZE) { cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1); pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1; printk("irongate_ioremap: cur_gatt %p pte 0x%x\n", cur_gatt, pte); } #endif /* * Map it */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; for(baddr = addr, vaddr = (unsigned long)area->addr; baddr <= last; baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1); pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1; if (__alpha_remap_area_pages(vaddr, pte, PAGE_SIZE, 0)) { printk("AGP ioremap: FAILED to map...\n"); vfree(area->addr); return NULL; } } flush_tlb_all(); vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); #if 0 printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n", addr, size, vaddr); #endif return (void __iomem *)vaddr; } EXPORT_SYMBOL(irongate_ioremap); void irongate_iounmap(volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (((long)addr >> 41) == -2) return; /* kseg map, nothing to do */ if (addr) return vfree((void *)(PAGE_MASK & addr)); } EXPORT_SYMBOL(irongate_iounmap);
linux-master
arch/alpha/kernel/core_irongate.c
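The address layout documented in the mk_conf_addr() comment of core_irongate.c above comes down to a few shifts. A standalone sketch (not kernel code) of just the offset encoding, before the IRONGATE_CONF base from the chipset headers is ORed in:

#include <stdio.h>

static unsigned long conf_offset(unsigned bus, unsigned slot,
				 unsigned func, unsigned reg)
{
	unsigned devfn = (slot << 3) | func;	/* same packing as PCI_DEVFN(slot, func) */

	/* addr[23:16]=bus, addr[15:11]=device, addr[10:8]=function, addr[7:2]=register */
	return ((unsigned long)bus << 16) | (devfn << 8) | reg;
}

int main(void)
{
	/* bus 0, device 7, function 0, register 0x04 (command/status) */
	printf("config offset: %#lx\n", conf_offset(0, 7, 0, 0x04));
	return 0;
}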
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/pci-noop.c * * Stub PCI interfaces for Jensen-specific kernels. */ #include <linux/pci.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/capability.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/syscalls.h> #include "proto.h" /* * The PCI controller list. */ struct pci_controller *hose_head, **hose_tail = &hose_head; struct pci_controller *pci_isa_hose; struct pci_controller * __init alloc_pci_controller(void) { struct pci_controller *hose; hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); if (!hose) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; return hose; } struct resource * __init alloc_resource(void) { void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); if (!ptr) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(struct resource)); return ptr; } SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, unsigned long, dfn) { struct pci_controller *hose; /* from hose or from bus.devfn */ if (which & IOBASE_FROM_HOSE) { for (hose = hose_head; hose; hose = hose->next) if (hose->index == bus) break; if (!hose) return -ENODEV; } else { /* Special hook for ISA access. */ if (bus == 0 && dfn == 0) hose = pci_isa_hose; else return -ENODEV; } switch (which & ~IOBASE_FROM_HOSE) { case IOBASE_HOSE: return hose->index; case IOBASE_SPARSE_MEM: return hose->sparse_mem_base; case IOBASE_DENSE_MEM: return hose->dense_mem_base; case IOBASE_SPARSE_IO: return hose->sparse_io_base; case IOBASE_DENSE_IO: return hose->dense_io_base; case IOBASE_ROOT_BUS: return hose->bus->number; } return -EOPNOTSUPP; } SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, unsigned long, off, unsigned long, len, void __user *, buf) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; else return -ENODEV; } SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, unsigned long, off, unsigned long, len, void __user *, buf) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; else return -ENODEV; }
linux-master
arch/alpha/kernel/pci-noop.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_eb64p.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the EB64+ and EB66. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_apecs.h> #include <asm/core_lca.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned int cached_irq_mask = -1; static inline void eb64p_update_irq_hw(unsigned int irq, unsigned long mask) { outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26)); } static inline void eb64p_enable_irq(struct irq_data *d) { eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq)); } static void eb64p_disable_irq(struct irq_data *d) { eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq); } static struct irq_chip eb64p_irq_type = { .name = "EB64P", .irq_unmask = eb64p_enable_irq, .irq_mask = eb64p_disable_irq, .irq_mask_ack = eb64p_disable_irq, }; static void eb64p_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary registers */ pld = inb(0x26) | (inb(0x27) << 8); /* * Now, for every possible bit set, work through * them and call the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 5) { isa_device_interrupt(vector); } else { handle_irq(16 + i); } } } static void __init eb64p_init_irq(void) { long i; #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET) /* * CABRIO SRM may not set variation correctly, so here we test * the high word of the interrupt summary register for the RAZ * bits, and hope that a true EB64+ would read all ones... */ if (inw(0x806) != 0xffff) { extern struct alpha_machine_vector cabriolet_mv; printk("Detected Cabriolet: correcting HWRPB.\n"); hwrpb->sys_variation |= 2L << 10; hwrpb_update_checksum(hwrpb); alpha_mv = cabriolet_mv; alpha_mv.init_irq(); return; } #endif /* GENERIC */ outb(0xff, 0x26); outb(0xff, 0x27); init_i8259a_irqs(); for (i = 16; i < 32; ++i) { irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } common_init_isa_dma(); if (request_irq(16 + 5, no_action, 0, "isa-cascade", NULL)) pr_err("Failed to register isa-cascade interrupt\n"); } /* * PCI Fixup configuration. * * There are two 8 bit external summary registers as follows: * * Summary @ 0x26: * Bit Meaning * 0 Interrupt Line A from slot 0 * 1 Interrupt Line A from slot 1 * 2 Interrupt Line B from slot 0 * 3 Interrupt Line B from slot 1 * 4 Interrupt Line C from slot 0 * 5 Interrupt line from the two ISA PICs * 6 Tulip * 7 NCR SCSI * * Summary @ 0x27 * Bit Meaning * 0 Interrupt Line C from slot 1 * 1 Interrupt Line D from slot 0 * 2 Interrupt Line D from slot 1 * 3 RAZ * 4 RAZ * 5 RAZ * 6 RAZ * 7 RAZ * * The device to slot mapping looks like: * * Slot Device * 5 NCR SCSI controller * 6 PCI on board slot 0 * 7 PCI on board slot 1 * 8 Intel SIO PCI-ISA bridge chip * 9 Tulip - DECchip 21040 Ethernet controller * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. 
The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ static int eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] = { /*INT INTA INTB INTC INTD */ {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ {16+1, 16+1, 16+3, 16+8, 16+10}, /* IdSel 7, slot ?, ?? */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ {16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 9, TULIP */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } /* * The System Vector */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P) struct alpha_machine_vector eb64p_mv __initmv = { .vector_name = "EB64+", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = eb64p_device_interrupt, .init_arch = apecs_init_arch, .init_irq = eb64p_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .kill_arch = NULL, .pci_map_irq = eb64p_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb64p) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66) struct alpha_machine_vector eb66_mv __initmv = { .vector_name = "EB66", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = eb64p_device_interrupt, .init_arch = lca_init_arch, .init_irq = eb64p_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .pci_map_irq = eb64p_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb66) #endif
linux-master
arch/alpha/kernel/sys_eb64p.c
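The summary-register decode loop in eb64p_device_interrupt() above is a standard set-bit walk: ffz(~pld) finds the lowest pending bit, pld &= pld - 1 clears it, and bit 5 cascades to the ISA PICs. A standalone sketch (not kernel code; the example value is made up) of the same iteration:

#include <stdio.h>

static unsigned long ffz(unsigned long word)	/* index of first zero bit */
{
	unsigned long bit = 0;
	while (word & 1) {
		word >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	unsigned long pld = 0x41;		/* example: summary bits 0 and 6 pending */

	while (pld) {
		unsigned long i = ffz(~pld);	/* lowest pending bit */
		pld &= pld - 1;			/* clear it           */
		if (i == 5)
			printf("bit 5 -> cascade to the ISA PICs\n");
		else
			printf("bit %lu -> handle_irq(%lu)\n", i, 16 + i);
	}
	return 0;
}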
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/core_titan.c * * Code common to all TITAN core logic chips. */ #define __EXTERN_INLINE inline #include <asm/io.h> #include <asm/core_titan.h> #undef __EXTERN_INLINE #include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/memblock.h> #include <asm/ptrace.h> #include <asm/smp.h> #include <asm/tlbflush.h> #include <asm/vga.h> #include "proto.h" #include "pci_impl.h" /* Save Titan configuration data as the console had it set up. */ struct { unsigned long wsba[4]; unsigned long wsm[4]; unsigned long tba[4]; } saved_config[4] __attribute__((common)); /* * Is PChip 1 present? No need to query it more than once. */ static int titan_pchip1_present; /* * BIOS32-style PCI interface: */ #define DEBUG_CONFIG 0 #if DEBUG_CONFIG # define DBG_CFG(args) printk args #else # define DBG_CFG(args) #endif /* * Routines to access TIG registers. */ static inline volatile unsigned long * mk_tig_addr(int offset) { return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6)); } static inline u8 titan_read_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); return (u8)(*tig_addr & 0xff); } static inline void titan_write_tig(int offset, u8 value) { volatile unsigned long *tig_addr = mk_tig_addr(offset); *tig_addr = (unsigned long)value; } /* * Given a bus, device, and function number, compute resulting * configuration space address * accordingly. It is therefore not safe to have concurrent * invocations to configuration space access routines, but there * really shouldn't be any need for this. * * Note that all config space accesses use Type 1 address format. * * Note also that type 1 is determined by non-zero bus number. * * Type 1: * * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * 31:24 reserved * 23:16 bus number (8 bits = 128 possible buses) * 15:11 Device number (5 bits) * 10:8 function number * 7:2 register number * * Notes: * The function number selects which function of a multi-function device * (e.g., SCSI and Ethernet). * * The register selects a DWORD (32 bit) register offset. Hence it * doesn't get shifted by 2 bits as we want to "drop" the bottom two * bits. */ static int mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where, unsigned long *pci_addr, unsigned char *type1) { struct pci_controller *hose = pbus->sysdata; unsigned long addr; u8 bus = pbus->number; DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, " "pci_addr=0x%p, type1=0x%p)\n", bus, device_fn, where, pci_addr, type1)); if (!pbus->parent) /* No parent means peer PCI bus. 
*/ bus = 0; *type1 = (bus != 0); addr = (bus << 16) | (device_fn << 8) | where; addr |= hose->config_space_base; *pci_addr = addr; DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr)); return 0; } static int titan_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: *value = __kernel_ldbu(*(vucp)addr); break; case 2: *value = __kernel_ldwu(*(vusp)addr); break; case 4: *value = *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } static int titan_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr; unsigned char type1; if (mk_conf_addr(bus, devfn, where, &addr, &type1)) return PCIBIOS_DEVICE_NOT_FOUND; switch (size) { case 1: __kernel_stb(value, *(vucp)addr); mb(); __kernel_ldbu(*(vucp)addr); break; case 2: __kernel_stw(value, *(vusp)addr); mb(); __kernel_ldwu(*(vusp)addr); break; case 4: *(vuip)addr = value; mb(); *(vuip)addr; break; } return PCIBIOS_SUCCESSFUL; } struct pci_ops titan_pci_ops = { .read = titan_read_config, .write = titan_write_config, }; void titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end) { titan_pachip *pachip = (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0; titan_pachip_port *port; volatile unsigned long *csr; unsigned long value; /* Get the right hose. */ port = &pachip->g_port; if (hose->index & 2) port = &pachip->a_port; /* We can invalidate up to 8 tlb entries in a go. The flush matches against <31:16> in the pci address. Note that gtlbi* and atlbi* are in the same place in the g_port and a_port, respectively, so the g_port offset can be used even if hose is an a_port */ csr = &port->port_specific.g.gtlbia.csr; if (((start ^ end) & 0xffff0000) == 0) csr = &port->port_specific.g.gtlbiv.csr; /* For TBIA, it doesn't matter what value we write. For TBI, it's the shifted tag bits. */ value = (start & 0xffff0000) >> 12; wmb(); *csr = value; mb(); *csr; } static int titan_query_agp(titan_pachip_port *port) { union TPAchipPCTL pctl; /* set up APCTL */ pctl.pctl_q_whole = port->pctl.csr; return pctl.pctl_r_bits.apctl_v_agp_present; } static void __init titan_init_one_pachip_port(titan_pachip_port *port, int index) { struct pci_controller *hose; hose = alloc_pci_controller(); if (index == 0) pci_isa_hose = hose; hose->io_space = alloc_resource(); hose->mem_space = alloc_resource(); /* * This is for userland consumption. The 40-bit PIO bias that we * use in the kernel through KSEG doesn't work in the page table * based user mappings. (43-bit KSEG sign extends the physical * address from bit 40 to hit the I/O bit - mapped addresses don't). * So make sure we get the 43-bit PIO bias. 
*/ hose->sparse_mem_base = 0; hose->sparse_io_base = 0; hose->dense_mem_base = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL; hose->dense_io_base = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL; hose->config_space_base = TITAN_CONF(index); hose->index = index; hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS; hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1; hose->io_space->name = pci_io_names[index]; hose->io_space->flags = IORESOURCE_IO; hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS; hose->mem_space->end = hose->mem_space->start + 0xffffffff; hose->mem_space->name = pci_mem_names[index]; hose->mem_space->flags = IORESOURCE_MEM; if (request_resource(&ioport_resource, hose->io_space) < 0) printk(KERN_ERR "Failed to request IO on hose %d\n", index); if (request_resource(&iomem_resource, hose->mem_space) < 0) printk(KERN_ERR "Failed to request MEM on hose %d\n", index); /* * Save the existing PCI window translations. SRM will * need them when we go to reboot. */ saved_config[index].wsba[0] = port->wsba[0].csr; saved_config[index].wsm[0] = port->wsm[0].csr; saved_config[index].tba[0] = port->tba[0].csr; saved_config[index].wsba[1] = port->wsba[1].csr; saved_config[index].wsm[1] = port->wsm[1].csr; saved_config[index].tba[1] = port->tba[1].csr; saved_config[index].wsba[2] = port->wsba[2].csr; saved_config[index].wsm[2] = port->wsm[2].csr; saved_config[index].tba[2] = port->tba[2].csr; saved_config[index].wsba[3] = port->wsba[3].csr; saved_config[index].wsm[3] = port->wsm[3].csr; saved_config[index].tba[3] = port->tba[3].csr; /* * Set up the PCI to main memory translation windows. * * Note: Window 3 on Titan is Scatter-Gather ONLY. * * Window 0 is scatter-gather 8MB at 8MB (for isa) * Window 1 is direct access 1GB at 2GB * Window 2 is scatter-gather 1GB at 3GB */ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, SMP_CACHE_BYTES); hose->sg_isa->align_entry = 8; /* 64KB for ISA */ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, SMP_CACHE_BYTES); hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */ port->wsba[0].csr = hose->sg_isa->dma_base | 3; port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000; port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes); port->wsba[1].csr = __direct_map_base | 1; port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000; port->tba[1].csr = 0; port->wsba[2].csr = hose->sg_pci->dma_base | 3; port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000; port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes); port->wsba[3].csr = 0; /* Enable the Monster Window to make DAC pci64 possible. */ port->pctl.csr |= pctl_m_mwin; /* * If it's an AGP port, initialize agplastwr. */ if (titan_query_agp(port)) port->port_specific.a.agplastwr.csr = __direct_map_base; titan_pci_tbi(hose, 0, -1); } static void __init titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14; /* Init the ports in hose order... 
*/ titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */ titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */ if (titan_pchip1_present) titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */ } void __init titan_init_arch(void) { #if 0 printk("%s: titan_init_arch()\n", __func__); printk("%s: CChip registers:\n", __func__); printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr); printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr); printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr); printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr); printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr); printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr); printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr); printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr); printk("%s: DChip registers:\n", __func__); printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr); printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr); printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr); #endif boot_cpuid = __hard_smp_processor_id(); /* With multiple PCI busses, we play with I/O as physical addrs. */ ioport_resource.end = ~0UL; iomem_resource.end = ~0UL; /* PCI DMA Direct Mapping is 1GB at 2GB. */ __direct_map_base = 0x80000000; __direct_map_size = 0x40000000; /* Init the PA chip(s). */ titan_init_pachips(TITAN_pachip0, TITAN_pachip1); /* Check for graphic console location (if any). */ find_console_vga_hose(); } static void titan_kill_one_pachip_port(titan_pachip_port *port, int index) { port->wsba[0].csr = saved_config[index].wsba[0]; port->wsm[0].csr = saved_config[index].wsm[0]; port->tba[0].csr = saved_config[index].tba[0]; port->wsba[1].csr = saved_config[index].wsba[1]; port->wsm[1].csr = saved_config[index].wsm[1]; port->tba[1].csr = saved_config[index].tba[1]; port->wsba[2].csr = saved_config[index].wsba[2]; port->wsm[2].csr = saved_config[index].wsm[2]; port->tba[2].csr = saved_config[index].tba[2]; port->wsba[3].csr = saved_config[index].wsba[3]; port->wsm[3].csr = saved_config[index].wsm[3]; port->tba[3].csr = saved_config[index].tba[3]; } static void titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1) { if (titan_pchip1_present) { titan_kill_one_pachip_port(&pachip1->g_port, 1); titan_kill_one_pachip_port(&pachip1->a_port, 3); } titan_kill_one_pachip_port(&pachip0->g_port, 0); titan_kill_one_pachip_port(&pachip0->a_port, 2); } void titan_kill_arch(int mode) { titan_kill_pachips(TITAN_pachip0, TITAN_pachip1); } /* * IO map support. */ void __iomem * titan_ioportmap(unsigned long addr) { FIXUP_IOADDR_VGA(addr); return (void __iomem *)(addr + TITAN_IO_BIAS); } void __iomem * titan_ioremap(unsigned long addr, unsigned long size) { int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT; unsigned long baddr = addr & ~TITAN_HOSE_MASK; unsigned long last = baddr + size - 1; struct pci_controller *hose; struct vm_struct *area; unsigned long vaddr; unsigned long *ptes; unsigned long pfn; #ifdef CONFIG_VGA_HOSE /* * Adjust the address and hose, if necessary. */ if (pci_vga_hose && __is_mem_vga(addr)) { h = pci_vga_hose->index; addr += pci_vga_hose->mem_space->start; } #endif /* * Find the hose. */ for (hose = hose_head; hose; hose = hose->next) if (hose->index == h) break; if (!hose) return NULL; /* * Is it direct-mapped? 
*/ if ((baddr >= __direct_map_base) && ((baddr + size - 1) < __direct_map_base + __direct_map_size)) { vaddr = addr - __direct_map_base + TITAN_MEM_BIAS; return (void __iomem *) vaddr; } /* * Check the scatter-gather arena. */ if (hose->sg_pci && baddr >= (unsigned long)hose->sg_pci->dma_base && last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){ /* * Adjust the limits (mappings must be page aligned) */ baddr -= hose->sg_pci->dma_base; last -= hose->sg_pci->dma_base; baddr &= PAGE_MASK; size = PAGE_ALIGN(last) - baddr; /* * Map it */ area = get_vm_area(size, VM_IOREMAP); if (!area) { printk("ioremap failed... no vm_area...\n"); return NULL; } ptes = hose->sg_pci->ptes; for (vaddr = (unsigned long)area->addr; baddr <= last; baddr += PAGE_SIZE, vaddr += PAGE_SIZE) { pfn = ptes[baddr >> PAGE_SHIFT]; if (!(pfn & 1)) { printk("ioremap failed... pte not valid...\n"); vfree(area->addr); return NULL; } pfn >>= 1; /* make it a true pfn */ if (__alpha_remap_area_pages(vaddr, pfn << PAGE_SHIFT, PAGE_SIZE, 0)) { printk("FAILED to remap_area_pages...\n"); vfree(area->addr); return NULL; } } flush_tlb_all(); vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK); return (void __iomem *) vaddr; } /* Assume a legacy (read: VGA) address, and return appropriately. */ return (void __iomem *)(addr + TITAN_MEM_BIAS); } void titan_iounmap(volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) vfree((void *)(PAGE_MASK & addr)); } int titan_is_mmio(const volatile void __iomem *xaddr) { unsigned long addr = (unsigned long) xaddr; if (addr >= VMALLOC_START) return 1; else return (addr & 0x100000000UL) == 0; } #ifndef CONFIG_ALPHA_GENERIC EXPORT_SYMBOL(titan_ioportmap); EXPORT_SYMBOL(titan_ioremap); EXPORT_SYMBOL(titan_iounmap); EXPORT_SYMBOL(titan_is_mmio); #endif /* * AGP GART Support. */ #include <linux/agp_backend.h> #include <asm/agp_backend.h> #include <linux/slab.h> #include <linux/delay.h> struct titan_agp_aperture { struct pci_iommu_arena *arena; long pg_start; long pg_count; }; static int titan_agp_setup(alpha_agp_info *agp) { struct titan_agp_aperture *aper; if (!alpha_agpgart_size) return -ENOMEM; aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL); if (aper == NULL) return -ENOMEM; aper->arena = agp->hose->sg_pci; aper->pg_count = alpha_agpgart_size / PAGE_SIZE; aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, aper->pg_count - 1); if (aper->pg_start < 0) { printk(KERN_ERR "Failed to reserve AGP memory\n"); kfree(aper); return -ENOMEM; } agp->aperture.bus_base = aper->arena->dma_base + aper->pg_start * PAGE_SIZE; agp->aperture.size = aper->pg_count * PAGE_SIZE; agp->aperture.sysdata = aper; return 0; } static void titan_agp_cleanup(alpha_agp_info *agp) { struct titan_agp_aperture *aper = agp->aperture.sysdata; int status; status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); if (status == -EBUSY) { printk(KERN_WARNING "Attempted to release bound AGP memory - unbinding\n"); iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); } if (status < 0) printk(KERN_ERR "Failed to release AGP memory\n"); kfree(aper); kfree(agp); } static int titan_agp_configure(alpha_agp_info *agp) { union TPAchipPCTL pctl; titan_pachip_port *port = agp->private; pctl.pctl_q_whole = port->pctl.csr; /* Side-Band Addressing? */ pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba; /* AGP Rate? 
*/ pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */ if (agp->mode.bits.rate & 2) pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */ #if 0 if (agp->mode.bits.rate & 4) pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */ #endif /* RQ Depth? */ pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2; pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7; /* * AGP Enable. */ pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable; /* Tell the user. */ printk("Enabling AGP: %dX%s\n", 1 << pctl.pctl_r_bits.apctl_v_agp_rate, pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : ""); /* Write it. */ port->pctl.csr = pctl.pctl_q_whole; /* And wait at least 5000 66MHz cycles (per Titan spec). */ udelay(100); return 0; } static int titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) { struct titan_agp_aperture *aper = agp->aperture.sysdata; return iommu_bind(aper->arena, aper->pg_start + pg_start, mem->page_count, mem->pages); } static int titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem) { struct titan_agp_aperture *aper = agp->aperture.sysdata; return iommu_unbind(aper->arena, aper->pg_start + pg_start, mem->page_count); } static unsigned long titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr) { struct titan_agp_aperture *aper = agp->aperture.sysdata; unsigned long baddr = addr - aper->arena->dma_base; unsigned long pte; if (addr < agp->aperture.bus_base || addr >= agp->aperture.bus_base + agp->aperture.size) { printk("%s: addr out of range\n", __func__); return -EINVAL; } pte = aper->arena->ptes[baddr >> PAGE_SHIFT]; if (!(pte & 1)) { printk("%s: pte not valid\n", __func__); return -EINVAL; } return (pte >> 1) << PAGE_SHIFT; } struct alpha_agp_ops titan_agp_ops = { .setup = titan_agp_setup, .cleanup = titan_agp_cleanup, .configure = titan_agp_configure, .bind = titan_agp_bind_memory, .unbind = titan_agp_unbind_memory, .translate = titan_agp_translate }; alpha_agp_info * titan_agp_info(void) { alpha_agp_info *agp; struct pci_controller *hose; titan_pachip_port *port; int hosenum = -1; union TPAchipPCTL pctl; /* * Find the AGP port. */ port = &TITAN_pachip0->a_port; if (titan_query_agp(port)) hosenum = 2; if (hosenum < 0 && titan_pchip1_present && titan_query_agp(port = &TITAN_pachip1->a_port)) hosenum = 3; /* * Find the hose the port is on. */ for (hose = hose_head; hose; hose = hose->next) if (hose->index == hosenum) break; if (!hose || !hose->sg_pci) return NULL; /* * Allocate the info structure. */ agp = kmalloc(sizeof(*agp), GFP_KERNEL); if (!agp) return NULL; /* * Fill it in. */ agp->hose = hose; agp->private = port; agp->ops = &titan_agp_ops; /* * Aperture - not configured until ops.setup(). * * FIXME - should we go ahead and allocate it here? */ agp->aperture.bus_base = 0; agp->aperture.size = 0; agp->aperture.sysdata = NULL; /* * Capabilities. */ agp->capability.lw = 0; agp->capability.bits.rate = 3; /* 2x, 1x */ agp->capability.bits.sba = 1; agp->capability.bits.rq = 7; /* 8 - 1 */ /* * Mode. */ pctl.pctl_q_whole = port->pctl.csr; agp->mode.lw = 0; agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate; agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en; agp->mode.bits.rq = 7; /* RQ Depth? */ agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en; return agp; }
linux-master
arch/alpha/kernel/core_titan.c
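titan_ioremap() above first classifies the bus address: anything inside the 1GB direct-map window at 2GB translates with a constant bias, while everything else must be resolved through the scatter-gather arena or treated as a legacy/VGA address. A standalone sketch (not kernel code) of that classification step only:

#include <stdio.h>

#define DIRECT_MAP_BASE 0x80000000UL	/* 1GB direct-mapped at 2GB, as set in titan_init_arch() */
#define DIRECT_MAP_SIZE 0x40000000UL

static int is_direct_mapped(unsigned long baddr, unsigned long size)
{
	return baddr >= DIRECT_MAP_BASE &&
	       baddr + size - 1 < DIRECT_MAP_BASE + DIRECT_MAP_SIZE;
}

int main(void)
{
	printf("0x90000000: %s\n", is_direct_mapped(0x90000000UL, 0x1000) ?
	       "direct map" : "scatter-gather or legacy");
	printf("0xc0000000: %s\n", is_direct_mapped(0xc0000000UL, 0x1000) ?
	       "direct map" : "scatter-gather or legacy");
	return 0;
}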
// SPDX-License-Identifier: GPL-2.0 /* * Alpha specific irq code. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <asm/machvec.h> #include <asm/dma.h> #include <asm/perf_event.h> #include <asm/mce.h> #include "proto.h" #include "irq_impl.h" /* Hack minimum IPL during interrupt processing for broken hardware. */ #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK int __min_ipl; EXPORT_SYMBOL(__min_ipl); #endif /* * Performance counter hook. A module can override this to * do something useful. */ static void dummy_perf(unsigned long vector, struct pt_regs *regs) { irq_err_count++; printk(KERN_CRIT "Performance counter interrupt!\n"); } void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf; EXPORT_SYMBOL(perf_irq); /* * The main interrupt entry point. */ asmlinkage void do_entInt(unsigned long type, unsigned long vector, unsigned long la_ptr, struct pt_regs *regs) { struct pt_regs *old_regs; /* * Disable interrupts during IRQ handling. * Note that there is no matching local_irq_enable() due to * severe problems with RTI at IPL0 and some MILO PALcode * (namely LX164). */ local_irq_disable(); switch (type) { case 0: #ifdef CONFIG_SMP handle_ipi(regs); return; #else irq_err_count++; printk(KERN_CRIT "Interprocessor interrupt? " "You must be kidding!\n"); #endif break; case 1: old_regs = set_irq_regs(regs); handle_irq(RTC_IRQ); set_irq_regs(old_regs); return; case 2: old_regs = set_irq_regs(regs); alpha_mv.machine_check(vector, la_ptr); set_irq_regs(old_regs); return; case 3: old_regs = set_irq_regs(regs); alpha_mv.device_interrupt(vector); set_irq_regs(old_regs); return; case 4: perf_irq(la_ptr, regs); return; default: printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n", type, vector); } printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps); } void __init common_init_isa_dma(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(0, DMA1_CLR_MASK_REG); outb(0, DMA2_CLR_MASK_REG); } void __init init_IRQ(void) { /* Just in case the platform init_irq() causes interrupts/mchecks (as is the case with RAWHIDE, at least). */ wrent(entInt, 0); alpha_mv.init_irq(); } /* * machine error checks */ #define MCHK_K_TPERR 0x0080 #define MCHK_K_TCPERR 0x0082 #define MCHK_K_HERR 0x0084 #define MCHK_K_ECC_C 0x0086 #define MCHK_K_ECC_NC 0x0088 #define MCHK_K_OS_BUGCHECK 0x008A #define MCHK_K_PAL_BUGCHECK 0x0090 #ifndef CONFIG_SMP struct mcheck_info __mcheck_info; #endif void process_mcheck_info(unsigned long vector, unsigned long la_ptr, const char *machine, int expected) { struct el_common *mchk_header; const char *reason; /* * See if the machine check is due to a badaddr() and if so, * ignore it. */ #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { printk(KERN_CRIT "%s machine check %s\n", machine, expected ? "expected." : "NOT expected!!!"); } #endif if (expected) { int cpu = smp_processor_id(); mcheck_expected(cpu) = 0; mcheck_taken(cpu) = 1; return; } mchk_header = (struct el_common *)la_ptr; printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n", machine, vector, get_irq_regs()->pc, mchk_header->code); switch (mchk_header->code) { /* Machine check reasons. Defined according to PALcode sources. 
*/ case 0x80: reason = "tag parity error"; break; case 0x82: reason = "tag control parity error"; break; case 0x84: reason = "generic hard error"; break; case 0x86: reason = "correctable ECC error"; break; case 0x88: reason = "uncorrectable ECC error"; break; case 0x8A: reason = "OS-specific PAL bugcheck"; break; case 0x90: reason = "callsys in kernel mode"; break; case 0x96: reason = "i-cache read retryable error"; break; case 0x98: reason = "processor detected hard error"; break; /* System specific (these are for Alcor, at least): */ case 0x202: reason = "system detected hard error"; break; case 0x203: reason = "system detected uncorrectable ECC error"; break; case 0x204: reason = "SIO SERR occurred on PCI bus"; break; case 0x205: reason = "parity error detected by core logic"; break; case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break; case 0x207: reason = "non-existent memory error"; break; case 0x208: reason = "MCHK_K_DCSR"; break; case 0x209: reason = "PCI SERR detected"; break; case 0x20b: reason = "PCI data parity error detected"; break; case 0x20d: reason = "PCI address parity error detected"; break; case 0x20f: reason = "PCI master abort error"; break; case 0x211: reason = "PCI target abort error"; break; case 0x213: reason = "scatter/gather PTE invalid error"; break; case 0x215: reason = "flash ROM write error"; break; case 0x217: reason = "IOA timeout detected"; break; case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break; case 0x21b: reason = "EISA fail-safe timer timeout"; break; case 0x21d: reason = "EISA bus time-out"; break; case 0x21f: reason = "EISA software generated NMI"; break; case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break; default: reason = "unknown"; break; } printk(KERN_CRIT "machine check type: %s%s\n", reason, mchk_header->retry ? " (retryable)" : ""); dik_show_regs(get_irq_regs(), NULL); #ifdef CONFIG_VERBOSE_MCHECK if (alpha_verbose_mcheck > 1) { /* Dump the logout area to give all info. */ unsigned long *ptr = (unsigned long *)la_ptr; long i; for (i = 0; i < mchk_header->size / sizeof(long); i += 2) { printk(KERN_CRIT " +%8lx %016lx %016lx\n", i*sizeof(long), ptr[i], ptr[i+1]); } } #endif /* CONFIG_VERBOSE_MCHECK */ } /* * The special RTC interrupt type. The interrupt itself was * processed by PALcode, and comes in via entInt vector 1. */ void __init init_rtc_irq(irq_handler_t handler) { irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, handle_percpu_irq, "RTC"); if (!handler) handler = rtc_timer_interrupt; if (request_irq(RTC_IRQ, handler, 0, "timer", NULL)) pr_err("Failed to register timer interrupt\n"); }
linux-master
arch/alpha/kernel/irq_alpha.c
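The do_entInt() switch in irq_alpha.c above dispatches on the five interrupt classes that PALcode reports in its 'type' argument. A standalone sketch (not kernel code) that simply names those classes:

#include <stdio.h>

static const char *entint_type_name(unsigned long type)
{
	switch (type) {
	case 0: return "interprocessor interrupt";
	case 1: return "clock (RTC) interrupt";
	case 2: return "machine check";
	case 3: return "device interrupt";
	case 4: return "performance counter interrupt";
	default: return "unknown";
	}
}

int main(void)
{
	for (unsigned long t = 0; t <= 4; t++)
		printf("type %lu: %s\n", t, entint_type_name(t));
	return 0;
}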
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/irq_pyxis.c * * Based on code written by David A Rusling ([email protected]). * * IRQ Code common to all PYXIS core logic chips. */ #include <linux/init.h> #include <linux/sched.h> #include <linux/irq.h> #include <asm/io.h> #include <asm/core_cia.h> #include "proto.h" #include "irq_impl.h" /* Note mask bit is true for ENABLED irqs. */ static unsigned long cached_irq_mask; static inline void pyxis_update_irq_hw(unsigned long mask) { *(vulp)PYXIS_INT_MASK = mask; mb(); *(vulp)PYXIS_INT_MASK; } static inline void pyxis_enable_irq(struct irq_data *d) { pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16)); } static void pyxis_disable_irq(struct irq_data *d) { pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16))); } static void pyxis_mask_and_ack_irq(struct irq_data *d) { unsigned long bit = 1UL << (d->irq - 16); unsigned long mask = cached_irq_mask &= ~bit; /* Disable the interrupt. */ *(vulp)PYXIS_INT_MASK = mask; wmb(); /* Ack PYXIS PCI interrupt. */ *(vulp)PYXIS_INT_REQ = bit; mb(); /* Re-read to force both writes. */ *(vulp)PYXIS_INT_MASK; } static struct irq_chip pyxis_irq_type = { .name = "PYXIS", .irq_mask_ack = pyxis_mask_and_ack_irq, .irq_mask = pyxis_disable_irq, .irq_unmask = pyxis_enable_irq, }; void pyxis_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary register of PYXIS */ pld = *(vulp)PYXIS_INT_REQ; pld &= cached_irq_mask; /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 7) isa_device_interrupt(vector); else handle_irq(16+i); } } void __init init_pyxis_irqs(unsigned long ignore_mask) { long i; *(vulp)PYXIS_INT_MASK = 0; /* disable all */ *(vulp)PYXIS_INT_REQ = -1; /* flush all */ mb(); /* Send -INTA pulses to clear any pending interrupts ...*/ *(vuip) CIA_IACK_SC; for (i = 16; i < 48; ++i) { if ((ignore_mask >> i) & 1) continue; irq_set_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } if (request_irq(16 + 7, no_action, 0, "isa-cascade", NULL)) pr_err("Failed to register isa-cascade interrupt\n"); }
linux-master
arch/alpha/kernel/irq_pyxis.c
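The PYXIS irq_chip above keeps a cached copy of the interrupt mask register, with a set bit meaning "enabled" and Linux IRQ numbers 16..47 mapped to mask bits 0..31. A standalone sketch (not kernel code) of that bookkeeping, minus the actual register write:

#include <stdio.h>

static unsigned long cached_irq_mask;	/* bit set = IRQ enabled */

static unsigned long enable_irq(unsigned int irq)
{
	return cached_irq_mask |= 1UL << (irq - 16);
}

static unsigned long disable_irq(unsigned int irq)
{
	return cached_irq_mask &= ~(1UL << (irq - 16));
}

int main(void)
{
	printf("mask after enable 20:  %#lx\n", enable_irq(20));
	printf("mask after enable 23:  %#lx\n", enable_irq(23));
	printf("mask after disable 20: %#lx\n", disable_irq(20));
	return 0;
}

In the kernel the updated value is then written to PYXIS_INT_MASK and read back to force the write, as pyxis_update_irq_hw() does above.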
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/err_titan.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting TITAN systems */ #include <linux/init.h> #include <linux/pci.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/core_titan.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev6.h> #include <asm/irq_regs.h> #include "err_impl.h" #include "proto.h" static int titan_parse_c_misc(u64 c_misc, int print) { #ifdef CONFIG_VERBOSE_MCHECK char *src; int nxs = 0; #endif int status = MCHK_DISPOSITION_REPORT; #define TITAN__CCHIP_MISC__NXM (1UL << 28) #define TITAN__CCHIP_MISC__NXS__S (29) #define TITAN__CCHIP_MISC__NXS__M (0x7) if (!(c_misc & TITAN__CCHIP_MISC__NXM)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS); switch(nxs) { case 0: /* CPU 0 */ case 1: /* CPU 1 */ case 2: /* CPU 2 */ case 3: /* CPU 3 */ src = "CPU"; /* num is already the CPU number */ break; case 4: /* Pchip 0 */ case 5: /* Pchip 1 */ src = "Pchip"; nxs -= 4; break; default:/* reserved */ src = "Unknown, NXS ="; /* leave num untouched */ break; } printk("%s Non-existent memory access from: %s %d\n", err_print_prefix, src, nxs); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_serror(int which, u64 serror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const serror_src[] = { "GPCI", "APCI", "AGP HP", "AGP LP" }; static const char * const serror_cmd[] = { "DMA Read", "DMA RMW", "SGTE Read", "Reserved" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0) #define TITAN__PCHIP_SERROR__UECC (1UL << 1) #define TITAN__PCHIP_SERROR__CRE (1UL << 2) #define TITAN__PCHIP_SERROR__NXIO (1UL << 3) #define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4) #define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \ TITAN__PCHIP_SERROR__CRE) #define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \ TITAN__PCHIP_SERROR__UECC | \ TITAN__PCHIP_SERROR__CRE | \ TITAN__PCHIP_SERROR__NXIO | \ TITAN__PCHIP_SERROR__LOST_CRE) #define TITAN__PCHIP_SERROR__SRC__S (52) #define TITAN__PCHIP_SERROR__SRC__M (0x3) #define TITAN__PCHIP_SERROR__CMD__S (54) #define TITAN__PCHIP_SERROR__CMD__M (0x3) #define TITAN__PCHIP_SERROR__SYN__S (56) #define TITAN__PCHIP_SERROR__SYN__M (0xff) #define TITAN__PCHIP_SERROR__ADDR__S (15) #define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL) if (!(serror & TITAN__PCHIP_SERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d SERROR: %016llx\n", err_print_prefix, which, serror); if (serror & TITAN__PCHIP_SERROR__ECCMASK) { printk("%s %sorrectable ECC Error:\n" " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" " Address: 0x%llx\n", err_print_prefix, (serror & TITAN__PCHIP_SERROR__UECC) ? 
"Unc" : "C", serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)], (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN), EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR)); } if (serror & TITAN__PCHIP_SERROR__NXIO) printk("%s Non Existent I/O Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_UECC) printk("%s Lost Uncorrectable ECC Error\n", err_print_prefix); if (serror & TITAN__PCHIP_SERROR__LOST_CRE) printk("%s Lost Correctable ECC Error\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_perror(int which, int port, u64 perror, int print) { int cmd; unsigned long addr; int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK static const char * const perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle", "I/O Read", "I/O Write", "Reserved", "Reserved", "Memory Read", "Memory Write", "Reserved", "Reserved", "Configuration Read", "Configuration Write", "Memory Read Multiple", "Dual Address Cycle", "Memory Read Line", "Memory Write and Invalidate" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_PERROR__LOST (1UL << 0) #define TITAN__PCHIP_PERROR__SERR (1UL << 1) #define TITAN__PCHIP_PERROR__PERR (1UL << 2) #define TITAN__PCHIP_PERROR__DCRTO (1UL << 3) #define TITAN__PCHIP_PERROR__SGE (1UL << 4) #define TITAN__PCHIP_PERROR__APE (1UL << 5) #define TITAN__PCHIP_PERROR__TA (1UL << 6) #define TITAN__PCHIP_PERROR__DPE (1UL << 7) #define TITAN__PCHIP_PERROR__NDS (1UL << 8) #define TITAN__PCHIP_PERROR__IPTPR (1UL << 9) #define TITAN__PCHIP_PERROR__IPTPW (1UL << 10) #define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \ TITAN__PCHIP_PERROR__SERR | \ TITAN__PCHIP_PERROR__PERR | \ TITAN__PCHIP_PERROR__DCRTO | \ TITAN__PCHIP_PERROR__SGE | \ TITAN__PCHIP_PERROR__APE | \ TITAN__PCHIP_PERROR__TA | \ TITAN__PCHIP_PERROR__DPE | \ TITAN__PCHIP_PERROR__NDS | \ TITAN__PCHIP_PERROR__IPTPR | \ TITAN__PCHIP_PERROR__IPTPW) #define TITAN__PCHIP_PERROR__DAC (1UL << 47) #define TITAN__PCHIP_PERROR__MWIN (1UL << 48) #define TITAN__PCHIP_PERROR__CMD__S (52) #define TITAN__PCHIP_PERROR__CMD__M (0x0f) #define TITAN__PCHIP_PERROR__ADDR__S (14) #define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful) if (!(perror & TITAN__PCHIP_PERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD); addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2; /* * Initializing the BIOS on a video card on a bus without * a south bridge (subtractive decode agent) can result in * master aborts as the BIOS probes the capabilities of the * card. XFree86 does such initialization. If the error * is a master abort (No DevSel as PCI Master) and the command * is an I/O read or write below the address where we start * assigning PCI I/O spaces (SRM uses 0x1000), then mark the * error as dismissable so starting XFree86 doesn't result * in a series of uncorrectable errors being reported. Also * dismiss master aborts to VGA frame buffer space * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000) * for the same reason. * * Also mark the error dismissible if it looks like the right * error but only the Lost bit is set. Since the BIOS initialization * can cause multiple master aborts and the error interrupt can * be handled on a different CPU than the BIOS code is run on, * it is possible for a second master abort to occur between the * time the PALcode reads PERROR and the time it writes PERROR * to acknowledge the error. 
If this timing happens, a second * error will be signalled after the first, and if no additional * errors occur, will look like a Lost error with no additional * errors on the same transaction as the previous error. */ if (((perror & TITAN__PCHIP_PERROR__NDS) || ((perror & TITAN__PCHIP_PERROR__ERRMASK) == TITAN__PCHIP_PERROR__LOST)) && ((((cmd & 0xE) == 2) && (addr < 0x1000)) || (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) { status = MCHK_DISPOSITION_DISMISS; } #ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; printk("%s PChip %d %cPERROR: %016llx\n", err_print_prefix, which, port ? 'A' : 'G', perror); if (perror & TITAN__PCHIP_PERROR__IPTPW) printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__IPTPR) printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__NDS) printk("%s No DEVSEL as PCI Master [Master Abort]\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DPE) printk("%s Data Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__TA) printk("%s Target Abort\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__APE) printk("%s Address Parity Error\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SGE) printk("%s Scatter-Gather Error, Invalid PTE\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__DCRTO) printk("%s Delayed-Completion Retry Timeout\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__PERR) printk("%s PERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__SERR) printk("%s SERR Asserted\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s\n" " Address: 0x%lx\n", err_print_prefix, cmd, perror_cmd[cmd], addr); if (perror & TITAN__PCHIP_PERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (perror & TITAN__PCHIP_PERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_agperror(int which, u64 agperror, int print) { int status = MCHK_DISPOSITION_REPORT; #ifdef CONFIG_VERBOSE_MCHECK int cmd, len; unsigned long addr; static const char * const agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)", "Write (low-priority)", "Write (high-priority)", "Reserved", "Reserved", "Flush", "Fence" }; #endif /* CONFIG_VERBOSE_MCHECK */ #define TITAN__PCHIP_AGPERROR__LOST (1UL << 0) #define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1) #define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2) #define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3) #define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4) #define TITAN__PCHIP_AGPERROR__PTP (1UL << 5) #define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6) #define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \ TITAN__PCHIP_AGPERROR__LPQFULL | \ TITAN__PCHIP_AGPERROR__HPQFULL | \ TITAN__PCHIP_AGPERROR__RESCMD | \ TITAN__PCHIP_AGPERROR__IPTE | \ TITAN__PCHIP_AGPERROR__PTP | \ TITAN__PCHIP_AGPERROR__NOWINDOW) #define TITAN__PCHIP_AGPERROR__DAC (1UL << 48) #define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49) #define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59) #define TITAN__PCHIP_AGPERROR__CMD__S (50) #define TITAN__PCHIP_AGPERROR__CMD__M (0x07) #define TITAN__PCHIP_AGPERROR__ADDR__S (15) #define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL) #define TITAN__PCHIP_AGPERROR__LEN__S (53) #define TITAN__PCHIP_AGPERROR__LEN__M (0x3f) if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK)) return MCHK_DISPOSITION_UNKNOWN_ERROR; 
#ifdef CONFIG_VERBOSE_MCHECK if (!print) return status; cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD); addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix, which, agperror); if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) printk("%s No Window\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__PTP) printk("%s Peer-to-Peer set\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__IPTE) printk("%s Invalid PTE\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__RESCMD) printk("%s Reserved Command\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL) printk("%s HP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL) printk("%s LP Transaction Received while Queue Full\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__LOST) printk("%s Lost Error\n", err_print_prefix); printk("%s Command: 0x%x - %s, %d Quadwords%s\n" " Address: 0x%lx\n", err_print_prefix, cmd, agperror_cmd[cmd], len, (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "", addr); if (agperror & TITAN__PCHIP_AGPERROR__DAC) printk("%s Dual Address Cycle\n", err_print_prefix); if (agperror & TITAN__PCHIP_AGPERROR__MWIN) printk("%s Hit in Monster Window\n", err_print_prefix); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } static int titan_parse_p_chip(int which, u64 serror, u64 gperror, u64 aperror, u64 agperror, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_p_serror(which, serror, print); status |= titan_parse_p_perror(which, 0, gperror, print); status |= titan_parse_p_perror(which, 1, aperror, print); status |= titan_parse_p_agperror(which, agperror, print); return status; } int titan_process_logout_frame(struct el_common *mchk_header, int print) { struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); int status = MCHK_DISPOSITION_UNKNOWN_ERROR; status |= titan_parse_c_misc(tmchk->c_misc, print); status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror, tmchk->p0_aperror, tmchk->p0_agperror, print); status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror, tmchk->p1_aperror, tmchk->p1_agperror, print); return status; } void titan_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); u64 irqmask; /* * Mask of Titan interrupt sources which are reported as machine checks * * 63 - CChip Error * 62 - PChip 0 H_Error * 61 - PChip 1 H_Error * 60 - PChip 0 C_Error * 59 - PChip 1 C_Error */ #define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL /* * Sync the processor */ mb(); draina(); /* * Only handle system errors here */ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) { ev6_machine_check(vector, la_ptr); return; } /* * It's a system error, handle it here * * The PALcode has already cleared the error, so just parse it */ /* * Parse the logout frame without printing first. 
If the only error(s) * found are classified as "dismissable", then just dismiss them and * don't print any message */ if (titan_process_logout_frame(mchk_header, 0) != MCHK_DISPOSITION_DISMISS) { char *saved_err_prefix = err_print_prefix; err_print_prefix = KERN_CRIT; /* * Either a nondismissable error was detected or no * recognized error was detected in the logout frame * -- report the error in either case */ printk("%s" "*System %s Error (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable", (unsigned int)vector, (int)smp_processor_id()); #ifdef CONFIG_VERBOSE_MCHECK titan_process_logout_frame(mchk_header, alpha_verbose_mcheck); if (alpha_verbose_mcheck) dik_show_regs(get_irq_regs(), NULL); #endif /* CONFIG_VERBOSE_MCHECK */ err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as system * machine checks to interrupts */ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK; titan_dispatch_irqs(irqmask); } /* * Release the logout frame */ wrmces(0x7); mb(); } /* * Subpacket Annotations */ static char *el_titan_pchip0_extended_annotation[] = { "Subpacket Header", "P0_SCTL", "P0_SERREN", "P0_APCTL", "P0_APERREN", "P0_AGPERREN", "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1", "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0", "P0_AWSM1", "P0_AWSM2", "P0_AWSM3", "P0_ATBA0", "P0_ATBA1", "P0_ATBA2", "P0_ATBA3", "P0_GPCTL", "P0_GPERREN", "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1", "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0", "P0_GWSM1", "P0_GWSM2", "P0_GWSM3", "P0_GTBA0", "P0_GTBA1", "P0_GTBA2", "P0_GTBA3", NULL }; static char *el_titan_pchip1_extended_annotation[] = { "Subpacket Header", "P1_SCTL", "P1_SERREN", "P1_APCTL", "P1_APERREN", "P1_AGPERREN", "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1", "P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0", "P1_AWSM1", "P1_AWSM2", "P1_AWSM3", "P1_ATBA0", "P1_ATBA1", "P1_ATBA2", "P1_ATBA3", "P1_GPCTL", "P1_GPERREN", "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1", "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0", "P1_GWSM1", "P1_GWSM2", "P1_GWSM3", "P1_GTBA0", "P1_GTBA1", "P1_GTBA2", "P1_GTBA3", NULL }; static char *el_titan_memory_extended_annotation[] = { "Subpacket Header", "AAR0", "AAR1", "AAR2", "AAR3", "P0_SCTL", "P0_GPCTL", "P0_APCTL", "P1_SCTL", "P1_GPCTL", "P1_SCTL", NULL }; static struct el_subpacket_annotation el_titan_annotations[] = { SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED, 1, "Titan PChip 0 Extended Frame", el_titan_pchip0_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED, 1, "Titan PChip 1 Extended Frame", el_titan_pchip1_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED, 1, "Titan Memory Extended Frame", el_titan_memory_extended_annotation), SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY, EL_TYPE__TERMINATION__TERMINATION, 1, "Termination Subpacket", NULL) }; static struct el_subpacket * el_process_regatta_subpacket(struct el_subpacket *header) { if (header->class != EL_CLASS__REGATTA_FAMILY) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } switch(header->type) { case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME: case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME: case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME: case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT: case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT: printk("%s ** Occurred on CPU %d:\n", err_print_prefix, (int)header->by_type.regatta_frame.cpuid); 
privateer_process_logout_frame((struct el_common *) header->by_type.regatta_frame.data_start, 1); break; default: printk("%s ** REGATTA TYPE %d SUBPACKET\n", err_print_prefix, header->type); el_annotate_subpacket(header); break; } return (struct el_subpacket *)((unsigned long)header + header->length); } static struct el_subpacket_handler titan_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY, el_process_regatta_subpacket); void __init titan_register_error_handlers(void) { size_t i; for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++) cdl_register_subpacket_annotation(&el_titan_annotations[i]); cdl_register_subpacket_handler(&titan_subpacket_handler); ev6_register_error_handlers(); } /* * Privateer */ static int privateer_process_680_frame(struct el_common *mchk_header, int print) { int status = MCHK_DISPOSITION_UNKNOWN_ERROR; #ifdef CONFIG_VERBOSE_MCHECK struct el_PRIVATEER_envdata_mcheck *emchk = (struct el_PRIVATEER_envdata_mcheck *) ((unsigned long)mchk_header + mchk_header->sys_offset); /* TODO - categorize errors, for now, no error */ if (!print) return status; /* TODO - decode instead of just dumping... */ printk("%s Summary Flags: %016llx\n" " CChip DIRx: %016llx\n" " System Management IR: %016llx\n" " CPU IR: %016llx\n" " Power Supply IR: %016llx\n" " LM78 Fault Status: %016llx\n" " System Doors: %016llx\n" " Temperature Warning: %016llx\n" " Fan Control: %016llx\n" " Fatal Power Down Code: %016llx\n", err_print_prefix, emchk->summary, emchk->c_dirx, emchk->smir, emchk->cpuir, emchk->psir, emchk->fault, emchk->sys_doors, emchk->temp_warn, emchk->fan_ctrl, emchk->code); #endif /* CONFIG_VERBOSE_MCHECK */ return status; } int privateer_process_logout_frame(struct el_common *mchk_header, int print) { struct el_common_EV6_mcheck *ev6mchk = (struct el_common_EV6_mcheck *)mchk_header; int status = MCHK_DISPOSITION_UNKNOWN_ERROR; /* * Machine check codes */ #define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */ #define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */ #define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */ #define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */ #define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */ #define PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */ #define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */ #define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */ #define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */ switch(ev6mchk->MCHK_Code) { /* * Vector 630 - Processor, Correctable */ case PRIVATEER_MCHK__CORR_ECC: case PRIVATEER_MCHK__DC_TAG_PERR: /* * Fall through to vector 670 for processing... */ /* * Vector 670 - Processor, Uncorrectable */ case PRIVATEER_MCHK__PAL_BUGCHECK: case PRIVATEER_MCHK__OS_BUGCHECK: case PRIVATEER_MCHK__PROC_HRD_ERR: case PRIVATEER_MCHK__ISTREAM_CMOV_PRX: case PRIVATEER_MCHK__ISTREAM_CMOV_FLT: status |= ev6_process_logout_frame(mchk_header, print); break; /* * Vector 620 - System, Correctable */ case PRIVATEER_MCHK__SYS_CORR_ERR: /* * Fall through to vector 660 for processing... 
*/ /* * Vector 660 - System, Uncorrectable */ case PRIVATEER_MCHK__SYS_HRD_ERR: status |= titan_process_logout_frame(mchk_header, print); break; /* * Vector 680 - System, Environmental */ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */ status |= privateer_process_680_frame(mchk_header, print); break; /* * Unknown */ default: status |= MCHK_DISPOSITION_REPORT; if (print) { printk("%s** Unknown Error, frame follows\n", err_print_prefix); mchk_dump_logout_frame(mchk_header); } } return status; } void privateer_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_common *mchk_header = (struct el_common *)la_ptr; struct el_TITAN_sysdata_mcheck *tmchk = (struct el_TITAN_sysdata_mcheck *) (la_ptr + mchk_header->sys_offset); u64 irqmask; char *saved_err_prefix = err_print_prefix; #define PRIVATEER_680_INTERRUPT_MASK (0xE00UL) #define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL) /* * Sync the processor. */ mb(); draina(); /* * Only handle system events here. */ if (vector != SCB_Q_SYSEVENT) return titan_machine_check(vector, la_ptr); /* * Report the event - System Events should be reported even if no * error is indicated since the event could indicate the return * to normal status. */ err_print_prefix = KERN_CRIT; printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n", err_print_prefix, (unsigned int)vector, (int)smp_processor_id()); privateer_process_680_frame(mchk_header, 1); err_print_prefix = saved_err_prefix; /* * Convert any pending interrupts which report as 680 machine * checks to interrupts. */ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK; /* * Dispatch the interrupt(s). */ titan_dispatch_irqs(irqmask); /* * Release the logout frame. */ wrmces(0x7); mb(); }
linux-master
arch/alpha/kernel/err_titan.c
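The dismissal heuristic in titan_parse_p_perror() above is pure bit arithmetic on the PERROR register. The standalone userspace sketch below (not kernel code) shows how the __S/__M constants feed a shift-and-mask EXTRACT() helper and how the NDS (master-abort) branch of the dismiss test behaves; the EXTRACT() definition and the dismissable() wrapper are assumptions made for illustration, and only the NDS branch is modeled.

#include <stdint.h>
#include <stdio.h>

#define PERROR_NDS       (1ULL << 8)   /* No DevSel as PCI master */
#define PERROR_CMD__S    52            /* command field, bits 55:52 */
#define PERROR_CMD__M    0x0f
#define PERROR_ADDR__S   14            /* address field (quadword units) */
#define PERROR_ADDR__M   0x1fffffffful

/* Assumed shift-and-mask form implied by the __S/__M naming. */
#define EXTRACT(u, f)    (((u) >> f##__S) & f##__M)

/* NDS branch of the dismiss test: master aborts on I/O accesses below
   0x1000 or on memory accesses into VGA/legacy BIOS space are benign
   side effects of a video BIOS probing the card. */
static int dismissable(uint64_t perror)
{
	unsigned int cmd = EXTRACT(perror, PERROR_CMD);
	unsigned long addr = EXTRACT(perror, PERROR_ADDR) << 2;

	if (!(perror & PERROR_NDS))
		return 0;
	if ((cmd & 0xE) == 2 && addr < 0x1000)		/* I/O read/write */
		return 1;
	if ((cmd & 0xE) == 6 && addr >= 0xA0000 && addr < 0x100000)
		return 1;				/* memory read/write */
	return 0;
}

int main(void)
{
	/* Hypothetical PERROR: master abort on an I/O read at 0x3C0. */
	uint64_t perror = PERROR_NDS | (2ULL << PERROR_CMD__S) |
			  ((0x3C0ULL >> 2) << PERROR_ADDR__S);

	printf("cmd=%u addr=0x%lx dismiss=%d\n",
	       (unsigned int)EXTRACT(perror, PERROR_CMD),
	       (unsigned long)(EXTRACT(perror, PERROR_ADDR) << 2),
	       dismissable(perror));
	return 0;
}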
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_mikasa.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999 Richard Henderson * * Code supporting the MIKASA (AlphaServer 1000). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/mce.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_apecs.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" /* Note mask bit is true for ENABLED irqs. */ static int cached_irq_mask; static inline void mikasa_update_irq_hw(int mask) { outw(mask, 0x536); } static inline void mikasa_enable_irq(struct irq_data *d) { mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16)); } static void mikasa_disable_irq(struct irq_data *d) { mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16))); } static struct irq_chip mikasa_irq_type = { .name = "MIKASA", .irq_unmask = mikasa_enable_irq, .irq_mask = mikasa_disable_irq, .irq_mask_ack = mikasa_disable_irq, }; static void mikasa_device_interrupt(unsigned long vector) { unsigned long pld; unsigned int i; /* Read the interrupt summary registers */ pld = (((~inw(0x534) & 0x0000ffffUL) << 16) | (((unsigned long) inb(0xa0)) << 8) | inb(0x20)); /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i < 16) { isa_device_interrupt(vector); } else { handle_irq(i); } } } static void __init mikasa_init_irq(void) { long i; if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; mikasa_update_irq_hw(0); for (i = 16; i < 32; ++i) { irq_set_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } init_i8259a_irqs(); common_init_isa_dma(); } /* * PCI Fixup configuration. * * Summary @ 0x536: * Bit Meaning * 0 Interrupt Line A from slot 0 * 1 Interrupt Line B from slot 0 * 2 Interrupt Line C from slot 0 * 3 Interrupt Line D from slot 0 * 4 Interrupt Line A from slot 1 * 5 Interrupt line B from slot 1 * 6 Interrupt Line C from slot 1 * 7 Interrupt Line D from slot 1 * 8 Interrupt Line A from slot 2 * 9 Interrupt Line B from slot 2 *10 Interrupt Line C from slot 2 *11 Interrupt Line D from slot 2 *12 NCR 810 SCSI *13 Power Supply Fail *14 Temperature Warn *15 Reserved * * The device to slot mapping looks like: * * Slot Device * 6 NCR SCSI controller * 7 Intel PCI-EISA bridge chip * 11 PCI on board slot 0 * 12 PCI on board slot 1 * 13 PCI on board slot 2 * * * This two layered interrupt approach means that we allocate IRQ 16 and * above for PCI interrupts. The IRQ relates to which bit the interrupt * comes in on. This makes interrupt processing much easier. */ static int mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[8][5] = { /*INT INTA INTB INTC INTD */ {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ { -1, -1, -1, -1, -1}, /* IdSel 19, ???? */ { -1, -1, -1, -1, -1}, /* IdSel 20, ???? */ { -1, -1, -1, -1, -1}, /* IdSel 21, ???? 
*/ { 16+0, 16+0, 16+1, 16+2, 16+3}, /* IdSel 22, slot 0 */ { 16+4, 16+4, 16+5, 16+6, 16+7}, /* IdSel 23, slot 1 */ { 16+8, 16+8, 16+9, 16+10, 16+11}, /* IdSel 24, slot 2 */ }; const long min_idsel = 6, max_idsel = 13, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) static void mikasa_apecs_machine_check(unsigned long vector, unsigned long la_ptr) { #define MCHK_NO_DEVSEL 0x205U #define MCHK_NO_TABT 0x204U struct el_common *mchk_header; unsigned int code; mchk_header = (struct el_common *)la_ptr; /* Clear the error before any reporting. */ mb(); mb(); /* magic */ draina(); apecs_pci_clr_err(); wrmces(0x7); mb(); code = mchk_header->code; process_mcheck_info(vector, la_ptr, "MIKASA APECS", (mcheck_expected(0) && (code == MCHK_NO_DEVSEL || code == MCHK_NO_TABT))); } #endif /* * The System Vector */ #if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_PRIMO) struct alpha_machine_vector mikasa_mv __initmv = { .vector_name = "Mikasa", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = mikasa_apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = mikasa_device_interrupt, .init_arch = apecs_init_arch, .init_irq = mikasa_init_irq, .init_rtc = common_init_rtc, .init_pci = common_init_pci, .pci_map_irq = mikasa_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(mikasa) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PRIMO) struct alpha_machine_vector mikasa_primo_mv __initmv = { .vector_name = "Mikasa-Primo", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 32, .device_interrupt = mikasa_device_interrupt, .init_arch = cia_init_arch, .init_irq = mikasa_init_irq, .init_rtc = common_init_rtc, .init_pci = cia_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = mikasa_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(mikasa_primo) #endif
linux-master
arch/alpha/kernel/sys_mikasa.c
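mikasa_device_interrupt() above folds the PCI summary register and both i8259 in-service registers into one word, then peels pending sources off lowest-bit-first. A minimal userspace model of that peel loop follows (the sample bit pattern is assumed, and __builtin_ctzl stands in for the kernel's ffz(~pld)):

#include <stdio.h>

int main(void)
{
	/* Pretend summary bit 4 (slot 1 INTA) and bit 12 (NCR SCSI) are
	   pending; in the composed word they sit 16 bits up, matching
	   the irq numbers the handler dispatches. */
	unsigned long pld = (1UL << (16 + 4)) | (1UL << (16 + 12));

	while (pld) {
		unsigned int i = __builtin_ctzl(pld);	/* lowest set bit */
		pld &= pld - 1;				/* clear it */

		if (i < 16)
			printf("bit %u: ISA, poll the 8259s\n", i);
		else
			printf("bit %u: summary bit %u -> handle_irq(%u)\n",
			       i, i - 16, i);
	}
	return 0;
}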
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/pci_iommu.c */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/gfp.h> #include <linux/memblock.h> #include <linux/export.h> #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/dma-map-ops.h> #include <linux/iommu-helper.h> #include <asm/io.h> #include <asm/hwrpb.h> #include "proto.h" #include "pci_impl.h" #define DEBUG_ALLOC 0 #if DEBUG_ALLOC > 0 # define DBGA(args...) printk(KERN_DEBUG args) #else # define DBGA(args...) #endif #if DEBUG_ALLOC > 1 # define DBGA2(args...) printk(KERN_DEBUG args) #else # define DBGA2(args...) #endif #define DEBUG_NODIRECT 0 #define ISA_DMA_MASK 0x00ffffff static inline unsigned long mk_iommu_pte(unsigned long paddr) { return (paddr >> (PAGE_SHIFT-1)) | 1; } /* Return the minimum of MAX or the first power of two larger than main memory. */ unsigned long size_for_memory(unsigned long max) { unsigned long mem = max_low_pfn << PAGE_SHIFT; if (mem < max) max = roundup_pow_of_two(mem); return max; } struct pci_iommu_arena * __init iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, unsigned long window_size, unsigned long align) { unsigned long mem_size; struct pci_iommu_arena *arena; mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long)); /* Note that the TLB lookup logic uses bitwise concatenation, not addition, so the required arena alignment is based on the size of the window. Retain the align parameter so that particular systems can over-align the arena. */ if (align < mem_size) align = mem_size; arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); if (!arena) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*arena)); arena->ptes = memblock_alloc(mem_size, align); if (!arena->ptes) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, mem_size, align); spin_lock_init(&arena->lock); arena->hose = hose; arena->dma_base = base; arena->size = window_size; arena->next_entry = 0; /* Align allocations to a multiple of a page size. Not needed unless there are chip bugs. */ arena->align_entry = 1; return arena; } struct pci_iommu_arena * __init iommu_arena_new(struct pci_controller *hose, dma_addr_t base, unsigned long window_size, unsigned long align) { return iommu_arena_new_node(0, hose, base, window_size, align); } /* Must be called with the arena lock held */ static long iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena, long n, long mask) { unsigned long *ptes; long i, p, nent; int pass = 0; unsigned long base; unsigned long boundary_size; base = arena->dma_base >> PAGE_SHIFT; boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT); /* Search forward for the first mask-aligned sequence of N free ptes */ ptes = arena->ptes; nent = arena->size >> PAGE_SHIFT; p = ALIGN(arena->next_entry, mask + 1); i = 0; again: while (i < n && p+i < nent) { if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) { p = ALIGN(p + 1, mask + 1); goto again; } if (ptes[p+i]) { p = ALIGN(p + i + 1, mask + 1); i = 0; } else { i = i + 1; } } if (i < n) { if (pass < 1) { /* * Reached the end. Flush the TLB and restart * the search from the beginning. */ alpha_mv.mv_pci_tbi(arena->hose, 0, -1); pass++; p = 0; i = 0; goto again; } else return -1; } /* Success. 
It's the responsibility of the caller to mark them in use before releasing the lock */ return p; } static long iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n, unsigned int align) { unsigned long flags; unsigned long *ptes; long i, p, mask; spin_lock_irqsave(&arena->lock, flags); /* Search for N empty ptes */ ptes = arena->ptes; mask = max(align, arena->align_entry) - 1; p = iommu_arena_find_pages(dev, arena, n, mask); if (p < 0) { spin_unlock_irqrestore(&arena->lock, flags); return -1; } /* Success. Mark them all in use, ie not zero and invalid for the iommu tlb that could load them from under us. The chip specific bits will fill this in with something kosher when we return. */ for (i = 0; i < n; ++i) ptes[p+i] = IOMMU_INVALID_PTE; arena->next_entry = p + n; spin_unlock_irqrestore(&arena->lock, flags); return p; } static void iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) { unsigned long *p; long i; p = arena->ptes + ofs; for (i = 0; i < n; ++i) p[i] = 0; } /* * True if the machine supports DAC addressing, and DEV can * make use of it given MASK. */ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask) { dma_addr_t dac_offset = alpha_mv.pci_dac_offset; int ok = 1; /* If this is not set, the machine doesn't support DAC at all. */ if (dac_offset == 0) ok = 0; /* The device has to be able to address our DAC bit. */ if ((dac_offset & dev->dma_mask) != dac_offset) ok = 0; /* If both conditions above are met, we are fine. */ DBGA("pci_dac_dma_supported %s from %ps\n", ok ? "yes" : "no", __builtin_return_address(0)); return ok; } /* Map a single buffer of the indicated size for PCI DMA in streaming mode. The 32-bit PCI bus mastering address to use is returned. Once the device is given the dma address, the device owns this memory until either pci_unmap_single or pci_dma_sync_single is performed. */ static dma_addr_t pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, int dac_allowed) { struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; struct pci_iommu_arena *arena; long npages, dma_ofs, i; unsigned long paddr; dma_addr_t ret; unsigned int align = 0; struct device *dev = pdev ? &pdev->dev : NULL; paddr = __pa(cpu_addr); #if !DEBUG_NODIRECT /* First check to see if we can use the direct map window. */ if (paddr + size + __direct_map_base - 1 <= max_dma && paddr + size <= __direct_map_size) { ret = paddr + __direct_map_base; DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; } #endif /* Next, use DAC if selected earlier. */ if (dac_allowed) { ret = paddr + alpha_mv.pci_dac_offset; DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; } /* If the machine doesn't define a pci_tbi routine, we have to assume it doesn't support sg mapping, and, since we tried to use direct_map above, it now must be considered an error. */ if (! alpha_mv.mv_pci_tbi) { printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); return DMA_MAPPING_ERROR; } arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; npages = iommu_num_pages(paddr, size, PAGE_SIZE); /* Force allocation to 64KB boundary for ISA bridges. 
*/ if (pdev && pdev == isa_bridge) align = 8; dma_ofs = iommu_arena_alloc(dev, arena, npages, align); if (dma_ofs < 0) { printk(KERN_WARNING "pci_map_single failed: " "could not allocate dma page tables\n"); return DMA_MAPPING_ERROR; } paddr &= PAGE_MASK; for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); ret = arena->dma_base + dma_ofs * PAGE_SIZE; ret += (unsigned long)cpu_addr & ~PAGE_MASK; DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n", cpu_addr, size, npages, ret, __builtin_return_address(0)); return ret; } /* Helper for generic DMA-mapping functions. */ static struct pci_dev *alpha_gendev_to_pci(struct device *dev) { if (dev && dev_is_pci(dev)) return to_pci_dev(dev); /* Assume that non-PCI devices asking for DMA are either ISA or EISA, BUG() otherwise. */ BUG_ON(!isa_bridge); /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA bridge is bus master then). */ if (!dev || !dev->dma_mask || !*dev->dma_mask) return isa_bridge; /* For EISA bus masters, return isa_bridge (it might have smaller dma_mask due to wiring limitations). */ if (*dev->dma_mask >= isa_bridge->dma_mask) return isa_bridge; /* This assumes ISA bus master with dma_mask 0xffffff. */ return NULL; } static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); int dac_allowed; BUG_ON(dir == DMA_NONE); dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; return pci_map_single_1(pdev, (char *)page_address(page) + offset, size, dac_allowed); } /* Unmap a single streaming mode DMA translation. The DMA_ADDR and SIZE must match what was provided for in a previous pci_map_single call. All other usages are undefined. After this call, reads by the cpu to the buffer are guaranteed to see whatever the device wrote there. */ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { unsigned long flags; struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; struct pci_iommu_arena *arena; long dma_ofs, npages; BUG_ON(dir == DMA_NONE); if (dma_addr >= __direct_map_base && dma_addr < __direct_map_base + __direct_map_size) { /* Nothing to do. */ DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n", dma_addr, size, __builtin_return_address(0)); return; } if (dma_addr > 0xffffffff) { DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n", dma_addr, size, __builtin_return_address(0)); return; } arena = hose->sg_pci; if (!arena || dma_addr < arena->dma_base) arena = hose->sg_isa; dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; if (dma_ofs * PAGE_SIZE >= arena->size) { printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx " " base %llx size %x\n", dma_addr, arena->dma_base, arena->size); return; BUG(); } npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); spin_lock_irqsave(&arena->lock, flags); iommu_arena_free(arena, dma_ofs, npages); /* If we're freeing ptes above the `next_entry' pointer (they may have snuck back into the TLB since the last wrap flush), we need to flush the TLB before reallocating the latter. 
*/ if (dma_ofs >= arena->next_entry) alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); spin_unlock_irqrestore(&arena->lock, flags); DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n", dma_addr, size, npages, __builtin_return_address(0)); } /* Allocate and map kernel buffer using consistent mode DMA for PCI device. Returns non-NULL cpu-view pointer to the buffer if successful and sets *DMA_ADDRP to the pci side dma address as well, else DMA_ADDRP is undefined. */ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); void *cpu_addr; long order = get_order(size); gfp &= ~GFP_DMA; try_again: cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order); if (! cpu_addr) { printk(KERN_INFO "pci_alloc_consistent: " "get_free_pages failed from %ps\n", __builtin_return_address(0)); /* ??? Really atomic allocation? Otherwise we could play with vmalloc and sg if we can't find contiguous memory. */ return NULL; } memset(cpu_addr, 0, size); *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); if (*dma_addrp == DMA_MAPPING_ERROR) { free_pages((unsigned long)cpu_addr, order); if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) return NULL; /* The address doesn't fit required mask and we do not have iommu. Try again with GFP_DMA. */ gfp |= GFP_DMA; goto try_again; } DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", size, cpu_addr, *dma_addrp, __builtin_return_address(0)); return cpu_addr; } /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must be values that were returned from pci_alloc_consistent. SIZE must be the same as what as passed into pci_alloc_consistent. References to the memory and mappings associated with CPU_ADDR or DMA_ADDR past this call are illegal. */ static void alpha_pci_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL); free_pages((unsigned long)cpu_addr, get_order(size)); DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n", dma_addr, size, __builtin_return_address(0)); } /* Classify the elements of the scatterlist. Write dma_address of each element with: 0 : Followers all physically adjacent. 1 : Followers all virtually adjacent. -1 : Not leader, physically adjacent to previous. -2 : Not leader, virtually adjacent to previous. Write dma_length of each leader with the combined lengths of the mergable followers. */ #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) static void sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end, int virt_ok) { unsigned long next_paddr; struct scatterlist *leader; long leader_flag, leader_length; unsigned int max_seg_size; leader = sg; leader_flag = 0; leader_length = leader->length; next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length; /* we will not marge sg without device. */ max_seg_size = dev ? 
dma_get_max_seg_size(dev) : 0; for (++sg; sg < end; ++sg) { unsigned long addr, len; addr = SG_ENT_PHYS_ADDRESS(sg); len = sg->length; if (leader_length + len > max_seg_size) goto new_segment; if (next_paddr == addr) { sg->dma_address = -1; leader_length += len; } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) { sg->dma_address = -2; leader_flag = 1; leader_length += len; } else { new_segment: leader->dma_address = leader_flag; leader->dma_length = leader_length; leader = sg; leader_flag = 0; leader_length = len; } next_paddr = addr + len; } leader->dma_address = leader_flag; leader->dma_length = leader_length; } /* Given a scatterlist leader, choose an allocation method and fill in the blanks. */ static int sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, struct scatterlist *out, struct pci_iommu_arena *arena, dma_addr_t max_dma, int dac_allowed) { unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader); long size = leader->dma_length; struct scatterlist *sg; unsigned long *ptes; long npages, dma_ofs, i; #if !DEBUG_NODIRECT /* If everything is physically contiguous, and the addresses fall into the direct-map window, use it. */ if (leader->dma_address == 0 && paddr + size + __direct_map_base - 1 <= max_dma && paddr + size <= __direct_map_size) { out->dma_address = paddr + __direct_map_base; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", __va(paddr), size, out->dma_address); return 0; } #endif /* If physically contiguous and DAC is available, use it. */ if (leader->dma_address == 0 && dac_allowed) { out->dma_address = paddr + alpha_mv.pci_dac_offset; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", __va(paddr), size, out->dma_address); return 0; } /* Otherwise, we'll use the iommu to make the pages virtually contiguous. */ paddr &= ~PAGE_MASK; npages = iommu_num_pages(paddr, size, PAGE_SIZE); dma_ofs = iommu_arena_alloc(dev, arena, npages, 0); if (dma_ofs < 0) { /* If we attempted a direct map above but failed, die. */ if (leader->dma_address == 0) return -1; /* Otherwise, break up the remaining virtually contiguous hunks into individual direct maps and retry. */ sg_classify(dev, leader, end, 0); return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); } out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", __va(paddr), size, out->dma_address, npages); /* All virtually contiguous. We need to find the length of each physically contiguous subsegment to fill in the ptes. 
*/ ptes = &arena->ptes[dma_ofs]; sg = leader; do { #if DEBUG_ALLOC > 0 struct scatterlist *last_sg = sg; #endif size = sg->length; paddr = SG_ENT_PHYS_ADDRESS(sg); while (sg+1 < end && (int) sg[1].dma_address == -1) { size += sg[1].length; sg = sg_next(sg); } npages = iommu_num_pages(paddr, size, PAGE_SIZE); paddr &= PAGE_MASK; for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) *ptes++ = mk_iommu_pte(paddr); #if DEBUG_ALLOC > 0 DBGA(" (%ld) [%p,%x] np %ld\n", last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), last_sg->length, npages); while (++last_sg <= sg) { DBGA(" (%ld) [%p,%x] cont\n", last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), last_sg->length); } #endif } while (++sg < end && (int) sg->dma_address < 0); return 1; } static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct scatterlist *start, *end, *out; struct pci_controller *hose; struct pci_iommu_arena *arena; dma_addr_t max_dma; int dac_allowed; BUG_ON(dir == DMA_NONE); dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; /* Fast path single entry scatterlists. */ if (nents == 1) { sg->dma_length = sg->length; sg->dma_address = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, dac_allowed); if (sg->dma_address == DMA_MAPPING_ERROR) return -EIO; return 1; } start = sg; end = sg + nents; /* First, prepare information about the entries. */ sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0); /* Second, figure out where we're going to map things. */ if (alpha_mv.mv_pci_tbi) { hose = pdev ? pdev->sysdata : pci_isa_hose; max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; } else { max_dma = -1; arena = NULL; hose = NULL; } /* Third, iterate over the scatterlist leaders and allocate dma space as needed. */ for (out = sg; sg < end; ++sg) { if ((int) sg->dma_address < 0) continue; if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) goto error; out++; } /* Mark the end of the list for pci_unmap_sg. */ if (out < end) out->dma_length = 0; if (out - start == 0) { printk(KERN_WARNING "pci_map_sg failed: no entries?\n"); return -ENOMEM; } DBGA("pci_map_sg: %ld entries\n", out - start); return out - start; error: printk(KERN_WARNING "pci_map_sg failed: " "could not allocate dma page tables\n"); /* Some allocation failed while mapping the scatterlist entries. Unmap them now. */ if (out > start) dma_unmap_sg(&pdev->dev, start, out - start, dir); return -ENOMEM; } /* Unmap a set of streaming mode DMA translations. Again, cpu read rules concerning calls here are the same as for pci_unmap_single() above. */ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); unsigned long flags; struct pci_controller *hose; struct pci_iommu_arena *arena; struct scatterlist *end; dma_addr_t max_dma; dma_addr_t fbeg, fend; BUG_ON(dir == DMA_NONE); if (! alpha_mv.mv_pci_tbi) return; hose = pdev ? pdev->sysdata : pci_isa_hose; max_dma = pdev ? 
pdev->dma_mask : ISA_DMA_MASK; arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; fbeg = -1, fend = 0; spin_lock_irqsave(&arena->lock, flags); for (end = sg + nents; sg < end; ++sg) { dma_addr_t addr; size_t size; long npages, ofs; dma_addr_t tend; addr = sg->dma_address; size = sg->dma_length; if (!size) break; if (addr > 0xffffffff) { /* It's a DAC address -- nothing to do. */ DBGA(" (%ld) DAC [%llx,%zx]\n", sg - end + nents, addr, size); continue; } if (addr >= __direct_map_base && addr < __direct_map_base + __direct_map_size) { /* Nothing to do. */ DBGA(" (%ld) direct [%llx,%zx]\n", sg - end + nents, addr, size); continue; } DBGA(" (%ld) sg [%llx,%zx]\n", sg - end + nents, addr, size); npages = iommu_num_pages(addr, size, PAGE_SIZE); ofs = (addr - arena->dma_base) >> PAGE_SHIFT; iommu_arena_free(arena, ofs, npages); tend = addr + size - 1; if (fbeg > addr) fbeg = addr; if (fend < tend) fend = tend; } /* If we're freeing ptes above the `next_entry' pointer (they may have snuck back into the TLB since the last wrap flush), we need to flush the TLB before reallocating the latter. */ if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) alpha_mv.mv_pci_tbi(hose, fbeg, fend); spin_unlock_irqrestore(&arena->lock, flags); DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); } /* Return whether the given PCI device DMA address mask can be supported properly. */ static int alpha_pci_supported(struct device *dev, u64 mask) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct pci_controller *hose; struct pci_iommu_arena *arena; /* If there exists a direct map, and the mask fits either the entire direct mapped space or the total system memory as shifted by the map base */ if (__direct_map_size != 0 && (__direct_map_base + __direct_map_size - 1 <= mask || __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) return 1; /* Check that we have a scatter-gather arena that fits. */ hose = pdev ? pdev->sysdata : pci_isa_hose; arena = hose->sg_isa; if (arena && arena->dma_base + arena->size - 1 <= mask) return 1; arena = hose->sg_pci; if (arena && arena->dma_base + arena->size - 1 <= mask) return 1; /* As last resort try ZONE_DMA. */ if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) return 1; return 0; } /* * AGP GART extensions to the IOMMU */ int iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) { unsigned long flags; unsigned long *ptes; long i, p; if (!arena) return -EINVAL; spin_lock_irqsave(&arena->lock, flags); /* Search for N empty ptes. */ ptes = arena->ptes; p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); if (p < 0) { spin_unlock_irqrestore(&arena->lock, flags); return -1; } /* Success. Mark them all reserved (ie not zero and invalid) for the iommu tlb that could load them from under us. They will be filled in with valid bits by _bind() */ for (i = 0; i < pg_count; ++i) ptes[p+i] = IOMMU_RESERVED_PTE; arena->next_entry = p + pg_count; spin_unlock_irqrestore(&arena->lock, flags); return p; } int iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) { unsigned long *ptes; long i; if (!arena) return -EINVAL; ptes = arena->ptes; /* Make sure they're all reserved first... 
*/ for(i = pg_start; i < pg_start + pg_count; i++) if (ptes[i] != IOMMU_RESERVED_PTE) return -EBUSY; iommu_arena_free(arena, pg_start, pg_count); return 0; } int iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, struct page **pages) { unsigned long flags; unsigned long *ptes; long i, j; if (!arena) return -EINVAL; spin_lock_irqsave(&arena->lock, flags); ptes = arena->ptes; for(j = pg_start; j < pg_start + pg_count; j++) { if (ptes[j] != IOMMU_RESERVED_PTE) { spin_unlock_irqrestore(&arena->lock, flags); return -EBUSY; } } for(i = 0, j = pg_start; i < pg_count; i++, j++) ptes[j] = mk_iommu_pte(page_to_phys(pages[i])); spin_unlock_irqrestore(&arena->lock, flags); return 0; } int iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) { unsigned long *p; long i; if (!arena) return -EINVAL; p = arena->ptes + pg_start; for(i = 0; i < pg_count; i++) p[i] = IOMMU_RESERVED_PTE; return 0; } const struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = alpha_pci_free_coherent, .map_page = alpha_pci_map_page, .unmap_page = alpha_pci_unmap_page, .map_sg = alpha_pci_map_sg, .unmap_sg = alpha_pci_unmap_sg, .dma_supported = alpha_pci_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, .alloc_pages = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; EXPORT_SYMBOL(alpha_pci_ops);
linux-master
arch/alpha/kernel/pci_iommu.c
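Two small pieces of arithmetic carry most of iommu_arena_new_node() and pci_map_single_1() above: sizing the PTE table for a DMA window, and packing a physical address into a scatter-gather PTE. A userspace sketch with an assumed 8 KB Alpha page size (the constants and sample window are illustrative only):

#include <stdio.h>

#define PAGE_SHIFT	13UL			/* Alpha: 8 KB pages (assumed) */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same packing as the kernel helper: pfn shifted up one, low bit valid. */
static unsigned long mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT - 1)) | 1;
}

int main(void)
{
	unsigned long window_size = 512UL << 20;	/* 512 MB DMA window */
	unsigned long nptes = window_size >> PAGE_SHIFT;
	unsigned long table_bytes =
		window_size / (PAGE_SIZE / sizeof(unsigned long));

	printf("%lu PTEs, %lu KB of PTE table\n", nptes, table_bytes >> 10);
	printf("pte for paddr 0x2000000 = 0x%lx\n",
	       mk_iommu_pte(0x2000000UL));
	return 0;
}

The table-size expression is the same one the arena constructor uses: one unsigned long of PTE storage per window page, which for a 512 MB window at 8 KB pages works out to 65536 entries in 512 KB.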
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/smp.c * * 2001-07-09 Phil Ezolt ([email protected]) * Renamed modified smp_call_function to smp_call_function_on_cpu() * Created an function that conforms to the old calling convention * of smp_call_function(). * * This is helpful for DCPI. * */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/module.h> #include <linux/sched/mm.h> #include <linux/mm.h> #include <linux/err.h> #include <linux/threads.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/cache.h> #include <linux/profile.h> #include <linux/bitops.h> #include <linux/cpu.h> #include <asm/hwrpb.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #define DEBUG_SMP 0 #if DEBUG_SMP #define DBGS(args) printk args #else #define DBGS(args) #endif /* A collection of per-processor data. */ struct cpuinfo_alpha cpu_data[NR_CPUS]; EXPORT_SYMBOL(cpu_data); /* A collection of single bit ipi messages. */ static struct { unsigned long bits ____cacheline_aligned; } ipi_data[NR_CPUS] __cacheline_aligned; enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, }; /* Set to a secondary's cpuid when it comes online. */ static int smp_secondary_alive = 0; int smp_num_probed; /* Internal processor count */ int smp_num_cpus = 1; /* Number that came online. */ EXPORT_SYMBOL(smp_num_cpus); /* * Called by both boot and secondaries to move global data into * per-processor storage. */ static inline void __init smp_store_cpu_info(int cpuid) { cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; cpu_data[cpuid].need_new_asn = 0; cpu_data[cpuid].asn_lock = 0; } /* * Ideally sets up per-cpu profiling hooks. Doesn't do much now... */ static inline void __init smp_setup_percpu_timer(int cpuid) { cpu_data[cpuid].prof_counter = 1; cpu_data[cpuid].prof_multiplier = 1; } static void __init wait_boot_cpu_to_stop(int cpuid) { unsigned long stop = jiffies + 10*HZ; while (time_before(jiffies, stop)) { if (!smp_secondary_alive) return; barrier(); } printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); for (;;) barrier(); } /* * Where secondaries begin a life of C. */ void __init smp_callin(void) { int cpuid = hard_smp_processor_id(); if (cpu_online(cpuid)) { printk("??, cpu 0x%x already present??\n", cpuid); BUG(); } set_cpu_online(cpuid, true); /* Turn on machine checks. */ wrmces(7); /* Set trap vectors. */ trap_init(); /* Set interrupt vector. */ wrent(entInt, 0); /* Get our local ticker going. */ smp_setup_percpu_timer(cpuid); init_clockevent(); /* Call platform-specific callin, if specified */ if (alpha_mv.smp_callin) alpha_mv.smp_callin(); /* All kernel threads share the same mm context. */ mmgrab(&init_mm); current->active_mm = &init_mm; /* inform the notifiers about the new cpu */ notify_cpu_starting(cpuid); /* Must have completely accurate bogos. */ local_irq_enable(); /* Wait boot CPU to stop with irq enabled before running calibrate_delay. */ wait_boot_cpu_to_stop(cpuid); mb(); calibrate_delay(); smp_store_cpu_info(cpuid); /* Allow master to continue only after we written loops_per_jiffy. 
*/ wmb(); smp_secondary_alive = 1; DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n", cpuid, current, current->active_mm)); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } /* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */ static int wait_for_txrdy (unsigned long cpumask) { unsigned long timeout; if (!(hwrpb->txrdy & cpumask)) return 0; timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout)) { if (!(hwrpb->txrdy & cpumask)) return 0; udelay(10); barrier(); } return -1; } /* * Send a message to a secondary's console. "START" is one such * interesting message. ;-) */ static void send_secondary_console_msg(char *str, int cpuid) { struct percpu_struct *cpu; register char *cp1, *cp2; unsigned long cpumask; size_t len; cpu = (struct percpu_struct *) ((char*)hwrpb + hwrpb->processor_offset + cpuid * hwrpb->processor_size); cpumask = (1UL << cpuid); if (wait_for_txrdy(cpumask)) goto timeout; cp2 = str; len = strlen(cp2); *(unsigned int *)&cpu->ipc_buffer[0] = len; cp1 = (char *) &cpu->ipc_buffer[1]; memcpy(cp1, cp2, len); /* atomic test and set */ wmb(); set_bit(cpuid, &hwrpb->rxrdy); if (wait_for_txrdy(cpumask)) goto timeout; return; timeout: printk("Processor %x not ready\n", cpuid); } /* * A secondary console wants to send a message. Receive it. */ static void recv_secondary_console_msg(void) { int mycpu, i, cnt; unsigned long txrdy = hwrpb->txrdy; char *cp1, *cp2, buf[80]; struct percpu_struct *cpu; DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy)); mycpu = hard_smp_processor_id(); for (i = 0; i < NR_CPUS; i++) { if (!(txrdy & (1UL << i))) continue; DBGS(("recv_secondary_console_msg: " "TXRDY contains CPU %d.\n", i)); cpu = (struct percpu_struct *) ((char*)hwrpb + hwrpb->processor_offset + i * hwrpb->processor_size); DBGS(("recv_secondary_console_msg: on %d from %d" " HALT_REASON 0x%lx FLAGS 0x%lx\n", mycpu, i, cpu->halt_reason, cpu->flags)); cnt = cpu->ipc_buffer[0] >> 32; if (cnt <= 0 || cnt >= 80) strcpy(buf, "<<< BOGUS MSG >>>"); else { cp1 = (char *) &cpu->ipc_buffer[1]; cp2 = buf; memcpy(cp2, cp1, cnt); cp2[cnt] = '\0'; while ((cp2 = strchr(cp2, '\r')) != 0) { *cp2 = ' '; if (cp2[1] == '\n') cp2[1] = ' '; } } DBGS((KERN_INFO "recv_secondary_console_msg: on %d " "message is '%s'\n", mycpu, buf)); } hwrpb->txrdy = 0; } /* * Convince the console to have a secondary cpu begin execution. */ static int secondary_cpu_start(int cpuid, struct task_struct *idle) { struct percpu_struct *cpu; struct pcb_struct *hwpcb, *ipcb; unsigned long timeout; cpu = (struct percpu_struct *) ((char*)hwrpb + hwrpb->processor_offset + cpuid * hwrpb->processor_size); hwpcb = (struct pcb_struct *) cpu->hwpcb; ipcb = &task_thread_info(idle)->pcb; /* Initialize the CPU's HWPCB to something just good enough for us to get started. Immediately after starting, we'll swpctx to the target idle task's pcb. Reuse the stack in the mean time. Precalculate the target PCBB. 
*/ hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16; hwpcb->usp = 0; hwpcb->ptbr = ipcb->ptbr; hwpcb->pcc = 0; hwpcb->asn = 0; hwpcb->unique = virt_to_phys(ipcb); hwpcb->flags = ipcb->flags; hwpcb->res1 = hwpcb->res2 = 0; #if 0 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n", hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique)); #endif DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", cpuid, idle->state, ipcb->flags)); /* Setup HWRPB fields that SRM uses to activate secondary CPU */ hwrpb->CPU_restart = __smp_callin; hwrpb->CPU_restart_data = (unsigned long) __smp_callin; /* Recalculate and update the HWRPB checksum */ hwrpb_update_checksum(hwrpb); /* * Send a "start" command to the specified processor. */ /* SRM III 3.4.1.3 */ cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ cpu->flags &= ~1; /* turn off Bootstrap In Progress */ wmb(); send_secondary_console_msg("START\r\n", cpuid); /* Wait 10 seconds for an ACK from the console. */ timeout = jiffies + 10*HZ; while (time_before(jiffies, timeout)) { if (cpu->flags & 1) goto started; udelay(10); barrier(); } printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); return -1; started: DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); return 0; } /* * Bring one cpu online. */ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) { unsigned long timeout; /* Signal the secondary to wait a moment. */ smp_secondary_alive = -1; /* Whirrr, whirrr, whirrrrrrrrr... */ if (secondary_cpu_start(cpuid, idle)) return -1; /* Notify the secondary CPU it can run calibrate_delay. */ mb(); smp_secondary_alive = 0; /* We've been acked by the console; wait one second for the task to start up for real. */ timeout = jiffies + 1*HZ; while (time_before(jiffies, timeout)) { if (smp_secondary_alive == 1) goto alive; udelay(10); barrier(); } /* We failed to boot the CPU. */ printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); return -1; alive: /* Another "Red Snapper". */ return 0; } /* * Called from setup_arch. Detect an SMP system and which processors * are present. */ void __init setup_smp(void) { struct percpu_struct *cpubase, *cpu; unsigned long i; if (boot_cpuid != 0) { printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", boot_cpuid); } if (hwrpb->nr_processors > 1) { int boot_cpu_palrev; DBGS(("setup_smp: nr_processors %ld\n", hwrpb->nr_processors)); cpubase = (struct percpu_struct *) ((char*)hwrpb + hwrpb->processor_offset); boot_cpu_palrev = cpubase->pal_revision; for (i = 0; i < hwrpb->nr_processors; i++) { cpu = (struct percpu_struct *) ((char *)cpubase + i*hwrpb->processor_size); if ((cpu->flags & 0x1cc) == 0x1cc) { smp_num_probed++; set_cpu_possible(i, true); set_cpu_present(i, true); cpu->pal_revision = boot_cpu_palrev; } DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", i, cpu->flags, cpu->type)); DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n", i, cpu->pal_revision)); } } else { smp_num_probed = 1; } printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", smp_num_probed, cpumask_bits(cpu_present_mask)[0]); } /* * Called by smp_init prepare the secondaries */ void __init smp_prepare_cpus(unsigned int max_cpus) { /* Take care of some initial bookkeeping. */ memset(ipi_data, 0, sizeof(ipi_data)); current_thread_info()->cpu = boot_cpuid; smp_store_cpu_info(boot_cpuid); smp_setup_percpu_timer(boot_cpuid); /* Nothing to do on a UP box, or when told not to. 
*/ if (smp_num_probed == 1 || max_cpus == 0) { init_cpu_possible(cpumask_of(boot_cpuid)); init_cpu_present(cpumask_of(boot_cpuid)); printk(KERN_INFO "SMP mode deactivated.\n"); return; } printk(KERN_INFO "SMP starting up secondaries.\n"); smp_num_cpus = smp_num_probed; } void smp_prepare_boot_cpu(void) { } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { smp_boot_one_cpu(cpu, tidle); return cpu_online(cpu) ? 0 : -ENOSYS; } void __init smp_cpus_done(unsigned int max_cpus) { int cpu; unsigned long bogosum = 0; for(cpu = 0; cpu < NR_CPUS; cpu++) if (cpu_online(cpu)) bogosum += cpu_data[cpu].loops_per_jiffy; printk(KERN_INFO "SMP: Total of %d processors activated " "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), (bogosum + 2500) / (500000/HZ), ((bogosum + 2500) / (5000/HZ)) % 100); } static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) { int i; mb(); for_each_cpu(i, to_whom) set_bit(operation, &ipi_data[i].bits); mb(); for_each_cpu(i, to_whom) wripir(i); } void handle_ipi(struct pt_regs *regs) { int this_cpu = smp_processor_id(); unsigned long *pending_ipis = &ipi_data[this_cpu].bits; unsigned long ops; #if 0 DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n", this_cpu, *pending_ipis, regs->pc)); #endif mb(); /* Order interrupt and bit testing. */ while ((ops = xchg(pending_ipis, 0)) != 0) { mb(); /* Order bit clearing and data access. */ do { unsigned long which; which = ops & -ops; ops &= ~which; which = __ffs(which); switch (which) { case IPI_RESCHEDULE: scheduler_ipi(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CPU_STOP: halt(); default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; } } while (ops); mb(); /* Order data access and bit testing. */ } cpu_data[this_cpu].ipi_count++; if (hwrpb->txrdy) recv_secondary_console_msg(); } void arch_smp_send_reschedule(int cpu) { #ifdef DEBUG_IPI_MSG if (cpu == hard_smp_processor_id()) printk(KERN_WARNING "smp_send_reschedule: Sending IPI to self.\n"); #endif send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); } void smp_send_stop(void) { cpumask_t to_whom; cpumask_copy(&to_whom, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &to_whom); #ifdef DEBUG_IPI_MSG if (hard_smp_processor_id() != boot_cpu_id) printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); #endif send_ipi_message(&to_whom, IPI_CPU_STOP); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_ipi_message(mask, IPI_CALL_FUNC); } void arch_send_call_function_single_ipi(int cpu) { send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); } static void ipi_imb(void *ignored) { imb(); } void smp_imb(void) { /* Must wait other processors to flush their icache before continue. */ on_each_cpu(ipi_imb, NULL, 1); } EXPORT_SYMBOL(smp_imb); static void ipi_flush_tlb_all(void *ignored) { tbia(); } void flush_tlb_all(void) { /* Although we don't have any data to pass, we do want to synchronize with the other processors. 
*/ on_each_cpu(ipi_flush_tlb_all, NULL, 1); } #define asn_locked() (cpu_data[smp_processor_id()].asn_lock) static void ipi_flush_tlb_mm(void *x) { struct mm_struct *mm = x; if (mm == current->active_mm && !asn_locked()) flush_tlb_current(mm); else flush_tlb_other(mm); } void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); if (mm == current->active_mm) { flush_tlb_current(mm); if (atomic_read(&mm->mm_users) <= 1) { int cpu, this_cpu = smp_processor_id(); for (cpu = 0; cpu < NR_CPUS; cpu++) { if (!cpu_online(cpu) || cpu == this_cpu) continue; if (mm->context[cpu]) mm->context[cpu] = 0; } preempt_enable(); return; } } smp_call_function(ipi_flush_tlb_mm, mm, 1); preempt_enable(); } EXPORT_SYMBOL(flush_tlb_mm); struct flush_tlb_page_struct { struct vm_area_struct *vma; struct mm_struct *mm; unsigned long addr; }; static void ipi_flush_tlb_page(void *x) { struct flush_tlb_page_struct *data = x; struct mm_struct * mm = data->mm; if (mm == current->active_mm && !asn_locked()) flush_tlb_current_page(mm, data->vma, data->addr); else flush_tlb_other(mm); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { struct flush_tlb_page_struct data; struct mm_struct *mm = vma->vm_mm; preempt_disable(); if (mm == current->active_mm) { flush_tlb_current_page(mm, vma, addr); if (atomic_read(&mm->mm_users) <= 1) { int cpu, this_cpu = smp_processor_id(); for (cpu = 0; cpu < NR_CPUS; cpu++) { if (!cpu_online(cpu) || cpu == this_cpu) continue; if (mm->context[cpu]) mm->context[cpu] = 0; } preempt_enable(); return; } } data.vma = vma; data.mm = mm; data.addr = addr; smp_call_function(ipi_flush_tlb_page, &data, 1); preempt_enable(); } EXPORT_SYMBOL(flush_tlb_page); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { /* On the Alpha we always flush the whole user tlb. */ flush_tlb_mm(vma->vm_mm); } EXPORT_SYMBOL(flush_tlb_range); static void ipi_flush_icache_page(void *x) { struct mm_struct *mm = (struct mm_struct *) x; if (mm == current->active_mm && !asn_locked()) __load_new_mm_context(mm); else flush_tlb_other(mm); } void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { struct mm_struct *mm = vma->vm_mm; if ((vma->vm_flags & VM_EXEC) == 0) return; preempt_disable(); if (mm == current->active_mm) { __load_new_mm_context(mm); if (atomic_read(&mm->mm_users) <= 1) { int cpu, this_cpu = smp_processor_id(); for (cpu = 0; cpu < NR_CPUS; cpu++) { if (!cpu_online(cpu) || cpu == this_cpu) continue; if (mm->context[cpu]) mm->context[cpu] = 0; } preempt_enable(); return; } } smp_call_function(ipi_flush_icache_page, mm, 1); preempt_enable(); }
linux-master
arch/alpha/kernel/smp.c
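The IPI path above packs all pending message types for a CPU into one word: senders set bits in ipi_data[cpu].bits and ring the doorbell with wripir(), the receiver xchg()s the word to zero and decodes it. A simplified userspace model of the receive-side decode (assumed values; the xchg is only mimicked):

#include <stdio.h>

enum ipi_message_type { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

static const char *const names[] = { "reschedule", "call-func", "stop" };

int main(void)
{
	/* Two messages arrived before the receiver got to run. */
	unsigned long pending = (1UL << IPI_RESCHEDULE) | (1UL << IPI_CALL_FUNC);

	/* Receiver: take the whole word at once (xchg in the kernel). */
	unsigned long ops = pending;
	pending = 0;

	while (ops) {
		unsigned long which = ops & -ops;	/* isolate lowest bit */
		ops &= ~which;				/* consume it */
		printf("IPI %s\n", names[__builtin_ctzl(which)]);
	}
	return 0;
}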
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_sx164.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the SX164 (PCA56+PYXIS). */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_cia.h> #include <asm/hwrpb.h> #include <asm/tlbflush.h> #include <asm/special_insns.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init sx164_init_irq(void) { outb(0, DMA1_RESET_REG); outb(0, DMA2_RESET_REG); outb(DMA_MODE_CASCADE, DMA2_MODE_REG); outb(0, DMA2_MASK_REG); if (alpha_using_srm) alpha_mv.device_interrupt = srm_device_interrupt; init_i8259a_irqs(); /* Not interested in the bogus interrupts (0,3,4,5,40-47), NMI (1), or HALT (2). */ if (alpha_using_srm) init_srm_irqs(40, 0x3f0000); else init_pyxis_irqs(0xff00003f0000UL); if (request_irq(16 + 6, no_action, 0, "timer-cascade", NULL)) pr_err("Failed to register timer-cascade interrupt\n"); } /* * PCI Fixup configuration. * * Summary @ PYXIS_INT_REQ: * Bit Meaning * 0 RSVD * 1 NMI * 2 Halt/Reset switch * 3 MBZ * 4 RAZ * 5 RAZ * 6 Interval timer (RTC) * 7 PCI-ISA Bridge * 8 Interrupt Line A from slot 3 * 9 Interrupt Line A from slot 2 *10 Interrupt Line A from slot 1 *11 Interrupt Line A from slot 0 *12 Interrupt Line B from slot 3 *13 Interrupt Line B from slot 2 *14 Interrupt Line B from slot 1 *15 Interrupt line B from slot 0 *16 Interrupt Line C from slot 3 *17 Interrupt Line C from slot 2 *18 Interrupt Line C from slot 1 *19 Interrupt Line C from slot 0 *20 Interrupt Line D from slot 3 *21 Interrupt Line D from slot 2 *22 Interrupt Line D from slot 1 *23 Interrupt Line D from slot 0 * * IdSel * 5 32 bit PCI option slot 2 * 6 64 bit PCI option slot 0 * 7 64 bit PCI option slot 1 * 8 Cypress I/O * 9 32 bit PCI option slot 3 */ static int sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] = { /*INT INTA INTB INTC INTD */ { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ { 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */ { -1, -1, -1, -1, -1}, /* IdSel 8 SIO */ { 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static void __init sx164_init_pci(void) { cia_init_pci(); SMC669_Init(0); } static void __init sx164_init_arch(void) { /* * OSF palcode v1.23 forgets to enable PCA56 Motion Video * Instructions. Let's enable it. * We have to check palcode revision because CSERVE interface * is subject to change without notice. For example, it * has been changed completely since v1.16 (found in MILO * distribution). 
-ink */ struct percpu_struct *cpu = (struct percpu_struct*) ((char*)hwrpb + hwrpb->processor_offset); if (amask(AMASK_MAX) != 0 && alpha_using_srm && (cpu->pal_revision & 0xffff) <= 0x117) { __asm__ __volatile__( "lda $16,8($31)\n" "call_pal 9\n" /* Allow PALRES insns in kernel mode */ ".long 0x64000118\n\n" /* hw_mfpr $0,icsr */ "ldah $16,(1<<(19-16))($31)\n" "or $0,$16,$0\n" /* set MVE bit */ ".long 0x74000118\n" /* hw_mtpr $0,icsr */ "lda $16,9($31)\n" "call_pal 9" /* Disable PALRES insns */ : : : "$0", "$16"); printk("PCA56 MVI set enabled\n"); } pyxis_init_arch(); } /* * The System Vector */ struct alpha_machine_vector sx164_mv __initmv = { .vector_name = "SX164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 48, .device_interrupt = pyxis_device_interrupt, .init_arch = sx164_init_arch, .init_irq = sx164_init_irq, .init_rtc = common_init_rtc, .init_pci = sx164_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = sx164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(sx164)
linux-master
arch/alpha/kernel/sys_sx164.c
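sx164_map_irq() above resolves an interrupt by indexing irq_tab with (IdSel - min_idsel) as the row and the interrupt pin as the column; COMMON_TABLE_LOOKUP itself lives in the shared pci_impl.h header and is not shown here. The standalone sketch below reproduces the kind of bounds-checked lookup that macro is expected to perform. The table is the same as above (declared int so the -1 entries stay negative regardless of char signedness), and treating INTA as column 1 is an assumption made for the illustration.

#include <stdio.h>

/* Same IRQ routing table as sx164_map_irq() above. */
static const int irq_tab[5][5] = {
	/*INT    INTA   INTB   INTC   INTD */
	{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21},	/* IdSel 5  slot 2  J17 */
	{ 16+11, 16+11, 16+15, 16+19, 16+23},	/* IdSel 6  slot 0  J19 */
	{ 16+10, 16+10, 16+14, 16+18, 16+22},	/* IdSel 7  slot 1  J18 */
	{    -1,    -1,    -1,    -1,    -1},	/* IdSel 8  SIO         */
	{ 16+ 8, 16+ 8, 16+12, 16+16, 16+20}	/* IdSel 9  slot 3  J15 */
};

/* Bounds-checked row/column lookup: row = IdSel - min_idsel,
 * column = interrupt pin.  Out-of-range queries yield -1 ("no IRQ"). */
static long table_lookup(long slot, long pin)
{
	const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;

	if (slot < min_idsel || slot > max_idsel || pin < 0 || pin >= irqs_per_slot)
		return -1;
	return irq_tab[slot - min_idsel][pin];
}

int main(void)
{
	/* IdSel 6 (64-bit slot 0), pin INTA (column 1) -> IRQ 27. */
	printf("IdSel 6, INTA -> IRQ %ld\n", table_lookup(6, 1));
	/* IdSel 8 is the SIO bridge: no IRQ assigned. */
	printf("IdSel 8, INTA -> IRQ %ld\n", table_lookup(8, 1));
	return 0;
}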
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/core_polaris.c
 *
 * POLARIS chip-specific code
 */

#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_polaris.h>
#undef __EXTERN_INLINE

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/ptrace.h>

#include "proto.h"
#include "pci_impl.h"

/*
 * BIOS32-style PCI interface:
 */

#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBG_CFG(args)	printk args
#else
# define DBG_CFG(args)
#endif


/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address.  This is fairly straightforward
 * on POLARIS, since the chip itself generates Type 0 or Type 1
 * cycles automatically depending on the bus number (Bus 0 is
 * hardwired to Type 0, all others are Type 1.  Peer bridges
 * are not supported).
 *
 * All types:
 *
 *  3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., scsi and ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */

static int
mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
	     unsigned long *pci_addr, u8 *type1)
{
	u8 bus = pbus->number;

	*type1 = (bus == 0) ? 0 : 1;
	*pci_addr = (bus << 16) | (device_fn << 8) | (where) |
		    POLARIS_DENSE_CONFIG_BASE;

	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
		 " returning address 0x%p\n",
		 bus, device_fn, where, *pci_addr));

	return 0;
}

static int
polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		*value = __kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		*value = __kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*value = *(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int
polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	unsigned long addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1:
		__kernel_stb(value, *(vucp)addr);
		mb();
		__kernel_ldbu(*(vucp)addr);
		break;
	case 2:
		__kernel_stw(value, *(vusp)addr);
		mb();
		__kernel_ldwu(*(vusp)addr);
		break;
	case 4:
		*(vuip)addr = value;
		mb();
		*(vuip)addr;
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops polaris_pci_ops =
{
	.read =		polaris_read_config,
	.write =	polaris_write_config,
};

void __init
polaris_init_arch(void)
{
	struct pci_controller *hose;

	/* May need to initialize error reporting (see PCICTL0/1), but
	 * for now assume that the firmware has done the right thing
	 * already.
	 */
#if 0
	printk("polaris_init_arch(): trusting firmware for setup\n");
#endif

	/*
	 * Create our single hose.
*/ pci_isa_hose = hose = alloc_pci_controller(); hose->io_space = &ioport_resource; hose->mem_space = &iomem_resource; hose->index = 0; hose->sparse_mem_base = 0; hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR; hose->sparse_io_base = 0; hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR; hose->sg_isa = hose->sg_pci = NULL; /* The I/O window is fixed at 2G @ 2G. */ __direct_map_base = 0x80000000; __direct_map_size = 0x80000000; } static inline void polaris_pci_clr_err(void) { *(vusp)POLARIS_W_STATUS; /* Write 1's to settable bits to clear errors */ *(vusp)POLARIS_W_STATUS = 0x7800; mb(); *(vusp)POLARIS_W_STATUS; } void polaris_machine_check(unsigned long vector, unsigned long la_ptr) { /* Clear the error before any reporting. */ mb(); mb(); draina(); polaris_pci_clr_err(); wrmces(0x7); mb(); process_mcheck_info(vector, la_ptr, "POLARIS", mcheck_expected(0)); }
linux-master
arch/alpha/kernel/core_polaris.c
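mk_conf_addr() above packs bus, device/function and register offset into one dense-space address exactly as the layout comment describes. The following standalone sketch redoes that packing in user space; TOY_CONFIG_BASE and TOY_DEVFN are made-up stand-ins for POLARIS_DENSE_CONFIG_BASE and the kernel's PCI_DEVFN(), included only so the example compiles on its own.

#include <stdio.h>

/* Illustrative stand-ins -- the real dense config base comes from
 * asm/core_polaris.h and is not reproduced here. */
#define TOY_CONFIG_BASE		0xf9800000000ULL
#define TOY_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

/* Same packing as mk_conf_addr(): bus in bits 23:16, device/function
 * in 15:8, register offset in 7:0, all OR'd onto the dense window. */
static unsigned long long toy_conf_addr(unsigned int bus, unsigned int devfn,
					unsigned int where)
{
	return ((unsigned long long)bus << 16) | (devfn << 8) | where |
	       TOY_CONFIG_BASE;
}

int main(void)
{
	printf("bus 0, dev 3, fn 0, reg 0x10 -> %#llx\n",
	       toy_conf_addr(0, TOY_DEVFN(3, 0), 0x10));
	printf("bus 1, dev 0, fn 1, reg 0x04 -> %#llx\n",
	       toy_conf_addr(1, TOY_DEVFN(0, 1), 0x04));
	return 0;
}

Because the device number sits in bits 7:3 of devfn, shifting devfn left by 8 lands it in bits 15:11 of the address, matching the bit diagram in the file.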
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_ruffian.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the RUFFIAN. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/timex.h> #include <linux/init.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_cia.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" static void __init ruffian_init_irq(void) { /* Invert 6&7 for i82371 */ *(vulp)PYXIS_INT_HILO = 0x000000c0UL; mb(); *(vulp)PYXIS_INT_CNFG = 0x00002064UL; mb(); /* all clear */ outb(0x11,0xA0); outb(0x08,0xA1); outb(0x02,0xA1); outb(0x01,0xA1); outb(0xFF,0xA1); outb(0x11,0x20); outb(0x00,0x21); outb(0x04,0x21); outb(0x01,0x21); outb(0xFF,0x21); /* Finish writing the 82C59A PIC Operation Control Words */ outb(0x20,0xA0); outb(0x20,0x20); init_i8259a_irqs(); /* Not interested in the bogus interrupts (0,3,6), NMI (1), HALT (2), flash (5), or 21142 (8). */ init_pyxis_irqs(0x16f0000); common_init_isa_dma(); } #define RUFFIAN_LATCH DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ) static void __init ruffian_init_rtc(void) { /* Ruffian does not have the RTC connected to the CPU timer interrupt. Instead, it uses the PIT connected to IRQ 0. */ /* Setup interval timer. */ outb(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */ outb(RUFFIAN_LATCH & 0xff, 0x40); /* LSB */ outb(RUFFIAN_LATCH >> 8, 0x40); /* MSB */ outb(0xb6, 0x43); /* pit counter 2: speaker */ outb(0x31, 0x42); outb(0x13, 0x42); if (request_irq(0, rtc_timer_interrupt, 0, "timer", NULL)) pr_err("Failed to request irq 0 (timer)\n"); } static void ruffian_kill_arch (int mode) { cia_kill_arch(mode); #if 0 /* This only causes re-entry to ARCSBIOS */ /* Perhaps this works for other PYXIS as well? */ *(vuip) PYXIS_RESET = 0x0000dead; mb(); #endif } /* * Interrupt routing: * * Primary bus * IdSel INTA INTB INTC INTD * 21052 13 - - - - * SIO 14 23 - - - * 21143 15 44 - - - * Slot 0 17 43 42 41 40 * * Secondary bus * IdSel INTA INTB INTC INTD * Slot 0 8 (18) 19 18 17 16 * Slot 1 9 (19) 31 30 29 28 * Slot 2 10 (20) 27 26 25 24 * Slot 3 11 (21) 39 38 37 36 * Slot 4 12 (22) 35 34 33 32 * 53c875 13 (23) 20 - - - * */ static int ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[11][5] = { /*INT INTA INTB INTC INTD */ {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ {44, 44, 44, 44, 44}, /* IdSel 15, 21143 */ {-1, -1, -1, -1, -1}, /* IdSel 16, none */ {43, 43, 42, 41, 40}, /* IdSel 17, 64-bit slot */ /* the next 6 are actually on PCI bus 1, across the bridge */ {19, 19, 18, 17, 16}, /* IdSel 8, slot 0 */ {31, 31, 30, 29, 28}, /* IdSel 9, slot 1 */ {27, 27, 26, 25, 24}, /* IdSel 10, slot 2 */ {39, 39, 38, 37, 36}, /* IdSel 11, slot 3 */ {35, 35, 34, 33, 32}, /* IdSel 12, slot 4 */ {20, 20, 20, 20, 20}, /* IdSel 13, 53c875 */ }; const long min_idsel = 13, max_idsel = 23, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static u8 ruffian_swizzle(struct pci_dev *dev, u8 *pinp) { int slot, pin = *pinp; if (dev->bus->number == 0) { slot = PCI_SLOT(dev->devfn); } /* Check for the built-in bridge. */ else if (PCI_SLOT(dev->bus->self->devfn) == 13) { slot = PCI_SLOT(dev->devfn) + 10; } else { /* Must be a card-based bridge. 
*/ do { if (PCI_SLOT(dev->bus->self->devfn) == 13) { slot = PCI_SLOT(dev->devfn) + 10; break; } pin = pci_swizzle_interrupt_pin(dev, pin); /* Move up the chain of bridges. */ dev = dev->bus->self; /* Slot of the next bridge. */ slot = PCI_SLOT(dev->devfn); } while (dev->bus->self); } *pinp = pin; return slot; } #ifdef BUILDING_FOR_MILO /* * The DeskStation Ruffian motherboard firmware does not place * the memory size in the PALimpure area. Therefore, we use * the Bank Configuration Registers in PYXIS to obtain the size. */ static unsigned long __init ruffian_get_bank_size(unsigned long offset) { unsigned long bank_addr, bank, ret = 0; /* Valid offsets are: 0x800, 0x840 and 0x880 since Ruffian only uses three banks. */ bank_addr = (unsigned long)PYXIS_MCR + offset; bank = *(vulp)bank_addr; /* Check BANK_ENABLE */ if (bank & 0x01) { static unsigned long size[] __initdata = { 0x40000000UL, /* 0x00, 1G */ 0x20000000UL, /* 0x02, 512M */ 0x10000000UL, /* 0x04, 256M */ 0x08000000UL, /* 0x06, 128M */ 0x04000000UL, /* 0x08, 64M */ 0x02000000UL, /* 0x0a, 32M */ 0x01000000UL, /* 0x0c, 16M */ 0x00800000UL, /* 0x0e, 8M */ 0x80000000UL, /* 0x10, 2G */ }; bank = (bank & 0x1e) >> 1; if (bank < ARRAY_SIZE(size)) ret = size[bank]; } return ret; } #endif /* BUILDING_FOR_MILO */ /* * The System Vector */ struct alpha_machine_vector ruffian_mv __initmv = { .vector_name = "Ruffian", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 48, .device_interrupt = pyxis_device_interrupt, .init_arch = pyxis_init_arch, .init_irq = ruffian_init_irq, .init_rtc = ruffian_init_rtc, .init_pci = cia_init_pci, .kill_arch = ruffian_kill_arch, .pci_map_irq = ruffian_map_irq, .pci_swizzle = ruffian_swizzle, }; ALIAS_MV(ruffian)
linux-master
arch/alpha/kernel/sys_ruffian.c
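ruffian_get_bank_size() above decodes a PYXIS bank configuration register: bit 0 is the bank enable, and bits 4:1 index a table of sizes. Here is a user-space sketch of that decode with a fabricated register value standing in for a real PYXIS_MCR read; only the size table is taken from the code above.

#include <stdio.h>

/* Size table copied from ruffian_get_bank_size() above. */
static const unsigned long bank_size[] = {
	0x40000000UL,	/* 0x00,   1G */
	0x20000000UL,	/* 0x02, 512M */
	0x10000000UL,	/* 0x04, 256M */
	0x08000000UL,	/* 0x06, 128M */
	0x04000000UL,	/* 0x08,  64M */
	0x02000000UL,	/* 0x0a,  32M */
	0x01000000UL,	/* 0x0c,  16M */
	0x00800000UL,	/* 0x0e,   8M */
	0x80000000UL,	/* 0x10,   2G */
};

/* Bit 0 = BANK_ENABLE, bits 4:1 = encoded size.  Returns 0 for a
 * disabled bank or an out-of-range encoding. */
static unsigned long decode_bank(unsigned long reg)
{
	unsigned long idx;

	if (!(reg & 0x01))
		return 0;
	idx = (reg & 0x1e) >> 1;
	if (idx >= sizeof(bank_size) / sizeof(bank_size[0]))
		return 0;
	return bank_size[idx];
}

int main(void)
{
	unsigned long reg = 0x05;	/* fabricated: enabled, size code 0x04 */

	printf("bank register %#lx -> %lu MB\n", reg, decode_bank(reg) >> 20);
	return 0;
}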
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/err_ev7.c * * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) * * Error handling code supporting Alpha systems */ #include <linux/init.h> #include <linux/sched.h> #include <asm/io.h> #include <asm/hwrpb.h> #include <asm/smp.h> #include <asm/err_common.h> #include <asm/err_ev7.h> #include "err_impl.h" #include "proto.h" struct ev7_lf_subpackets * ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr, struct ev7_lf_subpackets *lf_subpackets) { struct el_subpacket *subpacket; int i; /* * A Marvel machine check frame is always packaged in an * el_subpacket of class HEADER, type LOGOUT_FRAME. */ if (el_ptr->class != EL_CLASS__HEADER || el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME) return NULL; /* * It is a logout frame header. Look at the one subpacket. */ el_ptr = (struct el_subpacket *) ((unsigned long)el_ptr + el_ptr->length); /* * It has to be class PAL, type LOGOUT_FRAME. */ if (el_ptr->class != EL_CLASS__PAL || el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME) return NULL; lf_subpackets->logout = (struct ev7_pal_logout_subpacket *) el_ptr->by_type.raw.data_start; /* * Process the subpackets. */ subpacket = (struct el_subpacket *) ((unsigned long)el_ptr + el_ptr->length); for (i = 0; subpacket && i < lf_subpackets->logout->subpacket_count; subpacket = (struct el_subpacket *) ((unsigned long)subpacket + subpacket->length), i++) { /* * All subpackets should be class PAL. */ if (subpacket->class != EL_CLASS__PAL) { printk("%s**UNEXPECTED SUBPACKET CLASS %d " "IN LOGOUT FRAME (packet %d\n", err_print_prefix, subpacket->class, i); return NULL; } /* * Remember the subpacket. */ switch(subpacket->type) { case EL_TYPE__PAL__EV7_PROCESSOR: lf_subpackets->ev7 = (struct ev7_pal_processor_subpacket *) subpacket->by_type.raw.data_start; break; case EL_TYPE__PAL__EV7_RBOX: lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *) subpacket->by_type.raw.data_start; break; case EL_TYPE__PAL__EV7_ZBOX: lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *) subpacket->by_type.raw.data_start; break; case EL_TYPE__PAL__EV7_IO: lf_subpackets->io = (struct ev7_pal_io_subpacket *) subpacket->by_type.raw.data_start; break; case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE: case EL_TYPE__PAL__ENV__AIRMOVER_FAN: case EL_TYPE__PAL__ENV__VOLTAGE: case EL_TYPE__PAL__ENV__INTRUSION: case EL_TYPE__PAL__ENV__POWER_SUPPLY: case EL_TYPE__PAL__ENV__LAN: case EL_TYPE__PAL__ENV__HOT_PLUG: lf_subpackets->env[ev7_lf_env_index(subpacket->type)] = (struct ev7_pal_environmental_subpacket *) subpacket->by_type.raw.data_start; break; default: /* * Don't know what kind of frame this is. */ return NULL; } } return lf_subpackets; } void ev7_machine_check(unsigned long vector, unsigned long la_ptr) { struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr; char *saved_err_prefix = err_print_prefix; /* * Sync the processor */ mb(); draina(); err_print_prefix = KERN_CRIT; printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n", err_print_prefix, (vector == SCB_Q_PROCERR) ? 
"Correctable" : "Uncorrectable", (unsigned int)vector, (int)smp_processor_id()); el_process_subpacket(el_ptr); err_print_prefix = saved_err_prefix; /* * Release the logout frame */ wrmces(0x7); mb(); } static char *el_ev7_processor_subpacket_annotation[] = { "Subpacket Header", "I_STAT", "DC_STAT", "C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0", "C_STAT", "C_STS", "MM_STAT", "EXC_ADDR", "IER_CM", "ISUM", "PAL_BASE", "I_CTL", "PROCESS_CONTEXT", "CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL", "CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL", "BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS", "BBOX_DAT_RMP", NULL }; static char *el_ev7_zbox_subpacket_annotation[] = { "Subpacket Header", "ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1", "ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3", "ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR", "ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL", "ZBOX(0): reserved / DIFT_ERR_STATUS", "ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1", "ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3", "ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR", "ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL", "ZBOX(1): reserved / DIFT_ERR_STATUS", "CBOX_CTL", "CBOX_STP_CTL", "ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA", "ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME", NULL }; static char *el_ev7_rbox_subpacket_annotation[] = { "Subpacket Header", "RBOX_CFG", "RBOX_N_CFG", "RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG", "RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR", "RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR", "RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL", "RBOX_INTQ", "RBOX_INT", NULL }; static char *el_ev7_io_subpacket_annotation[] = { "Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV", "IO7_UPH", "HPI_CTL", "CRD_CTL", "HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM", "PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0", "PO7_ERR_PKT1", "reserved", "reserved", "PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT", "PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR", "DM CSR PH", "DM CSR PH", "DM CSR PH", "DM CSR PH", "reserved", "PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT", "PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR", "DM CSR PH", "DM CSR PH", "DM CSR PH", "DM CSR PH", "reserved", "PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT", "PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR", "DM CSR PH", "DM CSR PH", "DM CSR PH", "DM CSR PH", "reserved", "PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT", "PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR", "DM CSR PH", "DM CSR PH", "DM CSR PH", "DM CSR PH", "reserved", NULL }; static struct el_subpacket_annotation el_ev7_pal_annotations[] = { SUBPACKET_ANNOTATION(EL_CLASS__PAL, EL_TYPE__PAL__EV7_PROCESSOR, 1, "EV7 Processor Subpacket", el_ev7_processor_subpacket_annotation), SUBPACKET_ANNOTATION(EL_CLASS__PAL, EL_TYPE__PAL__EV7_ZBOX, 1, "EV7 ZBOX Subpacket", el_ev7_zbox_subpacket_annotation), SUBPACKET_ANNOTATION(EL_CLASS__PAL, EL_TYPE__PAL__EV7_RBOX, 1, "EV7 RBOX Subpacket", el_ev7_rbox_subpacket_annotation), SUBPACKET_ANNOTATION(EL_CLASS__PAL, EL_TYPE__PAL__EV7_IO, 1, "EV7 IO Subpacket", el_ev7_io_subpacket_annotation) }; static struct el_subpacket * ev7_process_pal_subpacket(struct el_subpacket *header) { struct ev7_pal_subpacket *packet; if (header->class != EL_CLASS__PAL) { printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", err_print_prefix, header->class, header->type); return NULL; } packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start; switch(header->type) { case EL_TYPE__PAL__LOGOUT_FRAME: printk("%s*** MCHK occurred on LPID %lld (RBOX %llx)\n", err_print_prefix, packet->by_type.logout.whami, 
packet->by_type.logout.rbox_whami); el_print_timestamp(&packet->by_type.logout.timestamp); printk("%s EXC_ADDR: %016llx\n" " HALT_CODE: %llx\n", err_print_prefix, packet->by_type.logout.exc_addr, packet->by_type.logout.halt_code); el_process_subpackets(header, packet->by_type.logout.subpacket_count); break; default: printk("%s ** PAL TYPE %d SUBPACKET\n", err_print_prefix, header->type); el_annotate_subpacket(header); break; } return (struct el_subpacket *)((unsigned long)header + header->length); } struct el_subpacket_handler ev7_pal_subpacket_handler = SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket); void __init ev7_register_error_handlers(void) { int i; for (i = 0; i < ARRAY_SIZE(el_ev7_pal_annotations); i++) cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]); cdl_register_subpacket_handler(&ev7_pal_subpacket_handler); }
linux-master
arch/alpha/kernel/err_ev7.c
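ev7_collect_logout_frame_subpackets() above walks a chain of variable-length records by repeatedly adding each subpacket's length field to its own address and switching on class/type. The toy below applies the same walk to a flat byte buffer; the record layout and the two type codes are invented for the illustration and are not the EV7 PAL format.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_subpacket {
	uint16_t class;
	uint16_t type;
	uint32_t length;	/* total record size in bytes, header included */
	/* payload follows */
};

#define TOY_CLASS_PAL	1
#define TOY_TYPE_CPU	10	/* invented type codes */
#define TOY_TYPE_IO	11

static void walk(const unsigned char *buf, size_t total)
{
	size_t off = 0;

	while (off + sizeof(struct toy_subpacket) <= total) {
		struct toy_subpacket hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || off + hdr.length > total)
			break;		/* malformed record, stop */

		switch (hdr.type) {
		case TOY_TYPE_CPU:
			printf("CPU subpacket, %u bytes\n", (unsigned)hdr.length);
			break;
		case TOY_TYPE_IO:
			printf("IO subpacket, %u bytes\n", (unsigned)hdr.length);
			break;
		default:
			printf("unknown type %u, skipped\n", (unsigned)hdr.type);
			break;
		}
		off += hdr.length;	/* advance by the record's own length */
	}
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct toy_subpacket a = { TOY_CLASS_PAL, TOY_TYPE_CPU, 16 };
	struct toy_subpacket b = { TOY_CLASS_PAL, TOY_TYPE_IO, 24 };

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 16, &b, sizeof(b));
	walk(buf, 16 + 24);
	return 0;
}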
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/es1888.c * * Init the built-in ES1888 sound chip (SB16 compatible) */ #include <linux/init.h> #include <asm/io.h> #include "proto.h" void __init es1888_init(void) { /* Sequence of IO reads to init the audio controller */ inb(0x0229); inb(0x0229); inb(0x0229); inb(0x022b); inb(0x0229); inb(0x022b); inb(0x0229); inb(0x0229); inb(0x022b); inb(0x0229); inb(0x0220); /* This sets the base address to 0x220 */ /* Sequence to set DMA channels */ outb(0x01, 0x0226); /* reset */ inb(0x0226); /* pause */ outb(0x00, 0x0226); /* release reset */ while (!(inb(0x022e) & 0x80)) /* wait for bit 7 to assert*/ continue; inb(0x022a); /* pause */ outb(0xc6, 0x022c); /* enable extended mode */ inb(0x022a); /* pause, also forces the write */ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ continue; outb(0xb1, 0x022c); /* setup for write to Interrupt CR */ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ continue; outb(0x14, 0x022c); /* set IRQ 5 */ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ continue; outb(0xb2, 0x022c); /* setup for write to DMA CR */ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */ continue; outb(0x18, 0x022c); /* set DMA channel 1 */ inb(0x022c); /* force the write */ }
linux-master
arch/alpha/kernel/es1888.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/alpha/kernel/sys_cabriolet.c * * Copyright (C) 1995 David A Rusling * Copyright (C) 1996 Jay A Estabrook * Copyright (C) 1998, 1999, 2000 Richard Henderson * * Code supporting the Cabriolet (AlphaPC64), EB66+, and EB164, * PC164 and LX164. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/ptrace.h> #include <asm/dma.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/io.h> #include <asm/core_apecs.h> #include <asm/core_cia.h> #include <asm/core_lca.h> #include <asm/tlbflush.h> #include "proto.h" #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" #include "pc873xx.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask = ~0UL; static inline void cabriolet_update_irq_hw(unsigned int irq, unsigned long mask) { int ofs = (irq - 16) / 8; outb(mask >> (16 + ofs * 8), 0x804 + ofs); } static inline void cabriolet_enable_irq(struct irq_data *d) { cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq)); } static void cabriolet_disable_irq(struct irq_data *d) { cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq); } static struct irq_chip cabriolet_irq_type = { .name = "CABRIOLET", .irq_unmask = cabriolet_enable_irq, .irq_mask = cabriolet_disable_irq, .irq_mask_ack = cabriolet_disable_irq, }; static void cabriolet_device_interrupt(unsigned long v) { unsigned long pld; unsigned int i; /* Read the interrupt summary registers */ pld = inb(0x804) | (inb(0x805) << 8) | (inb(0x806) << 16); /* * Now for every possible bit set, work through them and call * the appropriate interrupt handler. */ while (pld) { i = ffz(~pld); pld &= pld - 1; /* clear least bit set */ if (i == 4) { isa_device_interrupt(v); } else { handle_irq(16 + i); } } } static void __init common_init_irq(void (*srm_dev_int)(unsigned long v)) { init_i8259a_irqs(); if (alpha_using_srm) { alpha_mv.device_interrupt = srm_dev_int; init_srm_irqs(35, 0); } else { long i; outb(0xff, 0x804); outb(0xff, 0x805); outb(0xff, 0x806); for (i = 16; i < 35; ++i) { irq_set_chip_and_handler(i, &cabriolet_irq_type, handle_level_irq); irq_set_status_flags(i, IRQ_LEVEL); } } common_init_isa_dma(); if (request_irq(16 + 4, no_action, 0, "isa-cascade", NULL)) pr_err("Failed to register isa-cascade interrupt\n"); } #ifndef CONFIG_ALPHA_PC164 static void __init cabriolet_init_irq(void) { common_init_irq(srm_device_interrupt); } #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164) /* In theory, the PC164 has the same interrupt hardware as the other Cabriolet based systems. However, something got screwed up late in the development cycle which broke the interrupt masking hardware. Repeat, it is not possible to mask and ack interrupts. At all. In an attempt to work around this, while processing interrupts, we do not allow the IPL to drop below what it is currently. This prevents the possibility of recursion. ??? Another option might be to force all PCI devices to use edge triggered rather than level triggered interrupts. That might be too invasive though. 
*/ static void pc164_srm_device_interrupt(unsigned long v) { __min_ipl = getipl(); srm_device_interrupt(v); __min_ipl = 0; } static void pc164_device_interrupt(unsigned long v) { __min_ipl = getipl(); cabriolet_device_interrupt(v); __min_ipl = 0; } static void __init pc164_init_irq(void) { common_init_irq(pc164_srm_device_interrupt); } #endif /* * The EB66+ is very similar to the EB66 except that it does not have * the on-board NCR and Tulip chips. In the code below, I have used * slot number to refer to the id select line and *not* the slot * number used in the EB66+ documentation. However, in the table, * I've given the slot number, the id select line and the Jxx number * that's printed on the board. The interrupt pins from the PCI slots * are wired into 3 interrupt summary registers at 0x804, 0x805 and * 0x806 ISA. * * In the table, -1 means don't assign an IRQ number. This is usually * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. */ static inline int eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] = { /*INT INTA INTB INTC INTD */ {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ {16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 9, slot 2, J27 */ {16+3, 16+3, 16+8, 16+12, 16+6} /* IdSel 10, slot 3, J28 */ }; const long min_idsel = 6, max_idsel = 10, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } /* * The AlphaPC64 is very similar to the EB66+ except that its slots * are numbered differently. In the code below, I have used slot * number to refer to the id select line and *not* the slot number * used in the AlphaPC64 documentation. However, in the table, I've * given the slot number, the id select line and the Jxx number that's * printed on the board. The interrupt pins from the PCI slots are * wired into 3 interrupt summary registers at 0x804, 0x805 and 0x806 * ISA. * * In the table, -1 means don't assign an IRQ number. This is usually * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. */ static inline int cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[5][5] = { /*INT INTA INTB INTC INTD */ { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ { 16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J20 */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ { 16+3, 16+3, 16+8, 16+12, 16+16} /* IdSel 9, slot 3, J22 */ }; const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static inline void __init cabriolet_enable_ide(void) { if (pc873xx_probe() == -1) { printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); } else { printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", pc873xx_get_model(), pc873xx_get_base()); pc873xx_enable_ide(); } } static inline void __init cabriolet_init_pci(void) { common_init_pci(); cabriolet_enable_ide(); } static inline void __init cia_cab_init_pci(void) { cia_init_pci(); cabriolet_enable_ide(); } /* * The PC164 and LX164 have 19 PCI interrupts, four from each of the four * PCI slots, the SIO, PCI/IDE, and USB. * * Each of the interrupts can be individually masked. This is * accomplished by setting the appropriate bit in the mask register. * A bit is set by writing a "1" to the desired position in the mask * register and cleared by writing a "0". There are 3 mask registers * located at ISA address 804h, 805h and 806h. 
* * An I/O read at ISA address 804h, 805h, 806h will return the * state of the 11 PCI interrupts and not the state of the MASKED * interrupts. * * Note: A write to I/O 804h, 805h, and 806h the mask register will be * updated. * * * ISA DATA<7:0> * ISA +--------------------------------------------------------------+ * ADDRESS | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | * +==============================================================+ * 0x804 | INTB0 | USB | IDE | SIO | INTA3 |INTA2 | INTA1 | INTA0 | * +--------------------------------------------------------------+ * 0x805 | INTD0 | INTC3 | INTC2 | INTC1 | INTC0 |INTB3 | INTB2 | INTB1 | * +--------------------------------------------------------------+ * 0x806 | Rsrv | Rsrv | Rsrv | Rsrv | Rsrv |INTD3 | INTD2 | INTD1 | * +--------------------------------------------------------------+ * * Rsrv = reserved bits * Note: The mask register is write-only. * * IdSel * 5 32 bit PCI option slot 2 * 6 64 bit PCI option slot 0 * 7 64 bit PCI option slot 1 * 8 Saturn I/O * 9 32 bit PCI option slot 3 * 10 USB * 11 IDE * */ static inline int alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static char irq_tab[7][5] = { /*INT INTA INTB INTC INTD */ { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ { 16+1, 16+1, 16+8, 16+12, 16+16}, /* IdSel 7, slot 1, J26 */ { -1, -1, -1, -1, -1}, /* IdSel 8, SIO */ { 16+3, 16+3, 16+10, 16+14, 16+18}, /* IdSel 9, slot 3, J19 */ { 16+6, 16+6, 16+6, 16+6, 16+6}, /* IdSel 10, USB */ { 16+5, 16+5, 16+5, 16+5, 16+5} /* IdSel 11, IDE */ }; const long min_idsel = 5, max_idsel = 11, irqs_per_slot = 5; return COMMON_TABLE_LOOKUP; } static inline void __init alphapc164_init_pci(void) { cia_init_pci(); SMC93x_Init(); } /* * The System Vector */ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET) struct alpha_machine_vector cabriolet_mv __initmv = { .vector_name = "Cabriolet", DO_EV4_MMU, DO_DEFAULT_RTC, DO_APECS_IO, .machine_check = apecs_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 35, .device_interrupt = cabriolet_device_interrupt, .init_arch = apecs_init_arch, .init_irq = cabriolet_init_irq, .init_rtc = common_init_rtc, .init_pci = cabriolet_init_pci, .pci_map_irq = cabriolet_map_irq, .pci_swizzle = common_swizzle, }; #ifndef CONFIG_ALPHA_EB64P ALIAS_MV(cabriolet) #endif #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB164) struct alpha_machine_vector eb164_mv __initmv = { .vector_name = "EB164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 35, .device_interrupt = cabriolet_device_interrupt, .init_arch = cia_init_arch, .init_irq = cabriolet_init_irq, .init_rtc = common_init_rtc, .init_pci = cia_cab_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = cabriolet_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb164) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66P) struct alpha_machine_vector eb66p_mv __initmv = { .vector_name = "EB66+", DO_EV4_MMU, DO_DEFAULT_RTC, DO_LCA_IO, .machine_check = lca_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = APECS_AND_LCA_DEFAULT_MEM_BASE, .nr_irqs = 35, .device_interrupt = cabriolet_device_interrupt, 
.init_arch = lca_init_arch, .init_irq = cabriolet_init_irq, .init_rtc = common_init_rtc, .init_pci = cabriolet_init_pci, .pci_map_irq = eb66p_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(eb66p) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LX164) struct alpha_machine_vector lx164_mv __initmv = { .vector_name = "LX164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_PYXIS_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = DEFAULT_MEM_BASE, .pci_dac_offset = PYXIS_DAC_OFFSET, .nr_irqs = 35, .device_interrupt = cabriolet_device_interrupt, .init_arch = pyxis_init_arch, .init_irq = cabriolet_init_irq, .init_rtc = common_init_rtc, .init_pci = alphapc164_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = alphapc164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(lx164) #endif #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164) struct alpha_machine_vector pc164_mv __initmv = { .vector_name = "PC164", DO_EV5_MMU, DO_DEFAULT_RTC, DO_CIA_IO, .machine_check = cia_machine_check, .max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS, .min_io_address = DEFAULT_IO_BASE, .min_mem_address = CIA_DEFAULT_MEM_BASE, .nr_irqs = 35, .device_interrupt = pc164_device_interrupt, .init_arch = cia_init_arch, .init_irq = pc164_init_irq, .init_rtc = common_init_rtc, .init_pci = alphapc164_init_pci, .kill_arch = cia_kill_arch, .pci_map_irq = alphapc164_map_irq, .pci_swizzle = common_swizzle, }; ALIAS_MV(pc164) #endif
linux-master
arch/alpha/kernel/sys_cabriolet.c
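cabriolet_update_irq_hw() above spreads one 64-bit software mask across the three 8-bit mask registers at 0x804-0x806: IRQ n (n >= 16) lands in register (n - 16) / 8 at bit (n - 16) % 8. The sketch below only computes which port and byte value a given mask produces, printing instead of performing the outb(); the starting mask and the chosen IRQ numbers are arbitrary.

#include <stdio.h>

/* Mask bit is true for DISABLED irqs, as in the driver above. */
static unsigned long cached_mask = ~0UL;

/* Same arithmetic as cabriolet_update_irq_hw(), with the outb()
 * replaced by a printf so this runs anywhere. */
static void show_update(unsigned int irq, unsigned long mask)
{
	int ofs = (irq - 16) / 8;
	unsigned char byte = mask >> (16 + ofs * 8);

	printf("irq %2u -> port 0x%03x, byte 0x%02x\n",
	       irq, 0x804 + ofs, byte);
}

int main(void)
{
	/* Enable IRQ 20 and IRQ 30 by clearing their "disabled" bits. */
	cached_mask &= ~(1UL << 20);
	show_update(20, cached_mask);

	cached_mask &= ~(1UL << 30);
	show_update(30, cached_mask);
	return 0;
}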
// SPDX-License-Identifier: GPL-2.0 /* * printf.c: Internal prom library printf facility. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1997 Jakub Jelinek ([email protected]) * Copyright (c) 2002 Pete Zaitcev ([email protected]) * * We used to warn all over the code: DO NOT USE prom_printf(), * and yet people do. Anton's banking code was outputting banks * with prom_printf for most of the 2.4 lifetime. Since an effective * stick is not available, we deployed a carrot: an early printk * through PROM by means of -p boot option. This ought to fix it. * USE printk; if you need, deploy -p. */ #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <asm/openprom.h> #include <asm/oplib.h> #define CONSOLE_WRITE_BUF_SIZE 1024 static char ppbuf[1024]; static char console_write_buf[CONSOLE_WRITE_BUF_SIZE]; static DEFINE_RAW_SPINLOCK(console_write_lock); void notrace prom_write(const char *buf, unsigned int n) { unsigned int dest_len; unsigned long flags; char *dest; dest = console_write_buf; raw_spin_lock_irqsave(&console_write_lock, flags); dest_len = 0; while (n-- != 0) { char ch = *buf++; if (ch == '\n') { *dest++ = '\r'; dest_len++; } *dest++ = ch; dest_len++; if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) { prom_console_write_buf(console_write_buf, dest_len); dest = console_write_buf; dest_len = 0; } } if (dest_len) prom_console_write_buf(console_write_buf, dest_len); raw_spin_unlock_irqrestore(&console_write_lock, flags); } void notrace prom_printf(const char *fmt, ...) { va_list args; int i; va_start(args, fmt); i = vscnprintf(ppbuf, sizeof(ppbuf), fmt, args); va_end(args); prom_write(ppbuf, i); }
linux-master
arch/sparc/prom/printf.c
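prom_write() above expands every '\n' to '\r\n' and flushes the staging buffer before it can overflow, always leaving room for the worst-case two-byte expansion. Here is the same buffering logic as a standalone sketch that writes to stdout via fwrite() rather than the PROM; the 16-byte buffer size is deliberately tiny to force several flushes and is not the kernel's value.

#include <stdio.h>
#include <string.h>

#define TOY_BUF_SIZE 16		/* tiny on purpose, to force flushes */
static char toy_buf[TOY_BUF_SIZE];

static void toy_flush(const char *buf, unsigned int len)
{
	fwrite(buf, 1, len, stdout);
}

/* CR/LF expansion with the same "flush when nearly full" rule as
 * prom_write(); the -1 below keeps room for the extra '\r'. */
static void toy_write(const char *buf, unsigned int n)
{
	unsigned int dest_len = 0;
	char *dest = toy_buf;

	while (n-- != 0) {
		char ch = *buf++;

		if (ch == '\n') {
			*dest++ = '\r';
			dest_len++;
		}
		*dest++ = ch;
		dest_len++;
		if (dest_len >= TOY_BUF_SIZE - 1) {
			toy_flush(toy_buf, dest_len);
			dest = toy_buf;
			dest_len = 0;
		}
	}
	if (dest_len)
		toy_flush(toy_buf, dest_len);
}

int main(void)
{
	const char *msg = "line one\nline two\n";

	toy_write(msg, strlen(msg));
	return 0;
}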
// SPDX-License-Identifier: GPL-2.0 /* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller ([email protected]) * Copyright(C) 1996,1998 Jakub Jelinek ([email protected]) */ #include <linux/string.h> #include <linux/init.h> #include <asm/oplib.h> /* WARNING: The boot loader knows that these next three variables come one right * after another in the .data section. Do not move this stuff into * the .bss section or it will break things. */ /* We limit BARG_LEN to 1024 because this is the size of the * 'barg_out' command line buffer in the SILO bootloader. */ #define BARG_LEN 1024 struct { int bootstr_len; int bootstr_valid; char bootstr_buf[BARG_LEN]; } bootstr_info = { .bootstr_len = BARG_LEN, #ifdef CONFIG_CMDLINE .bootstr_valid = 1, .bootstr_buf = CONFIG_CMDLINE, #endif }; char * __init prom_getbootargs(void) { /* This check saves us from a panic when bootfd patches args. */ if (bootstr_info.bootstr_valid) return bootstr_info.bootstr_buf; prom_getstring(prom_chosen_node, "bootargs", bootstr_info.bootstr_buf, BARG_LEN); bootstr_info.bootstr_valid = 1; return bootstr_info.bootstr_buf; }
linux-master
arch/sparc/prom/bootstr_64.c
// SPDX-License-Identifier: GPL-2.0 /* memory.c: Prom routine for acquiring various bits of information * about RAM on the machine, both virtual and physical. * * Copyright (C) 1995, 2008 David S. Miller ([email protected]) * Copyright (C) 1997 Michael A. Griffith ([email protected]) */ #include <linux/kernel.h> #include <linux/sort.h> #include <linux/init.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/page.h> static int __init prom_meminit_v0(void) { struct linux_mlist_v0 *p; int index; index = 0; for (p = *(romvec->pv_v0mem.v0_available); p; p = p->theres_more) { sp_banks[index].base_addr = (unsigned long) p->start_adr; sp_banks[index].num_bytes = p->num_bytes; index++; } return index; } static int __init prom_meminit_v2(void) { struct linux_prom_registers reg[64]; phandle node; int size, num_ents, i; node = prom_searchsiblings(prom_getchild(prom_root_node), "memory"); size = prom_getproperty(node, "available", (char *) reg, sizeof(reg)); num_ents = size / sizeof(struct linux_prom_registers); for (i = 0; i < num_ents; i++) { sp_banks[i].base_addr = reg[i].phys_addr; sp_banks[i].num_bytes = reg[i].reg_size; } return num_ents; } static int sp_banks_cmp(const void *a, const void *b) { const struct sparc_phys_banks *x = a, *y = b; if (x->base_addr > y->base_addr) return 1; if (x->base_addr < y->base_addr) return -1; return 0; } /* Initialize the memory lists based upon the prom version. */ void __init prom_meminit(void) { int i, num_ents = 0; switch (prom_vers) { case PROM_V0: num_ents = prom_meminit_v0(); break; case PROM_V2: case PROM_V3: num_ents = prom_meminit_v2(); break; default: break; } sort(sp_banks, num_ents, sizeof(struct sparc_phys_banks), sp_banks_cmp, NULL); /* Sentinel. */ sp_banks[num_ents].base_addr = 0xdeadbeef; sp_banks[num_ents].num_bytes = 0; for (i = 0; i < num_ents; i++) sp_banks[i].num_bytes &= PAGE_MASK; }
linux-master
arch/sparc/prom/memory.c
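prom_meminit() above gathers the available banks, sorts them by base address, appends a 0xdeadbeef sentinel, and rounds each size down to a page boundary. The same post-processing in plain C, using qsort() and an illustrative 4 KB page size in place of the kernel's sort() and PAGE_MASK; the bank values are made up.

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096UL		/* illustrative page size */
#define TOY_PAGE_MASK (~(TOY_PAGE_SIZE - 1))

struct toy_bank {
	unsigned long base_addr;
	unsigned long num_bytes;
};

static int bank_cmp(const void *a, const void *b)
{
	const struct toy_bank *x = a, *y = b;

	if (x->base_addr > y->base_addr)
		return 1;
	if (x->base_addr < y->base_addr)
		return -1;
	return 0;
}

int main(void)
{
	struct toy_bank banks[4] = {
		{ 0x40000000UL, 0x10000123UL },	/* size not page aligned */
		{ 0x00000000UL, 0x08000000UL },
		{ 0x80000000UL, 0x04000000UL },
	};
	int i, n = 3;

	qsort(banks, n, sizeof(banks[0]), bank_cmp);

	/* Sentinel terminates later scans, exactly as in prom_meminit(). */
	banks[n].base_addr = 0xdeadbeef;
	banks[n].num_bytes = 0;

	for (i = 0; i < n; i++) {
		banks[i].num_bytes &= TOY_PAGE_MASK;
		printf("bank %d: base %#010lx size %#010lx\n",
		       i, banks[i].base_addr, banks[i].num_bytes);
	}
	return 0;
}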
// SPDX-License-Identifier: GPL-2.0 /* * console.c: Routines that deal with sending and receiving IO * to/from the current console device using the PROM. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1998 Pete Zaitcev <[email protected]> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <linux/string.h> extern void restore_current(void); /* Non blocking put character to console device, returns -1 if * unsuccessful. */ static int prom_nbputchar(const char *buf) { unsigned long flags; int i = -1; spin_lock_irqsave(&prom_lock, flags); switch(prom_vers) { case PROM_V0: if ((*(romvec->pv_nbputchar))(*buf)) i = 1; break; case PROM_V2: case PROM_V3: if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, buf, 0x1) == 1) i = 1; break; default: break; } restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return i; /* Ugh, we could spin forever on unsupported proms ;( */ } void prom_console_write_buf(const char *buf, int len) { while (len) { int n = prom_nbputchar(buf); if (n < 0) continue; len--; buf++; } }
linux-master
arch/sparc/prom/console_32.c
// SPDX-License-Identifier: GPL-2.0 /* * misc.c: Miscellaneous prom functions that don't belong * anywhere else. * * Copyright (C) 1995 David S. Miller ([email protected]) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/auxio.h> extern void restore_current(void); DEFINE_SPINLOCK(prom_lock); /* Reset and reboot the machine with the command 'bcommand'. */ void prom_reboot(char *bcommand) { unsigned long flags; spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_reboot))(bcommand); /* Never get here. */ restore_current(); spin_unlock_irqrestore(&prom_lock, flags); } /* Forth evaluate the expression contained in 'fstring'. */ void prom_feval(char *fstring) { unsigned long flags; if(!fstring || fstring[0] == 0) return; spin_lock_irqsave(&prom_lock, flags); if(prom_vers == PROM_V0) (*(romvec->pv_fortheval.v0_eval))(strlen(fstring), fstring); else (*(romvec->pv_fortheval.v2_eval))(fstring); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); } EXPORT_SYMBOL(prom_feval); /* Drop into the prom, with the chance to continue with the 'go' * prom command. */ void prom_cmdline(void) { unsigned long flags; spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_abort))(); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); set_auxio(AUXIO_LED, 0); } /* Drop into the prom, but completely terminate the program. * No chance of continuing. */ void __noreturn prom_halt(void) { unsigned long flags; again: spin_lock_irqsave(&prom_lock, flags); (*(romvec->pv_halt))(); /* Never get here. */ restore_current(); spin_unlock_irqrestore(&prom_lock, flags); goto again; /* PROM is out to get me -DaveM */ } typedef void (*sfunc_t)(void); /* Set prom sync handler to call function 'funcp'. */ void prom_setsync(sfunc_t funcp) { if(!funcp) return; *romvec->pv_synchook = funcp; } /* Get the idprom and stuff it into buffer 'idbuf'. Returns the * format type. 'num_bytes' is the number of bytes that your idbuf * has space for. Returns 0xff on error. */ unsigned char prom_get_idprom(char *idbuf, int num_bytes) { int len; len = prom_getproplen(prom_root_node, "idprom"); if((len>num_bytes) || (len==-1)) return 0xff; if(!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes)) return idbuf[0]; return 0xff; } /* Get the major prom version number. */ int prom_version(void) { return romvec->pv_romvers; } /* Get the prom plugin-revision. */ int prom_getrev(void) { return prom_rev; } /* Get the prom firmware print revision. */ int prom_getprev(void) { return prom_prev; }
linux-master
arch/sparc/prom/misc_32.c
// SPDX-License-Identifier: GPL-2.0 /* * tree.c: Basic device tree traversal/scanning for the Linux * prom library. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1996,1997 Jakub Jelinek ([email protected]) */ #include <linux/string.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/ldc.h> static phandle prom_node_to_node(const char *type, phandle node) { unsigned long args[5]; args[0] = (unsigned long) type; args[1] = 1; args[2] = 1; args[3] = (unsigned int) node; args[4] = (unsigned long) -1; p1275_cmd_direct(args); return (phandle) args[4]; } /* Return the child of node 'node' or zero if no this node has no * direct descendent. */ inline phandle __prom_getchild(phandle node) { return prom_node_to_node("child", node); } phandle prom_getchild(phandle node) { phandle cnode; if ((s32)node == -1) return 0; cnode = __prom_getchild(node); if ((s32)cnode == -1) return 0; return cnode; } EXPORT_SYMBOL(prom_getchild); inline phandle prom_getparent(phandle node) { phandle cnode; if ((s32)node == -1) return 0; cnode = prom_node_to_node("parent", node); if ((s32)cnode == -1) return 0; return cnode; } /* Return the next sibling of node 'node' or zero if no more siblings * at this level of depth in the tree. */ inline phandle __prom_getsibling(phandle node) { return prom_node_to_node(prom_peer_name, node); } phandle prom_getsibling(phandle node) { phandle sibnode; if ((s32)node == -1) return 0; sibnode = __prom_getsibling(node); if ((s32)sibnode == -1) return 0; return sibnode; } EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. */ int prom_getproplen(phandle node, const char *prop) { unsigned long args[6]; if (!node || !prop) return -1; args[0] = (unsigned long) "getproplen"; args[1] = 2; args[2] = 1; args[3] = (unsigned int) node; args[4] = (unsigned long) prop; args[5] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[5]; } EXPORT_SYMBOL(prom_getproplen); /* Acquire a property 'prop' at node 'node' and place it in * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize) { unsigned long args[8]; int plen; plen = prom_getproplen(node, prop); if ((plen > bufsize) || (plen == 0) || (plen == -1)) return -1; args[0] = (unsigned long) prom_getprop_name; args[1] = 4; args[2] = 1; args[3] = (unsigned int) node; args[4] = (unsigned long) prop; args[5] = (unsigned long) buffer; args[6] = bufsize; args[7] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[7]; } EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ int prom_getint(phandle node, const char *prop) { int intprop; if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) return intprop; return -1; } EXPORT_SYMBOL(prom_getint); /* Acquire an integer property, upon error return the passed default * integer. */ int prom_getintdefault(phandle node, const char *property, int deflt) { int retval; retval = prom_getint(node, property); if (retval == -1) return deflt; return retval; } EXPORT_SYMBOL(prom_getintdefault); /* Acquire a boolean property, 1=TRUE 0=FALSE. 
*/ int prom_getbool(phandle node, const char *prop) { int retval; retval = prom_getproplen(node, prop); if (retval == -1) return 0; return 1; } EXPORT_SYMBOL(prom_getbool); /* Acquire a property whose value is a string, returns a null * string on error. The char pointer is the user supplied string * buffer. */ void prom_getstring(phandle node, const char *prop, char *user_buf, int ubuf_size) { int len; len = prom_getproperty(node, prop, user_buf, ubuf_size); if (len != -1) return; user_buf[0] = 0; } EXPORT_SYMBOL(prom_getstring); /* Does the device at node 'node' have name 'name'? * YES = 1 NO = 0 */ int prom_nodematch(phandle node, const char *name) { char namebuf[128]; prom_getproperty(node, "name", namebuf, sizeof(namebuf)); if (strcmp(namebuf, name) == 0) return 1; return 0; } /* Search siblings at 'node_start' for a node with name * 'nodename'. Return node if successful, zero if not. */ phandle prom_searchsiblings(phandle node_start, const char *nodename) { phandle thisnode; int error; char promlib_buf[128]; for(thisnode = node_start; thisnode; thisnode=prom_getsibling(thisnode)) { error = prom_getproperty(thisnode, "name", promlib_buf, sizeof(promlib_buf)); /* Should this ever happen? */ if(error == -1) continue; if(strcmp(nodename, promlib_buf)==0) return thisnode; } return 0; } EXPORT_SYMBOL(prom_searchsiblings); static const char *prom_nextprop_name = "nextprop"; /* Return the first property type for node 'node'. * buffer should be at least 32B in length */ char *prom_firstprop(phandle node, char *buffer) { unsigned long args[7]; *buffer = 0; if ((s32)node == -1) return buffer; args[0] = (unsigned long) prom_nextprop_name; args[1] = 3; args[2] = 1; args[3] = (unsigned int) node; args[4] = 0; args[5] = (unsigned long) buffer; args[6] = (unsigned long) -1; p1275_cmd_direct(args); return buffer; } EXPORT_SYMBOL(prom_firstprop); /* Return the property type string after property type 'oprop' * at node 'node' . Returns NULL string if no more * property types for this node. */ char *prom_nextprop(phandle node, const char *oprop, char *buffer) { unsigned long args[7]; char buf[32]; if ((s32)node == -1) { *buffer = 0; return buffer; } if (oprop == buffer) { strcpy (buf, oprop); oprop = buf; } args[0] = (unsigned long) prom_nextprop_name; args[1] = 3; args[2] = 1; args[3] = (unsigned int) node; args[4] = (unsigned long) oprop; args[5] = (unsigned long) buffer; args[6] = (unsigned long) -1; p1275_cmd_direct(args); return buffer; } EXPORT_SYMBOL(prom_nextprop); phandle prom_finddevice(const char *name) { unsigned long args[5]; if (!name) return 0; args[0] = (unsigned long) "finddevice"; args[1] = 1; args[2] = 1; args[3] = (unsigned long) name; args[4] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[4]; } EXPORT_SYMBOL(prom_finddevice); int prom_node_has_property(phandle node, const char *prop) { char buf [32]; *buf = 0; do { prom_nextprop(node, buf, buf); if (!strcmp(buf, prop)) return 1; } while (*buf); return 0; } EXPORT_SYMBOL(prom_node_has_property); /* Set property 'pname' at node 'node' to value 'value' which has a length * of 'size' bytes. Return the number of bytes the prom accepted. 
*/ int prom_setprop(phandle node, const char *pname, char *value, int size) { unsigned long args[8]; if (size == 0) return 0; if ((pname == 0) || (value == 0)) return 0; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) { ldom_set_var(pname, value); return 0; } #endif args[0] = (unsigned long) "setprop"; args[1] = 4; args[2] = 1; args[3] = (unsigned int) node; args[4] = (unsigned long) pname; args[5] = (unsigned long) value; args[6] = size; args[7] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[7]; } EXPORT_SYMBOL(prom_setprop); inline phandle prom_inst2pkg(int inst) { unsigned long args[5]; phandle node; args[0] = (unsigned long) "instance-to-package"; args[1] = 1; args[2] = 1; args[3] = (unsigned int) inst; args[4] = (unsigned long) -1; p1275_cmd_direct(args); node = (int) args[4]; if ((s32)node == -1) return 0; return node; } int prom_ihandle2path(int handle, char *buffer, int bufsize) { unsigned long args[7]; args[0] = (unsigned long) "instance-to-path"; args[1] = 3; args[2] = 1; args[3] = (unsigned int) handle; args[4] = (unsigned long) buffer; args[5] = bufsize; args[6] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[6]; }
linux-master
arch/sparc/prom/tree_64.c
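Every helper in the file above drives the firmware through the same calling convention: args[0] holds the service name, args[1] the number of input cells, args[2] the number of return cells, followed by the inputs and then the result slots pre-seeded with -1. The sketch below builds such an array the way prom_getproplen() does and substitutes a dummy toy_cmd_direct() that simply fills in a length, purely to show the layout; the real p1275_cmd_direct() traps into OpenBoot.

#include <stdio.h>

/* Dummy stand-in for p1275_cmd_direct(): the real routine hands the
 * array to the PROM.  Here we just pretend the property is 8 bytes. */
static void toy_cmd_direct(unsigned long *args)
{
	args[5] = 8;			/* the single return cell */
}

static int toy_getproplen(unsigned int node, const char *prop)
{
	unsigned long args[6];

	args[0] = (unsigned long) "getproplen";	/* service name      */
	args[1] = 2;				/* two input cells   */
	args[2] = 1;				/* one return cell   */
	args[3] = node;				/* input: node       */
	args[4] = (unsigned long) prop;		/* input: property   */
	args[5] = (unsigned long) -1;		/* output, pre-set   */

	toy_cmd_direct(args);

	return (int) args[5];
}

int main(void)
{
	printf("toy getproplen -> %d\n", toy_getproplen(0x1234, "reg"));
	return 0;
}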
// SPDX-License-Identifier: GPL-2.0 /* * mp.c: OpenBoot Prom Multiprocessor support routines. Don't call * these on a UP or else you will halt and catch fire. ;) * * Copyright (C) 1995 David S. Miller ([email protected]) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <asm/openprom.h> #include <asm/oplib.h> extern void restore_current(void); /* Start cpu with prom-tree node 'cpunode' using context described * by 'ctable_reg' in context 'ctx' at program counter 'pc'. * * XXX Have to look into what the return values mean. XXX */ int prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, char *pc) { int ret; unsigned long flags; spin_lock_irqsave(&prom_lock, flags); switch(prom_vers) { case PROM_V0: case PROM_V2: default: ret = -1; break; case PROM_V3: ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc); break; } restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; }
linux-master
arch/sparc/prom/mp.c
// SPDX-License-Identifier: GPL-2.0 /* * init.c: Initialize internal variables used by the PROM * library functions. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1996,1997 Jakub Jelinek ([email protected]) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/ctype.h> #include <asm/openprom.h> #include <asm/oplib.h> /* OBP version string. */ char prom_version[80]; /* The root node of the prom device tree. */ int prom_stdout; phandle prom_chosen_node; /* You must call prom_init() before you attempt to use any of the * routines in the prom library. * It gets passed the pointer to the PROM vector. */ extern void prom_cif_init(void *); void __init prom_init(void *cif_handler) { phandle node; prom_cif_init(cif_handler); prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || (s32)prom_chosen_node == -1) prom_halt(); prom_stdout = prom_getint(prom_chosen_node, "stdout"); node = prom_finddevice("/openprom"); if (!node || (s32)node == -1) prom_halt(); prom_getstring(node, "version", prom_version, sizeof(prom_version)); prom_printf("\n"); } void __init prom_init_report(void) { printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version); printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible); }
linux-master
arch/sparc/prom/init_64.c
// SPDX-License-Identifier: GPL-2.0 /* * ranges.c: Handle ranges in newer proms for obio/sbus. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1997 Jakub Jelinek ([email protected]) */ #include <linux/init.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/types.h> static struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX]; static int num_obio_ranges; /* Adjust register values based upon the ranges parameters. */ static void prom_adjust_regs(struct linux_prom_registers *regp, int nregs, struct linux_prom_ranges *rangep, int nranges) { int regc, rngc; for (regc = 0; regc < nregs; regc++) { for (rngc = 0; rngc < nranges; rngc++) if (regp[regc].which_io == rangep[rngc].ot_child_space) break; /* Fount it */ if (rngc == nranges) /* oops */ prom_printf("adjust_regs: Could not find range with matching bus type...\n"); regp[regc].which_io = rangep[rngc].ot_parent_space; regp[regc].phys_addr -= rangep[rngc].ot_child_base; regp[regc].phys_addr += rangep[rngc].ot_parent_base; } } static void prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1, struct linux_prom_ranges *ranges2, int nranges2) { int rng1c, rng2c; for (rng1c = 0; rng1c < nranges1; rng1c++) { for (rng2c = 0; rng2c < nranges2; rng2c++) if (ranges1[rng1c].ot_parent_space == ranges2[rng2c].ot_child_space && ranges1[rng1c].ot_parent_base >= ranges2[rng2c].ot_child_base && ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base > 0U) break; if (rng2c == nranges2) /* oops */ prom_printf("adjust_ranges: Could not find matching bus type...\n"); else if (ranges1[rng1c].ot_parent_base + ranges1[rng1c].or_size > ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size) ranges1[rng1c].or_size = ranges2[rng2c].ot_child_base + ranges2[rng2c].or_size - ranges1[rng1c].ot_parent_base; ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space; ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base; } } /* Apply probed obio ranges to registers passed, if no ranges return. */ void prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs) { if (num_obio_ranges) prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges); } EXPORT_SYMBOL(prom_apply_obio_ranges); void __init prom_ranges_init(void) { phandle node, obio_node; int success; num_obio_ranges = 0; /* Check for obio and sbus ranges. */ node = prom_getchild(prom_root_node); obio_node = prom_searchsiblings(node, "obio"); if (obio_node) { success = prom_getproperty(obio_node, "ranges", (char *) promlib_obio_ranges, sizeof(promlib_obio_ranges)); if (success != -1) num_obio_ranges = (success / sizeof(struct linux_prom_ranges)); } if (num_obio_ranges) prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges); } void prom_apply_generic_ranges(phandle node, phandle parent, struct linux_prom_registers *regs, int nregs) { int success; int num_ranges; struct linux_prom_ranges ranges[PROMREG_MAX]; success = prom_getproperty(node, "ranges", (char *) ranges, sizeof(ranges)); if (success != -1) { num_ranges = (success / sizeof(struct linux_prom_ranges)); if (parent) { struct linux_prom_ranges parent_ranges[PROMREG_MAX]; int num_parent_ranges; success = prom_getproperty(parent, "ranges", (char *) parent_ranges, sizeof(parent_ranges)); if (success != -1) { num_parent_ranges = (success / sizeof(struct linux_prom_ranges)); prom_adjust_ranges(ranges, num_ranges, parent_ranges, num_parent_ranges); } } prom_adjust_regs(regs, nregs, ranges, num_ranges); } }
linux-master
arch/sparc/prom/ranges.c
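prom_adjust_regs() above rebases each register through the matching ranges entry: find the range whose child space matches the register's which_io, then subtract the child base and add the parent base. A minimal standalone version of that arithmetic follows; the space numbers, bases and sizes are made up for the illustration.

#include <stdio.h>

struct toy_range {
	unsigned int child_space, parent_space;
	unsigned long child_base, parent_base;
	unsigned long size;
};

struct toy_reg {
	unsigned int which_io;
	unsigned long phys_addr;
};

/* Same translation as prom_adjust_regs(): match on the child space,
 * then rebase the address into the parent's space. */
static void adjust_reg(struct toy_reg *reg,
		       const struct toy_range *ranges, int nranges)
{
	int i;

	for (i = 0; i < nranges; i++) {
		if (reg->which_io != ranges[i].child_space)
			continue;
		reg->which_io = ranges[i].parent_space;
		reg->phys_addr -= ranges[i].child_base;
		reg->phys_addr += ranges[i].parent_base;
		return;
	}
	printf("no matching range for space %u\n", reg->which_io);
}

int main(void)
{
	/* Made-up range: child space 1 at 0x0 maps to parent space 0
	 * at 0xf0000000, 16 MB long. */
	struct toy_range r = { 1, 0, 0x0, 0xf0000000UL, 0x1000000UL };
	struct toy_reg reg = { 1, 0x8000 };

	adjust_reg(&reg, &r, 1);
	printf("reg now in space %u at %#lx\n", reg.which_io, reg.phys_addr);
	return 0;
}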
// SPDX-License-Identifier: GPL-2.0 /* * bootstr.c: Boot string/argument acquisition from the PROM. * * Copyright(C) 1995 David S. Miller ([email protected]) */ #include <linux/string.h> #include <asm/oplib.h> #include <linux/init.h> #define BARG_LEN 256 static char barg_buf[BARG_LEN] = { 0 }; static char fetched __initdata = 0; char * __init prom_getbootargs(void) { int iter; char *cp, *arg; /* This check saves us from a panic when bootfd patches args. */ if (fetched) { return barg_buf; } switch (prom_vers) { case PROM_V0: cp = barg_buf; /* Start from 1 and go over fd(0,0,0)kernel */ for (iter = 1; iter < 8; iter++) { arg = (*(romvec->pv_v0bootargs))->argv[iter]; if (arg == NULL) break; while (*arg != 0) { /* Leave place for space and null. */ if (cp >= barg_buf + BARG_LEN - 2) /* We might issue a warning here. */ break; *cp++ = *arg++; } *cp++ = ' '; if (cp >= barg_buf + BARG_LEN - 1) /* We might issue a warning here. */ break; } *cp = 0; break; case PROM_V2: case PROM_V3: /* * V3 PROM cannot supply as with more than 128 bytes * of an argument. But a smart bootstrap loader can. */ strscpy(barg_buf, *romvec->pv_v2bootargs.bootargs, sizeof(barg_buf)); break; default: break; } fetched = 1; return barg_buf; }
linux-master
arch/sparc/prom/bootstr_32.c
// SPDX-License-Identifier: GPL-2.0 /* * init.c: Initialize internal variables used by the PROM * library functions. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1998 Jakub Jelinek ([email protected]) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> struct linux_romvec *romvec; EXPORT_SYMBOL(romvec); enum prom_major_version prom_vers; unsigned int prom_rev, prom_prev; /* The root node of the prom device tree. */ phandle prom_root_node; EXPORT_SYMBOL(prom_root_node); /* Pointer to the device tree operations structure. */ struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the * routines in the prom library. * It gets passed the pointer to the PROM vector. */ void __init prom_init(struct linux_romvec *rp) { romvec = rp; switch(romvec->pv_romvers) { case 0: prom_vers = PROM_V0; break; case 2: prom_vers = PROM_V2; break; case 3: prom_vers = PROM_V3; break; default: prom_printf("PROMLIB: Bad PROM version %d\n", romvec->pv_romvers); prom_halt(); break; } prom_rev = romvec->pv_plugin_revision; prom_prev = romvec->pv_printrev; prom_nodeops = romvec->pv_nodeops; prom_root_node = prom_getsibling(0); if ((prom_root_node == 0) || ((s32)prom_root_node == -1)) prom_halt(); if((((unsigned long) prom_nodeops) == 0) || (((unsigned long) prom_nodeops) == -1)) prom_halt(); prom_meminit(); prom_ranges_init(); printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", romvec->pv_romvers, prom_rev); /* Initialization successful. */ }
linux-master
arch/sparc/prom/init_32.c
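The initialisation order matters: every other prom_*() helper in this library assumes prom_init() has already filled in romvec, prom_vers, prom_nodeops and prom_root_node. A minimal sketch of a caller (illustrative; the real hand-off happens in the early boot/setup path):

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/oplib.h>

static void __init example_prom_bringup(struct linux_romvec *rp)
{
	prom_init(rp);		/* halts on an unsupported PROM version */

	/* Only valid after prom_init() has run: */
	printk("PROMLIB: root node handle: %u\n", (unsigned int) prom_root_node);
}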
// SPDX-License-Identifier: GPL-2.0
/* console.c: Routines that deal with sending and receiving IO
 *            to/from the current console device using the PROM.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 * Copyright (C) 1996,1997 Jakub Jelinek ([email protected])
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <linux/string.h>

static int __prom_console_write_buf(const char *buf, int len)
{
	unsigned long args[7];
	int ret;

	args[0] = (unsigned long) "write";
	args[1] = 3;
	args[2] = 1;
	args[3] = (unsigned int) prom_stdout;
	args[4] = (unsigned long) buf;
	args[5] = (unsigned int) len;
	args[6] = (unsigned long) -1;

	p1275_cmd_direct(args);

	ret = (int) args[6];
	if (ret < 0)
		return -1;
	return ret;
}

void prom_console_write_buf(const char *buf, int len)
{
	while (len) {
		int n = __prom_console_write_buf(buf, len);
		if (n < 0)
			continue;
		len -= n;
		buf += len;
	}
}
linux-master
arch/sparc/prom/console_64.c
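A sketch of a caller layered on prom_console_write_buf(); the in-tree console path does its newline handling elsewhere, so the CR/LF expansion and the function name below are purely illustrative:

#include <linux/string.h>
#include <asm/oplib.h>

static void example_prom_puts(const char *s)
{
	while (*s) {
		const char *nl = strchr(s, '\n');
		int len = nl ? (int) (nl - s) : (int) strlen(s);

		if (len)
			prom_console_write_buf(s, len);
		if (nl)
			prom_console_write_buf("\r\n", 2);
		s += len + (nl ? 1 : 0);
	}
}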
// SPDX-License-Identifier: GPL-2.0
/*
 * p1275.c: Sun IEEE 1275 PROM low level interface routines
 *
 * Copyright (C) 1996,1997 Jakub Jelinek ([email protected])
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>

#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/spitfire.h>
#include <asm/pstate.h>
#include <asm/ldc.h>

struct {
	long prom_callback;			/* 0x00 */
	void (*prom_cif_handler)(long *);	/* 0x08 */
} p1275buf;

extern void prom_world(int);

extern void prom_cif_direct(unsigned long *args);
extern void prom_cif_callback(void);

/*
 * This provides SMP safety on the p1275buf.
 */
DEFINE_RAW_SPINLOCK(prom_entry_lock);

void p1275_cmd_direct(unsigned long *args)
{
	unsigned long flags;

	local_save_flags(flags);
	local_irq_restore((unsigned long)PIL_NMI);
	raw_spin_lock(&prom_entry_lock);

	prom_world(1);
	prom_cif_direct(args);
	prom_world(0);

	raw_spin_unlock(&prom_entry_lock);
	local_irq_restore(flags);
}

void prom_cif_init(void *cif_handler, void *cif_stack)
{
	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
}
linux-master
arch/sparc/prom/p1275.c
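Every caller of p1275_cmd_direct() packs its CIF request the same way: cell 0 is the service name, cell 1 the number of input cells, cell 2 the number of output cells, followed by the inputs and then output slots preset to -1 that the PROM fills in. A hypothetical wrapper for the standard "getproplen" client service, mirroring that packing (the wrapper name is invented; the real one lives in the prom library proper):

#include <asm/oplib.h>

static int example_getproplen(phandle node, const char *prop)
{
	unsigned long args[6];

	args[0] = (unsigned long) "getproplen";
	args[1] = 2;			/* two inputs: node, property name */
	args[2] = 1;			/* one output: the length */
	args[3] = (unsigned int) node;
	args[4] = (unsigned long) prop;
	args[5] = (unsigned long) -1;	/* filled in by the PROM */

	p1275_cmd_direct(args);

	return (int) args[5];
}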
// SPDX-License-Identifier: GPL-2.0 /* * misc.c: Miscellaneous prom functions that don't belong * anywhere else. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1996,1997 Jakub Jelinek ([email protected]) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/ldc.h> static int prom_service_exists(const char *service_name) { unsigned long args[5]; args[0] = (unsigned long) "test"; args[1] = 1; args[2] = 1; args[3] = (unsigned long) service_name; args[4] = (unsigned long) -1; p1275_cmd_direct(args); if (args[4]) return 0; return 1; } void prom_sun4v_guest_soft_state(void) { const char *svc = "SUNW,soft-state-supported"; unsigned long args[3]; if (!prom_service_exists(svc)) return; args[0] = (unsigned long) svc; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } /* Reset and reboot the machine with the command 'bcommand'. */ void prom_reboot(const char *bcommand) { unsigned long args[4]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_reboot(bcommand); #endif args[0] = (unsigned long) "boot"; args[1] = 1; args[2] = 0; args[3] = (unsigned long) bcommand; p1275_cmd_direct(args); } /* Forth evaluate the expression contained in 'fstring'. */ void prom_feval(const char *fstring) { unsigned long args[5]; if (!fstring || fstring[0] == 0) return; args[0] = (unsigned long) "interpret"; args[1] = 1; args[2] = 1; args[3] = (unsigned long) fstring; args[4] = (unsigned long) -1; p1275_cmd_direct(args); } EXPORT_SYMBOL(prom_feval); /* Drop into the prom, with the chance to continue with the 'go' * prom command. */ void prom_cmdline(void) { unsigned long args[3]; unsigned long flags; local_irq_save(flags); #ifdef CONFIG_SMP smp_capture(); #endif args[0] = (unsigned long) "enter"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); #ifdef CONFIG_SMP smp_release(); #endif local_irq_restore(flags); } /* Drop into the prom, but completely terminate the program. * No chance of continuing. */ void notrace prom_halt(void) { unsigned long args[3]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif again: args[0] = (unsigned long) "exit"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); goto again; /* PROM is out to get me -DaveM */ } void prom_halt_power_off(void) { unsigned long args[3]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif args[0] = (unsigned long) "SUNW,power-off"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); /* if nothing else helps, we just halt */ prom_halt(); } /* Get the idprom and stuff it into buffer 'idbuf'. Returns the * format type. 'num_bytes' is the number of bytes that your idbuf * has space for. Returns 0xff on error. 
*/ unsigned char prom_get_idprom(char *idbuf, int num_bytes) { int len; len = prom_getproplen(prom_root_node, "idprom"); if ((len >num_bytes) || (len == -1)) return 0xff; if (!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes)) return idbuf[0]; return 0xff; } int prom_get_mmu_ihandle(void) { phandle node; int ret; if (prom_mmu_ihandle_cache != 0) return prom_mmu_ihandle_cache; node = prom_finddevice(prom_chosen_path); ret = prom_getint(node, prom_mmu_name); if (ret == -1 || ret == 0) prom_mmu_ihandle_cache = -1; else prom_mmu_ihandle_cache = ret; return ret; } static int prom_get_memory_ihandle(void) { static int memory_ihandle_cache; phandle node; int ret; if (memory_ihandle_cache != 0) return memory_ihandle_cache; node = prom_finddevice("/chosen"); ret = prom_getint(node, "memory"); if (ret == -1 || ret == 0) memory_ihandle_cache = -1; else memory_ihandle_cache = ret; return ret; } /* Load explicit I/D TLB entries. */ static long tlb_load(const char *type, unsigned long index, unsigned long tte_data, unsigned long vaddr) { unsigned long args[9]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 5; args[2] = 1; args[3] = (unsigned long) type; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = vaddr; args[6] = tte_data; args[7] = index; args[8] = (unsigned long) -1; p1275_cmd_direct(args); return (long) args[8]; } long prom_itlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { return tlb_load("SUNW,itlb-load", index, tte_data, vaddr); } long prom_dtlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr); } int prom_map(int mode, unsigned long size, unsigned long vaddr, unsigned long paddr) { unsigned long args[11]; int ret; args[0] = (unsigned long) prom_callmethod_name; args[1] = 7; args[2] = 1; args[3] = (unsigned long) prom_map_name; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = (unsigned int) mode; args[6] = size; args[7] = vaddr; args[8] = 0; args[9] = paddr; args[10] = (unsigned long) -1; p1275_cmd_direct(args); ret = (int) args[10]; if (ret == 0) ret = -1; return ret; } void prom_unmap(unsigned long size, unsigned long vaddr) { unsigned long args[7]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 4; args[2] = 0; args[3] = (unsigned long) prom_unmap_name; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = size; args[6] = vaddr; p1275_cmd_direct(args); } /* Set aside physical memory which is not touched or modified * across soft resets. */ int prom_retain(const char *name, unsigned long size, unsigned long align, unsigned long *paddr) { unsigned long args[11]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 5; args[2] = 3; args[3] = (unsigned long) "SUNW,retain"; args[4] = (unsigned int) prom_get_memory_ihandle(); args[5] = align; args[6] = size; args[7] = (unsigned long) name; args[8] = (unsigned long) -1; args[9] = (unsigned long) -1; args[10] = (unsigned long) -1; p1275_cmd_direct(args); if (args[8]) return (int) args[8]; /* Next we get "phys_high" then "phys_low". On 64-bit * the phys_high cell is don't care since the phys_low * cell has the full value. */ *paddr = args[10]; return 0; } /* Get "Unumber" string for the SIMM at the given * memory address. Usually this will be of the form * "Uxxxx" where xxxx is a decimal number which is * etched into the motherboard next to the SIMM slot * in question. 
*/ int prom_getunumber(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { unsigned long args[12]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 7; args[2] = 2; args[3] = (unsigned long) "SUNW,get-unumber"; args[4] = (unsigned int) prom_get_memory_ihandle(); args[5] = buflen; args[6] = (unsigned long) buf; args[7] = 0; args[8] = phys_addr; args[9] = (unsigned int) syndrome_code; args[10] = (unsigned long) -1; args[11] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[10]; } /* Power management extensions. */ void prom_sleepself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,sleep-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } int prom_sleepsystem(void) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,sleep-system"; args[1] = 0; args[2] = 1; args[3] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[3]; } int prom_wakeupsystem(void) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,wakeup-system"; args[1] = 0; args[2] = 1; args[3] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[3]; } #ifdef CONFIG_SMP void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) { unsigned long args[6]; args[0] = (unsigned long) "SUNW,start-cpu"; args[1] = 3; args[2] = 0; args[3] = (unsigned int) cpunode; args[4] = pc; args[5] = arg; p1275_cmd_direct(args); } void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) { unsigned long args[6]; args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid"; args[1] = 3; args[2] = 0; args[3] = (unsigned int) cpuid; args[4] = pc; args[5] = arg; p1275_cmd_direct(args); } void prom_stopcpu_cpuid(int cpuid) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid"; args[1] = 1; args[2] = 0; args[3] = (unsigned int) cpuid; p1275_cmd_direct(args); } void prom_stopself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,stop-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } void prom_idleself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,idle-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } void prom_resumecpu(int cpunode) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,resume-cpu"; args[1] = 1; args[2] = 0; args[3] = (unsigned int) cpunode; p1275_cmd_direct(args); } #endif
linux-master
arch/sparc/prom/misc_64.c
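A hedged usage sketch for prom_retain() above; the region name, size and alignment are made-up example values, and the returned physical address is meaningful only when the call reports success:

#include <asm/oplib.h>

static int example_retain_region(unsigned long *paddr_out)
{
	/* Ask the PROM for 1 MB, 1 MB aligned, preserved across soft resets. */
	return prom_retain("example-region", 1UL << 20, 1UL << 20, paddr_out);
}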
// SPDX-License-Identifier: GPL-2.0 /* * tree.c: Basic device tree traversal/scanning for the Linux * prom library. * * Copyright (C) 1995 David S. Miller ([email protected]) */ #include <linux/string.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> extern void restore_current(void); static char promlib_buf[128]; /* Internal version of prom_getchild that does not alter return values. */ static phandle __prom_getchild(phandle node) { unsigned long flags; phandle cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_child(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the child of node 'node' or zero if no this node has no * direct descendent. */ phandle prom_getchild(phandle node) { phandle cnode; if ((s32)node == -1) return 0; cnode = __prom_getchild(node); if (cnode == 0 || (s32)cnode == -1) return 0; return cnode; } EXPORT_SYMBOL(prom_getchild); /* Internal version of prom_getsibling that does not alter return values. */ static phandle __prom_getsibling(phandle node) { unsigned long flags; phandle cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_nextnode(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the next sibling of node 'node' or zero if no more siblings * at this level of depth in the tree. */ phandle prom_getsibling(phandle node) { phandle sibnode; if ((s32)node == -1) return 0; sibnode = __prom_getsibling(node); if (sibnode == 0 || (s32)sibnode == -1) return 0; return sibnode; } EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. */ int prom_getproplen(phandle node, const char *prop) { int ret; unsigned long flags; if((!node) || (!prop)) return -1; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_proplen(node, prop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproplen); /* Acquire a property 'prop' at node 'node' and place it in * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize) { int plen, ret; unsigned long flags; plen = prom_getproplen(node, prop); if((plen > bufsize) || (plen == 0) || (plen == -1)) return -1; /* Ok, things seem all right. */ spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_getprop(node, prop, buffer); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ int prom_getint(phandle node, char *prop) { static int intprop; if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) return intprop; return -1; } EXPORT_SYMBOL(prom_getint); /* Acquire an integer property, upon error return the passed default * integer. */ int prom_getintdefault(phandle node, char *property, int deflt) { int retval; retval = prom_getint(node, property); if(retval == -1) return deflt; return retval; } EXPORT_SYMBOL(prom_getintdefault); /* Acquire a boolean property, 1=TRUE 0=FALSE. 
*/ int prom_getbool(phandle node, char *prop) { int retval; retval = prom_getproplen(node, prop); if(retval == -1) return 0; return 1; } EXPORT_SYMBOL(prom_getbool); /* Acquire a property whose value is a string, returns a null * string on error. The char pointer is the user supplied string * buffer. */ void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size) { int len; len = prom_getproperty(node, prop, user_buf, ubuf_size); if(len != -1) return; user_buf[0] = 0; } EXPORT_SYMBOL(prom_getstring); /* Search siblings at 'node_start' for a node with name * 'nodename'. Return node if successful, zero if not. */ phandle prom_searchsiblings(phandle node_start, char *nodename) { phandle thisnode; int error; for(thisnode = node_start; thisnode; thisnode=prom_getsibling(thisnode)) { error = prom_getproperty(thisnode, "name", promlib_buf, sizeof(promlib_buf)); /* Should this ever happen? */ if(error == -1) continue; if(strcmp(nodename, promlib_buf)==0) return thisnode; } return 0; } EXPORT_SYMBOL(prom_searchsiblings); /* Interal version of nextprop that does not alter return values. */ static char *__prom_nextprop(phandle node, char * oprop) { unsigned long flags; char *prop; spin_lock_irqsave(&prom_lock, flags); prop = prom_nodeops->no_nextprop(node, oprop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return prop; } /* Return the property type string after property type 'oprop' * at node 'node' . Returns empty string if no more * property types for this node. */ char *prom_nextprop(phandle node, char *oprop, char *buffer) { if (node == 0 || (s32)node == -1) return ""; return __prom_nextprop(node, oprop); } EXPORT_SYMBOL(prom_nextprop); phandle prom_finddevice(char *name) { char nbuf[128]; char *s = name, *d; phandle node = prom_root_node, node2; unsigned int which_io, phys_addr; struct linux_prom_registers reg[PROMREG_MAX]; while (*s++) { if (!*s) return node; /* path '.../' is legal */ node = prom_getchild(node); for (d = nbuf; *s != 0 && *s != '@' && *s != '/';) *d++ = *s++; *d = 0; node = prom_searchsiblings(node, nbuf); if (!node) return 0; if (*s == '@') { if (isxdigit(s[1]) && s[2] == ',') { which_io = simple_strtoul(s+1, NULL, 16); phys_addr = simple_strtoul(s+3, &d, 16); if (d != s + 3 && (!*d || *d == '/') && d <= s + 3 + 8) { node2 = node; while (node2 && (s32)node2 != -1) { if (prom_getproperty (node2, "reg", (char *)reg, sizeof (reg)) > 0) { if (which_io == reg[0].which_io && phys_addr == reg[0].phys_addr) { node = node2; break; } } node2 = prom_getsibling(node2); if (!node2 || (s32)node2 == -1) break; node2 = prom_searchsiblings(prom_getsibling(node2), nbuf); } } } while (*s != 0 && *s != '/') s++; } } return node; } EXPORT_SYMBOL(prom_finddevice); /* Set property 'pname' at node 'node' to value 'value' which has a length * of 'size' bytes. Return the number of bytes the prom accepted. */ int prom_setprop(phandle node, const char *pname, char *value, int size) { unsigned long flags; int ret; if (size == 0) return 0; if ((pname == NULL) || (value == NULL)) return 0; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_setprop(node, pname, value, size); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_setprop); phandle prom_inst2pkg(int inst) { phandle node; unsigned long flags; spin_lock_irqsave(&prom_lock, flags); node = (*romvec->pv_v2devops.v2_inst2pkg)(inst); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); if ((s32)node == -1) return 0; return node; }
linux-master
arch/sparc/prom/tree_32.c
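Illustrative walk over the first level of the PROM device tree using the helpers above (not an in-tree function); it prints the name of every child of the root node:

#include <linux/printk.h>
#include <asm/oplib.h>

static void example_list_root_children(void)
{
	char name[64];
	phandle node;

	for (node = prom_getchild(prom_root_node); node != 0;
	     node = prom_getsibling(node)) {
		prom_getstring(node, "name", name, sizeof(name));
		printk("PROMLIB: child node: %s\n", name);
	}
}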
// SPDX-License-Identifier: GPL-2.0
/*
 * hibernate.c:  Hibernation support specific for sparc64.
 *
 * Copyright (C) 2013 Kirill V Tkhai ([email protected])
 */

#include <linux/mm.h>

#include <asm/hibernate.h>
#include <asm/visasm.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlb.h>

struct saved_context saved_context;

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = PFN_DOWN((unsigned long)&__nosave_begin);
	unsigned long nosave_end_pfn = PFN_DOWN((unsigned long)&__nosave_end);

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

void save_processor_state(void)
{
	save_and_clear_fpu();
}

void restore_processor_state(void)
{
	struct mm_struct *mm = current->active_mm;

	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
}
linux-master
arch/sparc/power/hibernate.c
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for AES encryption optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/aesni-intel_glue.c * * Copyright (C) 2008, Intel Corp. * Author: Huang Ying <[email protected]> * * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD * interface for 64-bit kernels. * Authors: Adrian Hoban <[email protected]> * Gabriele Paoloni <[email protected]> * Tadeusz Struk ([email protected]) * Aidan O'Mahony ([email protected]) * Copyright (c) 2010, Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" struct aes_ops { void (*encrypt)(const u64 *key, const u32 *input, u32 *output); void (*decrypt)(const u64 *key, const u32 *input, u32 *output); void (*load_encrypt_keys)(const u64 *key); void (*load_decrypt_keys)(const u64 *key); void (*ecb_encrypt)(const u64 *key, const u64 *input, u64 *output, unsigned int len); void (*ecb_decrypt)(const u64 *key, const u64 *input, u64 *output, unsigned int len); void (*cbc_encrypt)(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); void (*cbc_decrypt)(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); void (*ctr_crypt)(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); }; struct crypto_sparc64_aes_ctx { struct aes_ops *ops; u64 key[AES_MAX_KEYLENGTH / sizeof(u64)]; u32 key_length; u32 expanded_key_length; }; extern void aes_sparc64_encrypt_128(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_encrypt_192(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_encrypt_256(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_decrypt_128(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_decrypt_192(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_decrypt_256(const u64 *key, const u32 *input, u32 *output); extern void aes_sparc64_load_encrypt_keys_128(const u64 *key); extern void aes_sparc64_load_encrypt_keys_192(const u64 *key); extern void aes_sparc64_load_encrypt_keys_256(const u64 *key); extern void aes_sparc64_load_decrypt_keys_128(const u64 *key); extern void aes_sparc64_load_decrypt_keys_192(const u64 *key); extern void aes_sparc64_load_decrypt_keys_256(const u64 *key); extern void aes_sparc64_ecb_encrypt_128(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_ecb_encrypt_192(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_ecb_encrypt_256(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_ecb_decrypt_128(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_ecb_decrypt_192(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_ecb_decrypt_256(const u64 *key, const u64 *input, u64 *output, unsigned int len); extern void aes_sparc64_cbc_encrypt_128(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_cbc_encrypt_192(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_cbc_encrypt_256(const u64 *key, const u64 *input, u64 
*output, unsigned int len, u64 *iv); extern void aes_sparc64_cbc_decrypt_128(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_cbc_decrypt_192(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_cbc_decrypt_256(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_ctr_crypt_128(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_ctr_crypt_192(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input, u64 *output, unsigned int len, u64 *iv); static struct aes_ops aes128_ops = { .encrypt = aes_sparc64_encrypt_128, .decrypt = aes_sparc64_decrypt_128, .load_encrypt_keys = aes_sparc64_load_encrypt_keys_128, .load_decrypt_keys = aes_sparc64_load_decrypt_keys_128, .ecb_encrypt = aes_sparc64_ecb_encrypt_128, .ecb_decrypt = aes_sparc64_ecb_decrypt_128, .cbc_encrypt = aes_sparc64_cbc_encrypt_128, .cbc_decrypt = aes_sparc64_cbc_decrypt_128, .ctr_crypt = aes_sparc64_ctr_crypt_128, }; static struct aes_ops aes192_ops = { .encrypt = aes_sparc64_encrypt_192, .decrypt = aes_sparc64_decrypt_192, .load_encrypt_keys = aes_sparc64_load_encrypt_keys_192, .load_decrypt_keys = aes_sparc64_load_decrypt_keys_192, .ecb_encrypt = aes_sparc64_ecb_encrypt_192, .ecb_decrypt = aes_sparc64_ecb_decrypt_192, .cbc_encrypt = aes_sparc64_cbc_encrypt_192, .cbc_decrypt = aes_sparc64_cbc_decrypt_192, .ctr_crypt = aes_sparc64_ctr_crypt_192, }; static struct aes_ops aes256_ops = { .encrypt = aes_sparc64_encrypt_256, .decrypt = aes_sparc64_decrypt_256, .load_encrypt_keys = aes_sparc64_load_encrypt_keys_256, .load_decrypt_keys = aes_sparc64_load_decrypt_keys_256, .ecb_encrypt = aes_sparc64_ecb_encrypt_256, .ecb_decrypt = aes_sparc64_ecb_decrypt_256, .cbc_encrypt = aes_sparc64_cbc_encrypt_256, .cbc_decrypt = aes_sparc64_cbc_decrypt_256, .ctr_crypt = aes_sparc64_ctr_crypt_256, }; extern void aes_sparc64_key_expand(const u32 *in_key, u64 *output_key, unsigned int key_len); static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); switch (key_len) { case AES_KEYSIZE_128: ctx->expanded_key_length = 0xb0; ctx->ops = &aes128_ops; break; case AES_KEYSIZE_192: ctx->expanded_key_length = 0xd0; ctx->ops = &aes192_ops; break; case AES_KEYSIZE_256: ctx->expanded_key_length = 0xf0; ctx->ops = &aes256_ops; break; default: return -EINVAL; } aes_sparc64_key_expand((const u32 *)in_key, &ctx->key[0], key_len); ctx->key_length = key_len; return 0; } static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); } static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); } static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); } static int ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, 
true); if (err) return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); while ((nbytes = walk.nbytes) != 0) { ctx->ops->ecb_encrypt(&ctx->key[0], walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE)); err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } static int ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); const u64 *key_end; struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; ctx->ops->load_decrypt_keys(&ctx->key[0]); key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; while ((nbytes = walk.nbytes) != 0) { ctx->ops->ecb_decrypt(key_end, walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE)); err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); while ((nbytes = walk.nbytes) != 0) { ctx->ops->cbc_encrypt(&ctx->key[0], walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), walk.iv); err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); const u64 *key_end; struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; ctx->ops->load_decrypt_keys(&ctx->key[0]); key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; while ((nbytes = walk.nbytes) != 0) { ctx->ops->cbc_decrypt(key_end, walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), walk.iv); err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } fprs_write(0); return err; } static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx, struct skcipher_walk *walk) { u8 *ctrblk = walk->iv; u64 keystream[AES_BLOCK_SIZE / sizeof(u64)]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk, keystream, AES_BLOCK_SIZE); crypto_xor_cpy(dst, (u8 *) keystream, src, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } static int ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct crypto_sparc64_aes_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; ctx->ops->load_encrypt_keys(&ctx->key[0]); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ctx->ops->ctr_crypt(&ctx->key[0], walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), walk.iv); err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } if (walk.nbytes) { ctr_crypt_final(ctx, &walk); err = skcipher_walk_done(&walk, 0); } fprs_write(0); return err; } static struct crypto_alg cipher_alg = { .cra_name = "aes", .cra_driver_name = "aes-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, 
.cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = crypto_aes_encrypt, .cia_decrypt = crypto_aes_decrypt } } }; static struct skcipher_alg skcipher_algs[] = { { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aes_set_key_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct crypto_sparc64_aes_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key_skcipher, .encrypt = ctr_crypt, .decrypt = ctr_crypt, .chunksize = AES_BLOCK_SIZE, } }; static bool __init sparc64_has_aes_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_AES)) return false; return true; } static int __init aes_sparc64_mod_init(void) { int err; if (!sparc64_has_aes_opcode()) { pr_info("sparc64 aes opcodes not available.\n"); return -ENODEV; } pr_info("Using sparc64 aes opcodes optimized AES implementation\n"); err = crypto_register_alg(&cipher_alg); if (err) return err; err = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (err) crypto_unregister_alg(&cipher_alg); return err; } static void __exit aes_sparc64_mod_fini(void) { crypto_unregister_alg(&cipher_alg); crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(aes_sparc64_mod_init); module_exit(aes_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, sparc64 aes opcode accelerated"); MODULE_ALIAS_CRYPTO("aes"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/aes_glue.c
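A sketch of a kernel-side consumer of the "cbc(aes)" algorithm registered above, using the generic skcipher API; the function and buffer names are invented for the example, and the sparc64 driver is picked automatically when its priority wins. For CBC, len must be a multiple of AES_BLOCK_SIZE, iv must be 16 bytes, and data must be addressable through a scatterlist (i.e. not a stack buffer):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 iv[16], void *data, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* In-place CBC encryption; waits if the backend runs asynchronously. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}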
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes. * * This is based largely upon crypto/sha256_generic.c * * Copyright (c) Jean-Luc Cooke <[email protected]> * Copyright (c) Andrew McDonald <[email protected]> * Copyright (c) 2002 James Morris <[email protected]> * SHA224 Support Copyright 2007 Intel Corporation <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha256_base.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data, unsigned int rounds); static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data, unsigned int len, unsigned int partial) { unsigned int done = 0; sctx->count += len; if (partial) { done = SHA256_BLOCK_SIZE - partial; memcpy(sctx->buf + partial, data, done); sha256_sparc64_transform(sctx->state, sctx->buf, 1); } if (len - done >= SHA256_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE; sha256_sparc64_transform(sctx->state, data + done, rounds); done += rounds * SHA256_BLOCK_SIZE; } memcpy(sctx->buf, data + done, len - done); } static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA256_BLOCK_SIZE; /* Handle the fast case right here */ if (partial + len < SHA256_BLOCK_SIZE) { sctx->count += len; memcpy(sctx->buf + partial, data, len); } else __sha256_sparc64_update(sctx, data, len, partial); return 0; } static int sha256_sparc64_final(struct shash_desc *desc, u8 *out) { struct sha256_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be32 *dst = (__be32 *)out; __be64 bits; static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->count % SHA256_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((SHA256_BLOCK_SIZE+56) - index); /* We need to fill a whole block for __sha256_sparc64_update() */ if (padlen <= 56) { sctx->count += padlen; memcpy(sctx->buf + index, padding, padlen); } else { __sha256_sparc64_update(sctx, padding, padlen, index); } __sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); /* Store state in digest */ for (i = 0; i < 8; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) { u8 D[SHA256_DIGEST_SIZE]; sha256_sparc64_final(desc, D); memcpy(hash, D, SHA224_DIGEST_SIZE); memzero_explicit(D, SHA256_DIGEST_SIZE); return 0; } static int sha256_sparc64_export(struct shash_desc *desc, void *out) { struct sha256_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha256_sparc64_import(struct shash_desc *desc, const void *in) { struct sha256_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = sha256_base_init, .update = sha256_sparc64_update, .final = sha256_sparc64_final, .export = sha256_sparc64_export, .import = sha256_sparc64_import, .descsize = sizeof(struct sha256_state), .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name= "sha256-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct shash_alg sha224_alg = { .digestsize = SHA224_DIGEST_SIZE, .init = sha224_base_init, .update = sha256_sparc64_update, .final = sha224_sparc64_final, .descsize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name= "sha224-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static bool __init sparc64_has_sha256_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_SHA256)) return false; return true; } static int __init sha256_sparc64_mod_init(void) { if (sparc64_has_sha256_opcode()) { int ret = crypto_register_shash(&sha224_alg); if (ret < 0) return ret; ret = crypto_register_shash(&sha256_alg); if (ret < 0) { crypto_unregister_shash(&sha224_alg); return ret; } pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n"); return 0; } pr_info("sparc64 sha256 opcode not available.\n"); return -ENODEV; } static void __exit sha256_sparc64_mod_fini(void) { crypto_unregister_shash(&sha224_alg); crypto_unregister_shash(&sha256_alg); } module_init(sha256_sparc64_mod_init); module_exit(sha256_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated"); MODULE_ALIAS_CRYPTO("sha224"); MODULE_ALIAS_CRYPTO("sha256"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/sha256_glue.c
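A sketch of hashing a buffer through the shash API; "sha256" resolves to the sha256-sparc64 driver above when the opcodes are present, because of its higher cra_priority. The helper name is invented, and digest must have room for SHA256_DIGEST_SIZE bytes:

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>

static int example_sha256_digest(const u8 *data, unsigned int len,
				 u8 digest[SHA256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_tfm_digest(tfm, data, len, digest);

	crypto_free_shash(tfm);
	return err;
}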
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for CRC32C optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/crc32c-intel.c * * Copyright (C) 2008 Intel Corporation * Authors: Austin Zhang <[email protected]> * Kent Liu <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/crc32.h> #include <crypto/internal/hash.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" /* * Setting the seed allows arbitrary accumulators and flexible XOR policy * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. */ static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, unsigned int keylen) { u32 *mctx = crypto_shash_ctx(hash); if (keylen != sizeof(u32)) return -EINVAL; *mctx = le32_to_cpup((__le32 *)key); return 0; } static int crc32c_sparc64_init(struct shash_desc *desc) { u32 *mctx = crypto_shash_ctx(desc->tfm); u32 *crcp = shash_desc_ctx(desc); *crcp = *mctx; return 0; } extern void crc32c_sparc64(u32 *crcp, const u64 *data, unsigned int len); static void crc32c_compute(u32 *crcp, const u64 *data, unsigned int len) { unsigned int asm_len; asm_len = len & ~7U; if (asm_len) { crc32c_sparc64(crcp, data, asm_len); data += asm_len / 8; len -= asm_len; } if (len) *crcp = __crc32c_le(*crcp, (const unsigned char *) data, len); } static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { u32 *crcp = shash_desc_ctx(desc); crc32c_compute(crcp, (const u64 *) data, len); return 0; } static int __crc32c_sparc64_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out) { u32 tmp = *crcp; crc32c_compute(&tmp, (const u64 *) data, len); *(__le32 *) out = ~cpu_to_le32(tmp); return 0; } static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out); } static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) { u32 *crcp = shash_desc_ctx(desc); *(__le32 *) out = ~cpu_to_le32p(crcp); return 0; } static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len, out); } static int crc32c_sparc64_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); *key = ~0; return 0; } #define CHKSUM_BLOCK_SIZE 1 #define CHKSUM_DIGEST_SIZE 4 static struct shash_alg alg = { .setkey = crc32c_sparc64_setkey, .init = crc32c_sparc64_init, .update = crc32c_sparc64_update, .final = crc32c_sparc64_final, .finup = crc32c_sparc64_finup, .digest = crc32c_sparc64_digest, .descsize = sizeof(u32), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "crc32c-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_ctxsize = sizeof(u32), .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_init = crc32c_sparc64_cra_init, } }; static bool __init sparc64_has_crc32c_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_CRC32C)) return false; return true; } static int __init crc32c_sparc64_mod_init(void) { if (sparc64_has_crc32c_opcode()) { pr_info("Using sparc64 crc32c opcode optimized CRC32C implementation\n"); return crypto_register_shash(&alg); } 
pr_info("sparc64 crc32c opcode not available.\n"); return -ENODEV; } static void __exit crc32c_sparc64_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(crc32c_sparc64_mod_init); module_exit(crc32c_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); MODULE_ALIAS_CRYPTO("crc32c"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/crc32c_glue.c
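The seed/XOR policy described in the comment above, shown in one line with the generic library helper (the shash driver implements the same start-from-all-ones and final inversion via its cra_init and final steps); the wrapper name is illustrative:

#include <linux/crc32c.h>

static u32 example_crc32c(const void *data, unsigned int len)
{
	/* Seed with ~0 and invert the result: the conventional CRC32C usage. */
	return ~crc32c(~0U, data, len);
}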
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for MD5 hashing optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c * and crypto/md5.c which are: * * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <[email protected]> * Copyright (c) Jean-Francois Dive <[email protected]> * Copyright (c) Mathias Krause <[email protected]> * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 James Morris <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/md5.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" asmlinkage void md5_sparc64_transform(u32 *digest, const char *data, unsigned int rounds); static int md5_sparc64_init(struct shash_desc *desc) { struct md5_state *mctx = shash_desc_ctx(desc); mctx->hash[0] = MD5_H0; mctx->hash[1] = MD5_H1; mctx->hash[2] = MD5_H2; mctx->hash[3] = MD5_H3; le32_to_cpu_array(mctx->hash, 4); mctx->byte_count = 0; return 0; } static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data, unsigned int len, unsigned int partial) { unsigned int done = 0; sctx->byte_count += len; if (partial) { done = MD5_HMAC_BLOCK_SIZE - partial; memcpy((u8 *)sctx->block + partial, data, done); md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); } if (len - done >= MD5_HMAC_BLOCK_SIZE) { const unsigned int rounds = (len - done) / MD5_HMAC_BLOCK_SIZE; md5_sparc64_transform(sctx->hash, data + done, rounds); done += rounds * MD5_HMAC_BLOCK_SIZE; } memcpy(sctx->block, data + done, len - done); } static int md5_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; /* Handle the fast case right here */ if (partial + len < MD5_HMAC_BLOCK_SIZE) { sctx->byte_count += len; memcpy((u8 *)sctx->block + partial, data, len); } else __md5_sparc64_update(sctx, data, len, partial); return 0; } /* Add padding and return the message digest. */ static int md5_sparc64_final(struct shash_desc *desc, u8 *out) { struct md5_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; u32 *dst = (u32 *)out; __le64 bits; static const u8 padding[MD5_HMAC_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_le64(sctx->byte_count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((MD5_HMAC_BLOCK_SIZE+56) - index); /* We need to fill a whole block for __md5_sparc64_update() */ if (padlen <= 56) { sctx->byte_count += padlen; memcpy((u8 *)sctx->block + index, padding, padlen); } else { __md5_sparc64_update(sctx, padding, padlen, index); } __md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); /* Store state in digest */ for (i = 0; i < MD5_HASH_WORDS; i++) dst[i] = sctx->hash[i]; /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int md5_sparc64_export(struct shash_desc *desc, void *out) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int md5_sparc64_import(struct shash_desc *desc, const void *in) { struct md5_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_sparc64_init, .update = md5_sparc64_update, .final = md5_sparc64_final, .export = md5_sparc64_export, .import = md5_sparc64_import, .descsize = sizeof(struct md5_state), .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name= "md5-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static bool __init sparc64_has_md5_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_MD5)) return false; return true; } static int __init md5_sparc64_mod_init(void) { if (sparc64_has_md5_opcode()) { pr_info("Using sparc64 md5 opcode optimized MD5 implementation\n"); return crypto_register_shash(&alg); } pr_info("sparc64 md5 opcode not available.\n"); return -ENODEV; } static void __exit md5_sparc64_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(md5_sparc64_mod_init); module_exit(md5_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Message Digest Algorithm, sparc64 md5 opcode accelerated"); MODULE_ALIAS_CRYPTO("md5"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/md5_glue.c
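Worked example of the padding arithmetic in md5_sparc64_final() above: with byte_count % 64 == 20, index is 20, so padlen = 56 - 20 = 36 and the 0x80-plus-zeros padding brings the block to byte 56, leaving exactly 8 bytes for the little-endian bit count. With index == 60, padlen = (64 + 56) - 60 = 60, which exceeds 56, so the else branch routes the padding through __md5_sparc64_update(): 4 bytes complete and flush the current block, the remaining 56 zero bytes start the next one, and the length again lands at offset 56. The SHA-1, SHA-256 and SHA-512 finals in the neighbouring files follow the same scheme with their respective block sizes.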
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for SHA1 hashing optimized for sparc64 crypto opcodes. * * This is based largely upon arch/x86/crypto/sha1_ssse3_glue.c * * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <[email protected]> * Copyright (c) Jean-Francois Dive <[email protected]> * Copyright (c) Mathias Krause <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" asmlinkage void sha1_sparc64_transform(u32 *digest, const char *data, unsigned int rounds); static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data, unsigned int len, unsigned int partial) { unsigned int done = 0; sctx->count += len; if (partial) { done = SHA1_BLOCK_SIZE - partial; memcpy(sctx->buffer + partial, data, done); sha1_sparc64_transform(sctx->state, sctx->buffer, 1); } if (len - done >= SHA1_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; sha1_sparc64_transform(sctx->state, data + done, rounds); done += rounds * SHA1_BLOCK_SIZE; } memcpy(sctx->buffer, data + done, len - done); } static int sha1_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; /* Handle the fast case right here */ if (partial + len < SHA1_BLOCK_SIZE) { sctx->count += len; memcpy(sctx->buffer + partial, data, len); } else __sha1_sparc64_update(sctx, data, len, partial); return 0; } /* Add padding and return the message digest. */ static int sha1_sparc64_final(struct shash_desc *desc, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be32 *dst = (__be32 *)out; __be64 bits; static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, }; bits = cpu_to_be64(sctx->count << 3); /* Pad out to 56 mod 64 and append length */ index = sctx->count % SHA1_BLOCK_SIZE; padlen = (index < 56) ? 
(56 - index) : ((SHA1_BLOCK_SIZE+56) - index); /* We need to fill a whole block for __sha1_sparc64_update() */ if (padlen <= 56) { sctx->count += padlen; memcpy(sctx->buffer + index, padding, padlen); } else { __sha1_sparc64_update(sctx, padding, padlen, index); } __sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_be32(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha1_sparc64_export(struct shash_desc *desc, void *out) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int sha1_sparc64_import(struct shash_desc *desc, const void *in) { struct sha1_state *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = sha1_sparc64_update, .final = sha1_sparc64_final, .export = sha1_sparc64_export, .import = sha1_sparc64_import, .descsize = sizeof(struct sha1_state), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static bool __init sparc64_has_sha1_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_SHA1)) return false; return true; } static int __init sha1_sparc64_mod_init(void) { if (sparc64_has_sha1_opcode()) { pr_info("Using sparc64 sha1 opcode optimized SHA-1 implementation\n"); return crypto_register_shash(&alg); } pr_info("sparc64 sha1 opcode not available.\n"); return -ENODEV; } static void __exit sha1_sparc64_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_sparc64_mod_init); module_exit(sha1_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); MODULE_ALIAS_CRYPTO("sha1"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/sha1_glue.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* This is a dummy device table linked into all of the crypto
 * opcode drivers.  It serves to trigger the module autoloading
 * mechanisms in userspace which scan the OF device tree and
 * load any modules which have device table entries that
 * match OF device nodes.
 */
static const struct of_device_id crypto_opcode_match[] = {
	{ .name = "cpu", .compatible = "sun4v", },
	{},
};
MODULE_DEVICE_TABLE(of, crypto_opcode_match);
linux-master
arch/sparc/crypto/crop_devid.c
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for SHA512 hashing optimized for sparc64 crypto opcodes. * * This is based largely upon crypto/sha512_generic.c * * Copyright (c) Jean-Luc Cooke <[email protected]> * Copyright (c) Andrew McDonald <[email protected]> * Copyright (c) 2003 Kyle McMartin <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" asmlinkage void sha512_sparc64_transform(u64 *digest, const char *data, unsigned int rounds); static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data, unsigned int len, unsigned int partial) { unsigned int done = 0; if ((sctx->count[0] += len) < len) sctx->count[1]++; if (partial) { done = SHA512_BLOCK_SIZE - partial; memcpy(sctx->buf + partial, data, done); sha512_sparc64_transform(sctx->state, sctx->buf, 1); } if (len - done >= SHA512_BLOCK_SIZE) { const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE; sha512_sparc64_transform(sctx->state, data + done, rounds); done += rounds * SHA512_BLOCK_SIZE; } memcpy(sctx->buf, data + done, len - done); } static int sha512_sparc64_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE; /* Handle the fast case right here */ if (partial + len < SHA512_BLOCK_SIZE) { if ((sctx->count[0] += len) < len) sctx->count[1]++; memcpy(sctx->buf + partial, data, len); } else __sha512_sparc64_update(sctx, data, len, partial); return 0; } static int sha512_sparc64_final(struct shash_desc *desc, u8 *out) { struct sha512_state *sctx = shash_desc_ctx(desc); unsigned int i, index, padlen; __be64 *dst = (__be64 *)out; __be64 bits[2]; static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, }; /* Save number of bits */ bits[1] = cpu_to_be64(sctx->count[0] << 3); bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); /* Pad out to 112 mod 128 and append length */ index = sctx->count[0] % SHA512_BLOCK_SIZE; padlen = (index < 112) ? 
(112 - index) : ((SHA512_BLOCK_SIZE+112) - index); /* We need to fill a whole block for __sha512_sparc64_update() */ if (padlen <= 112) { if ((sctx->count[0] += padlen) < padlen) sctx->count[1]++; memcpy(sctx->buf + index, padding, padlen); } else { __sha512_sparc64_update(sctx, padding, padlen, index); } __sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112); /* Store state in digest */ for (i = 0; i < 8; i++) dst[i] = cpu_to_be64(sctx->state[i]); /* Wipe context */ memset(sctx, 0, sizeof(*sctx)); return 0; } static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash) { u8 D[64]; sha512_sparc64_final(desc, D); memcpy(hash, D, 48); memzero_explicit(D, 64); return 0; } static struct shash_alg sha512 = { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = sha512_sparc64_update, .final = sha512_sparc64_final, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name= "sha512-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static struct shash_alg sha384 = { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = sha512_sparc64_update, .final = sha384_sparc64_final, .descsize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name= "sha384-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static bool __init sparc64_has_sha512_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_SHA512)) return false; return true; } static int __init sha512_sparc64_mod_init(void) { if (sparc64_has_sha512_opcode()) { int ret = crypto_register_shash(&sha384); if (ret < 0) return ret; ret = crypto_register_shash(&sha512); if (ret < 0) { crypto_unregister_shash(&sha384); return ret; } pr_info("Using sparc64 sha512 opcode optimized SHA-512/SHA-384 implementation\n"); return 0; } pr_info("sparc64 sha512 opcode not available.\n"); return -ENODEV; } static void __exit sha512_sparc64_mod_fini(void) { crypto_unregister_shash(&sha384); crypto_unregister_shash(&sha512); } module_init(sha512_sparc64_mod_init); module_exit(sha512_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated"); MODULE_ALIAS_CRYPTO("sha384"); MODULE_ALIAS_CRYPTO("sha512"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/sha512_glue.c
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for DES encryption optimized for sparc64 crypto opcodes. * * Copyright (C) 2012 David S. Miller <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" struct des_sparc64_ctx { u64 encrypt_expkey[DES_EXPKEY_WORDS / 2]; u64 decrypt_expkey[DES_EXPKEY_WORDS / 2]; }; struct des3_ede_sparc64_ctx { u64 encrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2]; u64 decrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2]; }; static void encrypt_to_decrypt(u64 *d, const u64 *e) { const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1; int i; for (i = 0; i < DES_EXPKEY_WORDS / 2; i++) *d++ = *s--; } extern void des_sparc64_key_expand(const u32 *input_key, u64 *key); static int des_set_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); int err; /* Even though we have special instructions for key expansion, * we call des_verify_key() so that we don't have to write our own * weak key detection code. */ err = crypto_des_verify_key(tfm, key); if (err) return err; des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]); encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]); return 0; } static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return des_set_key(crypto_skcipher_tfm(tfm), key, keylen); } extern void des_sparc64_crypt(const u64 *key, const u64 *input, u64 *output); static void sparc_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u64 *K = ctx->encrypt_expkey; des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); } static void sparc_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u64 *K = ctx->decrypt_expkey; des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); } extern void des_sparc64_load_keys(const u64 *key); extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output, unsigned int len); static int __ecb_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) des_sparc64_load_keys(&ctx->encrypt_expkey[0]); else des_sparc64_load_keys(&ctx->decrypt_expkey[0]); while ((nbytes = walk.nbytes) != 0) { des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE)); err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } static int ecb_encrypt(struct skcipher_request *req) { return __ecb_crypt(req, true); } static int ecb_decrypt(struct skcipher_request *req) { return __ecb_crypt(req, false); } extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output, unsigned int len, u64 *iv); extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output, unsigned int len, u64 *iv); static int __cbc_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); 
struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) des_sparc64_load_keys(&ctx->encrypt_expkey[0]); else des_sparc64_load_keys(&ctx->decrypt_expkey[0]); while ((nbytes = walk.nbytes) != 0) { if (encrypt) des_sparc64_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE), walk.iv); else des_sparc64_cbc_decrypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE), walk.iv); err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc_encrypt(struct skcipher_request *req) { return __cbc_crypt(req, true); } static int cbc_decrypt(struct skcipher_request *req) { return __cbc_crypt(req, false); } static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm); u64 k1[DES_EXPKEY_WORDS / 2]; u64 k2[DES_EXPKEY_WORDS / 2]; u64 k3[DES_EXPKEY_WORDS / 2]; int err; err = crypto_des3_ede_verify_key(tfm, key); if (err) return err; des_sparc64_key_expand((const u32 *)key, k1); key += DES_KEY_SIZE; des_sparc64_key_expand((const u32 *)key, k2); key += DES_KEY_SIZE; des_sparc64_key_expand((const u32 *)key, k3); memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1)); encrypt_to_decrypt(&dctx->encrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0]); memcpy(&dctx->encrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2], &k3[0], sizeof(k3)); encrypt_to_decrypt(&dctx->decrypt_expkey[0], &k3[0]); memcpy(&dctx->decrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0], sizeof(k2)); encrypt_to_decrypt(&dctx->decrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2], &k1[0]); return 0; } static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen); } extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input, u64 *output); static void sparc_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u64 *K = ctx->encrypt_expkey; des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); } static void sparc_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u64 *K = ctx->decrypt_expkey; des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst); } extern void des3_ede_sparc64_load_keys(const u64 *key); extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len); static int __ecb3_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; const u64 *K; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) K = &ctx->encrypt_expkey[0]; else K = &ctx->decrypt_expkey[0]; des3_ede_sparc64_load_keys(K); while ((nbytes = walk.nbytes) != 0) { des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE)); err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } static int ecb3_encrypt(struct skcipher_request *req) { return __ecb3_crypt(req, true); } static int ecb3_decrypt(struct skcipher_request *req) { return __ecb3_crypt(req, false); } extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len, u64 
*iv); extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input, u64 *output, unsigned int len, u64 *iv); static int __cbc3_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; const u64 *K; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) K = &ctx->encrypt_expkey[0]; else K = &ctx->decrypt_expkey[0]; des3_ede_sparc64_load_keys(K); while ((nbytes = walk.nbytes) != 0) { if (encrypt) des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE), walk.iv); else des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, DES_BLOCK_SIZE), walk.iv); err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc3_encrypt(struct skcipher_request *req) { return __cbc3_crypt(req, true); } static int cbc3_decrypt(struct skcipher_request *req) { return __cbc3_crypt(req, false); } static struct crypto_alg cipher_algs[] = { { .cra_name = "des", .cra_driver_name = "des-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des_sparc64_ctx), .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES_KEY_SIZE, .cia_max_keysize = DES_KEY_SIZE, .cia_setkey = des_set_key, .cia_encrypt = sparc_des_encrypt, .cia_decrypt = sparc_des_decrypt } } }, { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), .cra_alignmask = 7, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_set_key, .cia_encrypt = sparc_des3_ede_encrypt, .cia_decrypt = sparc_des3_ede_decrypt } } } }; static struct skcipher_alg skcipher_algs[] = { { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = des_set_key_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = des_set_key_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3_ede-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = des3_ede_set_key_skcipher, .encrypt = ecb3_encrypt, .decrypt = ecb3_decrypt, }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3_ede-sparc64", 
.base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_set_key_skcipher, .encrypt = cbc3_encrypt, .decrypt = cbc3_decrypt, } }; static bool __init sparc64_has_des_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_DES)) return false; return true; } static int __init des_sparc64_mod_init(void) { int err; if (!sparc64_has_des_opcode()) { pr_info("sparc64 des opcodes not available.\n"); return -ENODEV; } pr_info("Using sparc64 des opcodes optimized DES implementation\n"); err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); if (err) return err; err = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (err) crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); return err; } static void __exit des_sparc64_mod_fini(void) { crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs)); crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(des_sparc64_mod_init); module_exit(des_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); MODULE_ALIAS_CRYPTO("des"); MODULE_ALIAS_CRYPTO("des3_ede"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/des_glue.c
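A minimal standalone sketch of the key-schedule reversal idea behind encrypt_to_decrypt() in des_glue.c above: the decrypt schedule is simply the encrypt schedule's 64-bit words taken in reverse order. This is a toy, not kernel code; the array size and names are illustrative assumptions.

/* Toy illustration of reversing an expanded key schedule (assumed size). */
#include <stdio.h>
#include <stdint.h>

#define EXPKEY_WORDS 16	/* assumed toy size; the kernel uses DES_EXPKEY_WORDS / 2 */

static void reverse_schedule(uint64_t *d, const uint64_t *e)
{
	const uint64_t *s = e + EXPKEY_WORDS - 1;
	int i;

	for (i = 0; i < EXPKEY_WORDS; i++)
		*d++ = *s--;	/* walk the encrypt schedule backwards */
}

int main(void)
{
	uint64_t enc[EXPKEY_WORDS], dec[EXPKEY_WORDS];
	int i;

	for (i = 0; i < EXPKEY_WORDS; i++)
		enc[i] = i;	/* stand-in values for the real expanded key */

	reverse_schedule(dec, enc);
	printf("dec[0]=%llu dec[last]=%llu\n",
	       (unsigned long long)dec[0],
	       (unsigned long long)dec[EXPKEY_WORDS - 1]);
	return 0;
}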
// SPDX-License-Identifier: GPL-2.0-only /* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes. * * Copyright (C) 2012 David S. Miller <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <crypto/algapi.h> #include <crypto/internal/skcipher.h> #include <asm/fpumacro.h> #include <asm/pstate.h> #include <asm/elf.h> #include "opcodes.h" #define CAMELLIA_MIN_KEY_SIZE 16 #define CAMELLIA_MAX_KEY_SIZE 32 #define CAMELLIA_BLOCK_SIZE 16 #define CAMELLIA_TABLE_BYTE_LEN 272 struct camellia_sparc64_ctx { u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)]; int key_len; }; extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key, unsigned int key_len, u64 *decrypt_key); static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key, unsigned int key_len) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); const u32 *in_key = (const u32 *) _in_key; if (key_len != 16 && key_len != 24 && key_len != 32) return -EINVAL; ctx->key_len = key_len; camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0], key_len, &ctx->decrypt_key[0]); return 0; } static int camellia_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { return camellia_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); } extern void camellia_sparc64_crypt(const u64 *key, const u32 *input, u32 *output, unsigned int key_len); static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->encrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->decrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); } extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len); typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key); extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds; extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds; static int __ecb_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; ecb_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_ecb_crypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_ecb_crypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; if (encrypt) key = &ctx->encrypt_key[0]; else key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static int ecb_encrypt(struct skcipher_request *req) { return __ecb_crypt(req, true); } static int ecb_decrypt(struct skcipher_request *req) { return __ecb_crypt(req, false); } typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len, const u64 *key, u64 *iv); extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds; extern cbc_crypt_op 
camellia_sparc64_cbc_encrypt_4_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds; extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds; static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_cbc_encrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_encrypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; key = &ctx->encrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct camellia_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; cbc_crypt_op *op; const u64 *key; unsigned int nbytes; int err; op = camellia_sparc64_cbc_decrypt_3_grand_rounds; if (ctx->key_len != 16) op = camellia_sparc64_cbc_decrypt_4_grand_rounds; err = skcipher_walk_virt(&walk, req, true); if (err) return err; key = &ctx->decrypt_key[0]; camellia_sparc64_load_keys(key, ctx->key_len); while ((nbytes = walk.nbytes) != 0) { op(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, CAMELLIA_BLOCK_SIZE), key, walk.iv); err = skcipher_walk_done(&walk, nbytes % CAMELLIA_BLOCK_SIZE); } fprs_write(0); return err; } static struct crypto_alg cipher_alg = { .cra_name = "camellia", .cra_driver_name = "camellia-sparc64", .cra_priority = SPARC_CR_OPCODE_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAMELLIA_BLOCK_SIZE, .cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE, .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE, .cia_setkey = camellia_set_key, .cia_encrypt = camellia_encrypt, .cia_decrypt = camellia_decrypt } } }; static struct skcipher_alg skcipher_algs[] = { { .base.cra_name = "ecb(camellia)", .base.cra_driver_name = "ecb-camellia-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .setkey = camellia_set_key_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(camellia)", .base.cra_driver_name = "cbc-camellia-sparc64", .base.cra_priority = SPARC_CR_OPCODE_PRIORITY, .base.cra_blocksize = CAMELLIA_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct camellia_sparc64_ctx), .base.cra_alignmask = 7, .base.cra_module = THIS_MODULE, .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_set_key_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, } }; static bool __init sparc64_has_camellia_opcode(void) { unsigned long cfr; if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO)) return false; __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); if (!(cfr & CFR_CAMELLIA)) return false; return true; } static int __init camellia_sparc64_mod_init(void) { int err; if 
(!sparc64_has_camellia_opcode()) { pr_info("sparc64 camellia opcodes not available.\n"); return -ENODEV; } pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n"); err = crypto_register_alg(&cipher_alg); if (err) return err; err = crypto_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (err) crypto_unregister_alg(&cipher_alg); return err; } static void __exit camellia_sparc64_mod_fini(void) { crypto_unregister_alg(&cipher_alg); crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); } module_init(camellia_sparc64_mod_init); module_exit(camellia_sparc64_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); MODULE_ALIAS_CRYPTO("camellia"); #include "crop_devid.c"
linux-master
arch/sparc/crypto/camellia_glue.c
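The camellia glue above picks a 3- or 4-grand-rounds routine based on key length ("op = ..._3_grand_rounds; if (ctx->key_len != 16) op = ..._4_grand_rounds;"). A hedged standalone sketch of that dispatch pattern follows; the function names and behavior here are invented for illustration only.

/* Toy dispatch on key length, mirroring the grand-rounds selection above. */
#include <stdio.h>
#include <stdint.h>

typedef void crypt_op(const uint64_t *in, uint64_t *out, unsigned int len);

static void crypt_3_rounds(const uint64_t *in, uint64_t *out, unsigned int len)
{
	(void)in; (void)out;
	printf("3 grand rounds, %u bytes\n", len);
}

static void crypt_4_rounds(const uint64_t *in, uint64_t *out, unsigned int len)
{
	(void)in; (void)out;
	printf("4 grand rounds, %u bytes\n", len);
}

static crypt_op *pick_op(unsigned int key_len)
{
	/* 128-bit keys use the short schedule; 192/256-bit keys the long one */
	return key_len == 16 ? crypt_3_rounds : crypt_4_rounds;
}

int main(void)
{
	uint64_t buf[2] = { 0, 0 };

	pick_op(16)(buf, buf, sizeof(buf));	/* 3 grand rounds */
	pick_op(32)(buf, buf, sizeof(buf));	/* 4 grand rounds */
	return 0;
}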
// SPDX-License-Identifier: GPL-2.0
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/module.h>

#include <asm/prom.h>

int fb_is_primary_device(struct fb_info *info)
{
	struct device *dev = info->device;
	struct device_node *node;

	if (console_set_on_cmdline)
		return 0;

	node = dev->of_node;
	if (node && node == of_console_device)
		return 1;

	return 0;
}
EXPORT_SYMBOL(fb_is_primary_device);

MODULE_DESCRIPTION("Sparc fbdev helpers");
MODULE_LICENSE("GPL");
linux-master
arch/sparc/video/fbdev.c
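fb_is_primary_device() above treats a framebuffer as primary only when its device-tree node matches the firmware console node and no console was forced on the command line. A toy model of that decision, with made-up types standing in for the kernel's:

/* Toy model (assumed types, not the kernel API) of the primary-fb check. */
#include <stdio.h>
#include <stdbool.h>

struct node { const char *name; };

static bool is_primary(const struct node *dev_node, const struct node *console_node,
		       bool console_set_on_cmdline)
{
	if (console_set_on_cmdline)
		return false;		/* command-line console overrides firmware */
	return dev_node && dev_node == console_node;
}

int main(void)
{
	struct node fb = { "screen@0" };

	printf("%d\n", is_primary(&fb, &fb, false));	/* 1: matches firmware console */
	printf("%d\n", is_primary(&fb, &fb, true));	/* 0: cmdline console wins */
	return 0;
}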
// SPDX-License-Identifier: GPL-2.0 /* * arch/sparc/math-emu/math.c * * Copyright (C) 1998 Peter Maydell ([email protected]) * Copyright (C) 1997, 1999 Jakub Jelinek ([email protected]) * Copyright (C) 1999 David S. Miller ([email protected]) * * This is a good place to start if you're trying to understand the * emulation code, because it's pretty simple. What we do is * essentially analyse the instruction to work out what the operation * is and which registers are involved. We then execute the appropriate * FXXXX function. [The floating point queue introduces a minor wrinkle; * see below...] * The fxxxxx.c files each emulate a single insn. They look relatively * simple because the complexity is hidden away in an unholy tangle * of preprocessor macros. * * The first layer of macros is single.h, double.h, quad.h. Generally * these files define macros for working with floating point numbers * of the three IEEE formats. FP_ADD_D(R,A,B) is for adding doubles, * for instance. These macros are usually defined as calls to more * generic macros (in this case _FP_ADD(D,2,R,X,Y) where the number * of machine words required to store the given IEEE format is passed * as a parameter. [double.h and co check the number of bits in a word * and define FP_ADD_D & co appropriately]. * The generic macros are defined in op-common.h. This is where all * the grotty stuff like handling NaNs is coded. To handle the possible * word sizes macros in op-common.h use macros like _FP_FRAC_SLL_##wc() * where wc is the 'number of machine words' parameter (here 2). * These are defined in the third layer of macros: op-1.h, op-2.h * and op-4.h. These handle operations on floating point numbers composed * of 1,2 and 4 machine words respectively. [For example, on sparc64 * doubles are one machine word so macros in double.h eventually use * constructs in op-1.h, but on sparc32 they use op-2.h definitions.] * soft-fp.h is on the same level as op-common.h, and defines some * macros which are independent of both word size and FP format. * Finally, sfp-machine.h is the machine dependent part of the * code: it defines the word size and what type a word is. It also * defines how _FP_MUL_MEAT_t() maps to _FP_MUL_MEAT_n_* : op-n.h * provide several possible flavours of multiply algorithm, most * of which require that you supply some form of asm or C primitive to * do the actual multiply. (such asm primitives should be defined * in sfp-machine.h too). udivmodti4.c is the same sort of thing. * * There may be some errors here because I'm working from a * SPARC architecture manual V9, and what I really want is V8... * Also, the insns which can generate exceptions seem to be a * greater subset of the FPops than for V9 (for example, FCMPED * has to be emulated on V8). So I think I'm going to have * to emulate them all just to be on the safe side... * * Emulation routines originate from soft-fp package, which is * part of glibc and has appropriate copyrights in it (allegedly). * * NB: on sparc int == long == 4 bytes, long long == 8 bytes. * Most bits of the kernel seem to go for long rather than int, * so we follow that practice... */ /* TODO: * fpsave() saves the FP queue but fpload() doesn't reload it. * Therefore when we context switch or change FPU ownership * we have to check to see if the queue had anything in it and * emulate it if it did. This is going to be a pain. 
*/ #include <linux/types.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include "sfp-util_32.h" #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #include <math-emu/quad.h> #define FLOATFUNC(x) extern int x(void *,void *,void *) /* The Vn labels indicate what version of the SPARC architecture gas thinks * each insn is. This is from the binutils source :-> */ /* quadword instructions */ #define FSQRTQ 0x02b /* v8 */ #define FADDQ 0x043 /* v8 */ #define FSUBQ 0x047 /* v8 */ #define FMULQ 0x04b /* v8 */ #define FDIVQ 0x04f /* v8 */ #define FDMULQ 0x06e /* v8 */ #define FQTOS 0x0c7 /* v8 */ #define FQTOD 0x0cb /* v8 */ #define FITOQ 0x0cc /* v8 */ #define FSTOQ 0x0cd /* v8 */ #define FDTOQ 0x0ce /* v8 */ #define FQTOI 0x0d3 /* v8 */ #define FCMPQ 0x053 /* v8 */ #define FCMPEQ 0x057 /* v8 */ /* single/double instructions (subnormal): should all work */ #define FSQRTS 0x029 /* v7 */ #define FSQRTD 0x02a /* v7 */ #define FADDS 0x041 /* v6 */ #define FADDD 0x042 /* v6 */ #define FSUBS 0x045 /* v6 */ #define FSUBD 0x046 /* v6 */ #define FMULS 0x049 /* v6 */ #define FMULD 0x04a /* v6 */ #define FDIVS 0x04d /* v6 */ #define FDIVD 0x04e /* v6 */ #define FSMULD 0x069 /* v6 */ #define FDTOS 0x0c6 /* v6 */ #define FSTOD 0x0c9 /* v6 */ #define FSTOI 0x0d1 /* v6 */ #define FDTOI 0x0d2 /* v6 */ #define FABSS 0x009 /* v6 */ #define FCMPS 0x051 /* v6 */ #define FCMPES 0x055 /* v6 */ #define FCMPD 0x052 /* v6 */ #define FCMPED 0x056 /* v6 */ #define FMOVS 0x001 /* v6 */ #define FNEGS 0x005 /* v6 */ #define FITOS 0x0c4 /* v6 */ #define FITOD 0x0c8 /* v6 */ #define FSR_TEM_SHIFT 23UL #define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT) #define FSR_AEXC_SHIFT 5UL #define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT) #define FSR_CEXC_SHIFT 0UL #define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT) static int do_one_mathemu(u32 insn, unsigned long *fsr, unsigned long *fregs); /* Unlike the Sparc64 version (which has a struct fpustate), we * pass the taskstruct corresponding to the task which currently owns the * FPU. This is partly because we don't have the fpustate struct and * partly because the task owning the FPU isn't always current (as is * the case for the Sparc64 port). This is probably SMP-related... * This function returns 1 if all queued insns were emulated successfully. * The test for unimplemented FPop in kernel mode has been moved into * kernel/traps.c for simplicity. */ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt) { /* regs->pc isn't necessarily the PC at which the offending insn is sitting. * The FPU maintains a queue of FPops which cause traps. * When it hits an instruction that requires that the trapped op succeeded * (usually because it reads a reg. that the trapped op wrote) then it * causes this exception. We need to emulate all the insns on the queue * and then allow the op to proceed. * This code should also handle the case where the trap was precise, * in which case the queue length is zero and regs->pc points at the * single FPop to be emulated. (this case is untested, though :->) * You'll need this case if you want to be able to emulate all FPops * because the FPU either doesn't exist or has been software-disabled. * [The UltraSPARC makes FP a precise trap; this isn't as stupid as it * might sound because the Ultra does funky things with a superscalar * architecture.] 
*/ /* You wouldn't believe how often I typed 'ftp' when I meant 'fpt' :-> */ int i; int retcode = 0; /* assume all succeed */ unsigned long insn; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); #ifdef DEBUG_MATHEMU printk("In do_mathemu()... pc is %08lx\n", regs->pc); printk("fpqdepth is %ld\n", fpt->thread.fpqdepth); for (i = 0; i < fpt->thread.fpqdepth; i++) printk("%d: %08lx at %08lx\n", i, fpt->thread.fpqueue[i].insn, (unsigned long)fpt->thread.fpqueue[i].insn_addr); #endif if (fpt->thread.fpqdepth == 0) { /* no queue, guilty insn is at regs->pc */ #ifdef DEBUG_MATHEMU printk("precise trap at %08lx\n", regs->pc); #endif if (!get_user(insn, (u32 __user *) regs->pc)) { retcode = do_one_mathemu(insn, &fpt->thread.fsr, fpt->thread.float_regs); if (retcode) { /* in this case we need to fix up PC & nPC */ regs->pc = regs->npc; regs->npc += 4; } } return retcode; } /* Normal case: need to empty the queue... */ for (i = 0; i < fpt->thread.fpqdepth; i++) { retcode = do_one_mathemu(fpt->thread.fpqueue[i].insn, &(fpt->thread.fsr), fpt->thread.float_regs); if (!retcode) /* insn failed, no point doing any more */ break; } /* Now empty the queue and clear the queue_not_empty flag */ if (retcode) fpt->thread.fsr &= ~(0x3000 | FSR_CEXC_MASK); else fpt->thread.fsr &= ~0x3000; fpt->thread.fpqdepth = 0; return retcode; } /* All routines returning an exception to raise should detect * such exceptions _before_ rounding to be consistent with * the behavior of the hardware in the implemented cases * (and thus with the recommendations in the V9 architecture * manual). * * We return 0 if a SIGFPE should be sent, 1 otherwise. */ static inline int record_exception(unsigned long *pfsr, int eflag) { unsigned long fsr = *pfsr; int would_trap; /* Determine if this exception would have generated a trap. */ would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL; /* If trapping, we only want to signal one bit. */ if (would_trap != 0) { eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT); if ((eflag & (eflag - 1)) != 0) { if (eflag & FP_EX_INVALID) eflag = FP_EX_INVALID; else if (eflag & FP_EX_OVERFLOW) eflag = FP_EX_OVERFLOW; else if (eflag & FP_EX_UNDERFLOW) eflag = FP_EX_UNDERFLOW; else if (eflag & FP_EX_DIVZERO) eflag = FP_EX_DIVZERO; else if (eflag & FP_EX_INEXACT) eflag = FP_EX_INEXACT; } } /* Set CEXC, here is the rule: * * In general all FPU ops will set one and only one * bit in the CEXC field, this is always the case * when the IEEE exception trap is enabled in TEM. */ fsr &= ~(FSR_CEXC_MASK); fsr |= ((long)eflag << FSR_CEXC_SHIFT); /* Set the AEXC field, rule is: * * If a trap would not be generated, the * CEXC just generated is OR'd into the * existing value of AEXC. */ if (would_trap == 0) fsr |= ((long)eflag << FSR_AEXC_SHIFT); /* If trapping, indicate fault trap type IEEE. */ if (would_trap != 0) fsr |= (1UL << 14); *pfsr = fsr; return (would_trap ? 0 : 1); } typedef union { u32 s; u64 d; u64 q[2]; } *argp; static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) { /* Emulate the given insn, updating fsr and fregs appropriately. */ int type = 0; /* r is rd, b is rs2 and a is rs1. 
The *u arg tells whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack) non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */ #define TYPE(dummy, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) int freg; argp rs1 = NULL, rs2 = NULL, rd = NULL; FP_DECL_EX; FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); int IR; long fsr; #ifdef DEBUG_MATHEMU printk("In do_mathemu(), emulating %08lx\n", insn); #endif if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ { switch ((insn >> 5) & 0x1ff) { case FSQRTQ: TYPE(3,3,1,3,1,0,0); break; case FADDQ: case FSUBQ: case FMULQ: case FDIVQ: TYPE(3,3,1,3,1,3,1); break; case FDMULQ: TYPE(3,3,1,2,1,2,1); break; case FQTOS: TYPE(3,1,1,3,1,0,0); break; case FQTOD: TYPE(3,2,1,3,1,0,0); break; case FITOQ: TYPE(3,3,1,1,0,0,0); break; case FSTOQ: TYPE(3,3,1,1,1,0,0); break; case FDTOQ: TYPE(3,3,1,2,1,0,0); break; case FQTOI: TYPE(3,1,0,3,1,0,0); break; case FSQRTS: TYPE(2,1,1,1,1,0,0); break; case FSQRTD: TYPE(2,2,1,2,1,0,0); break; case FADDD: case FSUBD: case FMULD: case FDIVD: TYPE(2,2,1,2,1,2,1); break; case FADDS: case FSUBS: case FMULS: case FDIVS: TYPE(2,1,1,1,1,1,1); break; case FSMULD: TYPE(2,2,1,1,1,1,1); break; case FDTOS: TYPE(2,1,1,2,1,0,0); break; case FSTOD: TYPE(2,2,1,1,1,0,0); break; case FSTOI: TYPE(2,1,0,1,1,0,0); break; case FDTOI: TYPE(2,1,0,2,1,0,0); break; case FITOS: TYPE(2,1,1,1,0,0,0); break; case FITOD: TYPE(2,2,1,1,0,0,0); break; case FMOVS: case FABSS: case FNEGS: TYPE(2,1,0,1,0,0,0); break; } } else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ { switch ((insn >> 5) & 0x1ff) { case FCMPS: TYPE(3,0,0,1,1,1,1); break; case FCMPES: TYPE(3,0,0,1,1,1,1); break; case FCMPD: TYPE(3,0,0,2,1,2,1); break; case FCMPED: TYPE(3,0,0,2,1,2,1); break; case FCMPQ: TYPE(3,0,0,3,1,3,1); break; case FCMPEQ: TYPE(3,0,0,3,1,3,1); break; } } if (!type) { /* oops, didn't recognise that FPop */ #ifdef DEBUG_MATHEMU printk("attempt to emulate unrecognised FPop!\n"); #endif return 0; } /* Decode the registers to be used */ freg = (*pfsr >> 14) & 0xf; *pfsr &= ~0x1c000; /* clear the traptype bits */ freg = ((insn >> 14) & 0x1f); switch (type & 0x3) { /* is rs1 single, double or quad? */ case 3: if (freg & 3) { /* quadwords must have bits 4&5 of the */ /* encoded reg. number set to zero. */ *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } } rs1 = (argp)&fregs[freg]; switch (type & 0x7) { case 7: FP_UNPACK_QP (QA, rs1); break; case 6: FP_UNPACK_DP (DA, rs1); break; case 5: FP_UNPACK_SP (SA, rs1); break; } freg = (insn & 0x1f); switch ((type >> 3) & 0x3) { /* same again for rs2 */ case 3: if (freg & 3) { /* quadwords must have bits 4&5 of the */ /* encoded reg. number set to zero. */ *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } } rs2 = (argp)&fregs[freg]; switch ((type >> 3) & 0x7) { case 7: FP_UNPACK_QP (QB, rs2); break; case 6: FP_UNPACK_DP (DB, rs2); break; case 5: FP_UNPACK_SP (SB, rs2); break; } freg = ((insn >> 25) & 0x1f); switch ((type >> 6) & 0x3) { /* and finally rd. This one's a bit different */ case 0: /* dest is fcc. 
(this must be FCMPQ or FCMPEQ) */ if (freg) { /* V8 has only one set of condition codes, so */ /* anything but 0 in the rd field is an error */ *pfsr |= (6 << 14); /* (should probably flag as invalid opcode */ return 0; /* but SIGFPE will do :-> ) */ } break; case 3: if (freg & 3) { /* quadwords must have bits 4&5 of the */ /* encoded reg. number set to zero. */ *pfsr |= (6 << 14); return 0; /* simulate invalid_fp_register exception */ } fallthrough; case 2: if (freg & 1) { /* doublewords must have bit 5 zeroed */ *pfsr |= (6 << 14); return 0; } fallthrough; case 1: rd = (void *)&fregs[freg]; break; } #ifdef DEBUG_MATHEMU printk("executing insn...\n"); #endif /* do the Right Thing */ switch ((insn >> 5) & 0x1ff) { /* + */ case FADDS: FP_ADD_S (SR, SA, SB); break; case FADDD: FP_ADD_D (DR, DA, DB); break; case FADDQ: FP_ADD_Q (QR, QA, QB); break; /* - */ case FSUBS: FP_SUB_S (SR, SA, SB); break; case FSUBD: FP_SUB_D (DR, DA, DB); break; case FSUBQ: FP_SUB_Q (QR, QA, QB); break; /* * */ case FMULS: FP_MUL_S (SR, SA, SB); break; case FSMULD: FP_CONV (D, S, 2, 1, DA, SA); FP_CONV (D, S, 2, 1, DB, SB); case FMULD: FP_MUL_D (DR, DA, DB); break; case FDMULQ: FP_CONV (Q, D, 4, 2, QA, DA); FP_CONV (Q, D, 4, 2, QB, DB); case FMULQ: FP_MUL_Q (QR, QA, QB); break; /* / */ case FDIVS: FP_DIV_S (SR, SA, SB); break; case FDIVD: FP_DIV_D (DR, DA, DB); break; case FDIVQ: FP_DIV_Q (QR, QA, QB); break; /* sqrt */ case FSQRTS: FP_SQRT_S (SR, SB); break; case FSQRTD: FP_SQRT_D (DR, DB); break; case FSQRTQ: FP_SQRT_Q (QR, QB); break; /* mov */ case FMOVS: rd->s = rs2->s; break; case FABSS: rd->s = rs2->s & 0x7fffffff; break; case FNEGS: rd->s = rs2->s ^ 0x80000000; break; /* float to int */ case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break; case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break; case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break; /* int to float */ case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break; case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break; case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break; /* float to float */ case FSTOD: FP_CONV (D, S, 2, 1, DR, SB); break; case FSTOQ: FP_CONV (Q, S, 4, 1, QR, SB); break; case FDTOQ: FP_CONV (Q, D, 4, 2, QR, DB); break; case FDTOS: FP_CONV (S, D, 1, 2, SR, DB); break; case FQTOS: FP_CONV (S, Q, 1, 4, SR, QB); break; case FQTOD: FP_CONV (D, Q, 2, 4, DR, QB); break; /* comparison */ case FCMPS: case FCMPES: FP_CMP_S(IR, SB, SA, 3); if (IR == 3 && (((insn >> 5) & 0x1ff) == FCMPES || FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB))) FP_SET_EXCEPTION (FP_EX_INVALID); break; case FCMPD: case FCMPED: FP_CMP_D(IR, DB, DA, 3); if (IR == 3 && (((insn >> 5) & 0x1ff) == FCMPED || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) FP_SET_EXCEPTION (FP_EX_INVALID); break; case FCMPQ: case FCMPEQ: FP_CMP_Q(IR, QB, QA, 3); if (IR == 3 && (((insn >> 5) & 0x1ff) == FCMPEQ || FP_ISSIGNAN_Q(QA) || FP_ISSIGNAN_Q(QB))) FP_SET_EXCEPTION (FP_EX_INVALID); } if (!FP_INHIBIT_RESULTS) { switch ((type >> 6) & 0x7) { case 0: fsr = *pfsr; if (IR == -1) IR = 2; /* fcc is always fcc0 */ fsr &= ~0xc00; fsr |= (IR << 10); *pfsr = fsr; break; case 1: rd->s = IR; break; case 5: FP_PACK_SP (rd, SR); break; case 6: FP_PACK_DP (rd, DR); break; case 7: FP_PACK_QP (rd, QR); break; } } if (_fex == 0) return 1; /* success! */ return record_exception(pfsr, _fex); }
linux-master
arch/sparc/math-emu/math_32.c
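record_exception() in math_32.c reports exactly one IEEE exception bit in CEXC when a trap is enabled, choosing invalid over overflow over underflow over divide-by-zero over inexact. A standalone sketch of that priority selection; the flag values below are assumptions, not the soft-fp constants.

/* Toy single-bit selection rule, mirroring record_exception() above. */
#include <stdio.h>

#define EX_INVALID   0x10	/* assumed toy values */
#define EX_OVERFLOW  0x08
#define EX_UNDERFLOW 0x04
#define EX_DIVZERO   0x02
#define EX_INEXACT   0x01

static int pick_one(int eflag, int tem)
{
	eflag &= tem;			/* keep only trap-enabled exceptions */
	if (eflag & (eflag - 1)) {	/* more than one bit still set? */
		if (eflag & EX_INVALID)
			eflag = EX_INVALID;
		else if (eflag & EX_OVERFLOW)
			eflag = EX_OVERFLOW;
		else if (eflag & EX_UNDERFLOW)
			eflag = EX_UNDERFLOW;
		else if (eflag & EX_DIVZERO)
			eflag = EX_DIVZERO;
		else
			eflag = EX_INEXACT;
	}
	return eflag;
}

int main(void)
{
	/* overflow + inexact raised, both enabled: overflow wins */
	printf("0x%02x\n", pick_one(EX_OVERFLOW | EX_INEXACT, 0x1f));
	return 0;
}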
// SPDX-License-Identifier: GPL-2.0 /* * arch/sparc64/math-emu/math.c * * Copyright (C) 1997,1999 Jakub Jelinek ([email protected]) * Copyright (C) 1999 David S. Miller ([email protected]) * * Emulation routines originate from soft-fp package, which is part * of glibc and has appropriate copyrights in it. */ #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/perf_event.h> #include <asm/fpumacro.h> #include <asm/ptrace.h> #include <linux/uaccess.h> #include <asm/cacheflush.h> #include "sfp-util_64.h" #include <math-emu/soft-fp.h> #include <math-emu/single.h> #include <math-emu/double.h> #include <math-emu/quad.h> /* QUAD - ftt == 3 */ #define FMOVQ 0x003 #define FNEGQ 0x007 #define FABSQ 0x00b #define FSQRTQ 0x02b #define FADDQ 0x043 #define FSUBQ 0x047 #define FMULQ 0x04b #define FDIVQ 0x04f #define FDMULQ 0x06e #define FQTOX 0x083 #define FXTOQ 0x08c #define FQTOS 0x0c7 #define FQTOD 0x0cb #define FITOQ 0x0cc #define FSTOQ 0x0cd #define FDTOQ 0x0ce #define FQTOI 0x0d3 /* SUBNORMAL - ftt == 2 */ #define FSQRTS 0x029 #define FSQRTD 0x02a #define FADDS 0x041 #define FADDD 0x042 #define FSUBS 0x045 #define FSUBD 0x046 #define FMULS 0x049 #define FMULD 0x04a #define FDIVS 0x04d #define FDIVD 0x04e #define FSMULD 0x069 #define FSTOX 0x081 #define FDTOX 0x082 #define FDTOS 0x0c6 #define FSTOD 0x0c9 #define FSTOI 0x0d1 #define FDTOI 0x0d2 #define FXTOS 0x084 /* Only Ultra-III generates this. */ #define FXTOD 0x088 /* Only Ultra-III generates this. */ #if 0 /* Optimized inline in sparc64/kernel/entry.S */ #define FITOS 0x0c4 /* Only Ultra-III generates this. */ #endif #define FITOD 0x0c8 /* Only Ultra-III generates this. */ /* FPOP2 */ #define FCMPQ 0x053 #define FCMPEQ 0x057 #define FMOVQ0 0x003 #define FMOVQ1 0x043 #define FMOVQ2 0x083 #define FMOVQ3 0x0c3 #define FMOVQI 0x103 #define FMOVQX 0x183 #define FMOVQZ 0x027 #define FMOVQLE 0x047 #define FMOVQLZ 0x067 #define FMOVQNZ 0x0a7 #define FMOVQGZ 0x0c7 #define FMOVQGE 0x0e7 #define FSR_TEM_SHIFT 23UL #define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT) #define FSR_AEXC_SHIFT 5UL #define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT) #define FSR_CEXC_SHIFT 0UL #define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT) /* All routines returning an exception to raise should detect * such exceptions _before_ rounding to be consistent with * the behavior of the hardware in the implemented cases * (and thus with the recommendations in the V9 architecture * manual). * * We return 0 if a SIGFPE should be sent, 1 otherwise. */ static inline int record_exception(struct pt_regs *regs, int eflag) { u64 fsr = current_thread_info()->xfsr[0]; int would_trap; /* Determine if this exception would have generated a trap. */ would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL; /* If trapping, we only want to signal one bit. */ if(would_trap != 0) { eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT); if((eflag & (eflag - 1)) != 0) { if(eflag & FP_EX_INVALID) eflag = FP_EX_INVALID; else if(eflag & FP_EX_OVERFLOW) eflag = FP_EX_OVERFLOW; else if(eflag & FP_EX_UNDERFLOW) eflag = FP_EX_UNDERFLOW; else if(eflag & FP_EX_DIVZERO) eflag = FP_EX_DIVZERO; else if(eflag & FP_EX_INEXACT) eflag = FP_EX_INEXACT; } } /* Set CEXC, here is the rule: * * In general all FPU ops will set one and only one * bit in the CEXC field, this is always the case * when the IEEE exception trap is enabled in TEM. 
*/ fsr &= ~(FSR_CEXC_MASK); fsr |= ((long)eflag << FSR_CEXC_SHIFT); /* Set the AEXC field, rule is: * * If a trap would not be generated, the * CEXC just generated is OR'd into the * existing value of AEXC. */ if(would_trap == 0) fsr |= ((long)eflag << FSR_AEXC_SHIFT); /* If trapping, indicate fault trap type IEEE. */ if(would_trap != 0) fsr |= (1UL << 14); current_thread_info()->xfsr[0] = fsr; /* If we will not trap, advance the program counter over * the instruction being handled. */ if(would_trap == 0) { regs->tpc = regs->tnpc; regs->tnpc += 4; } return (would_trap ? 0 : 1); } typedef union { u32 s; u64 d; u64 q[2]; } *argp; int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap) { unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn = 0; int type = 0; /* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u arg tells whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack) non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */ #define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9) int freg; static u64 zero[2] = { 0L, 0L }; int flags; FP_DECL_EX; FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR); int IR; long XR, xfsr; if (tstate & TSTATE_PRIV) die_if_kernel("unfinished/unimplemented FPop from kernel", regs); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ { switch ((insn >> 5) & 0x1ff) { /* QUAD - ftt == 3 */ case FMOVQ: case FNEGQ: case FABSQ: TYPE(3,3,0,3,0,0,0); break; case FSQRTQ: TYPE(3,3,1,3,1,0,0); break; case FADDQ: case FSUBQ: case FMULQ: case FDIVQ: TYPE(3,3,1,3,1,3,1); break; case FDMULQ: TYPE(3,3,1,2,1,2,1); break; case FQTOX: TYPE(3,2,0,3,1,0,0); break; case FXTOQ: TYPE(3,3,1,2,0,0,0); break; case FQTOS: TYPE(3,1,1,3,1,0,0); break; case FQTOD: TYPE(3,2,1,3,1,0,0); break; case FITOQ: TYPE(3,3,1,1,0,0,0); break; case FSTOQ: TYPE(3,3,1,1,1,0,0); break; case FDTOQ: TYPE(3,3,1,2,1,0,0); break; case FQTOI: TYPE(3,1,0,3,1,0,0); break; /* We can get either unimplemented or unfinished * for these cases. Pre-Niagara systems generate * unfinished fpop for SUBNORMAL cases, and Niagara * always gives unimplemented fpop for fsqrt{s,d}. 
*/ case FSQRTS: { unsigned long x = current_thread_info()->xfsr[0]; x = (x >> 14) & 0x7; TYPE(x,1,1,1,1,0,0); break; } case FSQRTD: { unsigned long x = current_thread_info()->xfsr[0]; x = (x >> 14) & 0x7; TYPE(x,2,1,2,1,0,0); break; } /* SUBNORMAL - ftt == 2 */ case FADDD: case FSUBD: case FMULD: case FDIVD: TYPE(2,2,1,2,1,2,1); break; case FADDS: case FSUBS: case FMULS: case FDIVS: TYPE(2,1,1,1,1,1,1); break; case FSMULD: TYPE(2,2,1,1,1,1,1); break; case FSTOX: TYPE(2,2,0,1,1,0,0); break; case FDTOX: TYPE(2,2,0,2,1,0,0); break; case FDTOS: TYPE(2,1,1,2,1,0,0); break; case FSTOD: TYPE(2,2,1,1,1,0,0); break; case FSTOI: TYPE(2,1,0,1,1,0,0); break; case FDTOI: TYPE(2,1,0,2,1,0,0); break; /* Only Ultra-III generates these */ case FXTOS: TYPE(2,1,1,2,0,0,0); break; case FXTOD: TYPE(2,2,1,2,0,0,0); break; #if 0 /* Optimized inline in sparc64/kernel/entry.S */ case FITOS: TYPE(2,1,1,1,0,0,0); break; #endif case FITOD: TYPE(2,2,1,1,0,0,0); break; } } else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ { IR = 2; switch ((insn >> 5) & 0x1ff) { case FCMPQ: TYPE(3,0,0,3,1,3,1); break; case FCMPEQ: TYPE(3,0,0,3,1,3,1); break; /* Now the conditional fmovq support */ case FMOVQ0: case FMOVQ1: case FMOVQ2: case FMOVQ3: /* fmovq %fccX, %fY, %fZ */ if (!((insn >> 11) & 3)) XR = current_thread_info()->xfsr[0] >> 10; else XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6)); XR &= 3; IR = 0; switch ((insn >> 14) & 0x7) { /* case 0: IR = 0; break; */ /* Never */ case 1: if (XR) IR = 1; break; /* Not Equal */ case 2: if (XR == 1 || XR == 2) IR = 1; break; /* Less or Greater */ case 3: if (XR & 1) IR = 1; break; /* Unordered or Less */ case 4: if (XR == 1) IR = 1; break; /* Less */ case 5: if (XR & 2) IR = 1; break; /* Unordered or Greater */ case 6: if (XR == 2) IR = 1; break; /* Greater */ case 7: if (XR == 3) IR = 1; break; /* Unordered */ } if ((insn >> 14) & 8) IR ^= 1; break; case FMOVQI: case FMOVQX: /* fmovq %[ix]cc, %fY, %fZ */ XR = regs->tstate >> 32; if ((insn >> 5) & 0x80) XR >>= 4; XR &= 0xf; IR = 0; freg = ((XR >> 2) ^ XR) & 2; switch ((insn >> 14) & 0x7) { /* case 0: IR = 0; break; */ /* Never */ case 1: if (XR & 4) IR = 1; break; /* Equal */ case 2: if ((XR & 4) || freg) IR = 1; break; /* Less or Equal */ case 3: if (freg) IR = 1; break; /* Less */ case 4: if (XR & 5) IR = 1; break; /* Less or Equal Unsigned */ case 5: if (XR & 1) IR = 1; break; /* Carry Set */ case 6: if (XR & 8) IR = 1; break; /* Negative */ case 7: if (XR & 2) IR = 1; break; /* Overflow Set */ } if ((insn >> 14) & 8) IR ^= 1; break; case FMOVQZ: case FMOVQLE: case FMOVQLZ: case FMOVQNZ: case FMOVQGZ: case FMOVQGE: freg = (insn >> 14) & 0x1f; if (!freg) XR = 0; else if (freg < 16) XR = regs->u_regs[freg]; else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { struct reg_window32 __user *win32; flushw_user (); win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); get_user(XR, &win32->locals[freg - 16]); } else { struct reg_window __user *win; flushw_user (); win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); get_user(XR, &win->locals[freg - 16]); } IR = 0; switch ((insn >> 10) & 3) { case 1: if (!XR) IR = 1; break; /* Register Zero */ case 2: if (XR <= 0) IR = 1; break; /* Register Less Than or Equal to Zero */ case 3: if (XR < 0) IR = 1; break; /* Register Less Than Zero */ } if ((insn >> 10) & 4) IR ^= 1; break; } if (IR == 0) { /* The fmov test was false. 
Do a nop instead */ current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK); regs->tpc = regs->tnpc; regs->tnpc += 4; return 1; } else if (IR == 1) { /* Change the instruction into plain fmovq */ insn = (insn & 0x3e00001f) | 0x81a00060; TYPE(3,3,0,3,0,0,0); } } } if (type) { argp rs1 = NULL, rs2 = NULL, rd = NULL; /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap * Type field in the %fsr to unimplemented_FPop. Nor does it * use the fp_exception_other trap. Instead it signals an * illegal instruction and leaves the FP trap type field of * the %fsr unchanged. */ if (!illegal_insn_trap) { int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7; if (ftt != (type >> 9)) goto err; } current_thread_info()->xfsr[0] &= ~0x1c000; freg = ((insn >> 14) & 0x1f); switch (type & 0x3) { case 3: if (freg & 2) { current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; goto err; } case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); case 1: rs1 = (argp)&f->regs[freg]; flags = (freg < 32) ? FPRS_DL : FPRS_DU; if (!(current_thread_info()->fpsaved[0] & flags)) rs1 = (argp)&zero; break; } switch (type & 0x7) { case 7: FP_UNPACK_QP (QA, rs1); break; case 6: FP_UNPACK_DP (DA, rs1); break; case 5: FP_UNPACK_SP (SA, rs1); break; } freg = (insn & 0x1f); switch ((type >> 3) & 0x3) { case 3: if (freg & 2) { current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; goto err; } case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); case 1: rs2 = (argp)&f->regs[freg]; flags = (freg < 32) ? FPRS_DL : FPRS_DU; if (!(current_thread_info()->fpsaved[0] & flags)) rs2 = (argp)&zero; break; } switch ((type >> 3) & 0x7) { case 7: FP_UNPACK_QP (QB, rs2); break; case 6: FP_UNPACK_DP (DB, rs2); break; case 5: FP_UNPACK_SP (SB, rs2); break; } freg = ((insn >> 25) & 0x1f); switch ((type >> 6) & 0x3) { case 3: if (freg & 2) { current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */; goto err; } case 2: freg = ((freg & 1) << 5) | (freg & 0x1e); case 1: rd = (argp)&f->regs[freg]; flags = (freg < 32) ? 
FPRS_DL : FPRS_DU; if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) { current_thread_info()->fpsaved[0] = FPRS_FEF; current_thread_info()->gsr[0] = 0; } if (!(current_thread_info()->fpsaved[0] & flags)) { if (freg < 32) memset(f->regs, 0, 32*sizeof(u32)); else memset(f->regs+32, 0, 32*sizeof(u32)); } current_thread_info()->fpsaved[0] |= flags; break; } switch ((insn >> 5) & 0x1ff) { /* + */ case FADDS: FP_ADD_S (SR, SA, SB); break; case FADDD: FP_ADD_D (DR, DA, DB); break; case FADDQ: FP_ADD_Q (QR, QA, QB); break; /* - */ case FSUBS: FP_SUB_S (SR, SA, SB); break; case FSUBD: FP_SUB_D (DR, DA, DB); break; case FSUBQ: FP_SUB_Q (QR, QA, QB); break; /* * */ case FMULS: FP_MUL_S (SR, SA, SB); break; case FSMULD: FP_CONV (D, S, 1, 1, DA, SA); FP_CONV (D, S, 1, 1, DB, SB); case FMULD: FP_MUL_D (DR, DA, DB); break; case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA); FP_CONV (Q, D, 2, 1, QB, DB); case FMULQ: FP_MUL_Q (QR, QA, QB); break; /* / */ case FDIVS: FP_DIV_S (SR, SA, SB); break; case FDIVD: FP_DIV_D (DR, DA, DB); break; case FDIVQ: FP_DIV_Q (QR, QA, QB); break; /* sqrt */ case FSQRTS: FP_SQRT_S (SR, SB); break; case FSQRTD: FP_SQRT_D (DR, DB); break; case FSQRTQ: FP_SQRT_Q (QR, QB); break; /* mov */ case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break; case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break; case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break; /* float to int */ case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break; case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break; case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break; case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break; case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break; case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break; /* int to float */ case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break; case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break; /* Only Ultra-III generates these */ case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break; case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break; #if 0 /* Optimized inline in sparc64/kernel/entry.S */ case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break; #endif case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break; /* float to float */ case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break; case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break; case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break; case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break; case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break; case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break; /* comparison */ case FCMPQ: case FCMPEQ: FP_CMP_Q(XR, QB, QA, 3); if (XR == 3 && (((insn >> 5) & 0x1ff) == FCMPEQ || FP_ISSIGNAN_Q(QA) || FP_ISSIGNAN_Q(QB))) FP_SET_EXCEPTION (FP_EX_INVALID); } if (!FP_INHIBIT_RESULTS) { switch ((type >> 6) & 0x7) { case 0: xfsr = current_thread_info()->xfsr[0]; if (XR == -1) XR = 2; switch (freg & 3) { /* fcc0, 1, 2, 3 */ case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break; case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break; case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break; case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break; } current_thread_info()->xfsr[0] = xfsr; break; case 1: rd->s = IR; break; case 2: rd->d = XR; break; case 5: FP_PACK_SP (rd, SR); break; case 6: FP_PACK_DP (rd, DR); break; case 7: FP_PACK_QP (rd, QR); break; } } if(_fex != 0) return record_exception(regs, _fex); /* Success and no exceptions detected. 
*/ current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK); regs->tpc = regs->tnpc; regs->tnpc += 4; return 1; } err: return 0; }
linux-master
arch/sparc/math-emu/math_64.c
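The conditional FMOVQ decoding in math_64.c evaluates a 2-bit %fcc value (0 equal, 1 less, 2 greater, 3 unordered) against the instruction's condition field, with bit 3 negating the result. A toy predicate that follows the same switch; the encoding here is assumed to match the code above and is for illustration only.

/* Toy %fcc predicate, following the FMOVQ0..FMOVQ3 switch above. */
#include <stdio.h>

static int fmov_taken(int cond, int fcc)
{
	int taken = 0;

	switch (cond & 7) {
	case 1: taken = (fcc != 0); break;		/* not equal */
	case 2: taken = (fcc == 1 || fcc == 2); break;	/* less or greater */
	case 3: taken = (fcc & 1); break;		/* unordered or less */
	case 4: taken = (fcc == 1); break;		/* less */
	case 5: taken = !!(fcc & 2); break;		/* unordered or greater */
	case 6: taken = (fcc == 2); break;		/* greater */
	case 7: taken = (fcc == 3); break;		/* unordered */
	}
	if (cond & 8)
		taken ^= 1;				/* negated condition forms */
	return taken;
}

int main(void)
{
	printf("%d\n", fmov_taken(4, 1));	/* "less" with fcc == less -> 1 */
	printf("%d\n", fmov_taken(6, 1));	/* "greater" with fcc == less -> 0 */
	return 0;
}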
// SPDX-License-Identifier: GPL-2.0 #include <linux/moduleloader.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/cache.h> #include <linux/if_vlan.h> #include <asm/cacheflush.h> #include <asm/ptrace.h> #include "bpf_jit_32.h" static inline bool is_simm13(unsigned int value) { return value + 0x1000 < 0x2000; } #define SEEN_DATAREF 1 /* might call external helpers */ #define SEEN_XREG 2 /* ebx is used */ #define SEEN_MEM 4 /* use mem[] for temporary storage */ #define S13(X) ((X) & 0x1fff) #define IMMED 0x00002000 #define RD(X) ((X) << 25) #define RS1(X) ((X) << 14) #define RS2(X) ((X)) #define OP(X) ((X) << 30) #define OP2(X) ((X) << 22) #define OP3(X) ((X) << 19) #define COND(X) ((X) << 25) #define F1(X) OP(X) #define F2(X, Y) (OP(X) | OP2(Y)) #define F3(X, Y) (OP(X) | OP3(Y)) #define CONDN COND(0x0) #define CONDE COND(0x1) #define CONDLE COND(0x2) #define CONDL COND(0x3) #define CONDLEU COND(0x4) #define CONDCS COND(0x5) #define CONDNEG COND(0x6) #define CONDVC COND(0x7) #define CONDA COND(0x8) #define CONDNE COND(0x9) #define CONDG COND(0xa) #define CONDGE COND(0xb) #define CONDGU COND(0xc) #define CONDCC COND(0xd) #define CONDPOS COND(0xe) #define CONDVS COND(0xf) #define CONDGEU CONDCC #define CONDLU CONDCS #define WDISP22(X) (((X) >> 2) & 0x3fffff) #define BA (F2(0, 2) | CONDA) #define BGU (F2(0, 2) | CONDGU) #define BLEU (F2(0, 2) | CONDLEU) #define BGEU (F2(0, 2) | CONDGEU) #define BLU (F2(0, 2) | CONDLU) #define BE (F2(0, 2) | CONDE) #define BNE (F2(0, 2) | CONDNE) #define BE_PTR BE #define SETHI(K, REG) \ (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) #define OR_LO(K, REG) \ (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG)) #define ADD F3(2, 0x00) #define AND F3(2, 0x01) #define ANDCC F3(2, 0x11) #define OR F3(2, 0x02) #define XOR F3(2, 0x03) #define SUB F3(2, 0x04) #define SUBCC F3(2, 0x14) #define MUL F3(2, 0x0a) /* umul */ #define DIV F3(2, 0x0e) /* udiv */ #define SLL F3(2, 0x25) #define SRL F3(2, 0x26) #define JMPL F3(2, 0x38) #define CALL F1(1) #define BR F2(0, 0x01) #define RD_Y F3(2, 0x28) #define WR_Y F3(2, 0x30) #define LD32 F3(3, 0x00) #define LD8 F3(3, 0x01) #define LD16 F3(3, 0x02) #define LD64 F3(3, 0x0b) #define ST32 F3(3, 0x04) #define LDPTR LD32 #define BASE_STACKFRAME 96 #define LD32I (LD32 | IMMED) #define LD8I (LD8 | IMMED) #define LD16I (LD16 | IMMED) #define LD64I (LD64 | IMMED) #define LDPTRI (LDPTR | IMMED) #define ST32I (ST32 | IMMED) #define emit_nop() \ do { \ *prog++ = SETHI(0, G0); \ } while (0) #define emit_neg() \ do { /* sub %g0, r_A, r_A */ \ *prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \ } while (0) #define emit_reg_move(FROM, TO) \ do { /* or %g0, FROM, TO */ \ *prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \ } while (0) #define emit_clear(REG) \ do { /* or %g0, %g0, REG */ \ *prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \ } while (0) #define emit_set_const(K, REG) \ do { /* sethi %hi(K), REG */ \ *prog++ = SETHI(K, REG); \ /* or REG, %lo(K), REG */ \ *prog++ = OR_LO(K, REG); \ } while (0) /* Emit * * OP r_A, r_X, r_A */ #define emit_alu_X(OPCODE) \ do { \ seen |= SEEN_XREG; \ *prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \ } while (0) /* Emit either: * * OP r_A, K, r_A * * or * * sethi %hi(K), r_TMP * or r_TMP, %lo(K), r_TMP * OP r_A, r_TMP, r_A * * depending upon whether K fits in a signed 13-bit * immediate instruction field. Emit nothing if K * is zero. 
*/ #define emit_alu_K(OPCODE, K) \ do { \ if (K || OPCODE == AND || OPCODE == MUL) { \ unsigned int _insn = OPCODE; \ _insn |= RS1(r_A) | RD(r_A); \ if (is_simm13(K)) { \ *prog++ = _insn | IMMED | S13(K); \ } else { \ emit_set_const(K, r_TMP); \ *prog++ = _insn | RS2(r_TMP); \ } \ } \ } while (0) #define emit_loadimm(K, DEST) \ do { \ if (is_simm13(K)) { \ /* or %g0, K, DEST */ \ *prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \ } else { \ emit_set_const(K, DEST); \ } \ } while (0) #define emit_loadptr(BASE, STRUCT, FIELD, DEST) \ do { unsigned int _off = offsetof(STRUCT, FIELD); \ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *)); \ *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \ } while (0) #define emit_load32(BASE, STRUCT, FIELD, DEST) \ do { unsigned int _off = offsetof(STRUCT, FIELD); \ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32)); \ *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \ } while (0) #define emit_load16(BASE, STRUCT, FIELD, DEST) \ do { unsigned int _off = offsetof(STRUCT, FIELD); \ BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16)); \ *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \ } while (0) #define __emit_load8(BASE, STRUCT, FIELD, DEST) \ do { unsigned int _off = offsetof(STRUCT, FIELD); \ *prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \ } while (0) #define emit_load8(BASE, STRUCT, FIELD, DEST) \ do { BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8)); \ __emit_load8(BASE, STRUCT, FIELD, DEST); \ } while (0) #define BIAS (-4) #define emit_ldmem(OFF, DEST) \ do { *prog++ = LD32I | RS1(SP) | S13(BIAS - (OFF)) | RD(DEST); \ } while (0) #define emit_stmem(OFF, SRC) \ do { *prog++ = ST32I | RS1(SP) | S13(BIAS - (OFF)) | RD(SRC); \ } while (0) #ifdef CONFIG_SMP #define emit_load_cpu(REG) \ emit_load32(G6, struct thread_info, cpu, REG) #else #define emit_load_cpu(REG) emit_clear(REG) #endif #define emit_skb_loadptr(FIELD, DEST) \ emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST) #define emit_skb_load32(FIELD, DEST) \ emit_load32(r_SKB, struct sk_buff, FIELD, DEST) #define emit_skb_load16(FIELD, DEST) \ emit_load16(r_SKB, struct sk_buff, FIELD, DEST) #define __emit_skb_load8(FIELD, DEST) \ __emit_load8(r_SKB, struct sk_buff, FIELD, DEST) #define emit_skb_load8(FIELD, DEST) \ emit_load8(r_SKB, struct sk_buff, FIELD, DEST) #define emit_jmpl(BASE, IMM_OFF, LREG) \ *prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG)) #define emit_call(FUNC) \ do { void *_here = image + addrs[i] - 8; \ unsigned int _off = (void *)(FUNC) - _here; \ *prog++ = CALL | (((_off) >> 2) & 0x3fffffff); \ emit_nop(); \ } while (0) #define emit_branch(BR_OPC, DEST) \ do { unsigned int _here = addrs[i] - 8; \ *prog++ = BR_OPC | WDISP22((DEST) - _here); \ } while (0) #define emit_branch_off(BR_OPC, OFF) \ do { *prog++ = BR_OPC | WDISP22(OFF); \ } while (0) #define emit_jump(DEST) emit_branch(BA, DEST) #define emit_read_y(REG) *prog++ = RD_Y | RD(REG) #define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0) #define emit_cmp(R1, R2) \ *prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0)) #define emit_cmpi(R1, IMM) \ *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); #define emit_btst(R1, R2) \ *prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0)) #define emit_btsti(R1, IMM) \ *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0)); #define emit_sub(R1, R2, R3) \ *prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3)) #define emit_subi(R1, IMM, R3) \ *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3)) #define emit_add(R1, R2, R3) \ *prog++ = (ADD | 
RS1(R1) | RS2(R2) | RD(R3)) #define emit_addi(R1, IMM, R3) \ *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3)) #define emit_and(R1, R2, R3) \ *prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3)) #define emit_andi(R1, IMM, R3) \ *prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3)) #define emit_alloc_stack(SZ) \ *prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP)) #define emit_release_stack(SZ) \ *prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP)) /* A note about branch offset calculations. The addrs[] array, * indexed by BPF instruction, records the address after all the * sparc instructions emitted for that BPF instruction. * * The most common case is to emit a branch at the end of such * a code sequence. So this would be two instructions, the * branch and it's delay slot. * * Therefore by default the branch emitters calculate the branch * offset field as: * * destination - (addrs[i] - 8) * * This "addrs[i] - 8" is the address of the branch itself or * what "." would be in assembler notation. The "8" part is * how we take into consideration the branch and it's delay * slot mentioned above. * * Sometimes we need to emit a branch earlier in the code * sequence. And in these situations we adjust "destination" * to accommodate this difference. For example, if we needed * to emit a branch (and it's delay slot) right before the * final instruction emitted for a BPF opcode, we'd use * "destination + 4" instead of just plain "destination" above. * * This is why you see all of these funny emit_branch() and * emit_jump() calls with adjusted offsets. */ void bpf_jit_compile(struct bpf_prog *fp) { unsigned int cleanup_addr, proglen, oldproglen = 0; u32 temp[8], *prog, *func, seen = 0, pass; const struct sock_filter *filter = fp->insns; int i, flen = fp->len, pc_ret0 = -1; unsigned int *addrs; void *image; if (!bpf_jit_enable) return; addrs = kmalloc_array(flen, sizeof(*addrs), GFP_KERNEL); if (addrs == NULL) return; /* Before first pass, make a rough estimation of addrs[] * each bpf instruction is translated to less than 64 bytes */ for (proglen = 0, i = 0; i < flen; i++) { proglen += 64; addrs[i] = proglen; } cleanup_addr = proglen; /* epilogue address */ image = NULL; for (pass = 0; pass < 10; pass++) { u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; /* Prologue */ if (seen_or_pass0) { if (seen_or_pass0 & SEEN_MEM) { unsigned int sz = BASE_STACKFRAME; sz += BPF_MEMWORDS * sizeof(u32); emit_alloc_stack(sz); } /* Make sure we dont leek kernel memory. */ if (seen_or_pass0 & SEEN_XREG) emit_clear(r_X); /* If this filter needs to access skb data, * load %o4 and %o5 with: * %o4 = skb->len - skb->data_len * %o5 = skb->data * And also back up %o7 into r_saved_O7 so we can * invoke the stubs using 'call'. */ if (seen_or_pass0 & SEEN_DATAREF) { emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN); emit_load32(r_SKB, struct sk_buff, data_len, r_TMP); emit_sub(r_HEADLEN, r_TMP, r_HEADLEN); emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA); } } emit_reg_move(O7, r_saved_O7); /* Make sure we dont leak kernel information to the user. 
*/ if (bpf_needs_clear_a(&filter[0])) emit_clear(r_A); /* A = 0 */ for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; unsigned int t_offset; unsigned int f_offset; u32 t_op, f_op; u16 code = bpf_anc_helper(&filter[i]); int ilen; switch (code) { case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ emit_alu_X(ADD); break; case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ emit_alu_K(ADD, K); break; case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ emit_alu_X(SUB); break; case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ emit_alu_K(SUB, K); break; case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ emit_alu_X(AND); break; case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ emit_alu_K(AND, K); break; case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ emit_alu_X(OR); break; case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ emit_alu_K(OR, K); break; case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ case BPF_ALU | BPF_XOR | BPF_X: emit_alu_X(XOR); break; case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ emit_alu_K(XOR, K); break; case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */ emit_alu_X(SLL); break; case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ emit_alu_K(SLL, K); break; case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */ emit_alu_X(SRL); break; case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */ emit_alu_K(SRL, K); break; case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ emit_alu_X(MUL); break; case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ emit_alu_K(MUL, K); break; case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/ if (K == 1) break; emit_write_y(G0); /* The Sparc v8 architecture requires * three instructions between a %y * register write and the first use. */ emit_nop(); emit_nop(); emit_nop(); emit_alu_K(DIV, K); break; case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ emit_cmpi(r_X, 0); if (pc_ret0 > 0) { t_offset = addrs[pc_ret0 - 1]; emit_branch(BE, t_offset + 20); emit_nop(); /* delay slot */ } else { emit_branch_off(BNE, 16); emit_nop(); emit_jump(cleanup_addr + 20); emit_clear(r_A); } emit_write_y(G0); /* The Sparc v8 architecture requires * three instructions between a %y * register write and the first use. 
*/ emit_nop(); emit_nop(); emit_nop(); emit_alu_X(DIV); break; case BPF_ALU | BPF_NEG: emit_neg(); break; case BPF_RET | BPF_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; emit_clear(r_A); } else { emit_loadimm(K, r_A); } fallthrough; case BPF_RET | BPF_A: if (seen_or_pass0) { if (i != flen - 1) { emit_jump(cleanup_addr); emit_nop(); break; } if (seen_or_pass0 & SEEN_MEM) { unsigned int sz = BASE_STACKFRAME; sz += BPF_MEMWORDS * sizeof(u32); emit_release_stack(sz); } } /* jmpl %r_saved_O7 + 8, %g0 */ emit_jmpl(r_saved_O7, 8, G0); emit_reg_move(r_A, O0); /* delay slot */ break; case BPF_MISC | BPF_TAX: seen |= SEEN_XREG; emit_reg_move(r_A, r_X); break; case BPF_MISC | BPF_TXA: seen |= SEEN_XREG; emit_reg_move(r_X, r_A); break; case BPF_ANC | SKF_AD_CPU: emit_load_cpu(r_A); break; case BPF_ANC | SKF_AD_PROTOCOL: emit_skb_load16(protocol, r_A); break; case BPF_ANC | SKF_AD_PKTTYPE: __emit_skb_load8(__pkt_type_offset, r_A); emit_andi(r_A, PKT_TYPE_MAX, r_A); emit_alu_K(SRL, 5); break; case BPF_ANC | SKF_AD_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BE_PTR, cleanup_addr + 4); emit_nop(); emit_load32(r_A, struct net_device, ifindex, r_A); break; case BPF_ANC | SKF_AD_MARK: emit_skb_load32(mark, r_A); break; case BPF_ANC | SKF_AD_QUEUE: emit_skb_load16(queue_mapping, r_A); break; case BPF_ANC | SKF_AD_HATYPE: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BE_PTR, cleanup_addr + 4); emit_nop(); emit_load16(r_A, struct net_device, type, r_A); break; case BPF_ANC | SKF_AD_RXHASH: emit_skb_load32(hash, r_A); break; case BPF_ANC | SKF_AD_VLAN_TAG: emit_skb_load16(vlan_tci, r_A); break; case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: emit_skb_load32(vlan_all, r_A); emit_cmpi(r_A, 0); emit_branch_off(BE, 12); emit_nop(); emit_loadimm(1, r_A); break; case BPF_LD | BPF_W | BPF_LEN: emit_skb_load32(len, r_A); break; case BPF_LDX | BPF_W | BPF_LEN: emit_skb_load32(len, r_X); break; case BPF_LD | BPF_IMM: emit_loadimm(K, r_A); break; case BPF_LDX | BPF_IMM: emit_loadimm(K, r_X); break; case BPF_LD | BPF_MEM: seen |= SEEN_MEM; emit_ldmem(K * 4, r_A); break; case BPF_LDX | BPF_MEM: seen |= SEEN_MEM | SEEN_XREG; emit_ldmem(K * 4, r_X); break; case BPF_ST: seen |= SEEN_MEM; emit_stmem(K * 4, r_A); break; case BPF_STX: seen |= SEEN_MEM | SEEN_XREG; emit_stmem(K * 4, r_X); break; #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); common_load: seen |= SEEN_DATAREF; emit_loadimm(K, r_OFF); emit_call(func); break; case BPF_LD | BPF_H | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); goto common_load; case BPF_LD | BPF_B | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); goto common_load; case BPF_LDX | BPF_B | BPF_MSH: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); goto common_load; case BPF_LD | BPF_W | BPF_IND: func = bpf_jit_load_word; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; if (K) { if (is_simm13(K)) { emit_addi(r_X, K, r_OFF); } else { emit_loadimm(K, r_TMP); emit_add(r_X, r_TMP, r_OFF); } } else { emit_reg_move(r_X, r_OFF); } emit_call(func); break; case BPF_LD | BPF_H | BPF_IND: func = bpf_jit_load_half; goto common_load_ind; case BPF_LD | BPF_B | BPF_IND: func = bpf_jit_load_byte; goto common_load_ind; case BPF_JMP | BPF_JA: emit_jump(addrs[i + K]); emit_nop(); break; #define COND_SEL(CODE, TOP, FOP) \ case CODE: \ t_op = TOP; \ f_op = FOP; \ goto cond_branch COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU); COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU); COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE); COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE); COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU); COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU); COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE); COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE); cond_branch: f_offset = addrs[i + filter[i].jf]; t_offset = addrs[i + filter[i].jt]; /* same targets, can avoid doing the test :) */ if (filter[i].jt == filter[i].jf) { emit_jump(t_offset); emit_nop(); break; } switch (code) { case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: seen |= SEEN_XREG; emit_cmp(r_A, r_X); break; case BPF_JMP | BPF_JSET | BPF_X: seen |= SEEN_XREG; emit_btst(r_A, r_X); break; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: if (is_simm13(K)) { emit_cmpi(r_A, K); } else { emit_loadimm(K, r_TMP); emit_cmp(r_A, r_TMP); } break; case BPF_JMP | BPF_JSET | BPF_K: if (is_simm13(K)) { emit_btsti(r_A, K); } else { emit_loadimm(K, r_TMP); emit_btst(r_A, r_TMP); } break; } if (filter[i].jt != 0) { if (filter[i].jf) t_offset += 8; emit_branch(t_op, t_offset); emit_nop(); /* delay slot */ if (filter[i].jf) { emit_jump(f_offset); emit_nop(); } break; } emit_branch(f_op, f_offset); emit_nop(); /* delay slot */ break; default: /* hmm, too complex filter, give up with jit compiler */ goto out; } ilen = (void *) prog - (void *) temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { pr_err("bpb_jit_compile fatal error\n"); kfree(addrs); module_memfree(image); return; } memcpy(image + proglen, temp, ilen); } proglen += ilen; addrs[i] = proglen; prog = temp; } /* last bpf instruction is always a RET : * use it to give the cleanup instruction(s) addr */ cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */ if (seen_or_pass0 & SEEN_MEM) cleanup_addr -= 4; /* add %sp, X, %sp; */ if (image) { if (proglen != oldproglen) pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); break; } if (proglen == oldproglen) { image = module_alloc(proglen); if (!image) goto out; } oldproglen = proglen; } if (bpf_jit_enable > 1) bpf_jit_dump(flen, proglen, pass + 1, image); if (image) { fp->bpf_func = (void *)image; fp->jited = 1; } out: kfree(addrs); return; } void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) 
module_memfree(fp->bpf_func); bpf_prog_unlock_free(fp); }
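/*
 * A minimal, host-side sketch of the branch-offset rule described in the
 * "note about branch offset calculations" comment above: addrs[i] records
 * the byte offset just past all sparc code emitted for BPF insn i, the
 * branch plus its delay slot occupy the last 8 bytes of that sequence, so
 * the displacement field is built from "destination - (addrs[i] - 8)".
 * wdisp22() mirrors the WDISP22() field packing used by emit_branch();
 * branch_field() and the sample numbers are illustrative only and are not
 * part of the kernel file above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t wdisp22(int32_t byte_off)
{
        return ((uint32_t)byte_off >> 2) & 0x3fffff;
}

static uint32_t branch_field(unsigned int addrs_i, unsigned int dest)
{
        /* "." in assembler terms is addrs_i - 8: the branch instruction. */
        return wdisp22((int32_t)(dest - (addrs_i - 8)));
}

int main(void)
{
        /* Branch sitting at byte 24 (addrs[i] == 32), target at byte 64:
         * 40 bytes forward, i.e. a word displacement of 10. */
        assert(branch_field(32, 64) == 10);
        /* Backward branch to byte 16: -2 words, masked to 22 bits. */
        assert(branch_field(32, 16) == 0x3ffffe);
        printf("ok\n");
        return 0;
}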
linux-master
arch/sparc/net/bpf_jit_comp_32.c
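/*
 * Illustrative model of the sizing loop in bpf_jit_compile() above: pass 0
 * assumes a worst case of 64 bytes per BPF instruction, later passes
 * re-measure using the offsets produced by the previous pass, and the loop
 * stops once the total size no longer changes; only then is the image
 * allocated and filled.  The branch_to[]/insn_size() "instruction set"
 * below is made up purely to give the loop something to converge on; it is
 * not the SPARC encoding and does not appear in the kernel source.
 */
#include <stdio.h>

#define N 6

static const int branch_to[N] = { -1, 5, -1, -1, -1, 1 };

static unsigned int insn_size(int i, const unsigned int *addrs)
{
        long off;

        if (branch_to[i] < 0)
                return 4;
        off = (long)addrs[branch_to[i]] - (long)addrs[i];
        return (off >= -128 && off < 128) ? 4 : 8;      /* short vs. long form */
}

int main(void)
{
        unsigned int addrs[N], proglen, oldproglen = 0;
        int i, pass;

        proglen = 0;
        for (i = 0; i < N; i++) {               /* pass 0: rough estimate */
                proglen += 64;
                addrs[i] = proglen;
        }

        for (pass = 0; pass < 10; pass++) {
                proglen = 0;
                for (i = 0; i < N; i++) {
                        proglen += insn_size(i, addrs);
                        addrs[i] = proglen;
                }
                printf("pass %d: %u bytes\n", pass + 1, proglen);
                if (proglen == oldproglen)
                        break;                  /* converged: safe to emit */
                oldproglen = proglen;
        }
        return 0;
}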
// SPDX-License-Identifier: GPL-2.0 #include <linux/moduleloader.h> #include <linux/workqueue.h> #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/cache.h> #include <linux/if_vlan.h> #include <asm/cacheflush.h> #include <asm/ptrace.h> #include "bpf_jit_64.h" static inline bool is_simm13(unsigned int value) { return value + 0x1000 < 0x2000; } static inline bool is_simm10(unsigned int value) { return value + 0x200 < 0x400; } static inline bool is_simm5(unsigned int value) { return value + 0x10 < 0x20; } static inline bool is_sethi(unsigned int value) { return (value & ~0x3fffff) == 0; } static void bpf_flush_icache(void *start_, void *end_) { /* Cheetah's I-cache is fully coherent. */ if (tlb_type == spitfire) { unsigned long start = (unsigned long) start_; unsigned long end = (unsigned long) end_; start &= ~7UL; end = (end + 7UL) & ~7UL; while (start < end) { flushi(start); start += 32; } } } #define S13(X) ((X) & 0x1fff) #define S5(X) ((X) & 0x1f) #define IMMED 0x00002000 #define RD(X) ((X) << 25) #define RS1(X) ((X) << 14) #define RS2(X) ((X)) #define OP(X) ((X) << 30) #define OP2(X) ((X) << 22) #define OP3(X) ((X) << 19) #define COND(X) (((X) & 0xf) << 25) #define CBCOND(X) (((X) & 0x1f) << 25) #define F1(X) OP(X) #define F2(X, Y) (OP(X) | OP2(Y)) #define F3(X, Y) (OP(X) | OP3(Y)) #define ASI(X) (((X) & 0xff) << 5) #define CONDN COND(0x0) #define CONDE COND(0x1) #define CONDLE COND(0x2) #define CONDL COND(0x3) #define CONDLEU COND(0x4) #define CONDCS COND(0x5) #define CONDNEG COND(0x6) #define CONDVC COND(0x7) #define CONDA COND(0x8) #define CONDNE COND(0x9) #define CONDG COND(0xa) #define CONDGE COND(0xb) #define CONDGU COND(0xc) #define CONDCC COND(0xd) #define CONDPOS COND(0xe) #define CONDVS COND(0xf) #define CONDGEU CONDCC #define CONDLU CONDCS #define WDISP22(X) (((X) >> 2) & 0x3fffff) #define WDISP19(X) (((X) >> 2) & 0x7ffff) /* The 10-bit branch displacement for CBCOND is split into two fields */ static u32 WDISP10(u32 off) { u32 ret = ((off >> 2) & 0xff) << 5; ret |= ((off >> (2 + 8)) & 0x03) << 19; return ret; } #define CBCONDE CBCOND(0x09) #define CBCONDLE CBCOND(0x0a) #define CBCONDL CBCOND(0x0b) #define CBCONDLEU CBCOND(0x0c) #define CBCONDCS CBCOND(0x0d) #define CBCONDN CBCOND(0x0e) #define CBCONDVS CBCOND(0x0f) #define CBCONDNE CBCOND(0x19) #define CBCONDG CBCOND(0x1a) #define CBCONDGE CBCOND(0x1b) #define CBCONDGU CBCOND(0x1c) #define CBCONDCC CBCOND(0x1d) #define CBCONDPOS CBCOND(0x1e) #define CBCONDVC CBCOND(0x1f) #define CBCONDGEU CBCONDCC #define CBCONDLU CBCONDCS #define ANNUL (1 << 29) #define XCC (1 << 21) #define BRANCH (F2(0, 1) | XCC) #define CBCOND_OP (F2(0, 3) | XCC) #define BA (BRANCH | CONDA) #define BG (BRANCH | CONDG) #define BL (BRANCH | CONDL) #define BLE (BRANCH | CONDLE) #define BGU (BRANCH | CONDGU) #define BLEU (BRANCH | CONDLEU) #define BGE (BRANCH | CONDGE) #define BGEU (BRANCH | CONDGEU) #define BLU (BRANCH | CONDLU) #define BE (BRANCH | CONDE) #define BNE (BRANCH | CONDNE) #define SETHI(K, REG) \ (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff)) #define OR_LO(K, REG) \ (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG)) #define ADD F3(2, 0x00) #define AND F3(2, 0x01) #define ANDCC F3(2, 0x11) #define OR F3(2, 0x02) #define XOR F3(2, 0x03) #define SUB F3(2, 0x04) #define SUBCC F3(2, 0x14) #define MUL F3(2, 0x0a) #define MULX F3(2, 0x09) #define UDIVX F3(2, 0x0d) #define DIV F3(2, 0x0e) #define SLL F3(2, 0x25) #define SLLX (F3(2, 0x25)|(1<<12)) #define SRA F3(2, 0x27) #define SRAX (F3(2, 
0x27)|(1<<12)) #define SRL F3(2, 0x26) #define SRLX (F3(2, 0x26)|(1<<12)) #define JMPL F3(2, 0x38) #define SAVE F3(2, 0x3c) #define RESTORE F3(2, 0x3d) #define CALL F1(1) #define BR F2(0, 0x01) #define RD_Y F3(2, 0x28) #define WR_Y F3(2, 0x30) #define LD32 F3(3, 0x00) #define LD8 F3(3, 0x01) #define LD16 F3(3, 0x02) #define LD64 F3(3, 0x0b) #define LD64A F3(3, 0x1b) #define ST8 F3(3, 0x05) #define ST16 F3(3, 0x06) #define ST32 F3(3, 0x04) #define ST64 F3(3, 0x0e) #define CAS F3(3, 0x3c) #define CASX F3(3, 0x3e) #define LDPTR LD64 #define BASE_STACKFRAME 176 #define LD32I (LD32 | IMMED) #define LD8I (LD8 | IMMED) #define LD16I (LD16 | IMMED) #define LD64I (LD64 | IMMED) #define LDPTRI (LDPTR | IMMED) #define ST32I (ST32 | IMMED) struct jit_ctx { struct bpf_prog *prog; unsigned int *offset; int idx; int epilogue_offset; bool tmp_1_used; bool tmp_2_used; bool tmp_3_used; bool saw_frame_pointer; bool saw_call; bool saw_tail_call; u32 *image; }; #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) #define TMP_REG_3 (MAX_BPF_JIT_REG + 2) /* Map BPF registers to SPARC registers */ static const int bpf2sparc[] = { /* return value from in-kernel function, and exit value from eBPF */ [BPF_REG_0] = O5, /* arguments from eBPF program to in-kernel function */ [BPF_REG_1] = O0, [BPF_REG_2] = O1, [BPF_REG_3] = O2, [BPF_REG_4] = O3, [BPF_REG_5] = O4, /* callee saved registers that in-kernel function will preserve */ [BPF_REG_6] = L0, [BPF_REG_7] = L1, [BPF_REG_8] = L2, [BPF_REG_9] = L3, /* read-only frame pointer to access stack */ [BPF_REG_FP] = L6, [BPF_REG_AX] = G7, /* temporary register for BPF JIT */ [TMP_REG_1] = G1, [TMP_REG_2] = G2, [TMP_REG_3] = G3, }; static void emit(const u32 insn, struct jit_ctx *ctx) { if (ctx->image != NULL) ctx->image[ctx->idx] = insn; ctx->idx++; } static void emit_call(u32 *func, struct jit_ctx *ctx) { if (ctx->image != NULL) { void *here = &ctx->image[ctx->idx]; unsigned int off; off = (void *)func - here; ctx->image[ctx->idx] = CALL | ((off >> 2) & 0x3fffffff); } ctx->idx++; } static void emit_nop(struct jit_ctx *ctx) { emit(SETHI(0, G0), ctx); } static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx) { emit(OR | RS1(G0) | RS2(from) | RD(to), ctx); } /* Emit 32-bit constant, zero extended. */ static void emit_set_const(s32 K, u32 reg, struct jit_ctx *ctx) { emit(SETHI(K, reg), ctx); emit(OR_LO(K, reg), ctx); } /* Emit 32-bit constant, sign extended. 
*/ static void emit_set_const_sext(s32 K, u32 reg, struct jit_ctx *ctx) { if (K >= 0) { emit(SETHI(K, reg), ctx); emit(OR_LO(K, reg), ctx); } else { u32 hbits = ~(u32) K; u32 lbits = -0x400 | (u32) K; emit(SETHI(hbits, reg), ctx); emit(XOR | IMMED | RS1(reg) | S13(lbits) | RD(reg), ctx); } } static void emit_alu(u32 opcode, u32 src, u32 dst, struct jit_ctx *ctx) { emit(opcode | RS1(dst) | RS2(src) | RD(dst), ctx); } static void emit_alu3(u32 opcode, u32 a, u32 b, u32 c, struct jit_ctx *ctx) { emit(opcode | RS1(a) | RS2(b) | RD(c), ctx); } static void emit_alu_K(unsigned int opcode, unsigned int dst, unsigned int imm, struct jit_ctx *ctx) { bool small_immed = is_simm13(imm); unsigned int insn = opcode; insn |= RS1(dst) | RD(dst); if (small_immed) { emit(insn | IMMED | S13(imm), ctx); } else { unsigned int tmp = bpf2sparc[TMP_REG_1]; ctx->tmp_1_used = true; emit_set_const_sext(imm, tmp, ctx); emit(insn | RS2(tmp), ctx); } } static void emit_alu3_K(unsigned int opcode, unsigned int src, unsigned int imm, unsigned int dst, struct jit_ctx *ctx) { bool small_immed = is_simm13(imm); unsigned int insn = opcode; insn |= RS1(src) | RD(dst); if (small_immed) { emit(insn | IMMED | S13(imm), ctx); } else { unsigned int tmp = bpf2sparc[TMP_REG_1]; ctx->tmp_1_used = true; emit_set_const_sext(imm, tmp, ctx); emit(insn | RS2(tmp), ctx); } } static void emit_loadimm32(s32 K, unsigned int dest, struct jit_ctx *ctx) { if (K >= 0 && is_simm13(K)) { /* or %g0, K, DEST */ emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); } else { emit_set_const(K, dest, ctx); } } static void emit_loadimm(s32 K, unsigned int dest, struct jit_ctx *ctx) { if (is_simm13(K)) { /* or %g0, K, DEST */ emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); } else { emit_set_const(K, dest, ctx); } } static void emit_loadimm_sext(s32 K, unsigned int dest, struct jit_ctx *ctx) { if (is_simm13(K)) { /* or %g0, K, DEST */ emit(OR | IMMED | RS1(G0) | S13(K) | RD(dest), ctx); } else { emit_set_const_sext(K, dest, ctx); } } static void analyze_64bit_constant(u32 high_bits, u32 low_bits, int *hbsp, int *lbsp, int *abbasp) { int lowest_bit_set, highest_bit_set, all_bits_between_are_set; int i; lowest_bit_set = highest_bit_set = -1; i = 0; do { if ((lowest_bit_set == -1) && ((low_bits >> i) & 1)) lowest_bit_set = i; if ((highest_bit_set == -1) && ((high_bits >> (32 - i - 1)) & 1)) highest_bit_set = (64 - i - 1); } while (++i < 32 && (highest_bit_set == -1 || lowest_bit_set == -1)); if (i == 32) { i = 0; do { if (lowest_bit_set == -1 && ((high_bits >> i) & 1)) lowest_bit_set = i + 32; if (highest_bit_set == -1 && ((low_bits >> (32 - i - 1)) & 1)) highest_bit_set = 32 - i - 1; } while (++i < 32 && (highest_bit_set == -1 || lowest_bit_set == -1)); } all_bits_between_are_set = 1; for (i = lowest_bit_set; i <= highest_bit_set; i++) { if (i < 32) { if ((low_bits & (1 << i)) != 0) continue; } else { if ((high_bits & (1 << (i - 32))) != 0) continue; } all_bits_between_are_set = 0; break; } *hbsp = highest_bit_set; *lbsp = lowest_bit_set; *abbasp = all_bits_between_are_set; } static unsigned long create_simple_focus_bits(unsigned long high_bits, unsigned long low_bits, int lowest_bit_set, int shift) { long hi, lo; if (lowest_bit_set < 32) { lo = (low_bits >> lowest_bit_set) << shift; hi = ((high_bits << (32 - lowest_bit_set)) << shift); } else { lo = 0; hi = ((high_bits >> (lowest_bit_set - 32)) << shift); } return hi | lo; } static bool const64_is_2insns(unsigned long high_bits, unsigned long low_bits) { int highest_bit_set, lowest_bit_set, 
all_bits_between_are_set; if (high_bits == 0 || high_bits == 0xffffffff) return true; analyze_64bit_constant(high_bits, low_bits, &highest_bit_set, &lowest_bit_set, &all_bits_between_are_set); if ((highest_bit_set == 63 || lowest_bit_set == 0) && all_bits_between_are_set != 0) return true; if (highest_bit_set - lowest_bit_set < 21) return true; return false; } static void sparc_emit_set_const64_quick2(unsigned long high_bits, unsigned long low_imm, unsigned int dest, int shift_count, struct jit_ctx *ctx) { emit_loadimm32(high_bits, dest, ctx); /* Now shift it up into place. */ emit_alu_K(SLLX, dest, shift_count, ctx); /* If there is a low immediate part piece, finish up by * putting that in as well. */ if (low_imm != 0) emit(OR | IMMED | RS1(dest) | S13(low_imm) | RD(dest), ctx); } static void emit_loadimm64(u64 K, unsigned int dest, struct jit_ctx *ctx) { int all_bits_between_are_set, lowest_bit_set, highest_bit_set; unsigned int tmp = bpf2sparc[TMP_REG_1]; u32 low_bits = (K & 0xffffffff); u32 high_bits = (K >> 32); /* These two tests also take care of all of the one * instruction cases. */ if (high_bits == 0xffffffff && (low_bits & 0x80000000)) return emit_loadimm_sext(K, dest, ctx); if (high_bits == 0x00000000) return emit_loadimm32(K, dest, ctx); analyze_64bit_constant(high_bits, low_bits, &highest_bit_set, &lowest_bit_set, &all_bits_between_are_set); /* 1) mov -1, %reg * sllx %reg, shift, %reg * 2) mov -1, %reg * srlx %reg, shift, %reg * 3) mov some_small_const, %reg * sllx %reg, shift, %reg */ if (((highest_bit_set == 63 || lowest_bit_set == 0) && all_bits_between_are_set != 0) || ((highest_bit_set - lowest_bit_set) < 12)) { int shift = lowest_bit_set; long the_const = -1; if ((highest_bit_set != 63 && lowest_bit_set != 0) || all_bits_between_are_set == 0) { the_const = create_simple_focus_bits(high_bits, low_bits, lowest_bit_set, 0); } else if (lowest_bit_set == 0) shift = -(63 - highest_bit_set); emit(OR | IMMED | RS1(G0) | S13(the_const) | RD(dest), ctx); if (shift > 0) emit_alu_K(SLLX, dest, shift, ctx); else if (shift < 0) emit_alu_K(SRLX, dest, -shift, ctx); return; } /* Now a range of 22 or less bits set somewhere. * 1) sethi %hi(focus_bits), %reg * sllx %reg, shift, %reg * 2) sethi %hi(focus_bits), %reg * srlx %reg, shift, %reg */ if ((highest_bit_set - lowest_bit_set) < 21) { unsigned long focus_bits = create_simple_focus_bits(high_bits, low_bits, lowest_bit_set, 10); emit(SETHI(focus_bits, dest), ctx); /* If lowest_bit_set == 10 then a sethi alone could * have done it. */ if (lowest_bit_set < 10) emit_alu_K(SRLX, dest, 10 - lowest_bit_set, ctx); else if (lowest_bit_set > 10) emit_alu_K(SLLX, dest, lowest_bit_set - 10, ctx); return; } /* Ok, now 3 instruction sequences. */ if (low_bits == 0) { emit_loadimm32(high_bits, dest, ctx); emit_alu_K(SLLX, dest, 32, ctx); return; } /* We may be able to do something quick * when the constant is negated, so try that. */ if (const64_is_2insns((~high_bits) & 0xffffffff, (~low_bits) & 0xfffffc00)) { /* NOTE: The trailing bits get XOR'd so we need the * non-negated bits, not the negated ones. 
*/ unsigned long trailing_bits = low_bits & 0x3ff; if ((((~high_bits) & 0xffffffff) == 0 && ((~low_bits) & 0x80000000) == 0) || (((~high_bits) & 0xffffffff) == 0xffffffff && ((~low_bits) & 0x80000000) != 0)) { unsigned long fast_int = (~low_bits & 0xffffffff); if ((is_sethi(fast_int) && (~high_bits & 0xffffffff) == 0)) { emit(SETHI(fast_int, dest), ctx); } else if (is_simm13(fast_int)) { emit(OR | IMMED | RS1(G0) | S13(fast_int) | RD(dest), ctx); } else { emit_loadimm64(fast_int, dest, ctx); } } else { u64 n = ((~low_bits) & 0xfffffc00) | (((unsigned long)((~high_bits) & 0xffffffff))<<32); emit_loadimm64(n, dest, ctx); } low_bits = -0x400 | trailing_bits; emit(XOR | IMMED | RS1(dest) | S13(low_bits) | RD(dest), ctx); return; } /* 1) sethi %hi(xxx), %reg * or %reg, %lo(xxx), %reg * sllx %reg, yyy, %reg */ if ((highest_bit_set - lowest_bit_set) < 32) { unsigned long focus_bits = create_simple_focus_bits(high_bits, low_bits, lowest_bit_set, 0); /* So what we know is that the set bits straddle the * middle of the 64-bit word. */ sparc_emit_set_const64_quick2(focus_bits, 0, dest, lowest_bit_set, ctx); return; } /* 1) sethi %hi(high_bits), %reg * or %reg, %lo(high_bits), %reg * sllx %reg, 32, %reg * or %reg, low_bits, %reg */ if (is_simm13(low_bits) && ((int)low_bits > 0)) { sparc_emit_set_const64_quick2(high_bits, low_bits, dest, 32, ctx); return; } /* Oh well, we tried... Do a full 64-bit decomposition. */ ctx->tmp_1_used = true; emit_loadimm32(high_bits, tmp, ctx); emit_loadimm32(low_bits, dest, ctx); emit_alu_K(SLLX, tmp, 32, ctx); emit(OR | RS1(dest) | RS2(tmp) | RD(dest), ctx); } static void emit_branch(unsigned int br_opc, unsigned int from_idx, unsigned int to_idx, struct jit_ctx *ctx) { unsigned int off = to_idx - from_idx; if (br_opc & XCC) emit(br_opc | WDISP19(off << 2), ctx); else emit(br_opc | WDISP22(off << 2), ctx); } static void emit_cbcond(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx, const u8 dst, const u8 src, struct jit_ctx *ctx) { unsigned int off = to_idx - from_idx; emit(cb_opc | WDISP10(off << 2) | RS1(dst) | RS2(src), ctx); } static void emit_cbcondi(unsigned int cb_opc, unsigned int from_idx, unsigned int to_idx, const u8 dst, s32 imm, struct jit_ctx *ctx) { unsigned int off = to_idx - from_idx; emit(cb_opc | IMMED | WDISP10(off << 2) | RS1(dst) | S5(imm), ctx); } #define emit_read_y(REG, CTX) emit(RD_Y | RD(REG), CTX) #define emit_write_y(REG, CTX) emit(WR_Y | IMMED | RS1(REG) | S13(0), CTX) #define emit_cmp(R1, R2, CTX) \ emit(SUBCC | RS1(R1) | RS2(R2) | RD(G0), CTX) #define emit_cmpi(R1, IMM, CTX) \ emit(SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX) #define emit_btst(R1, R2, CTX) \ emit(ANDCC | RS1(R1) | RS2(R2) | RD(G0), CTX) #define emit_btsti(R1, IMM, CTX) \ emit(ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0), CTX) static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, const s32 imm, bool is_imm, int branch_dst, struct jit_ctx *ctx) { bool use_cbcond = (sparc64_elf_hwcap & AV_SPARC_CBCOND) != 0; const u8 tmp = bpf2sparc[TMP_REG_1]; branch_dst = ctx->offset[branch_dst]; if (!is_simm10(branch_dst - ctx->idx) || BPF_OP(code) == BPF_JSET) use_cbcond = false; if (is_imm) { bool fits = true; if (use_cbcond) { if (!is_simm5(imm)) fits = false; } else if (!is_simm13(imm)) { fits = false; } if (!fits) { ctx->tmp_1_used = true; emit_loadimm_sext(imm, tmp, ctx); src = tmp; is_imm = false; } } if (!use_cbcond) { u32 br_opcode; if (BPF_OP(code) == BPF_JSET) { if (is_imm) emit_btsti(dst, imm, ctx); else emit_btst(dst, src, ctx); } else { if 
(is_imm) emit_cmpi(dst, imm, ctx); else emit_cmp(dst, src, ctx); } switch (BPF_OP(code)) { case BPF_JEQ: br_opcode = BE; break; case BPF_JGT: br_opcode = BGU; break; case BPF_JLT: br_opcode = BLU; break; case BPF_JGE: br_opcode = BGEU; break; case BPF_JLE: br_opcode = BLEU; break; case BPF_JSET: case BPF_JNE: br_opcode = BNE; break; case BPF_JSGT: br_opcode = BG; break; case BPF_JSLT: br_opcode = BL; break; case BPF_JSGE: br_opcode = BGE; break; case BPF_JSLE: br_opcode = BLE; break; default: /* Make sure we dont leak kernel information to the * user. */ return -EFAULT; } emit_branch(br_opcode, ctx->idx, branch_dst, ctx); emit_nop(ctx); } else { u32 cbcond_opcode; switch (BPF_OP(code)) { case BPF_JEQ: cbcond_opcode = CBCONDE; break; case BPF_JGT: cbcond_opcode = CBCONDGU; break; case BPF_JLT: cbcond_opcode = CBCONDLU; break; case BPF_JGE: cbcond_opcode = CBCONDGEU; break; case BPF_JLE: cbcond_opcode = CBCONDLEU; break; case BPF_JNE: cbcond_opcode = CBCONDNE; break; case BPF_JSGT: cbcond_opcode = CBCONDG; break; case BPF_JSLT: cbcond_opcode = CBCONDL; break; case BPF_JSGE: cbcond_opcode = CBCONDGE; break; case BPF_JSLE: cbcond_opcode = CBCONDLE; break; default: /* Make sure we dont leak kernel information to the * user. */ return -EFAULT; } cbcond_opcode |= CBCOND_OP; if (is_imm) emit_cbcondi(cbcond_opcode, ctx->idx, branch_dst, dst, imm, ctx); else emit_cbcond(cbcond_opcode, ctx->idx, branch_dst, dst, src, ctx); } return 0; } /* Just skip the save instruction and the ctx register move. */ #define BPF_TAILCALL_PROLOGUE_SKIP 32 #define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) static void build_prologue(struct jit_ctx *ctx) { s32 stack_needed = BASE_STACKFRAME; if (ctx->saw_frame_pointer || ctx->saw_tail_call) { struct bpf_prog *prog = ctx->prog; u32 stack_depth; stack_depth = prog->aux->stack_depth; stack_needed += round_up(stack_depth, 16); } if (ctx->saw_tail_call) stack_needed += 8; /* save %sp, -176, %sp */ emit(SAVE | IMMED | RS1(SP) | S13(-stack_needed) | RD(SP), ctx); /* tail_call_cnt = 0 */ if (ctx->saw_tail_call) { u32 off = BPF_TAILCALL_CNT_SP_OFF; emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(G0), ctx); } else { emit_nop(ctx); } if (ctx->saw_frame_pointer) { const u8 vfp = bpf2sparc[BPF_REG_FP]; emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); } else { emit_nop(ctx); } emit_reg_move(I0, O0, ctx); emit_reg_move(I1, O1, ctx); emit_reg_move(I2, O2, ctx); emit_reg_move(I3, O3, ctx); emit_reg_move(I4, O4, ctx); /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. 
*/ } static void build_epilogue(struct jit_ctx *ctx) { ctx->epilogue_offset = ctx->idx; /* ret (jmpl %i7 + 8, %g0) */ emit(JMPL | IMMED | RS1(I7) | S13(8) | RD(G0), ctx); /* restore %i5, %g0, %o0 */ emit(RESTORE | RS1(bpf2sparc[BPF_REG_0]) | RS2(G0) | RD(O0), ctx); } static void emit_tail_call(struct jit_ctx *ctx) { const u8 bpf_array = bpf2sparc[BPF_REG_2]; const u8 bpf_index = bpf2sparc[BPF_REG_3]; const u8 tmp = bpf2sparc[TMP_REG_1]; u32 off; ctx->saw_tail_call = true; off = offsetof(struct bpf_array, map.max_entries); emit(LD32 | IMMED | RS1(bpf_array) | S13(off) | RD(tmp), ctx); emit_cmp(bpf_index, tmp, ctx); #define OFFSET1 17 emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET1, ctx); emit_nop(ctx); off = BPF_TAILCALL_CNT_SP_OFF; emit(LD32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx); emit_cmpi(tmp, MAX_TAIL_CALL_CNT, ctx); #define OFFSET2 13 emit_branch(BGEU, ctx->idx, ctx->idx + OFFSET2, ctx); emit_nop(ctx); emit_alu_K(ADD, tmp, 1, ctx); off = BPF_TAILCALL_CNT_SP_OFF; emit(ST32 | IMMED | RS1(SP) | S13(off) | RD(tmp), ctx); emit_alu3_K(SLL, bpf_index, 3, tmp, ctx); emit_alu(ADD, bpf_array, tmp, ctx); off = offsetof(struct bpf_array, ptrs); emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx); emit_cmpi(tmp, 0, ctx); #define OFFSET3 5 emit_branch(BE, ctx->idx, ctx->idx + OFFSET3, ctx); emit_nop(ctx); off = offsetof(struct bpf_prog, bpf_func); emit(LD64 | IMMED | RS1(tmp) | S13(off) | RD(tmp), ctx); off = BPF_TAILCALL_PROLOGUE_SKIP; emit(JMPL | IMMED | RS1(tmp) | S13(off) | RD(G0), ctx); emit_nop(ctx); } static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) { const u8 code = insn->code; const u8 dst = bpf2sparc[insn->dst_reg]; const u8 src = bpf2sparc[insn->src_reg]; const int i = insn - ctx->prog->insnsi; const s16 off = insn->off; const s32 imm = insn->imm; if (insn->src_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; switch (code) { /* dst = src */ case BPF_ALU | BPF_MOV | BPF_X: emit_alu3_K(SRL, src, 0, dst, ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_ALU64 | BPF_MOV | BPF_X: emit_reg_move(src, dst, ctx); break; /* dst = dst OP src */ case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU64 | BPF_ADD | BPF_X: emit_alu(ADD, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU64 | BPF_SUB | BPF_X: emit_alu(SUB, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU64 | BPF_AND | BPF_X: emit_alu(AND, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU64 | BPF_OR | BPF_X: emit_alu(OR, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU64 | BPF_XOR | BPF_X: emit_alu(XOR, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_MUL | BPF_X: emit_alu(MUL, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_MUL | BPF_X: emit_alu(MULX, src, dst, ctx); break; case BPF_ALU | BPF_DIV | BPF_X: emit_write_y(G0, ctx); emit_alu(DIV, src, dst, ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_ALU64 | BPF_DIV | BPF_X: emit_alu(UDIVX, src, dst, ctx); break; case BPF_ALU | BPF_MOD | BPF_X: { const u8 tmp = bpf2sparc[TMP_REG_1]; ctx->tmp_1_used = true; emit_write_y(G0, ctx); emit_alu3(DIV, dst, src, tmp, ctx); emit_alu3(MULX, tmp, src, tmp, ctx); emit_alu3(SUB, dst, tmp, dst, ctx); goto do_alu32_trunc; } case BPF_ALU64 | BPF_MOD | BPF_X: { const u8 tmp = bpf2sparc[TMP_REG_1]; ctx->tmp_1_used = true; emit_alu3(UDIVX, dst, src, tmp, ctx); emit_alu3(MULX, tmp, src, tmp, ctx); emit_alu3(SUB, dst, tmp, dst, ctx); break; } case BPF_ALU | BPF_LSH | BPF_X: 
emit_alu(SLL, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_LSH | BPF_X: emit_alu(SLLX, src, dst, ctx); break; case BPF_ALU | BPF_RSH | BPF_X: emit_alu(SRL, src, dst, ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_ALU64 | BPF_RSH | BPF_X: emit_alu(SRLX, src, dst, ctx); break; case BPF_ALU | BPF_ARSH | BPF_X: emit_alu(SRA, src, dst, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_X: emit_alu(SRAX, src, dst, ctx); break; /* dst = -dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: emit(SUB | RS1(0) | RS2(dst) | RD(dst), ctx); goto do_alu32_trunc; case BPF_ALU | BPF_END | BPF_FROM_BE: switch (imm) { case 16: emit_alu_K(SLL, dst, 16, ctx); emit_alu_K(SRL, dst, 16, ctx); if (insn_is_zext(&insn[1])) return 1; break; case 32: if (!ctx->prog->aux->verifier_zext) emit_alu_K(SRL, dst, 0, ctx); break; case 64: /* nop */ break; } break; /* dst = BSWAP##imm(dst) */ case BPF_ALU | BPF_END | BPF_FROM_LE: { const u8 tmp = bpf2sparc[TMP_REG_1]; const u8 tmp2 = bpf2sparc[TMP_REG_2]; ctx->tmp_1_used = true; switch (imm) { case 16: emit_alu3_K(AND, dst, 0xff, tmp, ctx); emit_alu3_K(SRL, dst, 8, dst, ctx); emit_alu3_K(AND, dst, 0xff, dst, ctx); emit_alu3_K(SLL, tmp, 8, tmp, ctx); emit_alu(OR, tmp, dst, ctx); if (insn_is_zext(&insn[1])) return 1; break; case 32: ctx->tmp_2_used = true; emit_alu3_K(SRL, dst, 24, tmp, ctx); /* tmp = dst >> 24 */ emit_alu3_K(SRL, dst, 16, tmp2, ctx); /* tmp2 = dst >> 16 */ emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */ emit_alu3_K(SLL, tmp2, 8, tmp2, ctx); /* tmp2 = tmp2 << 8 */ emit_alu(OR, tmp2, tmp, ctx); /* tmp = tmp | tmp2 */ emit_alu3_K(SRL, dst, 8, tmp2, ctx); /* tmp2 = dst >> 8 */ emit_alu3_K(AND, tmp2, 0xff, tmp2, ctx);/* tmp2 = tmp2 & 0xff */ emit_alu3_K(SLL, tmp2, 16, tmp2, ctx); /* tmp2 = tmp2 << 16 */ emit_alu(OR, tmp2, tmp, ctx); /* tmp = tmp | tmp2 */ emit_alu3_K(AND, dst, 0xff, dst, ctx); /* dst = dst & 0xff */ emit_alu3_K(SLL, dst, 24, dst, ctx); /* dst = dst << 24 */ emit_alu(OR, tmp, dst, ctx); /* dst = dst | tmp */ if (insn_is_zext(&insn[1])) return 1; break; case 64: emit_alu3_K(ADD, SP, STACK_BIAS + 128, tmp, ctx); emit(ST64 | RS1(tmp) | RS2(G0) | RD(dst), ctx); emit(LD64A | ASI(ASI_PL) | RS1(tmp) | RS2(G0) | RD(dst), ctx); break; } break; } /* dst = imm */ case BPF_ALU | BPF_MOV | BPF_K: emit_loadimm32(imm, dst, ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_ALU64 | BPF_MOV | BPF_K: emit_loadimm_sext(imm, dst, ctx); break; /* dst = dst OP imm */ case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU64 | BPF_ADD | BPF_K: emit_alu_K(ADD, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU64 | BPF_SUB | BPF_K: emit_alu_K(SUB, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU64 | BPF_AND | BPF_K: emit_alu_K(AND, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_OR | BPF_K: case BPF_ALU64 | BPF_OR | BPF_K: emit_alu_K(OR, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_XOR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: emit_alu_K(XOR, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU | BPF_MUL | BPF_K: emit_alu_K(MUL, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_MUL | BPF_K: emit_alu_K(MULX, dst, imm, ctx); break; case BPF_ALU | BPF_DIV | BPF_K: if (imm == 0) return -EINVAL; emit_write_y(G0, ctx); emit_alu_K(DIV, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_DIV | BPF_K: if (imm == 0) return -EINVAL; emit_alu_K(UDIVX, dst, imm, ctx); break; case BPF_ALU64 | BPF_MOD | BPF_K: case BPF_ALU | BPF_MOD | BPF_K: { 
const u8 tmp = bpf2sparc[TMP_REG_2]; unsigned int div; if (imm == 0) return -EINVAL; div = (BPF_CLASS(code) == BPF_ALU64) ? UDIVX : DIV; ctx->tmp_2_used = true; if (BPF_CLASS(code) != BPF_ALU64) emit_write_y(G0, ctx); if (is_simm13(imm)) { emit(div | IMMED | RS1(dst) | S13(imm) | RD(tmp), ctx); emit(MULX | IMMED | RS1(tmp) | S13(imm) | RD(tmp), ctx); emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx); } else { const u8 tmp1 = bpf2sparc[TMP_REG_1]; ctx->tmp_1_used = true; emit_set_const_sext(imm, tmp1, ctx); emit(div | RS1(dst) | RS2(tmp1) | RD(tmp), ctx); emit(MULX | RS1(tmp) | RS2(tmp1) | RD(tmp), ctx); emit(SUB | RS1(dst) | RS2(tmp) | RD(dst), ctx); } goto do_alu32_trunc; } case BPF_ALU | BPF_LSH | BPF_K: emit_alu_K(SLL, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_LSH | BPF_K: emit_alu_K(SLLX, dst, imm, ctx); break; case BPF_ALU | BPF_RSH | BPF_K: emit_alu_K(SRL, dst, imm, ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_ALU64 | BPF_RSH | BPF_K: emit_alu_K(SRLX, dst, imm, ctx); break; case BPF_ALU | BPF_ARSH | BPF_K: emit_alu_K(SRA, dst, imm, ctx); goto do_alu32_trunc; case BPF_ALU64 | BPF_ARSH | BPF_K: emit_alu_K(SRAX, dst, imm, ctx); break; do_alu32_trunc: if (BPF_CLASS(code) == BPF_ALU && !ctx->prog->aux->verifier_zext) emit_alu_K(SRL, dst, 0, ctx); break; /* JUMP off */ case BPF_JMP | BPF_JA: emit_branch(BA, ctx->idx, ctx->offset[i + off], ctx); emit_nop(ctx); break; /* IF (dst COND src) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: { int err; err = emit_compare_and_branch(code, dst, src, 0, false, i + off, ctx); if (err) return err; break; } /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSET | BPF_K: { int err; err = emit_compare_and_branch(code, dst, 0, imm, true, i + off, ctx); if (err) return err; break; } /* function call */ case BPF_JMP | BPF_CALL: { u8 *func = ((u8 *)__bpf_call_base) + imm; ctx->saw_call = true; emit_call((u32 *)func, ctx); emit_nop(ctx); emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx); break; } /* tail call */ case BPF_JMP | BPF_TAIL_CALL: emit_tail_call(ctx); break; /* function return */ case BPF_JMP | BPF_EXIT: /* Optimization: when last instruction is EXIT, simply fallthrough to epilogue. 
*/ if (i == ctx->prog->len - 1) break; emit_branch(BA, ctx->idx, ctx->epilogue_offset, ctx); emit_nop(ctx); break; /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: { const struct bpf_insn insn1 = insn[1]; u64 imm64; imm64 = (u64)insn1.imm << 32 | (u32)imm; emit_loadimm64(imm64, dst, ctx); return 1; } /* LDX: dst = *(size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_W: case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: { const u8 tmp = bpf2sparc[TMP_REG_1]; u32 opcode = 0, rs2; ctx->tmp_1_used = true; switch (BPF_SIZE(code)) { case BPF_W: opcode = LD32; break; case BPF_H: opcode = LD16; break; case BPF_B: opcode = LD8; break; case BPF_DW: opcode = LD64; break; } if (is_simm13(off)) { opcode |= IMMED; rs2 = S13(off); } else { emit_loadimm(off, tmp, ctx); rs2 = RS2(tmp); } emit(opcode | RS1(src) | rs2 | RD(dst), ctx); if (opcode != LD64 && insn_is_zext(&insn[1])) return 1; break; } /* speculation barrier */ case BPF_ST | BPF_NOSPEC: break; /* ST: *(size *)(dst + off) = imm */ case BPF_ST | BPF_MEM | BPF_W: case BPF_ST | BPF_MEM | BPF_H: case BPF_ST | BPF_MEM | BPF_B: case BPF_ST | BPF_MEM | BPF_DW: { const u8 tmp = bpf2sparc[TMP_REG_1]; const u8 tmp2 = bpf2sparc[TMP_REG_2]; u32 opcode = 0, rs2; if (insn->dst_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; ctx->tmp_2_used = true; emit_loadimm(imm, tmp2, ctx); switch (BPF_SIZE(code)) { case BPF_W: opcode = ST32; break; case BPF_H: opcode = ST16; break; case BPF_B: opcode = ST8; break; case BPF_DW: opcode = ST64; break; } if (is_simm13(off)) { opcode |= IMMED; rs2 = S13(off); } else { ctx->tmp_1_used = true; emit_loadimm(off, tmp, ctx); rs2 = RS2(tmp); } emit(opcode | RS1(dst) | rs2 | RD(tmp2), ctx); break; } /* STX: *(size *)(dst + off) = src */ case BPF_STX | BPF_MEM | BPF_W: case BPF_STX | BPF_MEM | BPF_H: case BPF_STX | BPF_MEM | BPF_B: case BPF_STX | BPF_MEM | BPF_DW: { const u8 tmp = bpf2sparc[TMP_REG_1]; u32 opcode = 0, rs2; if (insn->dst_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; switch (BPF_SIZE(code)) { case BPF_W: opcode = ST32; break; case BPF_H: opcode = ST16; break; case BPF_B: opcode = ST8; break; case BPF_DW: opcode = ST64; break; } if (is_simm13(off)) { opcode |= IMMED; rs2 = S13(off); } else { ctx->tmp_1_used = true; emit_loadimm(off, tmp, ctx); rs2 = RS2(tmp); } emit(opcode | RS1(dst) | rs2 | RD(src), ctx); break; } case BPF_STX | BPF_ATOMIC | BPF_W: { const u8 tmp = bpf2sparc[TMP_REG_1]; const u8 tmp2 = bpf2sparc[TMP_REG_2]; const u8 tmp3 = bpf2sparc[TMP_REG_3]; if (insn->imm != BPF_ADD) { pr_err_once("unknown atomic op %02x\n", insn->imm); return -EINVAL; } /* lock *(u32 *)(dst + off) += src */ if (insn->dst_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; ctx->tmp_1_used = true; ctx->tmp_2_used = true; ctx->tmp_3_used = true; emit_loadimm(off, tmp, ctx); emit_alu3(ADD, dst, tmp, tmp, ctx); emit(LD32 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx); emit_alu3(ADD, tmp2, src, tmp3, ctx); emit(CAS | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx); emit_cmp(tmp2, tmp3, ctx); emit_branch(BNE, 4, 0, ctx); emit_nop(ctx); break; } /* STX XADD: lock *(u64 *)(dst + off) += src */ case BPF_STX | BPF_ATOMIC | BPF_DW: { const u8 tmp = bpf2sparc[TMP_REG_1]; const u8 tmp2 = bpf2sparc[TMP_REG_2]; const u8 tmp3 = bpf2sparc[TMP_REG_3]; if (insn->imm != BPF_ADD) { pr_err_once("unknown atomic op %02x\n", insn->imm); return -EINVAL; } if (insn->dst_reg == BPF_REG_FP) ctx->saw_frame_pointer = true; ctx->tmp_1_used = true; ctx->tmp_2_used = true; ctx->tmp_3_used = true; emit_loadimm(off, tmp, 
ctx); emit_alu3(ADD, dst, tmp, tmp, ctx); emit(LD64 | RS1(tmp) | RS2(G0) | RD(tmp2), ctx); emit_alu3(ADD, tmp2, src, tmp3, ctx); emit(CASX | ASI(ASI_P) | RS1(tmp) | RS2(tmp2) | RD(tmp3), ctx); emit_cmp(tmp2, tmp3, ctx); emit_branch(BNE, 4, 0, ctx); emit_nop(ctx); break; } default: pr_err_once("unknown opcode %02x\n", code); return -EINVAL; } return 0; } static int build_body(struct jit_ctx *ctx) { const struct bpf_prog *prog = ctx->prog; int i; for (i = 0; i < prog->len; i++) { const struct bpf_insn *insn = &prog->insnsi[i]; int ret; ret = build_insn(insn, ctx); if (ret > 0) { i++; ctx->offset[i] = ctx->idx; continue; } ctx->offset[i] = ctx->idx; if (ret) return ret; } return 0; } static void jit_fill_hole(void *area, unsigned int size) { u32 *ptr; /* We are guaranteed to have aligned memory. */ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) *ptr++ = 0x91d02005; /* ta 5 */ } bool bpf_jit_needs_zext(void) { return true; } struct sparc64_jit_data { struct bpf_binary_header *header; u8 *image; struct jit_ctx ctx; }; struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; struct sparc64_jit_data *jit_data; struct bpf_binary_header *header; u32 prev_image_size, image_size; bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; u8 *image_ptr; int pass, i; if (!prog->jit_requested) return orig_prog; tmp = bpf_jit_blind_constants(prog); /* If blinding was requested and we failed during blinding, * we must fall back to the interpreter. */ if (IS_ERR(tmp)) return orig_prog; if (tmp != prog) { tmp_blinded = true; prog = tmp; } jit_data = prog->aux->jit_data; if (!jit_data) { jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); if (!jit_data) { prog = orig_prog; goto out; } prog->aux->jit_data = jit_data; } if (jit_data->ctx.offset) { ctx = jit_data->ctx; image_ptr = jit_data->image; header = jit_data->header; extra_pass = true; image_size = sizeof(u32) * ctx.idx; prev_image_size = image_size; pass = 1; goto skip_init_ctx; } memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL); if (ctx.offset == NULL) { prog = orig_prog; goto out_off; } /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook * the offset array so that we converge faster. */ for (i = 0; i < prog->len; i++) ctx.offset[i] = i * (12 * 4); prev_image_size = ~0U; for (pass = 1; pass < 40; pass++) { ctx.idx = 0; build_prologue(&ctx); if (build_body(&ctx)) { prog = orig_prog; goto out_off; } build_epilogue(&ctx); if (bpf_jit_enable > 1) pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass, ctx.idx * 4, ctx.tmp_1_used ? '1' : ' ', ctx.tmp_2_used ? '2' : ' ', ctx.tmp_3_used ? '3' : ' ', ctx.saw_frame_pointer ? 'F' : ' ', ctx.saw_call ? 'C' : ' ', ctx.saw_tail_call ? 'T' : ' '); if (ctx.idx * 4 == prev_image_size) break; prev_image_size = ctx.idx * 4; cond_resched(); } /* Now we know the actual image size. 
*/ image_size = sizeof(u32) * ctx.idx; header = bpf_jit_binary_alloc(image_size, &image_ptr, sizeof(u32), jit_fill_hole); if (header == NULL) { prog = orig_prog; goto out_off; } ctx.image = (u32 *)image_ptr; skip_init_ctx: ctx.idx = 0; build_prologue(&ctx); if (build_body(&ctx)) { bpf_jit_binary_free(header); prog = orig_prog; goto out_off; } build_epilogue(&ctx); if (ctx.idx * 4 != prev_image_size) { pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n", prev_image_size, ctx.idx * 4); bpf_jit_binary_free(header); prog = orig_prog; goto out_off; } if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, image_size, pass, ctx.image); bpf_flush_icache(header, (u8 *)header + header->size); if (!prog->is_func || extra_pass) { bpf_jit_binary_lock_ro(header); } else { jit_data->ctx = ctx; jit_data->image = image_ptr; jit_data->header = header; } prog->bpf_func = (void *)ctx.image; prog->jited = 1; prog->jited_len = image_size; if (!prog->is_func || extra_pass) { bpf_prog_fill_jited_linfo(prog, ctx.offset); out_off: kfree(ctx.offset); kfree(jit_data); prog->aux->jit_data = NULL; } out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); return prog; }
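/*
 * Host-side sketch of the split 10-bit displacement used by the CBCOND
 * encoders above (emit_cbcond()/emit_cbcondi()).  The field layout follows
 * WDISP10(): the low 8 bits of the word displacement land in instruction
 * bits 12:5 and the top 2 bits in bits 20:19.  decode10() has no kernel
 * counterpart and exists only to check the packing round-trips; the test
 * offsets are illustrative.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t wdisp10(uint32_t byte_off)
{
        uint32_t ret = ((byte_off >> 2) & 0xff) << 5;

        ret |= ((byte_off >> (2 + 8)) & 0x03) << 19;
        return ret;
}

/* Pull the word displacement back out and sign-extend from 10 bits. */
static int32_t decode10(uint32_t insn)
{
        uint32_t w = ((insn >> 5) & 0xff) | (((insn >> 19) & 0x3) << 8);

        return (w & 0x200) ? (int32_t)w - 0x400 : (int32_t)w;
}

int main(void)
{
        int32_t offs[] = { 4, 64, 2044, -4, -2048 };
        unsigned int i;

        for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
                int32_t byte_off = offs[i];

                /* Same reachability test the JIT applies via is_simm10()
                 * to the instruction-index delta before picking a cbcond. */
                assert((uint32_t)(byte_off / 4) + 0x200 < 0x400);
                assert(decode10(wdisp10((uint32_t)byte_off)) * 4 == byte_off);
        }
        printf("ok\n");
        return 0;
}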
linux-master
arch/sparc/net/bpf_jit_comp_64.c
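/*
 * Host-side sketch of the shift/mask/or sequence the 64-bit JIT above
 * emits for BPF_ALU | BPF_END | BPF_FROM_LE with imm == 32 (the bswap32
 * case that uses both temporary registers).  swap32() is an illustrative
 * stand-in for the emitted code, executed here on the host instead of in
 * SPARC registers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t dst)
{
        uint32_t tmp, tmp2;

        tmp  = dst >> 24;                 /* srl dst, 24, tmp            */
        tmp2 = (dst >> 16) & 0xff;        /* srl + and                   */
        tmp |= tmp2 << 8;                 /* sll + or                    */
        tmp2 = (dst >> 8) & 0xff;
        tmp |= tmp2 << 16;
        dst  = (dst & 0xff) << 24;        /* and + sll                   */
        return dst | tmp;                 /* final or into dst           */
}

int main(void)
{
        assert(swap32(0x11223344u) == 0x44332211u);
        assert(swap32(0x000000ffu) == 0xff000000u);
        printf("ok\n");
        return 0;
}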
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2006 Andi Kleen, SUSE Labs. * * Fast user context implementation of clock_gettime, gettimeofday, and time. * * The code should have no internal unresolved relocations. * Check with readelf after changing. * Also alternative() doesn't work. */ /* * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include <linux/kernel.h> #include <linux/time.h> #include <linux/string.h> #include <asm/io.h> #include <asm/unistd.h> #include <asm/timex.h> #include <asm/clocksource.h> #include <asm/vvar.h> #ifdef CONFIG_SPARC64 #define SYSCALL_STRING \ "ta 0x6d;" \ "bcs,a 1f;" \ " sub %%g0, %%o0, %%o0;" \ "1:" #else #define SYSCALL_STRING \ "ta 0x10;" \ "bcs,a 1f;" \ " sub %%g0, %%o0, %%o0;" \ "1:" #endif #define SYSCALL_CLOBBERS \ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \ "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \ "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \ "cc", "memory" /* * Compute the vvar page's address in the process address space, and return it * as a pointer to the vvar_data. */ notrace static __always_inline struct vvar_data *get_vvar_data(void) { unsigned long ret; /* * vdso data page is the first vDSO page so grab the PC * and move up a page to get to the data page. */ __asm__("rd %%pc, %0" : "=r" (ret)); ret &= ~(8192 - 1); ret -= 8192; return (struct vvar_data *) ret; } notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts) { register long num __asm__("g1") = __NR_clock_gettime; register long o0 __asm__("o0") = clock; register long o1 __asm__("o1") = (long) ts; __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); return o0; } notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { register long num __asm__("g1") = __NR_gettimeofday; register long o0 __asm__("o0") = (long) tv; register long o1 __asm__("o1") = (long) tz; __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); return o0; } #ifdef CONFIG_SPARC64 notrace static __always_inline u64 vread_tick(void) { u64 ret; __asm__ __volatile__("rd %%tick, %0" : "=r" (ret)); return ret; } notrace static __always_inline u64 vread_tick_stick(void) { u64 ret; __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret)); return ret; } #else notrace static __always_inline u64 vread_tick(void) { register unsigned long long ret asm("o4"); __asm__ __volatile__("rd %%tick, %L0\n\t" "srlx %L0, 32, %H0" : "=r" (ret)); return ret; } notrace static __always_inline u64 vread_tick_stick(void) { register unsigned long long ret asm("o4"); __asm__ __volatile__("rd %%asr24, %L0\n\t" "srlx %L0, 32, %H0" : "=r" (ret)); return ret; } #endif notrace static __always_inline u64 vgetsns(struct vvar_data *vvar) { u64 v; u64 cycles; cycles = vread_tick(); v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; return v * vvar->clock.mult; } notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar) { u64 v; u64 cycles; cycles = vread_tick_stick(); v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; return v * vvar->clock.mult; } notrace static __always_inline int do_realtime(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; u64 ns; do { seq = vvar_read_begin(vvar); ts->tv_sec = 
vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns(vvar); ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; u64 ns; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->wall_time_sec; ns = vvar->wall_time_snsec; ns += vgetsns_stick(vvar); ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } notrace static __always_inline int do_monotonic(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; u64 ns; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns(vvar); ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; u64 ns; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->monotonic_time_sec; ns = vvar->monotonic_time_snsec; ns += vgetsns_stick(vvar); ns >>= vvar->clock.shift; } while (unlikely(vvar_read_retry(vvar, seq))); ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); ts->tv_nsec = ns; return 0; } notrace static int do_realtime_coarse(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->wall_time_coarse_sec; ts->tv_nsec = vvar->wall_time_coarse_nsec; } while (unlikely(vvar_read_retry(vvar, seq))); return 0; } notrace static int do_monotonic_coarse(struct vvar_data *vvar, struct __kernel_old_timespec *ts) { unsigned long seq; do { seq = vvar_read_begin(vvar); ts->tv_sec = vvar->monotonic_time_coarse_sec; ts->tv_nsec = vvar->monotonic_time_coarse_nsec; } while (unlikely(vvar_read_retry(vvar, seq))); return 0; } notrace int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts) { struct vvar_data *vvd = get_vvar_data(); switch (clock) { case CLOCK_REALTIME: if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) break; return do_realtime(vvd, ts); case CLOCK_MONOTONIC: if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) break; return do_monotonic(vvd, ts); case CLOCK_REALTIME_COARSE: return do_realtime_coarse(vvd, ts); case CLOCK_MONOTONIC_COARSE: return do_monotonic_coarse(vvd, ts); } /* * Unknown clock ID ? Fall back to the syscall. */ return vdso_fallback_gettime(clock, ts); } int clock_gettime(clockid_t, struct __kernel_old_timespec *) __attribute__((weak, alias("__vdso_clock_gettime"))); notrace int __vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts) { struct vvar_data *vvd = get_vvar_data(); switch (clock) { case CLOCK_REALTIME: if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) break; return do_realtime_stick(vvd, ts); case CLOCK_MONOTONIC: if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) break; return do_monotonic_stick(vvd, ts); case CLOCK_REALTIME_COARSE: return do_realtime_coarse(vvd, ts); case CLOCK_MONOTONIC_COARSE: return do_monotonic_coarse(vvd, ts); } /* * Unknown clock ID ? Fall back to the syscall. 
*/ return vdso_fallback_gettime(clock, ts); } notrace int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { struct vvar_data *vvd = get_vvar_data(); if (likely(vvd->vclock_mode != VCLOCK_NONE)) { if (likely(tv != NULL)) { union tstv_t { struct __kernel_old_timespec ts; struct __kernel_old_timeval tv; } *tstv = (union tstv_t *) tv; do_realtime(vvd, &tstv->ts); /* * Assign before dividing to ensure that the division is * done in the type of tv_usec, not tv_nsec. * * There cannot be > 1 billion usec in a second: * do_realtime() has already distributed such overflow * into tv_sec. So we can assign it to an int safely. */ tstv->tv.tv_usec = tstv->ts.tv_nsec; tstv->tv.tv_usec /= 1000; } if (unlikely(tz != NULL)) { /* Avoid memcpy. Some old compilers fail to inline it */ tz->tz_minuteswest = vvd->tz_minuteswest; tz->tz_dsttime = vvd->tz_dsttime; } return 0; } return vdso_fallback_gettimeofday(tv, tz); } int gettimeofday(struct __kernel_old_timeval *, struct timezone *) __attribute__((weak, alias("__vdso_gettimeofday"))); notrace int __vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz) { struct vvar_data *vvd = get_vvar_data(); if (likely(vvd->vclock_mode != VCLOCK_NONE)) { if (likely(tv != NULL)) { union tstv_t { struct __kernel_old_timespec ts; struct __kernel_old_timeval tv; } *tstv = (union tstv_t *) tv; do_realtime_stick(vvd, &tstv->ts); /* * Assign before dividing to ensure that the division is * done in the type of tv_usec, not tv_nsec. * * There cannot be > 1 billion usec in a second: * do_realtime() has already distributed such overflow * into tv_sec. So we can assign it to an int safely. */ tstv->tv.tv_usec = tstv->ts.tv_nsec; tstv->tv.tv_usec /= 1000; } if (unlikely(tz != NULL)) { /* Avoid memcpy. Some old compilers fail to inline it */ tz->tz_minuteswest = vvd->tz_minuteswest; tz->tz_dsttime = vvd->tz_dsttime; } return 0; } return vdso_fallback_gettimeofday(tv, tz); }
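/*
 * Host-side sketch of the arithmetic in vgetsns()/do_realtime() above:
 * scale the %tick delta by (mult, shift), add the shifted nanosecond
 * remainder kept in the vvar page, then fold whole seconds into tv_sec.
 * The fake_vvar struct and the calibration numbers are illustrative, not
 * the real vvar layout; the kernel also splits seconds off with
 * __iter_div_u64_rem() rather than plain '/' and '%'.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct fake_vvar {
        uint64_t cycle_last;
        uint64_t mask;
        uint32_t mult;
        uint32_t shift;
        uint64_t wall_time_sec;
        uint64_t wall_time_snsec;       /* nanoseconds << shift */
};

static void to_timespec(const struct fake_vvar *v, uint64_t cycles,
                        uint64_t *sec, uint64_t *nsec)
{
        uint64_t ns = v->wall_time_snsec;

        ns += ((cycles - v->cycle_last) & v->mask) * v->mult;
        ns >>= v->shift;
        *sec = v->wall_time_sec + ns / NSEC_PER_SEC;
        *nsec = ns % NSEC_PER_SEC;
}

int main(void)
{
        /* Pretend one tick is exactly 1 ns: mult == 1 << shift. */
        struct fake_vvar v = {
                .cycle_last = 1000, .mask = ~0ULL,
                .mult = 1 << 10, .shift = 10,
                .wall_time_sec = 5, .wall_time_snsec = 999999999ULL << 10,
        };
        uint64_t sec, nsec;

        to_timespec(&v, 1002, &sec, &nsec);     /* 2 ticks past cycle_last */
        printf("%" PRIu64 ".%09" PRIu64 "\n", sec, nsec);   /* 6.000000001 */
        return 0;
}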
linux-master
arch/sparc/vdso/vclock_gettime.c
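/*
 * Standalone sketch of the seqcount pattern behind the
 * vvar_read_begin()/vvar_read_retry() loops used by every do_*() helper
 * in the vDSO gettime code above.  The real helpers live in asm/vvar.h
 * and include the required memory barriers; this single-threaded model
 * only shows the control flow: wait while the writer holds an odd
 * sequence number, and re-read if the number changed underneath us.
 * The struct and field names here are made up for illustration.
 */
#include <stdio.h>

struct sample {
        unsigned int seq;       /* odd while an update is in progress */
        long sec;
        long nsec;
};

static unsigned int read_begin(const struct sample *s)
{
        unsigned int seq;

        while ((seq = s->seq) & 1)
                ;               /* writer active: wait for an even value */
        return seq;
}

static int read_retry(const struct sample *s, unsigned int start)
{
        return s->seq != start; /* changed while we read: try again */
}

int main(void)
{
        struct sample s = { .seq = 2, .sec = 1700000000, .nsec = 42 };
        long sec, nsec;
        unsigned int seq;

        do {
                seq = read_begin(&s);
                sec = s.sec;
                nsec = s.nsec;
        } while (read_retry(&s, seq));

        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}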
// SPDX-License-Identifier: GPL-2.0-only /* * vdso2c - A vdso image preparation tool * Copyright (c) 2014 Andy Lutomirski and others * * vdso2c requires stripped and unstripped input. It would be trivial * to fully strip the input in here, but, for reasons described below, * we need to write a section table. Doing this is more or less * equivalent to dropping all non-allocatable sections, but it's * easier to let objcopy handle that instead of doing it ourselves. * If we ever need to do something fancier than what objcopy provides, * it would be straightforward to add here. * * We keep a section table for a few reasons: * * Binutils has issues debugging the vDSO: it reads the section table to * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which * would break build-id if we removed the section table. Binutils * also requires that shstrndx != 0. See: * https://sourceware.org/bugzilla/show_bug.cgi?id=17064 * * elfutils might not look for PT_NOTE if there is a section table at * all. I don't know whether this matters for any practical purpose. * * For simplicity, rather than hacking up a partial section table, we * just write a mostly complete one. We omit non-dynamic symbols, * though, since they're rather large. * * Once binutils gets fixed, we might be able to drop this for all but * the 64-bit vdso, since build-id only works in kernel RPMs, and * systems that update to new enough kernel RPMs will likely update * binutils in sync. build-id has never worked for home-built kernel * RPMs without manual symlinking, and I suspect that no one ever does * that. */ /* * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include <inttypes.h> #include <stdint.h> #include <unistd.h> #include <stdarg.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <fcntl.h> #include <err.h> #include <sys/mman.h> #include <sys/types.h> #include <tools/be_byteshift.h> #include <linux/elf.h> #include <linux/types.h> #include <linux/kernel.h> const char *outfilename; /* Symbols that we need in vdso2c. */ enum { sym_vvar_start, sym_VDSO_FAKE_SECTION_TABLE_START, sym_VDSO_FAKE_SECTION_TABLE_END, }; struct vdso_sym { const char *name; int export; }; struct vdso_sym required_syms[] = { [sym_vvar_start] = {"vvar_start", 1}, [sym_VDSO_FAKE_SECTION_TABLE_START] = { "VDSO_FAKE_SECTION_TABLE_START", 0 }, [sym_VDSO_FAKE_SECTION_TABLE_END] = { "VDSO_FAKE_SECTION_TABLE_END", 0 }, }; __attribute__((format(printf, 1, 2))) __attribute__((noreturn)) static void fail(const char *format, ...) 
{ va_list ap; va_start(ap, format); fprintf(stderr, "Error: "); vfprintf(stderr, format, ap); if (outfilename) unlink(outfilename); exit(1); va_end(ap); } /* * Evil macros for big-endian reads and writes */ #define GBE(x, bits, ifnot) \ __builtin_choose_expr( \ (sizeof(*(x)) == bits/8), \ (__typeof__(*(x)))get_unaligned_be##bits(x), ifnot) #define LAST_GBE(x) \ __builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0)) #define GET_BE(x) \ GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x)))) #define PBE(x, val, bits, ifnot) \ __builtin_choose_expr( \ (sizeof(*(x)) == bits/8), \ put_unaligned_be##bits((val), (x)), ifnot) #define LAST_PBE(x, val) \ __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0)) #define PUT_BE(x, val) \ PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val)))) #define NSYMS ARRAY_SIZE(required_syms) #define BITSFUNC3(name, bits, suffix) name##bits##suffix #define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix) #define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, ) #define INT_BITS BITSFUNC2(int, ELF_BITS, _t) #define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x #define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x) #define ELF(x) ELF_BITS_XFORM(ELF_BITS, x) #define ELF_BITS 64 #include "vdso2c.h" #undef ELF_BITS #define ELF_BITS 32 #include "vdso2c.h" #undef ELF_BITS static void go(void *raw_addr, size_t raw_len, void *stripped_addr, size_t stripped_len, FILE *outfile, const char *name) { Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr; if (hdr->e_ident[EI_CLASS] == ELFCLASS64) { go64(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) { go32(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); } else { fail("unknown ELF class\n"); } } static void map_input(const char *name, void **addr, size_t *len, int prot) { off_t tmp_len; int fd = open(name, O_RDONLY); if (fd == -1) err(1, "%s", name); tmp_len = lseek(fd, 0, SEEK_END); if (tmp_len == (off_t)-1) err(1, "lseek"); *len = (size_t)tmp_len; *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0); if (*addr == MAP_FAILED) err(1, "mmap"); close(fd); } int main(int argc, char **argv) { size_t raw_len, stripped_len; void *raw_addr, *stripped_addr; FILE *outfile; char *name, *tmp; int namelen; if (argc != 4) { printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n"); return 1; } /* * Figure out the struct name. If we're writing to a .so file, * generate raw output insted. */ name = strdup(argv[3]); namelen = strlen(name); if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) { name = NULL; } else { tmp = strrchr(name, '/'); if (tmp) name = tmp + 1; tmp = strchr(name, '.'); if (tmp) *tmp = '\0'; for (tmp = name; *tmp; tmp++) if (*tmp == '-') *tmp = '_'; } map_input(argv[1], &raw_addr, &raw_len, PROT_READ); map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ); outfilename = argv[3]; outfile = fopen(outfilename, "w"); if (!outfile) err(1, "%s", argv[2]); go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); munmap(raw_addr, raw_len); munmap(stripped_addr, stripped_len); fclose(outfile); return 0; }
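/*
 * Standalone sketch of the output-name handling at the end of main()
 * above: a ".so" target means raw output (no struct name), anything else
 * is reduced to its basename, cut at the first '.', with '-' mapped to
 * '_'.  struct_name() is an illustrative wrapper around that logic and is
 * not a function vdso2c defines.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *struct_name(const char *outfilename)
{
        char *name = strdup(outfilename);
        size_t namelen = strlen(name);
        char *tmp;

        if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
                free(name);
                return NULL;            /* raw output, no C wrapper */
        }
        tmp = strrchr(name, '/');
        if (tmp)
                name = tmp + 1;
        tmp = strchr(name, '.');
        if (tmp)
                *tmp = '\0';
        for (tmp = name; *tmp; tmp++)
                if (*tmp == '-')
                        *tmp = '_';
        return name;
}

int main(void)
{
        assert(!strcmp(struct_name("arch/sparc/vdso/vdso-image-64.c"),
                       "vdso_image_64"));
        assert(struct_name("vdso64.so") == NULL);
        printf("ok\n");
        return 0;
}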
linux-master
arch/sparc/vdso/vdso2c.c
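The GET_BE()/PUT_BE() macros in vdso2c.c dispatch on the size of the pointed-to object, so one macro serves 16-, 32- and 64-bit ELF fields. A minimal userspace sketch of that trick, assuming nothing beyond GCC's __builtin_choose_expr; the rd_be*() helpers are local stand-ins for the get_unaligned_be*() routines from tools/include, not the real thing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local big-endian readers; stand-ins for get_unaligned_be16/32/64(). */
static uint16_t rd_be16(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)((b[0] << 8) | b[1]);
}

static uint32_t rd_be32(const void *p)
{
	const uint8_t *b = p;
	return ((uint32_t)rd_be16(b) << 16) | rd_be16(b + 2);
}

static uint64_t rd_be64(const void *p)
{
	const uint8_t *b = p;
	return ((uint64_t)rd_be32(b) << 32) | rd_be32(b + 4);
}

/* Pick a reader by the size of *x, the same dispatch GET_BE() does above. */
#define GET_BE(x)						\
	__builtin_choose_expr(sizeof(*(x)) == 8,		\
		(__typeof__(*(x)))rd_be64(x),			\
	__builtin_choose_expr(sizeof(*(x)) == 4,		\
		(__typeof__(*(x)))rd_be32(x),			\
	__builtin_choose_expr(sizeof(*(x)) == 2,		\
		(__typeof__(*(x)))rd_be16(x),			\
		*(const uint8_t *)(x))))

int main(void)
{
	uint32_t word;
	uint16_t half;

	/* Fill the fields with big-endian byte patterns. */
	memcpy(&word, "\x12\x34\x56\x78", 4);
	memcpy(&half, "\xab\xcd", 2);

	printf("GET_BE(&word) = 0x%x\n", GET_BE(&word));	/* 0x12345678 */
	printf("GET_BE(&half) = 0x%x\n", GET_BE(&half));	/* 0xabcd */
	return 0;
}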
// SPDX-License-Identifier: GPL-2.0-only /* * Set up the VMAs to tell the VM about the vDSO. * Copyright 2007 Andi Kleen, SUSE Labs. */ /* * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/linkage.h> #include <linux/random.h> #include <linux/elf.h> #include <asm/cacheflush.h> #include <asm/spitfire.h> #include <asm/vdso.h> #include <asm/vvar.h> #include <asm/page.h> unsigned int __read_mostly vdso_enabled = 1; static struct vm_special_mapping vvar_mapping = { .name = "[vvar]" }; #ifdef CONFIG_SPARC64 static struct vm_special_mapping vdso_mapping64 = { .name = "[vdso]" }; #endif #ifdef CONFIG_COMPAT static struct vm_special_mapping vdso_mapping32 = { .name = "[vdso]" }; #endif struct vvar_data *vvar_data; struct vdso_elfinfo32 { Elf32_Ehdr *hdr; Elf32_Sym *dynsym; unsigned long dynsymsize; const char *dynstr; unsigned long text; }; struct vdso_elfinfo64 { Elf64_Ehdr *hdr; Elf64_Sym *dynsym; unsigned long dynsymsize; const char *dynstr; unsigned long text; }; struct vdso_elfinfo { union { struct vdso_elfinfo32 elf32; struct vdso_elfinfo64 elf64; } u; }; static void *one_section64(struct vdso_elfinfo64 *e, const char *name, unsigned long *size) { const char *snames; Elf64_Shdr *shdrs; unsigned int i; shdrs = (void *)e->hdr + e->hdr->e_shoff; snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; for (i = 1; i < e->hdr->e_shnum; i++) { if (!strcmp(snames+shdrs[i].sh_name, name)) { if (size) *size = shdrs[i].sh_size; return (void *)e->hdr + shdrs[i].sh_offset; } } return NULL; } static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e) { struct vdso_elfinfo64 *e = &_e->u.elf64; e->hdr = image->data; e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize); e->dynstr = one_section64(e, ".dynstr", NULL); if (!e->dynsym || !e->dynstr) { pr_err("VDSO64: Missing symbol sections.\n"); return -ENODEV; } return 0; } static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name) { unsigned int i; for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) { Elf64_Sym *s = &e->dynsym[i]; if (s->st_name == 0) continue; if (!strcmp(e->dynstr + s->st_name, name)) return s; } return NULL; } static int patchsym64(struct vdso_elfinfo *_e, const char *orig, const char *new) { struct vdso_elfinfo64 *e = &_e->u.elf64; Elf64_Sym *osym = find_sym64(e, orig); Elf64_Sym *nsym = find_sym64(e, new); if (!nsym || !osym) { pr_err("VDSO64: Missing symbols.\n"); return -ENODEV; } osym->st_value = nsym->st_value; osym->st_size = nsym->st_size; osym->st_info = nsym->st_info; osym->st_other = nsym->st_other; osym->st_shndx = nsym->st_shndx; return 0; } static void *one_section32(struct vdso_elfinfo32 *e, const char *name, unsigned long *size) { const char *snames; Elf32_Shdr *shdrs; unsigned int i; shdrs = (void *)e->hdr + e->hdr->e_shoff; snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; for (i = 1; i < e->hdr->e_shnum; i++) { if (!strcmp(snames+shdrs[i].sh_name, name)) { if (size) *size = shdrs[i].sh_size; return (void *)e->hdr + shdrs[i].sh_offset; } } return NULL; } static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e) { struct vdso_elfinfo32 *e = &_e->u.elf32; e->hdr = image->data; e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize); e->dynstr = one_section32(e, ".dynstr", NULL); if (!e->dynsym || !e->dynstr) { pr_err("VDSO32: Missing symbol sections.\n"); return -ENODEV; 
} return 0; } static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name) { unsigned int i; for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) { Elf32_Sym *s = &e->dynsym[i]; if (s->st_name == 0) continue; if (!strcmp(e->dynstr + s->st_name, name)) return s; } return NULL; } static int patchsym32(struct vdso_elfinfo *_e, const char *orig, const char *new) { struct vdso_elfinfo32 *e = &_e->u.elf32; Elf32_Sym *osym = find_sym32(e, orig); Elf32_Sym *nsym = find_sym32(e, new); if (!nsym || !osym) { pr_err("VDSO32: Missing symbols.\n"); return -ENODEV; } osym->st_value = nsym->st_value; osym->st_size = nsym->st_size; osym->st_info = nsym->st_info; osym->st_other = nsym->st_other; osym->st_shndx = nsym->st_shndx; return 0; } static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64) { if (elf64) return find_sections64(image, e); else return find_sections32(image, e); } static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig, const char *new_target, bool elf64) { if (elf64) return patchsym64(e, orig, new_target); else return patchsym32(e, orig, new_target); } static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64) { int err; err = find_sections(image, e, elf64); if (err) return err; err = patch_one_symbol(e, "__vdso_gettimeofday", "__vdso_gettimeofday_stick", elf64); if (err) return err; return patch_one_symbol(e, "__vdso_clock_gettime", "__vdso_clock_gettime_stick", elf64); return 0; } /* * Allocate pages for the vdso and vvar, and copy in the vdso text from the * kernel image. */ int __init init_vdso_image(const struct vdso_image *image, struct vm_special_mapping *vdso_mapping, bool elf64) { int cnpages = (image->size) / PAGE_SIZE; struct page *dp, **dpp = NULL; struct page *cp, **cpp = NULL; struct vdso_elfinfo ei; int i, dnpages = 0; if (tlb_type != spitfire) { int err = stick_patch(image, &ei, elf64); if (err) return err; } /* * First, the vdso text. This is initialied data, an integral number of * pages long. */ if (WARN_ON(image->size % PAGE_SIZE != 0)) goto oom; cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL); vdso_mapping->pages = cpp; if (!cpp) goto oom; for (i = 0; i < cnpages; i++) { cp = alloc_page(GFP_KERNEL); if (!cp) goto oom; cpp[i] = cp; copy_page(page_address(cp), image->data + i * PAGE_SIZE); } /* * Now the vvar page. This is uninitialized data. */ if (vvar_data == NULL) { dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1; if (WARN_ON(dnpages != 1)) goto oom; dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL); vvar_mapping.pages = dpp; if (!dpp) goto oom; dp = alloc_page(GFP_KERNEL); if (!dp) goto oom; dpp[0] = dp; vvar_data = page_address(dp); memset(vvar_data, 0, PAGE_SIZE); vvar_data->seq = 0; } return 0; oom: if (cpp != NULL) { for (i = 0; i < cnpages; i++) { if (cpp[i] != NULL) __free_page(cpp[i]); } kfree(cpp); vdso_mapping->pages = NULL; } if (dpp != NULL) { for (i = 0; i < dnpages; i++) { if (dpp[i] != NULL) __free_page(dpp[i]); } kfree(dpp); vvar_mapping.pages = NULL; } pr_warn("Cannot allocate vdso\n"); vdso_enabled = 0; return -ENOMEM; } static int __init init_vdso(void) { int err = 0; #ifdef CONFIG_SPARC64 err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true); if (err) return err; #endif #ifdef CONFIG_COMPAT err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false); #endif return err; } subsys_initcall(init_vdso); struct linux_binprm; /* Shuffle the vdso up a bit, randomly. 
*/ static unsigned long vdso_addr(unsigned long start, unsigned int len) { unsigned int offset; /* This loses some more bits than a modulo, but is cheaper */ offset = get_random_u32_below(PTRS_PER_PTE); return start + (offset << PAGE_SHIFT); } static int map_vdso(const struct vdso_image *image, struct vm_special_mapping *vdso_mapping) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long text_start, addr = 0; int ret = 0; mmap_write_lock(mm); /* * First, get an unmapped region: then randomize it, and make sure that * region is free. */ if (current->flags & PF_RANDOMIZE) { addr = get_unmapped_area(NULL, 0, image->size - image->sym_vvar_start, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } addr = vdso_addr(addr, image->size - image->sym_vvar_start); } addr = get_unmapped_area(NULL, addr, image->size - image->sym_vvar_start, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } text_start = addr - image->sym_vvar_start; current->mm->context.vdso = (void __user *)text_start; /* * MAYWRITE to allow gdb to COW and set breakpoints */ vma = _install_special_mapping(mm, text_start, image->size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, vdso_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto up_fail; } vma = _install_special_mapping(mm, addr, -image->sym_vvar_start, VM_READ|VM_MAYREAD, &vvar_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); do_munmap(mm, text_start, image->size, NULL); } up_fail: if (ret) current->mm->context.vdso = NULL; mmap_write_unlock(mm); return ret; } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { if (!vdso_enabled) return 0; #if defined CONFIG_COMPAT if (!(is_32bit_task())) return map_vdso(&vdso_image_64_builtin, &vdso_mapping64); else return map_vdso(&vdso_image_32_builtin, &vdso_mapping32); #else return map_vdso(&vdso_image_64_builtin, &vdso_mapping64); #endif } static __init int vdso_setup(char *s) { int err; unsigned long val; err = kstrtoul(s, 10, &val); if (err) return err; vdso_enabled = val; return 0; } __setup("vdso=", vdso_setup);
linux-master
arch/sparc/vdso/vma.c
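stick_patch() above has to locate .dynsym and .dynstr inside the built-in vDSO image and then look symbols up by name before the image is mapped into any process. The same walk can be done from userspace against any ELF64 file; a rough sketch, assuming a complete image mapped read-only, with minimal error handling and made-up helper names (find_section, find_dynsym):

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Walk the section header table and return a pointer to the named section. */
static void *find_section(void *image, const char *name, unsigned long *size)
{
	Elf64_Ehdr *hdr = image;
	Elf64_Shdr *shdrs = (void *)((char *)image + hdr->e_shoff);
	const char *snames = (char *)image + shdrs[hdr->e_shstrndx].sh_offset;
	unsigned int i;

	for (i = 1; i < hdr->e_shnum; i++) {
		if (!strcmp(snames + shdrs[i].sh_name, name)) {
			if (size)
				*size = shdrs[i].sh_size;
			return (char *)image + shdrs[i].sh_offset;
		}
	}
	return NULL;
}

/* Look a symbol up by name in .dynsym, the way find_sym64() does. */
static Elf64_Sym *find_dynsym(void *image, const char *name)
{
	unsigned long dynsymsize = 0;
	Elf64_Sym *dynsym = find_section(image, ".dynsym", &dynsymsize);
	const char *dynstr = find_section(image, ".dynstr", NULL);
	unsigned long i;

	if (!dynsym || !dynstr)
		return NULL;
	for (i = 0; i < dynsymsize / sizeof(Elf64_Sym); i++)
		if (dynsym[i].st_name &&
		    !strcmp(dynstr + dynsym[i].st_name, name))
			return &dynsym[i];
	return NULL;
}

int main(int argc, char **argv)
{
	struct stat st;
	void *image;
	Elf64_Sym *sym;
	int fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <elf64-file> <symbol>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	image = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (image == MAP_FAILED)
		return 1;

	sym = find_dynsym(image, argv[2]);
	if (sym)
		printf("%s: value=0x%llx size=%llu\n", argv[2],
		       (unsigned long long)sym->st_value,
		       (unsigned long long)sym->st_size);
	else
		printf("%s: not found\n", argv[2]);

	munmap(image, st.st_size);
	close(fd);
	return 0;
}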
/* * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. */ #define BUILD_VDSO32 #ifdef CONFIG_SPARC64 /* * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel * configuration */ #undef CONFIG_64BIT #undef CONFIG_SPARC64 #define BUILD_VDSO32_64 #define CONFIG_32BIT #undef CONFIG_QUEUED_RWLOCKS #undef CONFIG_QUEUED_SPINLOCKS #endif #include "../vclock_gettime.c"
linux-master
arch/sparc/vdso/vdso32/vclock_gettime.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Simple utility to make a single-image install kernel with initial ramdisk for Sparc tftpbooting without need to set up nfs. Copyright (C) 1996,1997 Jakub Jelinek ([email protected]) Pete Zaitcev <[email protected]> endian fixes for cross-compiles, 2000. Copyright (C) 2011 Sam Ravnborg <[email protected]> */ #include <dirent.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> /* * Note: run this on an a.out kernel (use elftoaout for it), * as PROM looks for a.out image only. */ #define AOUT_TEXT_OFFSET 32 static int is64bit = 0; /* align to power-of-two size */ static int align(int n) { if (is64bit) return (n + 0x1fff) & ~0x1fff; else return (n + 0xfff) & ~0xfff; } /* read two bytes as big endian */ static unsigned short ld2(char *p) { return (p[0] << 8) | p[1]; } /* save 4 bytes as big endian */ static void st4(char *p, unsigned int x) { p[0] = x >> 24; p[1] = x >> 16; p[2] = x >> 8; p[3] = x; } static void die(const char *str) { perror(str); exit(1); } static void usage(void) { /* fs_img.gz is an image of initial ramdisk. */ fprintf(stderr, "Usage: piggyback bits vmlinux.aout System.map fs_img.gz\n"); fprintf(stderr, "\tKernel image will be modified in place.\n"); exit(1); } static int start_line(const char *line) { if (strcmp(line + 10, " _start\n") == 0) return 1; else if (strcmp(line + 18, " _start\n") == 0) return 1; return 0; } static int end_line(const char *line) { if (strcmp(line + 10, " _end\n") == 0) return 1; else if (strcmp (line + 18, " _end\n") == 0) return 1; return 0; } /* * Find address for start and end in System.map. * The file looks like this: * f0004000 ... _start * f0379f79 ... _end * 1234567890123456 * ^coloumn 1 * There is support for 64 bit addresses too. * * Return 0 if either start or end is not found */ static int get_start_end(const char *filename, unsigned int *start, unsigned int *end) { FILE *map; char buffer[1024]; *start = 0; *end = 0; map = fopen(filename, "r"); if (!map) die(filename); while (fgets(buffer, 1024, map)) { if (start_line(buffer)) *start = strtoul(buffer, NULL, 16); else if (end_line(buffer)) *end = strtoul(buffer, NULL, 16); } fclose (map); if (*start == 0 || *end == 0) return 0; return 1; } #define LOOKBACK (128 * 4) #define BUFSIZE 1024 /* * Find the HdrS entry from head_32/head_64. * We check if it is at the beginning of the file (sparc64 case) * and if not we search for it. * When we search do so in steps of 4 as HdrS is on a 4-byte aligned * address (it is on same alignment as sparc instructions) * Return the offset to the HdrS entry (as off_t) */ static off_t get_hdrs_offset(int kernelfd, const char *filename) { char buffer[BUFSIZE]; off_t offset; int i; if (lseek(kernelfd, 0, SEEK_SET) < 0) die("lseek"); if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE) die(filename); if (buffer[40] == 'H' && buffer[41] == 'd' && buffer[42] == 'r' && buffer[43] == 'S') { return 40; } else { /* Find the gokernel label */ /* Decode offset from branch instruction */ offset = ld2(buffer + AOUT_TEXT_OFFSET + 2) << 2; /* Go back 512 bytes so we do not miss HdrS */ offset -= LOOKBACK; /* skip a.out header */ offset += AOUT_TEXT_OFFSET; if (offset < 0) { errno = -EINVAL; die("Calculated a negative offset, probably elftoaout generated an invalid image. 
Did you use a recent elftoaout ?"); } if (lseek(kernelfd, offset, SEEK_SET) < 0) die("lseek"); if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE) die(filename); for (i = 0; i < LOOKBACK; i += 4) { if (buffer[i + 0] == 'H' && buffer[i + 1] == 'd' && buffer[i + 2] == 'r' && buffer[i + 3] == 'S') { return offset + i; } } } fprintf (stderr, "Couldn't find headers signature in %s\n", filename); exit(1); } int main(int argc,char **argv) { static char aout_magic[] = { 0x01, 0x03, 0x01, 0x07 }; char buffer[1024]; unsigned int i, start, end; off_t offset; struct stat s; int image, tail; if (argc != 5) usage(); if (strcmp(argv[1], "64") == 0) is64bit = 1; if (stat (argv[4], &s) < 0) die(argv[4]); if (!get_start_end(argv[3], &start, &end)) { fprintf(stderr, "Could not determine start and end from %s\n", argv[3]); exit(1); } if ((image = open(argv[2], O_RDWR)) < 0) die(argv[2]); if (read(image, buffer, 512) != 512) die(argv[2]); if (memcmp(buffer, aout_magic, 4) != 0) { fprintf (stderr, "Not a.out. Don't blame me.\n"); exit(1); } /* * We need to fill in values for * sparc_ramdisk_image + sparc_ramdisk_size * To locate these symbols search for the "HdrS" text which appear * in the image a little before the gokernel symbol. * See definition of these in init_32.S */ offset = get_hdrs_offset(image, argv[2]); /* skip HdrS + LINUX_VERSION_CODE + HdrS version */ offset += 10; if (lseek(image, offset, 0) < 0) die("lseek"); /* * root_flags = 0 * root_dev = 1 (RAMDISK_MAJOR) * ram_flags = 0 * sparc_ramdisk_image = "PAGE aligned address after _end") * sparc_ramdisk_size = size of image */ st4(buffer, 0); st4(buffer + 4, 0x01000000); st4(buffer + 8, align(end + 32)); st4(buffer + 12, s.st_size); if (write(image, buffer + 2, 14) != 14) die(argv[2]); /* For sparc64 update a_text and clear a_data + a_bss */ if (is64bit) { if (lseek(image, 4, 0) < 0) die("lseek"); /* a_text */ st4(buffer, align(end + 32 + 8191) - (start & ~0x3fffffUL) + s.st_size); /* a_data */ st4(buffer + 4, 0); /* a_bss */ st4(buffer + 8, 0); if (write(image, buffer, 12) != 12) die(argv[2]); } /* seek page aligned boundary in the image file and add boot image */ if (lseek(image, AOUT_TEXT_OFFSET - start + align(end + 32), 0) < 0) die("lseek"); if ((tail = open(argv[4], O_RDONLY)) < 0) die(argv[4]); while ((i = read(tail, buffer, 1024)) > 0) if (write(image, buffer, i) != i) die(argv[2]); if (close(image) < 0) die("close"); if (close(tail) < 0) die("close"); return 0; }
linux-master
arch/sparc/boot/piggyback.c
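piggyback patches the ramdisk address and size into the HdrS block and then appends the ramdisk at the matching file offset. A small sketch of that layout arithmetic under the same constants used above (32-byte a.out header, 8K PROM pages on sparc64); the _start and _end addresses here are invented, not taken from a real System.map:

#include <stdio.h>

#define AOUT_TEXT_OFFSET 32

/* Round n up to the next multiple of the power-of-two "size". */
static unsigned long align_up(unsigned long n, unsigned long size)
{
	return (n + size - 1) & ~(size - 1);
}

int main(void)
{
	unsigned long page = 8192;		/* sparc64 PROM page size */
	unsigned long start = 0x404000;		/* made-up _start */
	unsigned long end = 0x8a4f79;		/* made-up _end   */
	unsigned long ramdisk_image, file_off;

	/* Address written into sparc_ramdisk_image: _end pushed past the
	 * a.out header and rounded up to a page, as align(end + 32) does.
	 */
	ramdisk_image = align_up(end + AOUT_TEXT_OFFSET, page);

	/* The a.out file is a 32-byte header followed by the text segment
	 * based at _start, so this is the file offset where the ramdisk
	 * image gets appended.
	 */
	file_off = AOUT_TEXT_OFFSET - start + ramdisk_image;

	printf("sparc_ramdisk_image = 0x%lx\n", ramdisk_image);
	printf("append ramdisk at file offset 0x%lx\n", file_off);
	return 0;
}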
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include "libgcc.h" word_type __cmpdi2(long long a, long long b) { const DWunion au = { .ll = a }; const DWunion bu = { .ll = b }; if (au.s.high < bu.s.high) return 0; else if (au.s.high > bu.s.high) return 2; if ((unsigned int) au.s.low < (unsigned int) bu.s.low) return 0; else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) return 2; return 1; } EXPORT_SYMBOL(__cmpdi2);
linux-master
arch/sparc/lib/cmpdi2.c
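__cmpdi2() follows the libgcc three-way convention for 64-bit signed compares on 32-bit targets: it returns 0 when a < b, 1 when they are equal and 2 when a > b. A throwaway userspace check of that convention; cmpdi2_ref() is a plain reference written on long long values, not the routine above:

#include <stdio.h>

/* Reference for the convention: 0 => a < b, 1 => a == b, 2 => a > b. */
static int cmpdi2_ref(long long a, long long b)
{
	if (a < b)
		return 0;
	if (a > b)
		return 2;
	return 1;
}

int main(void)
{
	long long cases[][2] = {
		{ -1, 0 },
		{ 0, -1 },
		{ 5, 5 },
		{ 0x100000000LL, 0xffffffffLL },	/* decided by the high word */
		{ -0x100000000LL, -0xffffffffLL },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("cmp(%lld, %lld) = %d\n", cases[i][0], cases[i][1],
		       cmpdi2_ref(cases[i][0], cases[i][1]));
	return 0;
}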
// SPDX-License-Identifier: GPL-2.0 /* * PeeCeeI.c: The emerging standard... * * Copyright (C) 1997 David S. Miller ([email protected]) */ #include <linux/module.h> #include <asm/io.h> #include <asm/byteorder.h> void outsb(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; const u8 *p = src; while (count--) __raw_writeb(*p++, addr); } EXPORT_SYMBOL(outsb); void outsw(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; while (count--) { __raw_writew(*(u16 *)src, addr); src += sizeof(u16); } } EXPORT_SYMBOL(outsw); void outsl(unsigned long __addr, const void *src, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; u32 l, l2; if (!count) return; switch (((unsigned long)src) & 0x3) { case 0x0: /* src is naturally aligned */ while (count--) { __raw_writel(*(u32 *)src, addr); src += sizeof(u32); } break; case 0x2: /* 2-byte alignment */ while (count--) { l = (*(u16 *)src) << 16; l |= *(u16 *)(src + sizeof(u16)); __raw_writel(l, addr); src += sizeof(u32); } break; case 0x1: /* Hold three bytes in l each time, grab a byte from l2 */ l = (*(u8 *)src) << 24; l |= (*(u16 *)(src + sizeof(u8))) << 8; src += sizeof(u8) + sizeof(u16); while (count--) { l2 = *(u32 *)src; l |= (l2 >> 24); __raw_writel(l, addr); l = l2 << 8; src += sizeof(u32); } break; case 0x3: /* Hold a byte in l each time, grab 3 bytes from l2 */ l = (*(u8 *)src) << 24; src += sizeof(u8); while (count--) { l2 = *(u32 *)src; l |= (l2 >> 8); __raw_writel(l, addr); l = l2 << 24; src += sizeof(u32); } break; } } EXPORT_SYMBOL(outsl); void insb(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { u32 *pi; u8 *pb = dst; while ((((unsigned long)pb) & 0x3) && count--) *pb++ = __raw_readb(addr); pi = (u32 *)pb; while (count >= 4) { u32 w; w = (__raw_readb(addr) << 24); w |= (__raw_readb(addr) << 16); w |= (__raw_readb(addr) << 8); w |= (__raw_readb(addr) << 0); *pi++ = w; count -= 4; } pb = (u8 *)pi; while (count--) *pb++ = __raw_readb(addr); } } EXPORT_SYMBOL(insb); void insw(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { u16 *ps = dst; u32 *pi; if (((unsigned long)ps) & 0x2) { *ps++ = __raw_readw(addr); count--; } pi = (u32 *)ps; while (count >= 2) { u32 w; w = __raw_readw(addr) << 16; w |= __raw_readw(addr) << 0; *pi++ = w; count -= 2; } ps = (u16 *)pi; if (count) *ps = __raw_readw(addr); } } EXPORT_SYMBOL(insw); void insl(unsigned long __addr, void *dst, unsigned long count) { void __iomem *addr = (void __iomem *) __addr; if (count) { if ((((unsigned long)dst) & 0x3) == 0) { u32 *pi = dst; while (count--) *pi++ = __raw_readl(addr); } else { u32 l = 0, l2, *pi; u16 *ps; u8 *pb; switch (((unsigned long)dst) & 3) { case 0x2: ps = dst; count -= 1; l = __raw_readl(addr); *ps++ = l; pi = (u32 *)ps; while (count--) { l2 = __raw_readl(addr); *pi++ = (l << 16) | (l2 >> 16); l = l2; } ps = (u16 *)pi; *ps = l; break; case 0x1: pb = dst; count -= 1; l = __raw_readl(addr); *pb++ = l >> 24; ps = (u16 *)pb; *ps++ = ((l >> 8) & 0xffff); pi = (u32 *)ps; while (count--) { l2 = __raw_readl(addr); *pi++ = (l << 24) | (l2 >> 8); l = l2; } pb = (u8 *)pi; *pb = l; break; case 0x3: pb = (u8 *)dst; count -= 1; l = __raw_readl(addr); *pb++ = l >> 24; pi = (u32 *)pb; while (count--) { l2 = __raw_readl(addr); *pi++ = (l << 8) | (l2 >> 24); l = l2; } ps = (u16 *)pi; *ps++ = ((l >> 8) & 0xffff); pb = (u8 *)ps; *pb 
= l; break; } } } } EXPORT_SYMBOL(insl);
linux-master
arch/sparc/lib/PeeCeeI.c
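The misaligned cases in insl()/outsl() never issue an unaligned memory access; they keep the leftover bytes of one aligned 32-bit word in l and merge them with the next word l2, for example (l << 16) | (l2 >> 16) in the 2-byte-offset case. The merge itself is pure shifting; a small sketch on made-up values, with no MMIO involved:

#include <stdint.h>
#include <stdio.h>

/* Extract the 32-bit value that starts k bytes (1..3) into big-endian word l
 * and ends in the following word l2, the carry trick used in the code above.
 */
static uint32_t merge(uint32_t l, uint32_t l2, unsigned int k)
{
	return (l << (8 * k)) | (l2 >> (32 - 8 * k));
}

int main(void)
{
	/* Byte stream AA BB CC DD EE FF 00 11 seen as two aligned BE words. */
	uint32_t l  = 0xAABBCCDD;
	uint32_t l2 = 0xEEFF0011;
	unsigned int k;

	for (k = 1; k <= 3; k++)
		printf("offset %u: %08x\n", k, merge(l, l2, k));
	/* offset 1: bbccddee, offset 2: ccddeeff, offset 3: ddeeff00 */
	return 0;
}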
// SPDX-License-Identifier: GPL-2.0 /* * bitext.c: kernel little helper (of bit shuffling variety). * * Copyright (C) 2002 Pete Zaitcev <[email protected]> * * The algorithm to search a zero bit string is geared towards its application. * We expect a couple of fixed sizes of requests, so a rotating counter, reset * by align size, should provide fast enough search while maintaining low * fragmentation. */ #include <linux/string.h> #include <linux/bitmap.h> #include <asm/bitext.h> /** * bit_map_string_get - find and set a bit string in bit map. * @t: the bit map. * @len: requested string length * @align: requested alignment * * Returns offset in the map or -1 if out of space. * * Not safe to call from an interrupt (uses spin_lock). */ int bit_map_string_get(struct bit_map *t, int len, int align) { int offset, count; /* siamese twins */ int off_new; int align1; int i, color; if (t->num_colors) { /* align is overloaded to be the page color */ color = align; align = t->num_colors; } else { color = 0; if (align == 0) align = 1; } align1 = align - 1; if ((align & align1) != 0) BUG(); if (align < 0 || align >= t->size) BUG(); if (len <= 0 || len > t->size) BUG(); color &= align1; spin_lock(&t->lock); if (len < t->last_size) offset = t->first_free; else offset = t->last_off & ~align1; count = 0; for (;;) { off_new = find_next_zero_bit(t->map, t->size, offset); off_new = ((off_new + align1) & ~align1) + color; count += off_new - offset; offset = off_new; if (offset >= t->size) offset = 0; if (count + len > t->size) { spin_unlock(&t->lock); /* P3 */ printk(KERN_ERR "bitmap out: size %d used %d off %d len %d align %d count %d\n", t->size, t->used, offset, len, align, count); return -1; } if (offset + len > t->size) { count += t->size - offset; offset = 0; continue; } i = 0; while (test_bit(offset + i, t->map) == 0) { i++; if (i == len) { bitmap_set(t->map, offset, len); if (offset == t->first_free) t->first_free = find_next_zero_bit (t->map, t->size, t->first_free + len); if ((t->last_off = offset + len) >= t->size) t->last_off = 0; t->used += len; t->last_size = len; spin_unlock(&t->lock); return offset; } } count += i + 1; if ((offset += i + 1) >= t->size) offset = 0; } } void bit_map_clear(struct bit_map *t, int offset, int len) { int i; if (t->used < len) BUG(); /* Much too late to do any good, but alas... */ spin_lock(&t->lock); for (i = 0; i < len; i++) { if (test_bit(offset + i, t->map) == 0) BUG(); __clear_bit(offset + i, t->map); } if (offset < t->first_free) t->first_free = offset; t->used -= len; spin_unlock(&t->lock); } void bit_map_init(struct bit_map *t, unsigned long *map, int size) { bitmap_zero(map, size); memset(t, 0, sizeof *t); spin_lock_init(&t->lock); t->map = map; t->size = size; }
linux-master
arch/sparc/lib/bitext.c
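bit_map_string_get() looks for len consecutive zero bits starting at an align-aligned offset, with a rotating start point, page colors and a spinlock layered on top. The core search reduces to a first-fit scan; a simplified userspace sketch of just that scan (one byte per bit for clarity, no rotation, colors or locking):

#include <stdio.h>

#define MAP_BITS 64

static unsigned char map[MAP_BITS];	/* 0 = free, 1 = allocated */

/* Find and claim "len" free bits at an "align"-aligned offset, first fit. */
static int find_zero_string(int len, int align)
{
	int off, i;

	for (off = 0; off + len <= MAP_BITS; off += align) {
		for (i = 0; i < len; i++)
			if (map[off + i])
				break;
		if (i == len) {
			for (i = 0; i < len; i++)
				map[off + i] = 1;
			return off;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d\n", find_zero_string(4, 4));	/* 0  */
	printf("%d\n", find_zero_string(2, 8));	/* 8  */
	printf("%d\n", find_zero_string(8, 4));	/* 12 */
	return 0;
}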
// SPDX-License-Identifier: GPL-2.0 /* * atomic32.c: 32-bit atomic_t implementation * * Copyright (C) 2004 Keith M Wesolowski * Copyright (C) 2007 Kyle McMartin * * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf */ #include <linux/atomic.h> #include <linux/spinlock.h> #include <linux/module.h> #ifdef CONFIG_SMP #define ATOMIC_HASH_SIZE 4 #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)]) spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = { [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash) }; #else /* SMP */ static DEFINE_SPINLOCK(dummy); #define ATOMIC_HASH_SIZE 1 #define ATOMIC_HASH(a) (&dummy) #endif /* SMP */ #define ATOMIC_FETCH_OP(op, c_op) \ int arch_atomic_fetch_##op(int i, atomic_t *v) \ { \ int ret; \ unsigned long flags; \ spin_lock_irqsave(ATOMIC_HASH(v), flags); \ \ ret = v->counter; \ v->counter c_op i; \ \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ return ret; \ } \ EXPORT_SYMBOL(arch_atomic_fetch_##op); #define ATOMIC_OP_RETURN(op, c_op) \ int arch_atomic_##op##_return(int i, atomic_t *v) \ { \ int ret; \ unsigned long flags; \ spin_lock_irqsave(ATOMIC_HASH(v), flags); \ \ ret = (v->counter c_op i); \ \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ return ret; \ } \ EXPORT_SYMBOL(arch_atomic_##op##_return); ATOMIC_OP_RETURN(add, +=) ATOMIC_FETCH_OP(add, +=) ATOMIC_FETCH_OP(and, &=) ATOMIC_FETCH_OP(or, |=) ATOMIC_FETCH_OP(xor, ^=) #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN int arch_atomic_xchg(atomic_t *v, int new) { int ret; unsigned long flags; spin_lock_irqsave(ATOMIC_HASH(v), flags); ret = v->counter; v->counter = new; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); return ret; } EXPORT_SYMBOL(arch_atomic_xchg); int arch_atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; unsigned long flags; spin_lock_irqsave(ATOMIC_HASH(v), flags); ret = v->counter; if (likely(ret == old)) v->counter = new; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); return ret; } EXPORT_SYMBOL(arch_atomic_cmpxchg); int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) { int ret; unsigned long flags; spin_lock_irqsave(ATOMIC_HASH(v), flags); ret = v->counter; if (ret != u) v->counter += a; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); return ret; } EXPORT_SYMBOL(arch_atomic_fetch_add_unless); /* Atomic operations are already serializing */ void arch_atomic_set(atomic_t *v, int i) { unsigned long flags; spin_lock_irqsave(ATOMIC_HASH(v), flags); v->counter = i; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); } EXPORT_SYMBOL(arch_atomic_set); unsigned long sp32___set_bit(unsigned long *addr, unsigned long mask) { unsigned long old, flags; spin_lock_irqsave(ATOMIC_HASH(addr), flags); old = *addr; *addr = old | mask; spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); return old & mask; } EXPORT_SYMBOL(sp32___set_bit); unsigned long sp32___clear_bit(unsigned long *addr, unsigned long mask) { unsigned long old, flags; spin_lock_irqsave(ATOMIC_HASH(addr), flags); old = *addr; *addr = old & ~mask; spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); return old & mask; } EXPORT_SYMBOL(sp32___clear_bit); unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask) { unsigned long old, flags; spin_lock_irqsave(ATOMIC_HASH(addr), flags); old = *addr; *addr = old ^ mask; spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); return old & mask; } EXPORT_SYMBOL(sp32___change_bit); unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) { unsigned long flags; u32 prev; spin_lock_irqsave(ATOMIC_HASH(ptr), flags); if 
((prev = *ptr) == old) *ptr = new; spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); return (unsigned long)prev; } EXPORT_SYMBOL(__cmpxchg_u32); u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new) { unsigned long flags; u64 prev; spin_lock_irqsave(ATOMIC_HASH(ptr), flags); if ((prev = *ptr) == old) *ptr = new; spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); return prev; } EXPORT_SYMBOL(__cmpxchg_u64); unsigned long __xchg_u32(volatile u32 *ptr, u32 new) { unsigned long flags; u32 prev; spin_lock_irqsave(ATOMIC_HASH(ptr), flags); prev = *ptr; *ptr = new; spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); return (unsigned long)prev; } EXPORT_SYMBOL(__xchg_u32);
linux-master
arch/sparc/lib/atomic32.c
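Pre-V9 sparc has no compare-and-swap, so atomic32.c serializes every "atomic" operation through one of four spinlocks chosen by hashing the variable's address; unrelated variables usually land on different locks, so contention stays low while only a handful of locks are needed. A userspace sketch of the same idea, with pthread mutexes standing in for the kernel spinlocks:

#include <pthread.h>
#include <stdio.h>

#define HASH_SIZE 4
#define HASH(a) (&locks[(((unsigned long)(a)) >> 8) & (HASH_SIZE - 1)])

static pthread_mutex_t locks[HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* fetch-and-add emulated under the lock picked by the variable's address */
static int fetch_add(int *v, int i)
{
	pthread_mutex_t *lock = HASH(v);
	int old;

	pthread_mutex_lock(lock);
	old = *v;
	*v += i;
	pthread_mutex_unlock(lock);
	return old;
}

int main(void)
{
	int counter = 0;

	printf("old=%d\n", fetch_add(&counter, 5));	/* old=0 */
	printf("old=%d\n", fetch_add(&counter, 3));	/* old=5 */
	printf("now=%d\n", counter);			/* now=8 */
	return 0;
}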
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include "libgcc.h" word_type __ucmpdi2(unsigned long long a, unsigned long long b) { const DWunion au = {.ll = a}; const DWunion bu = {.ll = b}; if ((unsigned int) au.s.high < (unsigned int) bu.s.high) return 0; else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) return 2; if ((unsigned int) au.s.low < (unsigned int) bu.s.low) return 0; else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) return 2; return 1; } EXPORT_SYMBOL(__ucmpdi2);
linux-master
arch/sparc/lib/ucmpdi2.c
// SPDX-License-Identifier: GPL-2.0 /* * Implement the sparc iomap interfaces */ #include <linux/pci.h> #include <linux/module.h> #include <asm/io.h> /* Create a virtual mapping cookie for an IO port range */ void __iomem *ioport_map(unsigned long port, unsigned int nr) { return (void __iomem *) (unsigned long) port; } void ioport_unmap(void __iomem *addr) { /* Nothing to do */ } EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); #ifdef CONFIG_PCI void pci_iounmap(struct pci_dev *dev, void __iomem * addr) { /* nothing to do */ } EXPORT_SYMBOL(pci_iounmap); #endif
linux-master
arch/sparc/lib/iomap.c
// SPDX-License-Identifier: GPL-2.0 /* * SPARC64 Huge TLB page support. * * Copyright (C) 2002, 2003, 2006 David S. Miller ([email protected]) */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/sysctl.h> #include <asm/mman.h> #include <asm/pgalloc.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> /* Slightly simplified from the non-hugepage variant because by * definition we don't have to worry about any page coloring stuff */ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct hstate *h = hstate_file(filp); unsigned long task_size = TASK_SIZE; struct vm_unmapped_area_info info; if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; info.flags = 0; info.length = len; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = min(task_size, VA_EXCLUDE_START); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { VM_BUG_ON(addr != -ENOMEM); info.low_limit = VA_EXCLUDE_END; info.high_limit = task_size; addr = vm_unmapped_area(&info); } return addr; } static unsigned long hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { struct hstate *h = hstate_file(filp); struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; /* This should only ever run for 32-bit processes. */ BUG_ON(!test_thread_flag(TIF_32BIT)); info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = PAGE_SIZE; info.high_limit = mm->mmap_base; info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; addr = vm_unmapped_area(&info); /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. 
*/ if (addr & ~PAGE_MASK) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = STACK_TOP32; addr = vm_unmapped_area(&info); } return addr; } unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long task_size = TASK_SIZE; if (test_thread_flag(TIF_32BIT)) task_size = STACK_TOP32; if (len & ~huge_page_mask(h)) return -EINVAL; if (len > task_size) return -ENOMEM; if (flags & MAP_FIXED) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (task_size - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) return hugetlb_get_unmapped_area_bottomup(file, addr, len, pgoff, flags); else return hugetlb_get_unmapped_area_topdown(file, addr, len, pgoff, flags); } static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { return entry; } static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) { unsigned long hugepage_size = _PAGE_SZ4MB_4V; pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { case HPAGE_16GB_SHIFT: hugepage_size = _PAGE_SZ16GB_4V; pte_val(entry) |= _PAGE_PUD_HUGE; break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; break; case HPAGE_256MB_SHIFT: hugepage_size = _PAGE_SZ256MB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; break; case HPAGE_SHIFT: pte_val(entry) |= _PAGE_PMD_HUGE; break; case HPAGE_64K_SHIFT: hugepage_size = _PAGE_SZ64K_4V; break; default: WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift); } pte_val(entry) = pte_val(entry) | hugepage_size; return entry; } static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift) { if (tlb_type == hypervisor) return sun4v_hugepage_shift_to_tte(entry, shift); else return sun4u_hugepage_shift_to_tte(entry, shift); } pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { pte_t pte; entry = pte_mkhuge(entry); pte = hugepage_shift_to_tte(entry, shift); #ifdef CONFIG_SPARC64 /* If this vma has ADI enabled on it, turn on TTE.mcd */ if (flags & VM_SPARC_ADI) return pte_mkmcd(pte); else return pte_mknotmcd(pte); #else return pte; #endif } static unsigned int sun4v_huge_tte_to_shift(pte_t entry) { unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V; unsigned int shift; switch (tte_szbits) { case _PAGE_SZ16GB_4V: shift = HPAGE_16GB_SHIFT; break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; case _PAGE_SZ256MB_4V: shift = HPAGE_256MB_SHIFT; break; case _PAGE_SZ4MB_4V: shift = REAL_HPAGE_SHIFT; break; case _PAGE_SZ64K_4V: shift = HPAGE_64K_SHIFT; break; default: shift = PAGE_SHIFT; break; } return shift; } static unsigned int sun4u_huge_tte_to_shift(pte_t entry) { unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U; unsigned int shift; switch (tte_szbits) { case _PAGE_SZ256MB_4U: shift = HPAGE_256MB_SHIFT; break; case _PAGE_SZ4MB_4U: shift = REAL_HPAGE_SHIFT; break; case _PAGE_SZ64K_4U: shift = HPAGE_64K_SHIFT; break; default: shift = PAGE_SHIFT; break; } return shift; } static unsigned long tte_to_shift(pte_t entry) { if (tlb_type == hypervisor) return sun4v_huge_tte_to_shift(entry); return sun4u_huge_tte_to_shift(entry); } static unsigned int huge_tte_to_shift(pte_t entry) { unsigned long 
shift = tte_to_shift(entry); if (shift == PAGE_SHIFT) WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n", pte_val(entry)); return shift; } static unsigned long huge_tte_to_size(pte_t pte) { unsigned long size = 1UL << huge_tte_to_shift(pte); if (size == REAL_HPAGE_SIZE) size = HPAGE_SIZE; return size; } unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); } unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); p4d = p4d_offset(pgd, addr); pud = pud_alloc(mm, p4d, addr); if (!pud) return NULL; if (sz >= PUD_SIZE) return (pte_t *)pud; pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; if (sz >= PMD_SIZE) return (pte_t *)pmd; return pte_alloc_huge(mm, pmd, addr); } pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgd = pgd_offset(mm, addr); if (pgd_none(*pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return NULL; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return NULL; if (is_hugetlb_pud(*pud)) return (pte_t *)pud; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; if (is_hugetlb_pmd(*pmd)) return (pte_t *)pmd; return pte_offset_huge(pmd, addr); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { unsigned int nptes, orig_shift, shift; unsigned long i, size; pte_t orig; size = huge_tte_to_size(entry); shift = PAGE_SHIFT; if (size >= PUD_SIZE) shift = PUD_SHIFT; else if (size >= PMD_SIZE) shift = PMD_SHIFT; else shift = PAGE_SHIFT; nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) mm->context.hugetlb_pte_count += nptes; addr &= ~(size - 1); orig = *ptep; orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig); for (i = 0; i < nptes; i++) ptep[i] = __pte(pte_val(entry) + (i << shift)); maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0, orig_shift); } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); shift = PAGE_SHIFT; if (size >= PUD_SIZE) shift = PUD_SHIFT; else if (size >= PMD_SIZE) shift = PMD_SHIFT; else shift = PAGE_SHIFT; nptes = size >> shift; orig_shift = pte_none(entry) ? 
PAGE_SHIFT : huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; addr &= ~(size - 1); for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, orig_shift); return entry; } int pmd_huge(pmd_t pmd) { return !pmd_none(pmd) && (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID; } int pud_huge(pud_t pud) { return !pud_none(pud) && (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { pgtable_t token = pmd_pgtable(*pmd); pmd_clear(pmd); pte_free_tlb(tlb, token, addr); mm_dec_nr_ptes(tlb->mm); } static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pmd_t *pmd; unsigned long next; unsigned long start; start = addr; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) continue; if (is_hugetlb_pmd(*pmd)) pmd_clear(pmd); else hugetlb_free_pte_range(tlb, pmd, addr); } while (pmd++, addr = next, addr != end); start &= PUD_MASK; if (start < floor) return; if (ceiling) { ceiling &= PUD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); mm_dec_nr_pmds(tlb->mm); } static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pud_t *pud; unsigned long next; unsigned long start; start = addr; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; if (is_hugetlb_pud(*pud)) pud_clear(pud); else hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; if (start < floor) return; if (ceiling) { ceiling &= PGDIR_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) return; pud = pud_offset(p4d, start); p4d_clear(p4d); pud_free_tlb(tlb, pud, start); mm_dec_nr_puds(tlb->mm); } void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { pgd_t *pgd; p4d_t *p4d; unsigned long next; addr &= PMD_MASK; if (addr < floor) { addr += PMD_SIZE; if (!addr) return; } if (ceiling) { ceiling &= PMD_MASK; if (!ceiling) return; } if (end - 1 > ceiling - 1) end -= PMD_SIZE; if (addr > end - 1) return; pgd = pgd_offset(tlb->mm, addr); p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling); } while (p4d++, addr = next, addr != end); }
linux-master
arch/sparc/mm/hugetlbpage.c
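set_huge_pte_at() and huge_ptep_get_and_clear() work out, from the hugepage size alone, which page-table level the mapping lives at and how many consecutive entries at that level must carry the TTE (nptes = size >> shift). A standalone sketch of that arithmetic; the shift values are the usual sparc64 ones for 8K base pages (13/23/33) and are hard-coded here as an assumption rather than taken from the headers:

#include <stdio.h>

#define PAGE_SHIFT_ 13	/* assumed sparc64 values, not the real macros */
#define PMD_SHIFT_  23
#define PUD_SHIFT_  33

int main(void)
{
	/* 64K, 8M (HPAGE), 256M, 2G and 16G hugepages. */
	unsigned int shifts[] = { 16, 23, 28, 31, 34 };
	unsigned int i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		unsigned long long size = 1ULL << shifts[i];
		unsigned int level_shift;
		const char *level;

		if (size >= (1ULL << PUD_SHIFT_)) {
			level_shift = PUD_SHIFT_;
			level = "PUD";
		} else if (size >= (1ULL << PMD_SHIFT_)) {
			level_shift = PMD_SHIFT_;
			level = "PMD";
		} else {
			level_shift = PAGE_SHIFT_;
			level = "PTE";
		}
		printf("%11llu bytes -> %s level, %llu entries\n",
		       size, level, size >> level_shift);
	}
	return 0;
}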
// SPDX-License-Identifier: GPL-2.0 /* arch/sparc64/mm/tsb.c * * Copyright (C) 2006, 2008 David S. Miller <[email protected]> */ #include <linux/kernel.h> #include <linux/preempt.h> #include <linux/slab.h> #include <linux/mm_types.h> #include <linux/pgtable.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/tsb.h> #include <asm/tlb.h> #include <asm/oplib.h> extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries) { vaddr >>= hash_shift; return vaddr & (nentries - 1); } static inline int tag_compare(unsigned long tag, unsigned long vaddr) { return (tag == (vaddr >> 22)); } static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end) { unsigned long idx; for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) { struct tsb *ent = &swapper_tsb[idx]; unsigned long match = idx << 13; match |= (ent->tag << 22); if (match >= start && match < end) ent->tag = (1UL << TSB_TAG_INVALID_BIT); } } /* TSB flushes need only occur on the processor initiating the address * space modification, not on each cpu the address space has run on. * Only the TLB flush needs that treatment. */ void flush_tsb_kernel_range(unsigned long start, unsigned long end) { unsigned long v; if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES) return flush_tsb_kernel_range_scan(start, end); for (v = start; v < end; v += PAGE_SIZE) { unsigned long hash = tsb_hash(v, PAGE_SHIFT, KERNEL_TSB_NENTRIES); struct tsb *ent = &swapper_tsb[hash]; if (tag_compare(ent->tag, v)) ent->tag = (1UL << TSB_TAG_INVALID_BIT); } } static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, unsigned long hash_shift, unsigned long nentries) { unsigned long tag, ent, hash; v &= ~0x1UL; hash = tsb_hash(v, hash_shift, nentries); ent = tsb + (hash * sizeof(struct tsb)); tag = (v >> 22UL); tsb_flush(ent, tag); } static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, unsigned long tsb, unsigned long nentries) { unsigned long i; for (i = 0; i < tb->tlb_nr; i++) __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v, unsigned long hash_shift, unsigned long nentries, unsigned int hugepage_shift) { unsigned int hpage_entries; unsigned int i; hpage_entries = 1 << (hugepage_shift - hash_shift); for (i = 0; i < hpage_entries; i++) __flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift, nentries); } static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, unsigned long tsb, unsigned long nentries, unsigned int hugepage_shift) { unsigned long i; for (i = 0; i < tb->tlb_nr; i++) __flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries, hugepage_shift); } #endif void flush_tsb_user(struct tlb_batch *tb) { struct mm_struct *mm = tb->mm; unsigned long nentries, base, flags; spin_lock_irqsave(&mm->context.lock, flags); if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); if (tb->hugepage_shift == PAGE_SHIFT) __flush_tsb_one(tb, PAGE_SHIFT, base, nentries); #if defined(CONFIG_HUGETLB_PAGE) else __flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries, tb->hugepage_shift); #endif } #if 
defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); __flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries, tb->hugepage_shift); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, unsigned int hugepage_shift) { unsigned long nentries, base, flags; spin_lock_irqsave(&mm->context.lock, flags); if (hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); if (hugepage_shift == PAGE_SHIFT) __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); #if defined(CONFIG_HUGETLB_PAGE) else __flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries, hugepage_shift); #endif } #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(base); __flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT, nentries, hugepage_shift); } #endif spin_unlock_irqrestore(&mm->context.lock, flags); } #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) #define HV_PGSZ_IDX_HUGE HV_PGSZ_IDX_4MB #define HV_PGSZ_MASK_HUGE HV_PGSZ_MASK_4MB #endif static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes) { unsigned long tsb_reg, base, tsb_paddr; unsigned long page_sz, tte; mm->context.tsb_block[tsb_idx].tsb_nentries = tsb_bytes / sizeof(struct tsb); switch (tsb_idx) { case MM_TSB_BASE: base = TSBMAP_8K_BASE; break; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) case MM_TSB_HUGE: base = TSBMAP_4M_BASE; break; #endif default: BUG(); } tte = pgprot_val(PAGE_KERNEL_LOCKED); tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); /* Use the smallest page size that can map the whole TSB * in one TLB entry. */ switch (tsb_bytes) { case 8192 << 0: tsb_reg = 0x0UL; #ifdef DCACHE_ALIASING_POSSIBLE base += (tsb_paddr & 8192); #endif page_sz = 8192; break; case 8192 << 1: tsb_reg = 0x1UL; page_sz = 64 * 1024; break; case 8192 << 2: tsb_reg = 0x2UL; page_sz = 64 * 1024; break; case 8192 << 3: tsb_reg = 0x3UL; page_sz = 64 * 1024; break; case 8192 << 4: tsb_reg = 0x4UL; page_sz = 512 * 1024; break; case 8192 << 5: tsb_reg = 0x5UL; page_sz = 512 * 1024; break; case 8192 << 6: tsb_reg = 0x6UL; page_sz = 512 * 1024; break; case 8192 << 7: tsb_reg = 0x7UL; page_sz = 4 * 1024 * 1024; break; default: printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", current->comm, current->pid, tsb_bytes); BUG(); } tte |= pte_sz_bits(page_sz); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { /* Physical mapping, no locked TLB entry for TSB. 
*/ tsb_reg |= tsb_paddr; mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0; mm->context.tsb_block[tsb_idx].tsb_map_pte = 0; } else { tsb_reg |= base; tsb_reg |= (tsb_paddr & (page_sz - 1UL)); tte |= (tsb_paddr & ~(page_sz - 1UL)); mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg; mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base; mm->context.tsb_block[tsb_idx].tsb_map_pte = tte; } /* Setup the Hypervisor TSB descriptor. */ if (tlb_type == hypervisor) { struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx]; switch (tsb_idx) { case MM_TSB_BASE: hp->pgsz_idx = HV_PGSZ_IDX_BASE; break; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) case MM_TSB_HUGE: hp->pgsz_idx = HV_PGSZ_IDX_HUGE; break; #endif default: BUG(); } hp->assoc = 1; hp->num_ttes = tsb_bytes / 16; hp->ctx_idx = 0; switch (tsb_idx) { case MM_TSB_BASE: hp->pgsz_mask = HV_PGSZ_MASK_BASE; break; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) case MM_TSB_HUGE: hp->pgsz_mask = HV_PGSZ_MASK_HUGE; break; #endif default: BUG(); } hp->tsb_base = tsb_paddr; hp->resv = 0; } } struct kmem_cache *pgtable_cache __read_mostly; static struct kmem_cache *tsb_caches[8] __read_mostly; static const char *tsb_cache_names[8] = { "tsb_8KB", "tsb_16KB", "tsb_32KB", "tsb_64KB", "tsb_128KB", "tsb_256KB", "tsb_512KB", "tsb_1MB", }; void __init pgtable_cache_init(void) { unsigned long i; pgtable_cache = kmem_cache_create("pgtable_cache", PAGE_SIZE, PAGE_SIZE, 0, _clear_page); if (!pgtable_cache) { prom_printf("pgtable_cache_init(): Could not create!\n"); prom_halt(); } for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) { unsigned long size = 8192 << i; const char *name = tsb_cache_names[i]; tsb_caches[i] = kmem_cache_create(name, size, size, 0, NULL); if (!tsb_caches[i]) { prom_printf("Could not create %s cache\n", name); prom_halt(); } } } int sysctl_tsb_ratio = -2; static unsigned long tsb_size_to_rss_limit(unsigned long new_size) { unsigned long num_ents = (new_size / sizeof(struct tsb)); if (sysctl_tsb_ratio < 0) return num_ents - (num_ents >> -sysctl_tsb_ratio); else return num_ents + (num_ents >> sysctl_tsb_ratio); } /* When the RSS of an address space exceeds tsb_rss_limit for a TSB, * do_sparc64_fault() invokes this routine to try and grow it. * * When we reach the maximum TSB size supported, we stick ~0UL into * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault() * will not trigger any longer. * * The TSB can be anywhere from 8K to 1MB in size, in increasing powers * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB * must be 512K aligned. It also must be physically contiguous, so we * cannot use vmalloc(). * * The idea here is to grow the TSB when the RSS of the process approaches * the number of entries that the current TSB can hold at once. Currently, * we trigger when the RSS hits 3/4 of the TSB capacity. 
*/ void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss) { unsigned long max_tsb_size = 1 * 1024 * 1024; unsigned long new_size, old_size, flags; struct tsb *old_tsb, *new_tsb; unsigned long new_cache_index, old_cache_index; unsigned long new_rss_limit; gfp_t gfp_flags; if (max_tsb_size > PAGE_SIZE << MAX_ORDER) max_tsb_size = PAGE_SIZE << MAX_ORDER; new_cache_index = 0; for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { new_rss_limit = tsb_size_to_rss_limit(new_size); if (new_rss_limit > rss) break; new_cache_index++; } if (new_size == max_tsb_size) new_rss_limit = ~0UL; retry_tsb_alloc: gfp_flags = GFP_KERNEL; if (new_size > (PAGE_SIZE * 2)) gfp_flags |= __GFP_NOWARN | __GFP_NORETRY; new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index], gfp_flags, numa_node_id()); if (unlikely(!new_tsb)) { /* Not being able to fork due to a high-order TSB * allocation failure is very bad behavior. Just back * down to a 0-order allocation and force no TSB * growing for this address space. */ if (mm->context.tsb_block[tsb_index].tsb == NULL && new_cache_index > 0) { new_cache_index = 0; new_size = 8192; new_rss_limit = ~0UL; goto retry_tsb_alloc; } /* If we failed on a TSB grow, we are under serious * memory pressure so don't try to grow any more. */ if (mm->context.tsb_block[tsb_index].tsb != NULL) mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL; return; } /* Mark all tags as invalid. */ tsb_init(new_tsb, new_size); /* Ok, we are about to commit the changes. If we are * growing an existing TSB the locking is very tricky, * so WATCH OUT! * * We have to hold mm->context.lock while committing to the * new TSB, this synchronizes us with processors in * flush_tsb_user() and switch_mm() for this address space. * * But even with that lock held, processors run asynchronously * accessing the old TSB via TLB miss handling. This is OK * because those actions are just propagating state from the * Linux page tables into the TSB, page table mappings are not * being changed. If a real fault occurs, the processor will * synchronize with us when it hits flush_tsb_user(), this is * also true for the case where vmscan is modifying the page * tables. The only thing we need to be careful with is to * skip any locked TSB entries during copy_tsb(). * * When we finish committing to the new TSB, we have to drop * the lock and ask all other cpus running this address space * to run tsb_context_switch() to see the new TSB table. */ spin_lock_irqsave(&mm->context.lock, flags); old_tsb = mm->context.tsb_block[tsb_index].tsb; old_cache_index = (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL); old_size = (mm->context.tsb_block[tsb_index].tsb_nentries * sizeof(struct tsb)); /* Handle multiple threads trying to grow the TSB at the same time. * One will get in here first, and bump the size and the RSS limit. * The others will get in here next and hit this check. 
*/ if (unlikely(old_tsb && (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) { spin_unlock_irqrestore(&mm->context.lock, flags); kmem_cache_free(tsb_caches[new_cache_index], new_tsb); return; } mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit; if (old_tsb) { extern void copy_tsb(unsigned long old_tsb_base, unsigned long old_tsb_size, unsigned long new_tsb_base, unsigned long new_tsb_size, unsigned long page_size_shift); unsigned long old_tsb_base = (unsigned long) old_tsb; unsigned long new_tsb_base = (unsigned long) new_tsb; if (tlb_type == cheetah_plus || tlb_type == hypervisor) { old_tsb_base = __pa(old_tsb_base); new_tsb_base = __pa(new_tsb_base); } copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, tsb_index == MM_TSB_BASE ? PAGE_SHIFT : REAL_HPAGE_SHIFT); } mm->context.tsb_block[tsb_index].tsb = new_tsb; setup_tsb_params(mm, tsb_index, new_size); spin_unlock_irqrestore(&mm->context.lock, flags); /* If old_tsb is NULL, we're being invoked for the first time * from init_new_context(). */ if (old_tsb) { /* Reload it on the local cpu. */ tsb_context_switch(mm); /* Now force other processors to do the same. */ preempt_disable(); smp_tsb_sync(mm); preempt_enable(); /* Now it is safe to free the old tsb. */ kmem_cache_free(tsb_caches[old_cache_index], old_tsb); } } int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { unsigned long mm_rss = get_mm_rss(mm); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) unsigned long saved_hugetlb_pte_count; unsigned long saved_thp_pte_count; #endif unsigned int i; spin_lock_init(&mm->context.lock); mm->context.sparc64_ctx_val = 0UL; mm->context.tag_store = NULL; spin_lock_init(&mm->context.tag_lock); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) /* We reset them to zero because the fork() page copying * will re-increment the counters as the parent PTEs are * copied into the child address space. */ saved_hugetlb_pte_count = mm->context.hugetlb_pte_count; saved_thp_pte_count = mm->context.thp_pte_count; mm->context.hugetlb_pte_count = 0; mm->context.thp_pte_count = 0; mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE); #endif /* copy_mm() copies over the parent's mm_struct before calling * us, so we need to zero out the TSB pointer or else tsb_grow() * will be confused and think there is an older TSB to free up. */ for (i = 0; i < MM_NUM_TSBS; i++) mm->context.tsb_block[i].tsb = NULL; /* If this is fork, inherit the parent's TSB size. We would * grow it to that size on the first page fault anyways. 
*/ tsb_grow(mm, MM_TSB_BASE, mm_rss); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count)) tsb_grow(mm, MM_TSB_HUGE, (saved_hugetlb_pte_count + saved_thp_pte_count) * REAL_HPAGE_PER_HPAGE); #endif if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb)) return -ENOMEM; return 0; } static void tsb_destroy_one(struct tsb_config *tp) { unsigned long cache_index; if (!tp->tsb) return; cache_index = tp->tsb_reg_val & 0x7UL; kmem_cache_free(tsb_caches[cache_index], tp->tsb); tp->tsb = NULL; tp->tsb_reg_val = 0UL; } void destroy_context(struct mm_struct *mm) { unsigned long flags, i; for (i = 0; i < MM_NUM_TSBS; i++) tsb_destroy_one(&mm->context.tsb_block[i]); spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { unsigned long nr = CTX_NRBITS(mm->context); mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); } spin_unlock_irqrestore(&ctx_alloc_lock, flags); /* If ADI tag storage was allocated for this task, free it */ if (mm->context.tag_store) { tag_storage_desc_t *tag_desc; unsigned long max_desc; unsigned char *tags; tag_desc = mm->context.tag_store; max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t); for (i = 0; i < max_desc; i++) { tags = tag_desc->tags; tag_desc->tags = NULL; kfree(tags); tag_desc++; } kfree(mm->context.tag_store); mm->context.tag_store = NULL; } }
linux-master
arch/sparc/mm/tsb.c
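tsb_grow() sizes the new TSB from the current RSS: with the default sysctl_tsb_ratio of -2 a TSB's RSS limit is 3/4 of its entry count, and the loop picks the smallest power-of-two size between 8K and 1MB whose limit still exceeds the RSS (struct tsb entries are 16 bytes). A standalone sketch of that sizing decision:

#include <stdio.h>

#define TSB_ENTRY_SIZE 16
static int tsb_ratio = -2;		/* default sysctl_tsb_ratio */

/* RSS limit of a TSB of tsb_bytes, as tsb_size_to_rss_limit() computes it. */
static unsigned long rss_limit(unsigned long tsb_bytes)
{
	unsigned long ents = tsb_bytes / TSB_ENTRY_SIZE;

	if (tsb_ratio < 0)
		return ents - (ents >> -tsb_ratio);
	return ents + (ents >> tsb_ratio);
}

/* Smallest TSB whose limit exceeds the RSS, mirroring the loop in tsb_grow(). */
static unsigned long pick_tsb_size(unsigned long rss)
{
	unsigned long size;

	for (size = 8192; size < 1024 * 1024; size <<= 1)
		if (rss_limit(size) > rss)
			break;
	return size;
}

int main(void)
{
	unsigned long rss[] = { 100, 383, 384, 5000, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(rss) / sizeof(rss[0]); i++)
		printf("rss %6lu -> %4luK TSB (limit %lu)\n", rss[i],
		       pick_tsb_size(rss[i]) / 1024,
		       rss_limit(pick_tsb_size(rss[i])));
	return 0;
}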
// SPDX-License-Identifier: GPL-2.0 /* * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc. * * Copyright (C) 1996, 2008 David S. Miller ([email protected]) * Copyright (C) 1997, 1999 Jakub Jelinek ([email protected]) */ #include <asm/head.h> #include <linux/string.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/extable.h> #include <linux/init.h> #include <linux/perf_event.h> #include <linux/interrupt.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/percpu.h> #include <linux/context_tracking.h> #include <linux/uaccess.h> #include <asm/page.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/asi.h> #include <asm/lsu.h> #include <asm/sections.h> #include <asm/mmu_context.h> #include <asm/setup.h> int show_unhandled_signals = 1; static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs) { if ((unsigned long) address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL " "pointer dereference\n"); } else { printk(KERN_ALERT "Unable to handle kernel paging request " "at virtual address %016lx\n", (unsigned long)address); } printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n", (tsk->mm ? CTX_HWBITS(tsk->mm->context) : CTX_HWBITS(tsk->active_mm->context))); printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n", (tsk->mm ? (unsigned long) tsk->mm->pgd : (unsigned long) tsk->active_mm->pgd)); die_if_kernel("Oops", regs); } static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) { printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); dump_stack(); unhandled_fault(regs->tpc, current, regs); } /* * We now make sure that mmap_lock is held in all paths that call * this. Additionally, to prevent kswapd from ripping ptes from * under us, raise interrupts around the time that we look at the * pte, kswapd will have to wait to get his smp ipi response from * us. vmtruncate likewise. This saves us having to get pte lock. */ static unsigned int get_user_insn(unsigned long tpc) { pgd_t *pgdp = pgd_offset(current->mm, tpc); p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep, pte; unsigned long pa; u32 insn = 0; if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) goto out; p4dp = p4d_offset(pgdp, tpc); if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp))) goto out; pudp = pud_offset(p4dp, tpc); if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) goto out; /* This disables preemption for us as well. */ local_irq_disable(); pmdp = pmd_offset(pudp, tpc); again: if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) goto out_irq_enable; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (is_hugetlb_pmd(*pmdp)) { pa = pmd_pfn(*pmdp) << PAGE_SHIFT; pa += tpc & ~HPAGE_MASK; /* Use phys bypass so we don't pollute dtlb/dcache. */ __asm__ __volatile__("lduwa [%1] %2, %0" : "=r" (insn) : "r" (pa), "i" (ASI_PHYS_USE_EC)); } else #endif { ptep = pte_offset_map(pmdp, tpc); if (!ptep) goto again; pte = *ptep; if (pte_present(pte)) { pa = (pte_pfn(pte) << PAGE_SHIFT); pa += (tpc & ~PAGE_MASK); /* Use phys bypass so we don't pollute dtlb/dcache. 
*/ __asm__ __volatile__("lduwa [%1] %2, %0" : "=r" (insn) : "r" (pa), "i" (ASI_PHYS_USE_EC)); } pte_unmap(ptep); } out_irq_enable: local_irq_enable(); out: return insn; } static inline void show_signal_msg(struct pt_regs *regs, int sig, int code, unsigned long address, struct task_struct *tsk) { if (!unhandled_signal(tsk, sig)) return; if (!printk_ratelimit()) return; printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x", task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), address, (void *)regs->tpc, (void *)regs->u_regs[UREG_I7], (void *)regs->u_regs[UREG_FP], code); print_vma_addr(KERN_CONT " in ", regs->tpc); printk(KERN_CONT "\n"); } static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsigned long fault_addr, unsigned int insn, int fault_code) { unsigned long addr; if (fault_code & FAULT_CODE_ITLB) { addr = regs->tpc; } else { /* If we were able to probe the faulting instruction, use it * to compute a precise fault address. Otherwise use the fault * time provided address which may only have page granularity. */ if (insn) addr = compute_effective_address(regs, insn, 0); else addr = fault_addr; } if (unlikely(show_unhandled_signals)) show_signal_msg(regs, sig, code, addr, current); force_sig_fault(sig, code, (void __user *) addr); } static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) { if (!insn) { if (!regs->tpc || (regs->tpc & 0x3)) return 0; if (regs->tstate & TSTATE_PRIV) { insn = *(unsigned int *) regs->tpc; } else { insn = get_user_insn(regs->tpc); } } return insn; } static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, unsigned int insn, unsigned long address) { unsigned char asi = ASI_P; if ((!insn) && (regs->tstate & TSTATE_PRIV)) goto cannot_handle; /* If user insn could be read (thus insn is zero), that * is fine. We will just gun down the process with a signal * in that case. */ if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) && (insn & 0xc0800000) == 0xc0800000) { if (insn & 0x2000) asi = (regs->tstate >> 24); else asi = (insn >> 5); if ((asi & 0xf2) == 0x82) { if (insn & 0x1000000) { handle_ldf_stq(insn, regs); } else { /* This was a non-faulting load. Just clear the * destination register(s) and continue with the next * instruction. -jj */ handle_ld_nf(insn, regs); } return; } } /* Is this in ex_table? */ if (regs->tstate & TSTATE_PRIV) { const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } } else { /* The si_code was set to make clear whether * this was a SEGV_MAPERR or SEGV_ACCERR fault. 
*/ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); return; } cannot_handle: unhandled_fault (address, current, regs); } static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) { static int times; if (times++ < 10) printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports " "64-bit TPC [%lx]\n", current->comm, current->pid, regs->tpc); show_regs(regs); } asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned int insn = 0; int si_code, fault_code; vm_fault_t fault; unsigned long address, mm_rss; unsigned int flags = FAULT_FLAG_DEFAULT; fault_code = get_thread_fault_code(); if (kprobe_page_fault(regs, 0)) goto exit_exception; si_code = SEGV_MAPERR; address = current_thread_info()->fault_address; if ((fault_code & FAULT_CODE_ITLB) && (fault_code & FAULT_CODE_DTLB)) BUG(); if (test_thread_flag(TIF_32BIT)) { if (!(regs->tstate & TSTATE_PRIV)) { if (unlikely((regs->tpc >> 32) != 0)) { bogus_32bit_fault_tpc(regs); goto intr_or_no_mm; } } if (unlikely((address >> 32) != 0)) goto intr_or_no_mm; } if (regs->tstate & TSTATE_PRIV) { unsigned long tpc = regs->tpc; /* Sanity check the PC. */ if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) || (tpc >= MODULES_VADDR && tpc < MODULES_END)) { /* Valid, no problems... */ } else { bad_kernel_pc(regs, address); goto exit_exception; } } else flags |= FAULT_FLAG_USER; /* * If we're in an interrupt or have no user * context, we must not take the fault.. */ if (faulthandler_disabled() || !mm) goto intr_or_no_mm; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); if (!mmap_read_trylock(mm)) { if ((regs->tstate & TSTATE_PRIV) && !search_exception_tables(regs->tpc)) { insn = get_fault_insn(regs, insn); goto handle_kernel_fault; } retry: mmap_read_lock(mm); } if (fault_code & FAULT_CODE_BAD_RA) goto do_sigbus; vma = find_vma(mm, address); if (!vma) goto bad_area; /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the * instruction to try and figure this out. It's an optimization * so it's ok if we can't do this. * * Special hack, window spill/fill knows the exact fault type. */ if (((fault_code & (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) && (vma->vm_flags & VM_WRITE) != 0) { insn = get_fault_insn(regs, 0); if (!insn) goto continue_fault; /* All loads, stores and atomics have bits 30 and 31 both set * in the instruction. Bit 21 is set in all stores, but we * have to avoid prefetches which also have bit 21 set. */ if ((insn & 0xc0200000) == 0xc0200000 && (insn & 0x01780000) != 0x01680000) { /* Don't bother updating thread struct value, * because update_mmu_cache only cares which tlb * the access came from. */ fault_code |= FAULT_CODE_WRITE; } } continue_fault: if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (!(fault_code & FAULT_CODE_WRITE)) { /* Non-faulting loads shouldn't expand stack. */ insn = get_fault_insn(regs, insn); if ((insn & 0xc0800000) == 0xc0800000) { unsigned char asi; if (insn & 0x2000) asi = (regs->tstate >> 24); else asi = (insn >> 5); if ((asi & 0xf2) == 0x82) goto bad_area; } } vma = expand_stack(mm, address); if (!vma) goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ good_area: si_code = SEGV_ACCERR; /* If we took a ITLB miss on a non-executable page, catch * that here. */ if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) { WARN(address != regs->tpc, "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc); WARN_ON(regs->tstate & TSTATE_PRIV); goto bad_area; } if (fault_code & FAULT_CODE_WRITE) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; /* Spitfire has an icache which does not snoop * processor stores. Later processors do... */ if (tlb_type == spitfire && (vma->vm_flags & VM_EXEC) != 0 && vma->vm_file != NULL) set_thread_fault_code(fault_code | FAULT_CODE_BLKCOMMIT); flags |= FAULT_FLAG_WRITE; } else { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (regs->tstate & TSTATE_PRIV) { insn = get_fault_insn(regs, insn); goto handle_kernel_fault; } goto exit_exception; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) goto lock_released; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); lock_released: mm_rss = get_mm_rss(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE)); #endif if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit)) tsb_grow(mm, MM_TSB_BASE, mm_rss); #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count; mm_rss *= REAL_HPAGE_PER_HPAGE; if (unlikely(mm_rss > mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { if (mm->context.tsb_block[MM_TSB_HUGE].tsb) tsb_grow(mm, MM_TSB_HUGE, mm_rss); else hugetlb_setup(regs); } #endif exit_exception: exception_exit(prev_state); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: insn = get_fault_insn(regs, insn); handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); goto exit_exception; /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: insn = get_fault_insn(regs, insn); mmap_read_unlock(mm); if (!(regs->tstate & TSTATE_PRIV)) { pagefault_out_of_memory(); goto exit_exception; } goto handle_kernel_fault; intr_or_no_mm: insn = get_fault_insn(regs, 0); goto handle_kernel_fault; do_sigbus: insn = get_fault_insn(regs, insn); mmap_read_unlock(mm); /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code); /* Kernel mode? Handle exceptions or die */ if (regs->tstate & TSTATE_PRIV) goto handle_kernel_fault; }
linux-master
arch/sparc/mm/fault_64.c
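In do_sparc64_fault() above, a pure DTLB miss does not indicate whether the access was a write, so the handler probes the faulting instruction: loads, stores and atomics have bits 30 and 31 set, stores also set bit 21, and a second mask rejects prefetch encodings that share bit 21. The snippet below is a standalone restatement of that predicate for illustration only; the mask constants are copied from the handler, but the sample word fed to it is synthetic rather than a real SPARC opcode.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same mask test as the write-detection branch in do_sparc64_fault():
 * bits 31:30 identify a memory access, bit 21 a store, and the second
 * compare filters out prefetches, which also carry bit 21.
 */
static bool insn_looks_like_write(uint32_t insn)
{
	return (insn & 0xc0200000) == 0xc0200000 &&
	       (insn & 0x01780000) != 0x01680000;
}

int main(void)
{
	uint32_t sample = 0xc0200000;	/* synthetic word with only the tested bits set */

	printf("treated as a write: %s\n",
	       insn_looks_like_write(sample) ? "yes" : "no");
	return 0;
}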
// SPDX-License-Identifier: GPL-2.0 /* * arch/sparc64/mm/init.c * * Copyright (C) 1996-1999 David S. Miller ([email protected]) * Copyright (C) 1997-1999 Jakub Jelinek ([email protected]) */ #include <linux/extable.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/poison.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/kprobes.h> #include <linux/cache.h> #include <linux/sort.h> #include <linux/ioport.h> #include <linux/percpu.h> #include <linux/mmzone.h> #include <linux/gfp.h> #include <linux/bootmem_info.h> #include <asm/head.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/oplib.h> #include <asm/iommu.h> #include <asm/io.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/starfire.h> #include <asm/tlb.h> #include <asm/spitfire.h> #include <asm/sections.h> #include <asm/tsb.h> #include <asm/hypervisor.h> #include <asm/prom.h> #include <asm/mdesc.h> #include <asm/cpudata.h> #include <asm/setup.h> #include <asm/irq.h> #include "init_64.h" unsigned long kern_linear_pte_xor[4] __read_mostly; static unsigned long page_cache4v_flag; /* A bitmap, two bits for every 256MB of physical memory. These two * bits determine what page size we use for kernel linear * translations. They form an index into kern_linear_pte_xor[]. The * value in the indexed slot is XOR'd with the TLB miss virtual * address to form the resulting TTE. The mapping is: * * 0 ==> 4MB * 1 ==> 256MB * 2 ==> 2GB * 3 ==> 16GB * * All sun4v chips support 256MB pages. Only SPARC-T4 and later * support 2GB pages, and hopefully future cpus will support the 16GB * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there * if these larger page sizes are not supported by the cpu. * * It would be nice to determine this from the machine description * 'cpu' properties, but we need to have this table setup before the * MDESC is initialized. */ #ifndef CONFIG_DEBUG_PAGEALLOC /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings. * Space is allocated for this right after the trap table in * arch/sparc64/kernel/head.S */ extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; #endif extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static unsigned long cpu_pgsz_mask; #define MAX_BANKS 1024 static struct linux_prom64_registers pavail[MAX_BANKS]; static int pavail_ents; u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES]; static int cmp_p64(const void *a, const void *b) { const struct linux_prom64_registers *x = a, *y = b; if (x->phys_addr > y->phys_addr) return 1; if (x->phys_addr < y->phys_addr) return -1; return 0; } static void __init read_obp_memory(const char *property, struct linux_prom64_registers *regs, int *num_ents) { phandle node = prom_finddevice("/memory"); int prop_size = prom_getproplen(node, property); int ents, ret, i; ents = prop_size / sizeof(struct linux_prom64_registers); if (ents > MAX_BANKS) { prom_printf("The machine has more %s property entries than " "this kernel can support (%d).\n", property, MAX_BANKS); prom_halt(); } ret = prom_getproperty(node, property, (char *) regs, prop_size); if (ret == -1) { prom_printf("Couldn't get %s property from /memory.\n", property); prom_halt(); } /* Sanitize what we got from the firmware, by page aligning * everything. 
*/ for (i = 0; i < ents; i++) { unsigned long base, size; base = regs[i].phys_addr; size = regs[i].reg_size; size &= PAGE_MASK; if (base & ~PAGE_MASK) { unsigned long new_base = PAGE_ALIGN(base); size -= new_base - base; if ((long) size < 0L) size = 0UL; base = new_base; } if (size == 0UL) { /* If it is empty, simply get rid of it. * This simplifies the logic of the other * functions that process these arrays. */ memmove(&regs[i], &regs[i + 1], (ents - i - 1) * sizeof(regs[0])); i--; ents--; continue; } regs[i].phys_addr = base; regs[i].reg_size = size; } *num_ents = ents; sort(regs, ents, sizeof(struct linux_prom64_registers), cmp_p64, NULL); } /* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; /* Initial ramdisk setup */ extern unsigned long sparc_ramdisk_image64; extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; struct page *mem_map_zero __read_mostly; EXPORT_SYMBOL(mem_map_zero); unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; unsigned long sparc64_kern_pri_context __read_mostly; unsigned long sparc64_kern_pri_nuc_bits __read_mostly; unsigned long sparc64_kern_sec_context __read_mostly; int num_kernel_image_mappings; #ifdef CONFIG_DEBUG_DCFLUSH atomic_t dcpage_flushes = ATOMIC_INIT(0); #ifdef CONFIG_SMP atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif inline void flush_dcache_folio_impl(struct folio *folio) { unsigned int i, nr = folio_nr_pages(folio); BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif #ifdef DCACHE_ALIASING_POSSIBLE for (i = 0; i < nr; i++) __flush_dcache_page(folio_address(folio) + i * PAGE_SIZE, ((tlb_type == spitfire) && folio_flush_mapping(folio) != NULL)); #else if (folio_flush_mapping(folio) != NULL && tlb_type == spitfire) { for (i = 0; i < nr; i++) __flush_icache_page((pfn + i) * PAGE_SIZE); } #endif } #define PG_dcache_dirty PG_arch_1 #define PG_dcache_cpu_shift 32UL #define PG_dcache_cpu_mask \ ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL) #define dcache_dirty_cpu(folio) \ (((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) static inline void set_dcache_dirty(struct folio *folio, int this_cpu) { unsigned long mask = this_cpu; unsigned long non_cpu_bits; non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); __asm__ __volatile__("1:\n\t" "ldx [%2], %%g7\n\t" "and %%g7, %1, %%g1\n\t" "or %%g1, %0, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "bne,pn %%xcc, 1b\n\t" " nop" : /* no outputs */ : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags) : "g1", "g7"); } static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu) { unsigned long mask = (1UL << PG_dcache_dirty); __asm__ __volatile__("! 
test_and_clear_dcache_dirty\n" "1:\n\t" "ldx [%2], %%g7\n\t" "srlx %%g7, %4, %%g1\n\t" "and %%g1, %3, %%g1\n\t" "cmp %%g1, %0\n\t" "bne,pn %%icc, 2f\n\t" " andn %%g7, %1, %%g1\n\t" "casx [%2], %%g7, %%g1\n\t" "cmp %%g7, %%g1\n\t" "bne,pn %%xcc, 1b\n\t" " nop\n" "2:" : /* no outputs */ : "r" (cpu), "r" (mask), "r" (&folio->flags), "i" (PG_dcache_cpu_mask), "i" (PG_dcache_cpu_shift) : "g1", "g7"); } static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) { unsigned long tsb_addr = (unsigned long) ent; if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); } unsigned long _PAGE_ALL_SZ_BITS __read_mostly; static void flush_dcache(unsigned long pfn) { struct page *page; page = pfn_to_page(pfn); if (page) { struct folio *folio = page_folio(page); unsigned long pg_flags; pg_flags = folio->flags; if (pg_flags & (1UL << PG_dcache_dirty)) { int cpu = ((pg_flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask); int this_cpu = get_cpu(); /* This is just to optimize away some function calls * in the SMP case. */ if (cpu == this_cpu) flush_dcache_folio_impl(folio); else smp_flush_dcache_folio_impl(folio, cpu); clear_dcache_dirty_cpu(folio, cpu); put_cpu(); } } } /* mm->context.lock must be held */ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index, unsigned long tsb_hash_shift, unsigned long address, unsigned long tte) { struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb; unsigned long tag; if (unlikely(!tsb)) return; tsb += ((address >> tsb_hash_shift) & (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL)); tag = (address >> 22UL); tsb_insert(tsb, tag, tte); } #ifdef CONFIG_HUGETLB_PAGE static int __init hugetlbpage_init(void) { hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT); hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT); return 0; } arch_initcall(hugetlbpage_init); static void __init pud_huge_patch(void) { struct pud_huge_patch_entry *p; unsigned long addr; p = &__pud_huge_patch; addr = p->addr; *(unsigned int *)addr = p->insn; __asm__ __volatile__("flush %0" : : "r" (addr)); } bool __init arch_hugetlb_valid_size(unsigned long size) { unsigned int hugepage_shift = ilog2(size); unsigned short hv_pgsz_idx; unsigned int hv_pgsz_mask; switch (hugepage_shift) { case HPAGE_16GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_16GB; hv_pgsz_idx = HV_PGSZ_IDX_16GB; pud_huge_patch(); break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; break; case HPAGE_256MB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_256MB; hv_pgsz_idx = HV_PGSZ_IDX_256MB; break; case HPAGE_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_4MB; hv_pgsz_idx = HV_PGSZ_IDX_4MB; break; case HPAGE_64K_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_64K; hv_pgsz_idx = HV_PGSZ_IDX_64K; break; default: hv_pgsz_mask = 0; } if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) return false; return true; } #endif /* CONFIG_HUGETLB_PAGE */ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { struct mm_struct *mm; unsigned long flags; bool is_huge_tsb; pte_t pte = *ptep; unsigned int i; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); if (pfn_valid(pfn)) flush_dcache(pfn); } mm = vma->vm_mm; /* Don't insert a non-valid PTE into the TSB, we'll deadlock. 
*/ if (!pte_accessible(mm, pte)) return; spin_lock_irqsave(&mm->context.lock, flags); is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { unsigned long hugepage_size = PAGE_SIZE; if (is_vm_hugetlb_page(vma)) hugepage_size = huge_page_size(hstate_vma(vma)); if (hugepage_size >= PUD_SIZE) { unsigned long mask = 0x1ffc00000UL; /* Transfer bits [32:22] from address to resolve * at 4M granularity. */ pte_val(pte) &= ~mask; pte_val(pte) |= (address & mask); } else if (hugepage_size >= PMD_SIZE) { /* We are fabricating 8MB pages using 4MB * real hw pages. */ pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); } if (hugepage_size >= PMD_SIZE) { __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, address, pte_val(pte)); is_huge_tsb = true; } } #endif if (!is_huge_tsb) { for (i = 0; i < nr; i++) { __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); address += PAGE_SIZE; pte_val(pte) += PAGE_SIZE; } } spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_folio(struct folio *folio) { unsigned long pfn = folio_pfn(folio); struct address_space *mapping; int this_cpu; if (tlb_type == hypervisor) return; /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. */ if (is_zero_pfn(pfn)) return; this_cpu = get_cpu(); mapping = folio_flush_mapping(folio); if (mapping && !mapping_mapped(mapping)) { bool dirty = test_bit(PG_dcache_dirty, &folio->flags); if (dirty) { int dirty_cpu = dcache_dirty_cpu(folio); if (dirty_cpu == this_cpu) goto out; smp_flush_dcache_folio_impl(folio, dirty_cpu); } set_dcache_dirty(folio, this_cpu); } else { /* We could delay the flush for the !page_mapping * case too. But that case is for exec env/arg * pages and those are %99 certainly going to get * faulted into the tlb (and thus flushed) anyways. */ flush_dcache_folio_impl(folio); } out: put_cpu(); } EXPORT_SYMBOL(flush_dcache_folio); void __kprobes flush_icache_range(unsigned long start, unsigned long end) { /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ if (tlb_type == spitfire) { unsigned long kaddr; /* This code only runs on Spitfire cpus so this is * why we can assume _PAGE_PADDR_4U. */ for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) { unsigned long paddr, mask = _PAGE_PADDR_4U; if (kaddr >= PAGE_OFFSET) paddr = kaddr & mask; else { pte_t *ptep = virt_to_kpte(kaddr); paddr = pte_val(*ptep) & mask; } __flush_icache_page(paddr); } } } EXPORT_SYMBOL(flush_icache_range); void mmu_info(struct seq_file *m) { static const char *pgsz_strings[] = { "8K", "64K", "512K", "4MB", "32MB", "256MB", "2GB", "16GB", }; int i, printed; if (tlb_type == cheetah) seq_printf(m, "MMU Type\t: Cheetah\n"); else if (tlb_type == cheetah_plus) seq_printf(m, "MMU Type\t: Cheetah+\n"); else if (tlb_type == spitfire) seq_printf(m, "MMU Type\t: Spitfire\n"); else if (tlb_type == hypervisor) seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); else seq_printf(m, "MMU Type\t: ???\n"); seq_printf(m, "MMU PGSZs\t: "); printed = 0; for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) { if (cpu_pgsz_mask & (1UL << i)) { seq_printf(m, "%s%s", printed ? 
"," : "", pgsz_strings[i]); printed++; } } seq_putc(m, '\n'); #ifdef CONFIG_DEBUG_DCFLUSH seq_printf(m, "DCPageFlushes\t: %d\n", atomic_read(&dcpage_flushes)); #ifdef CONFIG_SMP seq_printf(m, "DCPageFlushesXC\t: %d\n", atomic_read(&dcpage_flushes_xcall)); #endif /* CONFIG_SMP */ #endif /* CONFIG_DEBUG_DCFLUSH */ } struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; unsigned long kern_locked_tte_data; /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> * HI_OBP_ADDRESS range are handled in ktlb.S. */ static inline int in_obp_range(unsigned long vaddr) { return (vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS); } static int cmp_ptrans(const void *a, const void *b) { const struct linux_prom_translation *x = a, *y = b; if (x->virt > y->virt) return 1; if (x->virt < y->virt) return -1; return 0; } /* Read OBP translations property into 'prom_trans[]'. */ static void __init read_obp_translations(void) { int n, node, ents, first, last, i; node = prom_finddevice("/virtual-memory"); n = prom_getproplen(node, "translations"); if (unlikely(n == 0 || n == -1)) { prom_printf("prom_mappings: Couldn't get size.\n"); prom_halt(); } if (unlikely(n > sizeof(prom_trans))) { prom_printf("prom_mappings: Size %d is too big.\n", n); prom_halt(); } if ((n = prom_getproperty(node, "translations", (char *)&prom_trans[0], sizeof(prom_trans))) == -1) { prom_printf("prom_mappings: Couldn't get property.\n"); prom_halt(); } n = n / sizeof(struct linux_prom_translation); ents = n; sort(prom_trans, ents, sizeof(struct linux_prom_translation), cmp_ptrans, NULL); /* Now kick out all the non-OBP entries. */ for (i = 0; i < ents; i++) { if (in_obp_range(prom_trans[i].virt)) break; } first = i; for (; i < ents; i++) { if (!in_obp_range(prom_trans[i].virt)) break; } last = i; for (i = 0; i < (last - first); i++) { struct linux_prom_translation *src = &prom_trans[i + first]; struct linux_prom_translation *dest = &prom_trans[i]; *dest = *src; } for (; i < ents; i++) { struct linux_prom_translation *dest = &prom_trans[i]; dest->virt = dest->size = dest->data = 0x0UL; } prom_trans_ents = last - first; if (tlb_type == spitfire) { /* Clear diag TTE bits. */ for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data &= ~0x0003fe0000000000UL; } /* Force execute bit on. */ for (i = 0; i < prom_trans_ents; i++) prom_trans[i].data |= (tlb_type == hypervisor ? _PAGE_EXEC_4V : _PAGE_EXEC_4U); } static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu); if (ret != 0) { prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: " "errors with %lx\n", vaddr, 0, pte, mmu, ret); prom_halt(); } } static unsigned long kern_large_tte(unsigned long paddr); static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; int i, tlb_ent = sparc64_highest_locked_tlbent(); tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; /* Now lock us into the TLBs via Hypervisor or OBP. 
*/ if (tlb_type == hypervisor) { for (i = 0; i < num_kernel_image_mappings; i++) { hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); tte_vaddr += 0x400000; tte_data += 0x400000; } } else { for (i = 0; i < num_kernel_image_mappings; i++) { prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr); prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr); tte_vaddr += 0x400000; tte_data += 0x400000; } sparc64_highest_unlocked_tlb_ent = tlb_ent - i; } if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; } } static void __init inherit_prom_mappings(void) { /* Now fixup OBP's idea about where we really are mapped. */ printk("Remapping the kernel... "); remap_kernel(); printk("done.\n"); } void prom_world(int enter) { /* * No need to change the address space any more, just flush * the register windows */ __asm__ __volatile__("flushw"); } void __flush_dcache_range(unsigned long start, unsigned long end) { unsigned long va; if (tlb_type == spitfire) { int n = 0; for (va = start; va < end; va += 32) { spitfire_put_dcache_tag(va & 0x3fe0, 0x0); if (++n >= 512) break; } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (va), "i" (ASI_DCACHE_INVALIDATE)); } } EXPORT_SYMBOL(__flush_dcache_range); /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); unsigned long tlb_context_cache = CTX_FIRST_VERSION; #define MAX_CTX_NR (1UL << CTX_NR_BITS) #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; static void mmu_context_wrap(void) { unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; unsigned long new_ver, new_ctx, old_ctx; struct mm_struct *mm; int cpu; bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); /* Reserve kernel context */ set_bit(0, mmu_context_bmap); new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; if (unlikely(new_ver == 0)) new_ver = CTX_FIRST_VERSION; tlb_context_cache = new_ver; /* * Make sure that any new mm that are added into per_cpu_secondary_mm, * are going to go through get_new_mmu_context() path. */ mb(); /* * Updated versions to current on those CPUs that had valid secondary * contexts */ for_each_online_cpu(cpu) { /* * If a new mm is stored after we took this mm from the array, * it will go into get_new_mmu_context() path, because we * already bumped the version in tlb_context_cache. */ mm = per_cpu(per_cpu_secondary_mm, cpu); if (unlikely(!mm || mm == &init_mm)) continue; old_ctx = mm->context.sparc64_ctx_val; if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); mm->context.sparc64_ctx_val = new_ctx; } } } /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. * * We must be careful about boundary cases so that we never * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). * * Always invoked with interrupts disabled. 
*/ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; spin_lock(&ctx_alloc_lock); retry: /* wrap might have happened, test again if our context became valid */ if (unlikely(CTX_VALID(mm->context))) goto out; orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { mmu_context_wrap(); goto retry; } } if (mm->context.sparc64_ctx_val) cpumask_clear(mm_cpumask(mm)); mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; out: spin_unlock(&ctx_alloc_lock); } static int numa_enabled = 1; static int numa_debug; static int __init early_numa(char *p) { if (!p) return 0; if (strstr(p, "off")) numa_enabled = 0; if (strstr(p, "debug")) numa_debug = 1; return 0; } early_param("numa", early_numa); #define numadbg(f, a...) \ do { if (numa_debug) \ printk(KERN_INFO f, ## a); \ } while (0) static void __init find_ramdisk(unsigned long phys_base) { #ifdef CONFIG_BLK_DEV_INITRD if (sparc_ramdisk_image || sparc_ramdisk_image64) { unsigned long ramdisk_image; /* Older versions of the bootloader only supported a * 32-bit physical address for the ramdisk image * location, stored at sparc_ramdisk_image. Newer * SILO versions set sparc_ramdisk_image to zero and * provide a full 64-bit physical address at * sparc_ramdisk_image64. */ ramdisk_image = sparc_ramdisk_image; if (!ramdisk_image) ramdisk_image = sparc_ramdisk_image64; /* Another bootloader quirk. The bootloader normalizes * the physical address to KERNBASE, so we have to * factor that back out and add in the lowest valid * physical page address to get the true physical address. */ ramdisk_image -= KERNBASE; ramdisk_image += phys_base; numadbg("Found ramdisk at physical address 0x%lx, size %u\n", ramdisk_image, sparc_ramdisk_size); initrd_start = ramdisk_image; initrd_end = ramdisk_image + sparc_ramdisk_size; memblock_reserve(initrd_start, sparc_ramdisk_size); initrd_start += PAGE_OFFSET; initrd_end += PAGE_OFFSET; } #endif } struct node_mem_mask { unsigned long mask; unsigned long match; }; static struct node_mem_mask node_masks[MAX_NUMNODES]; static int num_node_masks; #ifdef CONFIG_NUMA struct mdesc_mlgroup { u64 node; u64 latency; u64 match; u64 mask; }; static struct mdesc_mlgroup *mlgroups; static int num_mlgroups; int numa_cpu_lookup_table[NR_CPUS]; cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; struct mdesc_mblock { u64 base; u64 size; u64 offset; /* RA-to-PA */ }; static struct mdesc_mblock *mblocks; static int num_mblocks; static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr) { struct mdesc_mblock *m = NULL; int i; for (i = 0; i < num_mblocks; i++) { m = &mblocks[i]; if (addr >= m->base && addr < (m->base + m->size)) { break; } } return m; } static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid) { int prev_nid, new_nid; prev_nid = NUMA_NO_NODE; for ( ; start < end; start += PAGE_SIZE) { for (new_nid = 0; new_nid < num_node_masks; new_nid++) { struct node_mem_mask *p = &node_masks[new_nid]; if ((start & p->mask) == p->match) { if (prev_nid == NUMA_NO_NODE) prev_nid = new_nid; break; } } if (new_nid == num_node_masks) { prev_nid = 0; WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. 
Some memory will be owned by node 0.", start); break; } if (prev_nid != new_nid) break; } *nid = prev_nid; return start > end ? end : start; } static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { u64 ret_end, pa_start, m_mask, m_match, m_end; struct mdesc_mblock *mblock; int _nid, i; if (tlb_type != hypervisor) return memblock_nid_range_sun4u(start, end, nid); mblock = addr_to_mblock(start); if (!mblock) { WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]", start); _nid = 0; ret_end = end; goto done; } pa_start = start + mblock->offset; m_match = 0; m_mask = 0; for (_nid = 0; _nid < num_node_masks; _nid++) { struct node_mem_mask *const m = &node_masks[_nid]; if ((pa_start & m->mask) == m->match) { m_match = m->match; m_mask = m->mask; break; } } if (num_node_masks == _nid) { /* We could not find NUMA group, so default to 0, but lets * search for latency group, so we could calculate the correct * end address that we return */ _nid = 0; for (i = 0; i < num_mlgroups; i++) { struct mdesc_mlgroup *const m = &mlgroups[i]; if ((pa_start & m->mask) == m->match) { m_match = m->match; m_mask = m->mask; break; } } if (i == num_mlgroups) { WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]", start); ret_end = end; goto done; } } /* * Each latency group has match and mask, and each memory block has an * offset. An address belongs to a latency group if its address matches * the following formula: ((addr + offset) & mask) == match * It is, however, slow to check every single page if it matches a * particular latency group. As optimization we calculate end value by * using bit arithmetics. */ m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset; m_end += pa_start & ~((1ul << fls64(m_mask)) - 1); ret_end = m_end > end ? end : m_end; done: *nid = _nid; return ret_end; } #endif /* This must be invoked after performing all of the necessary * memblock_set_node() calls for 'nid'. We need to be able to get * correct data from get_pfn_range_for_nid(). 
*/ static void __init allocate_node_data(int nid) { struct pglist_data *p; unsigned long start_pfn, end_pfn; #ifdef CONFIG_NUMA NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); if (!NODE_DATA(nid)) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); prom_halt(); } NODE_DATA(nid)->node_id = nid; #endif p = NODE_DATA(nid); get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); p->node_start_pfn = start_pfn; p->node_spanned_pages = end_pfn - start_pfn; } static void init_node_masks_nonnuma(void) { #ifdef CONFIG_NUMA int i; #endif numadbg("Initializing tables for non-numa.\n"); node_masks[0].mask = 0; node_masks[0].match = 0; num_node_masks = 1; #ifdef CONFIG_NUMA for (i = 0; i < NR_CPUS; i++) numa_cpu_lookup_table[i] = 0; cpumask_setall(&numa_cpumask_lookup_table[0]); #endif } #ifdef CONFIG_NUMA struct pglist_data *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpumask_lookup_table); EXPORT_SYMBOL(node_data); static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio, u32 cfg_handle) { u64 arc; mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); const u64 *val; val = mdesc_get_property(md, target, "cfg-handle", NULL); if (val && *val == cfg_handle) return 0; } return -ENODEV; } static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp, u32 cfg_handle) { u64 arc, candidate, best_latency = ~(u64)0; candidate = MDESC_NODE_NULL; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); const char *name = mdesc_node_name(md, target); const u64 *val; if (strcmp(name, "pio-latency-group")) continue; val = mdesc_get_property(md, target, "latency", NULL); if (!val) continue; if (*val < best_latency) { candidate = target; best_latency = *val; } } if (candidate == MDESC_NODE_NULL) return -ENODEV; return scan_pio_for_cfg_handle(md, candidate, cfg_handle); } int of_node_to_nid(struct device_node *dp) { const struct linux_prom64_registers *regs; struct mdesc_handle *md; u32 cfg_handle; int count, nid; u64 grp; /* This is the right thing to do on currently supported * SUN4U NUMA platforms as well, as the PCI controller does * not sit behind any particular memory controller. 
*/ if (!mlgroups) return -1; regs = of_get_property(dp, "reg", NULL); if (!regs) return -1; cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff; md = mdesc_grab(); count = 0; nid = NUMA_NO_NODE; mdesc_for_each_node_by_name(md, grp, "group") { if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { nid = count; break; } count++; } mdesc_release(md); return nid; } static void __init add_node_ranges(void) { phys_addr_t start, end; unsigned long prev_max; u64 i; memblock_resized: prev_max = memblock.memory.max; for_each_mem_range(i, &start, &end) { while (start < end) { unsigned long this_end; int nid; this_end = memblock_nid_range(start, end, &nid); numadbg("Setting memblock NUMA node nid[%d] " "start[%llx] end[%lx]\n", nid, start, this_end); memblock_set_node(start, this_end - start, &memblock.memory, nid); if (memblock.memory.max != prev_max) goto memblock_resized; start = this_end; } } } static int __init grab_mlgroups(struct mdesc_handle *md) { unsigned long paddr; int count = 0; u64 node; mdesc_for_each_node_by_name(md, node, "memory-latency-group") count++; if (!count) return -ENOENT; paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup), SMP_CACHE_BYTES); if (!paddr) return -ENOMEM; mlgroups = __va(paddr); num_mlgroups = count; count = 0; mdesc_for_each_node_by_name(md, node, "memory-latency-group") { struct mdesc_mlgroup *m = &mlgroups[count++]; const u64 *val; m->node = node; val = mdesc_get_property(md, node, "latency", NULL); m->latency = *val; val = mdesc_get_property(md, node, "address-match", NULL); m->match = *val; val = mdesc_get_property(md, node, "address-mask", NULL); m->mask = *val; numadbg("MLGROUP[%d]: node[%llx] latency[%llx] " "match[%llx] mask[%llx]\n", count - 1, m->node, m->latency, m->match, m->mask); } return 0; } static int __init grab_mblocks(struct mdesc_handle *md) { unsigned long paddr; int count = 0; u64 node; mdesc_for_each_node_by_name(md, node, "mblock") count++; if (!count) return -ENOENT; paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock), SMP_CACHE_BYTES); if (!paddr) return -ENOMEM; mblocks = __va(paddr); num_mblocks = count; count = 0; mdesc_for_each_node_by_name(md, node, "mblock") { struct mdesc_mblock *m = &mblocks[count++]; const u64 *val; val = mdesc_get_property(md, node, "base", NULL); m->base = *val; val = mdesc_get_property(md, node, "size", NULL); m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); /* The address-congruence-offset property is optional. * Explicity zero it be identifty this. */ if (val) m->offset = *val; else m->offset = 0UL; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); } return 0; } static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md, u64 grp, cpumask_t *mask) { u64 arc; cpumask_clear(mask); mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) { u64 target = mdesc_arc_target(md, arc); const char *name = mdesc_node_name(md, target); const u64 *id; if (strcmp(name, "cpu")) continue; id = mdesc_get_property(md, target, "id", NULL); if (*id < nr_cpu_ids) cpumask_set_cpu(*id, mask); } } static struct mdesc_mlgroup * __init find_mlgroup(u64 node) { int i; for (i = 0; i < num_mlgroups; i++) { struct mdesc_mlgroup *m = &mlgroups[i]; if (m->node == node) return m; } return NULL; } int __node_distance(int from, int to) { if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) { pr_warn("Returning default NUMA distance value for %d->%d\n", from, to); return (from == to) ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; } return numa_latency[from][to]; } EXPORT_SYMBOL(__node_distance); static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; for (i = 0; i < MAX_NUMNODES; i++) { struct node_mem_mask *n = &node_masks[i]; if ((grp->mask == n->mask) && (grp->match == n->match)) break; } return i; } static void __init find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, int index) { u64 arc; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { int tnode; u64 target = mdesc_arc_target(md, arc); struct mdesc_mlgroup *m = find_mlgroup(target); if (!m) continue; tnode = find_best_numa_node_for_mlgroup(m); if (tnode == MAX_NUMNODES) continue; numa_latency[index][tnode] = m->latency; } } static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, int index) { struct mdesc_mlgroup *candidate = NULL; u64 arc, best_latency = ~(u64)0; struct node_mem_mask *n; mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { u64 target = mdesc_arc_target(md, arc); struct mdesc_mlgroup *m = find_mlgroup(target); if (!m) continue; if (m->latency < best_latency) { candidate = m; best_latency = m->latency; } } if (!candidate) return -ENOENT; if (num_node_masks != index) { printk(KERN_ERR "Inconsistent NUMA state, " "index[%d] != num_node_masks[%d]\n", index, num_node_masks); return -EINVAL; } n = &node_masks[num_node_masks++]; n->mask = candidate->mask; n->match = candidate->match; numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n", index, n->mask, n->match, candidate->latency); return 0; } static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, int index) { cpumask_t mask; int cpu; numa_parse_mdesc_group_cpus(md, grp, &mask); for_each_cpu(cpu, &mask) numa_cpu_lookup_table[cpu] = index; cpumask_copy(&numa_cpumask_lookup_table[index], &mask); if (numa_debug) { printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); for_each_cpu(cpu, &mask) printk("%d ", cpu); printk("]\n"); } return numa_attach_mlgroup(md, grp, index); } static int __init numa_parse_mdesc(void) { struct mdesc_handle *md = mdesc_grab(); int i, j, err, count; u64 node; node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); if (node == MDESC_NODE_NULL) { mdesc_release(md); return -ENOENT; } err = grab_mblocks(md); if (err < 0) goto out; err = grab_mlgroups(md); if (err < 0) goto out; count = 0; mdesc_for_each_node_by_name(md, node, "group") { err = numa_parse_mdesc_group(md, node, count); if (err < 0) break; count++; } count = 0; mdesc_for_each_node_by_name(md, node, "group") { find_numa_latencies_for_group(md, node, count); count++; } /* Normalize numa latency matrix according to ACPI SLIT spec. */ for (i = 0; i < MAX_NUMNODES; i++) { u64 self_latency = numa_latency[i][i]; for (j = 0; j < MAX_NUMNODES; j++) { numa_latency[i][j] = (numa_latency[i][j] * LOCAL_DISTANCE) / self_latency; } } add_node_ranges(); for (i = 0; i < num_node_masks; i++) { allocate_node_data(i); node_set_online(i); } err = 0; out: mdesc_release(md); return err; } static int __init numa_parse_jbus(void) { unsigned long cpu, index; /* NUMA node id is encoded in bits 36 and higher, and there is * a 1-to-1 mapping from CPU ID to NUMA node ID. 
*/ index = 0; for_each_present_cpu(cpu) { numa_cpu_lookup_table[cpu] = index; cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu)); node_masks[index].mask = ~((1UL << 36UL) - 1UL); node_masks[index].match = cpu << 36UL; index++; } num_node_masks = index; add_node_ranges(); for (index = 0; index < num_node_masks; index++) { allocate_node_data(index); node_set_online(index); } return 0; } static int __init numa_parse_sun4u(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) { unsigned long ver; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) return numa_parse_jbus(); } return -1; } static int __init bootmem_init_numa(void) { int i, j; int err = -1; numadbg("bootmem_init_numa()\n"); /* Some sane defaults for numa latency values */ for (i = 0; i < MAX_NUMNODES; i++) { for (j = 0; j < MAX_NUMNODES; j++) numa_latency[i][j] = (i == j) ? LOCAL_DISTANCE : REMOTE_DISTANCE; } if (numa_enabled) { if (tlb_type == hypervisor) err = numa_parse_mdesc(); else err = numa_parse_sun4u(); } return err; } #else static int bootmem_init_numa(void) { return -1; } #endif static void __init bootmem_init_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); numadbg("bootmem_init_nonnuma()\n"); printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_INFO "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); init_node_masks_nonnuma(); memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); allocate_node_data(0); node_set_online(0); } static unsigned long __init bootmem_init(unsigned long phys_base) { unsigned long end_pfn; end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; max_pfn = max_low_pfn = end_pfn; min_low_pfn = (phys_base >> PAGE_SHIFT); if (bootmem_init_numa() < 0) bootmem_init_nonnuma(); /* Dump memblock with node info. 
*/ memblock_dump_all(); /* XXX cpu notifier XXX */ sparse_init(); return end_pfn; } static struct linux_prom64_registers pall[MAX_BANKS] __initdata; static int pall_ents __initdata; static unsigned long max_phys_bits = 40; bool kern_addr_valid(unsigned long addr) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; if ((long)addr < 0L) { unsigned long pa = __pa(addr); if ((pa >> max_phys_bits) != 0UL) return false; return pfn_valid(pa >> PAGE_SHIFT); } if (addr >= (unsigned long) KERNBASE && addr < (unsigned long)&_end) return true; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return false; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return false; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return false; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return false; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) return false; return pfn_valid(pte_pfn(*pte)); } static unsigned long __ref kernel_map_hugepud(unsigned long vstart, unsigned long vend, pud_t *pud) { const unsigned long mask16gb = (1UL << 34) - 1UL; u64 pte_val = vstart; /* Each PUD is 8GB */ if ((vstart & mask16gb) || (vend - vstart <= mask16gb)) { pte_val ^= kern_linear_pte_xor[2]; pud_val(*pud) = pte_val | _PAGE_PUD_HUGE; return vstart + PUD_SIZE; } pte_val ^= kern_linear_pte_xor[3]; pte_val |= _PAGE_PUD_HUGE; vend = vstart + mask16gb + 1UL; while (vstart < vend) { pud_val(*pud) = pte_val; pte_val += PUD_SIZE; vstart += PUD_SIZE; pud++; } return vstart; } static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend, bool guard) { if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE) return true; return false; } static unsigned long __ref kernel_map_hugepmd(unsigned long vstart, unsigned long vend, pmd_t *pmd) { const unsigned long mask256mb = (1UL << 28) - 1UL; const unsigned long mask2gb = (1UL << 31) - 1UL; u64 pte_val = vstart; /* Each PMD is 8MB */ if ((vstart & mask256mb) || (vend - vstart <= mask256mb)) { pte_val ^= kern_linear_pte_xor[0]; pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE; return vstart + PMD_SIZE; } if ((vstart & mask2gb) || (vend - vstart <= mask2gb)) { pte_val ^= kern_linear_pte_xor[1]; pte_val |= _PAGE_PMD_HUGE; vend = vstart + mask256mb + 1UL; } else { pte_val ^= kern_linear_pte_xor[2]; pte_val |= _PAGE_PMD_HUGE; vend = vstart + mask2gb + 1UL; } while (vstart < vend) { pmd_val(*pmd) = pte_val; pte_val += PMD_SIZE; vstart += PMD_SIZE; pmd++; } return vstart; } static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend, bool guard) { if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE) return true; return false; } static unsigned long __ref kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot, bool use_huge) { unsigned long vstart = PAGE_OFFSET + pstart; unsigned long vend = PAGE_OFFSET + pend; unsigned long alloc_bytes = 0UL; if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", vstart, vend); prom_halt(); } while (vstart < vend) { unsigned long this_end, paddr = __pa(vstart); pgd_t *pgd = pgd_offset_k(vstart); p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; if (pgd_none(*pgd)) { pud_t *new; new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); if (!new) goto err_alloc; alloc_bytes += PAGE_SIZE; pgd_populate(&init_mm, pgd, new); } p4d = p4d_offset(pgd, vstart); if (p4d_none(*p4d)) { pud_t *new; new = memblock_alloc_from(PAGE_SIZE, 
PAGE_SIZE, PAGE_SIZE); if (!new) goto err_alloc; alloc_bytes += PAGE_SIZE; p4d_populate(&init_mm, p4d, new); } pud = pud_offset(p4d, vstart); if (pud_none(*pud)) { pmd_t *new; if (kernel_can_map_hugepud(vstart, vend, use_huge)) { vstart = kernel_map_hugepud(vstart, vend, pud); continue; } new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); if (!new) goto err_alloc; alloc_bytes += PAGE_SIZE; pud_populate(&init_mm, pud, new); } pmd = pmd_offset(pud, vstart); if (pmd_none(*pmd)) { pte_t *new; if (kernel_can_map_hugepmd(vstart, vend, use_huge)) { vstart = kernel_map_hugepmd(vstart, vend, pmd); continue; } new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); if (!new) goto err_alloc; alloc_bytes += PAGE_SIZE; pmd_populate_kernel(&init_mm, pmd, new); } pte = pte_offset_kernel(pmd, vstart); this_end = (vstart + PMD_SIZE) & PMD_MASK; if (this_end > vend) this_end = vend; while (vstart < this_end) { pte_val(*pte) = (paddr | pgprot_val(prot)); vstart += PAGE_SIZE; paddr += PAGE_SIZE; pte++; } } return alloc_bytes; err_alloc: panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); return -ENOMEM; } static void __init flush_all_kernel_tsbs(void) { int i; for (i = 0; i < KERNEL_TSB_NENTRIES; i++) { struct tsb *ent = &swapper_tsb[i]; ent->tag = (1UL << TSB_TAG_INVALID_BIT); } #ifndef CONFIG_DEBUG_PAGEALLOC for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) { struct tsb *ent = &swapper_4m_tsb[i]; ent->tag = (1UL << TSB_TAG_INVALID_BIT); } #endif } extern unsigned int kvmap_linear_patch[1]; static void __init kernel_physical_mapping_init(void) { unsigned long i, mem_alloced = 0UL; bool use_huge = true; #ifdef CONFIG_DEBUG_PAGEALLOC use_huge = false; #endif for (i = 0; i < pall_ents; i++) { unsigned long phys_start, phys_end; phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL, use_huge); } printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); kvmap_linear_patch[0] = 0x01000000; /* nop */ flushi(&kvmap_linear_patch[0]); flush_all_kernel_tsbs(); __flush_tlb_all(); } #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0)), false); flush_tsb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. */ __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, PAGE_OFFSET + phys_end); } #endif unsigned long __init find_ecache_flush_span(unsigned long size) { int i; for (i = 0; i < pavail_ents; i++) { if (pavail[i].reg_size >= size) return pavail[i].phys_addr; } return ~0UL; } unsigned long PAGE_OFFSET; EXPORT_SYMBOL(PAGE_OFFSET); unsigned long VMALLOC_END = 0x0000010000000000UL; EXPORT_SYMBOL(VMALLOC_END); unsigned long sparc64_va_hole_top = 0xfffff80000000000UL; unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL; static void __init setup_page_offset(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) { /* Cheetah/Panther support a full 64-bit virtual * address, so we can use all that our page tables * support. 
*/ sparc64_va_hole_top = 0xfff0000000000000UL; sparc64_va_hole_bottom = 0x0010000000000000UL; max_phys_bits = 42; } else if (tlb_type == hypervisor) { switch (sun4v_chip_type) { case SUN4V_CHIP_NIAGARA1: case SUN4V_CHIP_NIAGARA2: /* T1 and T2 support 48-bit virtual addresses. */ sparc64_va_hole_top = 0xffff800000000000UL; sparc64_va_hole_bottom = 0x0000800000000000UL; max_phys_bits = 39; break; case SUN4V_CHIP_NIAGARA3: /* T3 supports 48-bit virtual addresses. */ sparc64_va_hole_top = 0xffff800000000000UL; sparc64_va_hole_bottom = 0x0000800000000000UL; max_phys_bits = 43; break; case SUN4V_CHIP_NIAGARA4: case SUN4V_CHIP_NIAGARA5: case SUN4V_CHIP_SPARC64X: case SUN4V_CHIP_SPARC_M6: /* T4 and later support 52-bit virtual addresses. */ sparc64_va_hole_top = 0xfff8000000000000UL; sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 47; break; case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_SN: /* M7 and later support 52-bit virtual addresses. */ sparc64_va_hole_top = 0xfff8000000000000UL; sparc64_va_hole_bottom = 0x0008000000000000UL; max_phys_bits = 49; break; case SUN4V_CHIP_SPARC_M8: default: /* M8 and later support 54-bit virtual addresses. * However, restricting M8 and above VA bits to 53 * as 4-level page table cannot support more than * 53 VA bits. */ sparc64_va_hole_top = 0xfff0000000000000UL; sparc64_va_hole_bottom = 0x0010000000000000UL; max_phys_bits = 51; break; } } if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) { prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n", max_phys_bits); prom_halt(); } PAGE_OFFSET = sparc64_va_hole_top; VMALLOC_END = ((sparc64_va_hole_bottom >> 1) + (sparc64_va_hole_bottom >> 2)); pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n", PAGE_OFFSET, max_phys_bits); pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n", VMALLOC_START, VMALLOC_END); pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n", VMEMMAP_BASE, VMEMMAP_BASE << 1); } static void __init tsb_phys_patch(void) { struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; pquad = &__tsb_ldquad_phys_patch; while (pquad < &__tsb_ldquad_phys_patch_end) { unsigned long addr = pquad->addr; if (tlb_type == hypervisor) *(unsigned int *) addr = pquad->sun4v_insn; else *(unsigned int *) addr = pquad->sun4u_insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); pquad++; } p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; *(unsigned int *) addr = p->insn; wmb(); __asm__ __volatile__("flush %0" : /* no outputs */ : "r" (addr)); p++; } } /* Don't mark as init, we give this to the Hypervisor. */ #ifndef CONFIG_DEBUG_PAGEALLOC #define NUM_KTSB_DESCR 2 #else #define NUM_KTSB_DESCR 1 #endif static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; /* The swapper TSBs are loaded with a base sequence of: * * sethi %uhi(SYMBOL), REG1 * sethi %hi(SYMBOL), REG2 * or REG1, %ulo(SYMBOL), REG1 * or REG2, %lo(SYMBOL), REG2 * sllx REG1, 32, REG1 * or REG1, REG2, REG1 * * When we use physical addressing for the TSB accesses, we patch the * first four instructions in the above sequence. 
*/ static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa) { unsigned long high_bits, low_bits; high_bits = (pa >> 32) & 0xffffffff; low_bits = (pa >> 0) & 0xffffffff; while (start < end) { unsigned int *ia = (unsigned int *)(unsigned long)*start; ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10); __asm__ __volatile__("flush %0" : : "r" (ia)); ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10); __asm__ __volatile__("flush %0" : : "r" (ia + 1)); ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff); __asm__ __volatile__("flush %0" : : "r" (ia + 2)); ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff); __asm__ __volatile__("flush %0" : : "r" (ia + 3)); start++; } } static void ktsb_phys_patch(void) { extern unsigned int __swapper_tsb_phys_patch; extern unsigned int __swapper_tsb_phys_patch_end; unsigned long ktsb_pa; ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); patch_one_ktsb_phys(&__swapper_tsb_phys_patch, &__swapper_tsb_phys_patch_end, ktsb_pa); #ifndef CONFIG_DEBUG_PAGEALLOC { extern unsigned int __swapper_4m_tsb_phys_patch; extern unsigned int __swapper_4m_tsb_phys_patch_end; ktsb_pa = (kern_base + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch, &__swapper_4m_tsb_phys_patch_end, ktsb_pa); } #endif } static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; /* First KTSB for PAGE_SIZE mappings. */ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { case 8 * 1024: default: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; break; case 64 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; break; case 512 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; break; case 4 * 1024 * 1024: ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; break; } ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; #ifndef CONFIG_DEBUG_PAGEALLOC /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */ ktsb_pa = (kern_base + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB | HV_PGSZ_MASK_256MB | HV_PGSZ_MASK_2GB | HV_PGSZ_MASK_16GB) & cpu_pgsz_mask); ktsb_descr[1].assoc = 1; ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; ktsb_descr[1].ctx_idx = 0; ktsb_descr[1].tsb_base = ktsb_pa; ktsb_descr[1].resv = 0; #endif } void sun4v_ktsb_register(void) { unsigned long pa, ret; pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); if (ret != 0) { prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " "errors with %lx\n", pa, ret); prom_halt(); } } static void __init sun4u_linear_pte_xor_finalize(void) { #ifndef CONFIG_DEBUG_PAGEALLOC /* This is where we would add Panther support for * 32MB and 256MB pages. */ #endif } static void __init sun4v_linear_pte_xor_finalize(void) { unsigned long pagecv_flag; /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead * enables MCD error. Do not set bit 9 on M7 processor. 
*/ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_M8: case SUN4V_CHIP_SPARC_SN: pagecv_flag = 0x00; break; default: pagecv_flag = _PAGE_CV_4V; break; } #ifndef CONFIG_DEBUG_PAGEALLOC if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) { kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; } if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) { kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[2] = kern_linear_pte_xor[1]; } if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) { kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^ PAGE_OFFSET; kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag | _PAGE_P_4V | _PAGE_W_4V); } else { kern_linear_pte_xor[3] = kern_linear_pte_xor[2]; } #endif } /* paging_init() sets up the page tables */ static unsigned long last_valid_pfn; static void sun4u_pgprot_init(void); static void sun4v_pgprot_init(void); #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) /* We need to exclude reserved regions. This exclusion will include * vmlinux and initrd. To be more precise the initrd size could be used to * compute a new lower limit because it is freed later during initialization. */ static void __init reduce_memory(phys_addr_t limit_ram) { limit_ram += memblock_reserved_size(); memblock_enforce_memory_limit(limit_ram); } void __init paging_init(void) { unsigned long end_pfn, shift, phys_base; unsigned long real_end, i; setup_page_offset(); /* These build time checkes make sure that the dcache_dirty_cpu() * folio->flags usage will work. * * When a page gets marked as dcache-dirty, we store the * cpu number starting at bit 32 in the folio->flags. Also, * functions like clear_dcache_dirty_cpu use the cpu mask * in 13-bit signed-immediate instruction fields. */ /* * Page flags must not reach into upper 32 bits that are used * for the cpu number */ BUILD_BUG_ON(NR_PAGEFLAGS > 32); /* * The bit fields placed in the high range must not reach below * the 32 bit boundary. Otherwise we cannot place the cpu field * at the 32 bit boundary. */ BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + ilog2(roundup_pow_of_two(NR_CPUS)) > 32); BUILD_BUG_ON(NR_CPUS > 4096); kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; /* Invalidate both kernel TSBs. */ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); #ifndef CONFIG_DEBUG_PAGEALLOC memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde * bit on M7 processor. This is a conflicting usage of the same * bit. Enabling TTE.cv on M7 would turn on Memory Corruption * Detection error on all pages and this will lead to problems * later. Kernel does not run with MCD enabled and hence rest * of the required steps to fully configure memory corruption * detection are not taken. We need to ensure TTE.mcde is not * set on M7 processor. 
Compute the value of cacheability * flag for use later taking this into consideration. */ switch (sun4v_chip_type) { case SUN4V_CHIP_SPARC_M7: case SUN4V_CHIP_SPARC_M8: case SUN4V_CHIP_SPARC_SN: page_cache4v_flag = _PAGE_CP_4V; break; default: page_cache4v_flag = _PAGE_CACHE_4V; break; } if (tlb_type == hypervisor) sun4v_pgprot_init(); else sun4u_pgprot_init(); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { tsb_phys_patch(); ktsb_phys_patch(); } if (tlb_type == hypervisor) sun4v_patch_tlb_handlers(); /* Find available physical memory... * * Read it twice in order to work around a bug in openfirmware. * The call to grab this table itself can cause openfirmware to * allocate memory, which in turn can take away some space from * the list of available memory. Reading it twice makes sure * we really do get the final value. */ read_obp_translations(); read_obp_memory("reg", &pall[0], &pall_ents); read_obp_memory("available", &pavail[0], &pavail_ents); read_obp_memory("available", &pavail[0], &pavail_ents); phys_base = 0xffffffffffffffffUL; for (i = 0; i < pavail_ents; i++) { phys_base = min(phys_base, pavail[i].phys_addr); memblock_add(pavail[i].phys_addr, pavail[i].reg_size); } memblock_reserve(kern_base, kern_size); find_ramdisk(phys_base); if (cmdline_memory_size) reduce_memory(cmdline_memory_size); memblock_allow_resize(); memblock_dump_all(); set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); real_end = (unsigned long)_end; num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); printk("Kernel: Using %d locked TLB entries for main kernel image.\n", num_kernel_image_mappings); /* Set kernel pgd to upper alias so physical page computations * work. */ init_mm.pgd += ((shift) / (sizeof(pgd_t))); memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ setup_tba(); __flush_tlb_all(); prom_build_devicetree(); of_populate_present_mask(); #ifndef CONFIG_SMP of_fill_in_cpu_data(); #endif if (tlb_type == hypervisor) { sun4v_mdesc_init(); mdesc_populate_present_mask(cpu_all_mask); #ifndef CONFIG_SMP mdesc_fill_in_cpu_data(cpu_all_mask); #endif mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask); sun4v_linear_pte_xor_finalize(); sun4v_ktsb_init(); sun4v_ktsb_register(); } else { unsigned long impl, ver; cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); impl = ((ver >> 32) & 0xffff); if (impl == PANTHER_IMPL) cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB | HV_PGSZ_MASK_256MB); sun4u_linear_pte_xor_finalize(); } /* Flush the TLBs and the 4M TSB so that the updated linear * pte XOR settings are realized for all mappings. */ __flush_tlb_all(); #ifndef CONFIG_DEBUG_PAGEALLOC memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); #endif __flush_tlb_all(); /* Setup bootmem... 
*/ last_valid_pfn = end_pfn = bootmem_init(phys_base); kernel_physical_mapping_init(); { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_NORMAL] = end_pfn; free_area_init(max_zone_pfns); } printk("Booting Linux...\n"); } int page_in_phys_avail(unsigned long paddr) { int i; paddr &= PAGE_MASK; for (i = 0; i < pavail_ents; i++) { unsigned long start, end; start = pavail[i].phys_addr; end = start + pavail[i].reg_size; if (paddr >= start && paddr < end) return 1; } if (paddr >= kern_base && paddr < (kern_base + kern_size)) return 1; #ifdef CONFIG_BLK_DEV_INITRD if (paddr >= __pa(initrd_start) && paddr < __pa(PAGE_ALIGN(initrd_end))) return 1; #endif return 0; } static void __init register_page_bootmem_info(void) { #ifdef CONFIG_NUMA int i; for_each_online_node(i) if (NODE_DATA(i)->node_spanned_pages) register_page_bootmem_info_node(NODE_DATA(i)); #endif } void __init mem_init(void) { high_memory = __va(last_valid_pfn << PAGE_SHIFT); memblock_free_all(); /* * Must be done after boot memory is put on freelist, because here we * might set fields in deferred struct pages that have not yet been * initialized, and memblock_free_all() initializes all the reserved * deferred pages for us. */ register_page_bootmem_info(); /* * Set up the zero page, mark it reserved, so that page count * is not manipulated when freeing the page from user ptes. */ mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); if (mem_map_zero == NULL) { prom_printf("paging_init: Cannot alloc zero page.\n"); prom_halt(); } mark_page_reserved(mem_map_zero); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); } void free_initmem(void) { unsigned long addr, initend; int do_free = 1; /* If the physical memory maps were trimmed by kernel command * line options, don't even try freeing this initmem stuff up. * The kernel image could have been in the trimmed out region * and if so the freeing below will free invalid page structs. */ if (cmdline_memory_size) do_free = 0; /* * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. 
*/ addr = PAGE_ALIGN((unsigned long)(__init_begin)); initend = (unsigned long)(__init_end) & PAGE_MASK; for (; addr < initend; addr += PAGE_SIZE) { unsigned long page; page = (addr + ((unsigned long) __va(kern_base)) - ((unsigned long) KERNBASE)); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); if (do_free) free_reserved_page(virt_to_page(page)); } } pgprot_t PAGE_KERNEL __read_mostly; EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; pgprot_t PAGE_SHARED __read_mostly; EXPORT_SYMBOL(PAGE_SHARED); unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; EXPORT_SYMBOL(_PAGE_IE); unsigned long _PAGE_E __read_mostly; EXPORT_SYMBOL(_PAGE_E); unsigned long _PAGE_CACHE __read_mostly; EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, int node, struct vmem_altmap *altmap) { unsigned long pte_base; pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); if (tlb_type == hypervisor) pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); pte_base |= _PAGE_PMD_HUGE; vstart = vstart & PMD_MASK; vend = ALIGN(vend, PMD_SIZE); for (; vstart < vend; vstart += PMD_SIZE) { pgd_t *pgd = vmemmap_pgd_populate(vstart, node); unsigned long pte; p4d_t *p4d; pud_t *pud; pmd_t *pmd; if (!pgd) return -ENOMEM; p4d = vmemmap_p4d_populate(pgd, vstart, node); if (!p4d) return -ENOMEM; pud = vmemmap_pud_populate(p4d, vstart, node); if (!pud) return -ENOMEM; pmd = pmd_offset(pud, vstart); pte = pmd_val(*pmd); if (!(pte & _PAGE_VALID)) { void *block = vmemmap_alloc_block(PMD_SIZE, node); if (!block) return -ENOMEM; pmd_val(*pmd) = pte_base | __pa(block); } } return 0; } void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ /* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */ static pgprot_t protection_map[16] __ro_after_init; static void prot_init_common(unsigned long page_none, unsigned long page_shared, unsigned long page_copy, unsigned long page_readonly, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); protection_map[0x4] = __pgprot(page_readonly); protection_map[0x5] = __pgprot(page_readonly); protection_map[0x6] = __pgprot(page_copy); protection_map[0x7] = __pgprot(page_copy); protection_map[0x8] = __pgprot(page_none); protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); protection_map[0xc] = __pgprot(page_readonly); protection_map[0xd] = __pgprot(page_readonly); protection_map[0xe] = __pgprot(page_shared); protection_map[0xf] = __pgprot(page_shared); } static void __init sun4u_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U); PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | _PAGE_CACHE_4U | _PAGE_P_4U | __ACCESS_BITS_4U | __DIRTY_BITS_4U | _PAGE_EXEC_4U | 
_PAGE_L_4U); _PAGE_IE = _PAGE_IE_4U; _PAGE_E = _PAGE_E_4U; _PAGE_CACHE = _PAGE_CACHE_4U; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_W_4U); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | __ACCESS_BITS_4U | _PAGE_EXEC_4U); page_exec_bit = _PAGE_EXEC_4U; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } static void __init sun4v_pgprot_init(void) { unsigned long page_none, page_shared, page_copy, page_readonly; unsigned long page_exec_bit; int i; PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | page_cache4v_flag | _PAGE_P_4V | __ACCESS_BITS_4V | __DIRTY_BITS_4V | _PAGE_EXEC_4V); PAGE_KERNEL_LOCKED = PAGE_KERNEL; _PAGE_IE = _PAGE_IE_4V; _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = page_cache4v_flag; #ifdef CONFIG_DEBUG_PAGEALLOC kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET; #else kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ PAGE_OFFSET; #endif kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V); for (i = 1; i < 4; i++) kern_linear_pte_xor[i] = kern_linear_pte_xor[0]; pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag; page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag | __ACCESS_BITS_4V | _PAGE_EXEC_4V); page_exec_bit = _PAGE_EXEC_4V; prot_init_common(page_none, page_shared, page_copy, page_readonly, page_exec_bit); } unsigned long pte_sz_bits(unsigned long sz) { if (tlb_type == hypervisor) { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4V; case 64 * 1024: return _PAGE_SZ64K_4V; case 512 * 1024: return _PAGE_SZ512K_4V; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4V; } } else { switch (sz) { case 8 * 1024: default: return _PAGE_SZ8K_4U; case 64 * 1024: return _PAGE_SZ64K_4U; case 512 * 1024: return _PAGE_SZ512K_4U; case 4 * 1024 * 1024: return _PAGE_SZ4MB_4U; } } } pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); return pte; } static unsigned long kern_large_tte(unsigned long paddr) { unsigned long val; val = (_PAGE_VALID | _PAGE_SZ4MB_4U | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); if (tlb_type == 
hypervisor) val = (_PAGE_VALID | _PAGE_SZ4MB_4V | page_cache4v_flag | _PAGE_P_4V | _PAGE_EXEC_4V | _PAGE_W_4V); return val | paddr; } /* If not locked, zap it. */ void __flush_tlb_all(void) { unsigned long pstate; int i; __asm__ __volatile__("flushw\n\t" "rdpr %%pstate, %0\n\t" "wrpr %0, %1, %%pstate" : "=r" (pstate) : "i" (PSTATE_IE)); if (tlb_type == hypervisor) { sun4v_mmu_demap_all(); } else if (tlb_type == spitfire) { for (i = 0; i < 64; i++) { /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); spitfire_put_dtlb_data(i, 0x0UL); } /* Spitfire Errata #32 workaround */ /* NOTE: Always runs on spitfire, so no * cheetah+ page size encodings. */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); spitfire_put_itlb_data(i, 0x0UL); } } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { cheetah_flush_dtlb_all(); cheetah_flush_itlb_all(); } __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } pte_t *pte_alloc_one_kernel(struct mm_struct *mm) { struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); pte_t *pte = NULL; if (page) pte = (pte_t *) page_address(page); return pte; } pgtable_t pte_alloc_one(struct mm_struct *mm) { struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0); if (!ptdesc) return NULL; if (!pagetable_pte_ctor(ptdesc)) { pagetable_free(ptdesc); return NULL; } return ptdesc_address(ptdesc); } void pte_free_kernel(struct mm_struct *mm, pte_t *pte) { free_page((unsigned long)pte); } static void __pte_free(pgtable_t pte) { struct ptdesc *ptdesc = virt_to_ptdesc(pte); pagetable_pte_dtor(ptdesc); pagetable_free(ptdesc); } void pte_free(struct mm_struct *mm, pgtable_t pte) { __pte_free(pte); } void pgtable_free(void *table, bool is_page) { if (is_page) __pte_free(table); else kmem_cache_free(pgtable_cache, table); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void pte_free_now(struct rcu_head *head) { struct page *page; page = container_of(head, struct page, rcu_head); __pte_free((pgtable_t)page_address(page)); } void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { struct page *page; page = virt_to_page(pgtable); call_rcu(&page->rcu_head, pte_free_now); } void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd) { unsigned long pte, flags; struct mm_struct *mm; pmd_t entry = *pmd; if (!pmd_large(entry) || !pmd_young(entry)) return; pte = pmd_val(entry); /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ if (!(pte & _PAGE_VALID)) return; /* We are fabricating 8MB pages using 4MB real hw pages. 
*/ pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); mm = vma->vm_mm; spin_lock_irqsave(&mm->context.lock, flags); if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, addr, pte); spin_unlock_irqrestore(&mm->context.lock, flags); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) static void context_reload(void *__data) { struct mm_struct *mm = __data; if (mm == current->mm) load_secondary_context(mm); } void hugetlb_setup(struct pt_regs *regs) { struct mm_struct *mm = current->mm; struct tsb_config *tp; if (faulthandler_disabled() || !mm) { const struct exception_table_entry *entry; entry = search_exception_tables(regs->tpc); if (entry) { regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; return; } pr_alert("Unexpected HugeTLB setup in atomic context.\n"); die_if_kernel("HugeTSB in atomic", regs); } tp = &mm->context.tsb_block[MM_TSB_HUGE]; if (likely(tp->tsb == NULL)) tsb_grow(mm, MM_TSB_HUGE, 0); tsb_context_switch(mm); smp_tsb_sync(mm); /* On UltraSPARC-III+ and later, configure the second half of * the Data-TLB for huge pages. */ if (tlb_type == cheetah_plus) { bool need_context_reload = false; unsigned long ctx; spin_lock_irq(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT; if (ctx != mm->context.sparc64_ctx_val) { /* When changing the page size fields, we * must perform a context flush so that no * stale entries match. This flush must * occur with the original context register * settings. */ do_flush_tlb_mm(mm); /* Reload the context register of all processors * also executing in this address space. */ mm->context.sparc64_ctx_val = ctx; need_context_reload = true; } spin_unlock_irq(&ctx_alloc_lock); if (need_context_reload) on_each_cpu(context_reload, mm, 0); } } #endif static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static inline resource_size_t compute_kern_paddr(void *addr) { return (resource_size_t) (addr - KERNBASE + kern_base); } static void __init kernel_lds_init(void) { code_resource.start = compute_kern_paddr(_text); code_resource.end = compute_kern_paddr(_etext - 1); data_resource.start = compute_kern_paddr(_etext); data_resource.end = compute_kern_paddr(_edata - 1); bss_resource.start = compute_kern_paddr(__bss_start); bss_resource.end = compute_kern_paddr(_end - 1); } static int __init report_memory(void) { int i; struct resource *res; kernel_lds_init(); for (i = 0; i < pavail_ents; i++) { res = kzalloc(sizeof(struct resource), GFP_KERNEL); if (!res) { pr_warn("Failed to allocate source.\n"); break; } res->name = "System RAM"; res->start = pavail[i].phys_addr; res->end = pavail[i].phys_addr + pavail[i].reg_size - 1; res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; if (insert_resource(&iomem_resource, res) < 0) { pr_warn("Resource insertion failed.\n"); break; } insert_resource(res, &code_resource); insert_resource(res, &data_resource); insert_resource(res, &bss_resource); } return 0; } arch_initcall(report_memory); #ifdef CONFIG_SMP #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range #else #define do_flush_tlb_kernel_range 
__flush_tlb_kernel_range #endif void flush_tlb_kernel_range(unsigned long start, unsigned long end) { if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { if (start < LOW_OBP_ADDRESS) { flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); } if (end > HI_OBP_ADDRESS) { flush_tsb_kernel_range(HI_OBP_ADDRESS, end); do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end); } } else { flush_tsb_kernel_range(start, end); do_flush_tlb_kernel_range(start, end); } } void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_atomic(from); vto = kmap_atomic(to); copy_user_page(vto, vfrom, vaddr, to); kunmap_atomic(vto); kunmap_atomic(vfrom); /* If this page has ADI enabled, copy over any ADI tags * as well */ if (vma->vm_flags & VM_SPARC_ADI) { unsigned long pfrom, pto, i, adi_tag; pfrom = page_to_phys(from); pto = page_to_phys(to); for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) { asm volatile("ldxa [%1] %2, %0\n\t" : "=r" (adi_tag) : "r" (i), "i" (ASI_MCD_REAL)); asm volatile("stxa %0, [%1] %2\n\t" : : "r" (adi_tag), "r" (pto), "i" (ASI_MCD_REAL)); pto += adi_blksize(); } asm volatile("membar #Sync\n\t"); } } EXPORT_SYMBOL(copy_user_highpage); void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_atomic(from); vto = kmap_atomic(to); copy_page(vto, vfrom); kunmap_atomic(vto); kunmap_atomic(vfrom); /* If this platform is ADI enabled, copy any ADI tags * as well */ if (adi_capable()) { unsigned long pfrom, pto, i, adi_tag; pfrom = page_to_phys(from); pto = page_to_phys(to); for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) { asm volatile("ldxa [%1] %2, %0\n\t" : "=r" (adi_tag) : "r" (i), "i" (ASI_MCD_REAL)); asm volatile("stxa %0, [%1] %2\n\t" : : "r" (adi_tag), "r" (pto), "i" (ASI_MCD_REAL)); pto += adi_blksize(); } asm volatile("membar #Sync\n\t"); } } EXPORT_SYMBOL(copy_highpage); pgprot_t vm_get_page_prot(unsigned long vm_flags) { unsigned long prot = pgprot_val(protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); if (vm_flags & VM_SPARC_ADI) prot |= _PAGE_MCD_4V; return __pgprot(prot); } EXPORT_SYMBOL(vm_get_page_prot);
linux-master
arch/sparc/mm/init_64.c
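The block comment ahead of patch_one_ktsb_phys() in init_64.c above explains how the swapper TSB base is normally built with a sethi/or/sethi/or/sllx/or sequence and how the first four instructions get their immediates rewritten with the TSB's physical address. As a reading aid, here is a minimal stand-alone user-space sketch (not kernel code) that performs the same 22-bit/10-bit split the patch routine uses and reassembles the address the way the remaining sllx/or pair would at run time; the sample address and the main() harness are purely illustrative.

/*
 * Sketch only: mirrors the field split done by patch_one_ktsb_phys().
 * sethi carries a 22-bit immediate (mask ~0x3fffff in the kernel code),
 * or carries a 10-bit one (mask 0x3ff), so pa>>10 and pa&0x3ff of each
 * 32-bit half round-trip exactly.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pa = 0x000001fe40002000ULL;	/* illustrative TSB physical address */
	uint32_t high = (uint32_t)(pa >> 32);
	uint32_t low  = (uint32_t)pa;

	uint32_t uhi = high >> 10;	/* sethi %uhi(pa), REG1 */
	uint32_t hi  = low  >> 10;	/* sethi %hi(pa),  REG2 */
	uint32_t ulo = high & 0x3ff;	/* or    REG1, %ulo(pa), REG1 */
	uint32_t lo  = low  & 0x3ff;	/* or    REG2, %lo(pa),  REG2 */

	/* what "sllx REG1, 32, REG1; or REG1, REG2, REG1" computes at run time */
	uint64_t rebuilt = ((((uint64_t)uhi << 10) | ulo) << 32)
			 | (((uint64_t)hi << 10) | lo);

	assert(rebuilt == pa);
	printf("uhi=%06x hi=%06x ulo=%03x lo=%03x -> 0x%016llx\n",
	       uhi, hi, ulo, lo, (unsigned long long)rebuilt);
	return 0;
}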
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/sparc/mm/leon_m.c * * Copyright (C) 2004 Konrad Eisele ([email protected], [email protected]) Gaisler Research * Copyright (C) 2009 Daniel Hellstrom ([email protected]) Aeroflex Gaisler AB * Copyright (C) 2009 Konrad Eisele ([email protected]) Aeroflex Gaisler AB * * do srmmu probe in software * */ #include <linux/kernel.h> #include <linux/mm.h> #include <asm/asi.h> #include <asm/leon.h> #include <asm/tlbflush.h> #include "mm_32.h" int leon_flush_during_switch = 1; static int srmmu_swprobe_trace; static inline unsigned long leon_get_ctable_ptr(void) { unsigned int retval; __asm__ __volatile__("lda [%1] %2, %0\n\t" : "=r" (retval) : "r" (SRMMU_CTXTBL_PTR), "i" (ASI_LEON_MMUREGS)); return (retval & SRMMU_CTX_PMASK) << 4; } unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr) { unsigned int ctxtbl; unsigned int pgd, pmd, ped; unsigned int ptr; unsigned int lvl, pte, paddrbase; unsigned int ctx; unsigned int paddr_calc; paddrbase = 0; if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: trace on\n"); ctxtbl = leon_get_ctable_ptr(); if (!(ctxtbl)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n"); return 0; } if (!_pfn_valid(PFN(ctxtbl))) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n", PFN(ctxtbl)); return 0; } ctx = srmmu_get_context(); if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: --- ctx (%x) ---\n", ctx); pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4)); if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: pgd is entry level 3\n"); lvl = 3; pte = pgd; paddrbase = pgd & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: pgd is invalid => 0\n"); return 0; } if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: --- pgd (%x) ---\n", pgd); ptr = (pgd & SRMMU_PTD_PMASK) << 4; ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4); if (!_pfn_valid(PFN(ptr))) return 0; pmd = LEON_BYPASS_LOAD_PA(ptr); if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: pmd is entry level 2\n"); lvl = 2; pte = pmd; paddrbase = pmd & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: pmd is invalid => 0\n"); return 0; } if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd); ptr = (pmd & SRMMU_PTD_PMASK) << 4; ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4); if (!_pfn_valid(PFN(ptr))) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n", PFN(ptr)); return 0; } ped = LEON_BYPASS_LOAD_PA(ptr); if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: ped is entry level 1\n"); lvl = 1; pte = ped; paddrbase = ped & _SRMMU_PTE_PMASK_LEON; goto ready; } if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: ped is invalid => 0\n"); return 0; } if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: --- ped (%x) ---\n", ped); ptr = (ped & SRMMU_PTD_PMASK) << 4; ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4); if (!_pfn_valid(PFN(ptr))) return 0; ptr = LEON_BYPASS_LOAD_PA(ptr); if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) { if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: ptr is entry level 0\n"); lvl = 0; pte = ptr; paddrbase = ptr & _SRMMU_PTE_PMASK_LEON; goto ready; } if (srmmu_swprobe_trace) 
printk(KERN_INFO "swprobe: ptr is invalid => 0\n"); return 0; ready: switch (lvl) { case 0: paddr_calc = (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4); break; case 1: paddr_calc = (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4); break; case 2: paddr_calc = (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4); break; default: case 3: paddr_calc = vaddr; break; } if (srmmu_swprobe_trace) printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); if (paddr) *paddr = paddr_calc; return pte; } void leon_flush_icache_all(void) { __asm__ __volatile__(" flush "); /*iflush*/ } void leon_flush_dcache_all(void) { __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : "i"(ASI_LEON_DFLUSH) : "memory"); } void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page) { if (vma->vm_flags & VM_EXEC) leon_flush_icache_all(); leon_flush_dcache_all(); } void leon_flush_cache_all(void) { __asm__ __volatile__(" flush "); /*iflush*/ __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : "i"(ASI_LEON_DFLUSH) : "memory"); } void leon_flush_tlb_all(void) { leon_flush_cache_all(); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400), "i"(ASI_LEON_MMUFLUSH) : "memory"); } /* get all cache regs */ void leon3_getCacheRegs(struct leon3_cacheregs *regs) { unsigned long ccr, iccr, dccr; if (!regs) return; /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */ __asm__ __volatile__("lda [%%g0] %3, %0\n\t" "mov 0x08, %%g1\n\t" "lda [%%g1] %3, %1\n\t" "mov 0x0c, %%g1\n\t" "lda [%%g1] %3, %2\n\t" : "=r"(ccr), "=r"(iccr), "=r"(dccr) /* output */ : "i"(ASI_LEON_CACHEREGS) /* input */ : "g1" /* clobber list */ ); regs->ccr = ccr; regs->iccr = iccr; regs->dccr = dccr; } /* Due to virtual cache we need to check cache configuration if * it is possible to skip flushing in some cases. * * Leon2 and Leon3 differ in their way of telling cache information * */ int __init leon_flush_needed(void) { int flush_needed = -1; unsigned int ssize, sets; char *setStr[4] = { "direct mapped", "2-way associative", "3-way associative", "4-way associative" }; /* leon 3 */ struct leon3_cacheregs cregs; leon3_getCacheRegs(&cregs); sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24; /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */ ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20); printk(KERN_INFO "CACHE: %s cache, set size %dk\n", sets > 3 ? "unknown" : setStr[sets], ssize); if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) { /* Set Size <= Page size ==> flush on every context switch not needed. 
*/ flush_needed = 0; printk(KERN_INFO "CACHE: not flushing on every context switch\n"); } return flush_needed; } void leon_switch_mm(void) { flush_tlb_mm((void *)0); if (leon_flush_during_switch) leon_flush_cache_all(); } static void leon_flush_cache_mm(struct mm_struct *mm) { leon_flush_cache_all(); } static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { leon_flush_pcache_all(vma, page); } static void leon_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { leon_flush_cache_all(); } static void leon_flush_tlb_mm(struct mm_struct *mm) { leon_flush_tlb_all(); } static void leon_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { leon_flush_tlb_all(); } static void leon_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { leon_flush_tlb_all(); } static void leon_flush_page_to_ram(unsigned long page) { leon_flush_cache_all(); } static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page) { leon_flush_cache_all(); } static void leon_flush_page_for_dma(unsigned long page) { leon_flush_dcache_all(); } void __init poke_leonsparc(void) { } static const struct sparc32_cachetlb_ops leon_ops = { .cache_all = leon_flush_cache_all, .cache_mm = leon_flush_cache_mm, .cache_page = leon_flush_cache_page, .cache_range = leon_flush_cache_range, .tlb_all = leon_flush_tlb_all, .tlb_mm = leon_flush_tlb_mm, .tlb_page = leon_flush_tlb_page, .tlb_range = leon_flush_tlb_range, .page_to_ram = leon_flush_page_to_ram, .sig_insns = leon_flush_sig_insns, .page_for_dma = leon_flush_page_for_dma, }; void __init init_leon(void) { srmmu_name = "LEON"; sparc32_cachetlb_ops = &leon_ops; poke_srmmu = poke_leonsparc; leon_flush_during_switch = leon_flush_needed(); }
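For the cache-geometry test in leon_flush_needed() just above, the following stand-alone sketch (not kernel code) spells out the decision: the context-switch flush can only be skipped for a direct-mapped data cache whose way size does not exceed one page, since only then can no virtual aliasing occur. The field layout (set count in bits 26:24, log2 of the way size in KiB in bits 23:20) is inferred from the masks and shifts used in the function and is an assumption here, as are the helper name and the sample register values.

/* Sketch of the leon_flush_needed() decision; field layout assumed, see above. */
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed 4 KiB pages, as on LEON/sun4m */

static int flush_needed_sketch(unsigned int dccr)
{
	unsigned int sets  = (dccr >> 24) & 0x7;		/* 0 => direct mapped */
	unsigned int ssize = 1u << ((dccr >> 20) & 0xf);	/* way size in KiB */

	/* same condition as leon_flush_needed(): skip only if direct mapped
	 * and no bigger than a page */
	return !(ssize <= PAGE_SIZE / 1024 && sets == 0);
}

int main(void)
{
	printf("4k direct-mapped dcache: flush %s\n",
	       flush_needed_sketch(0x2u << 20) ? "needed" : "skippable");
	printf("8k 2-way dcache:         flush %s\n",
	       flush_needed_sketch((1u << 24) | (0x3u << 20)) ? "needed" : "skippable");
	return 0;
}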
linux-master
arch/sparc/mm/leon_mm.c
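The leon_swprobe() walk in leon_mm.c above ends by converting the matched SRMMU entry into a physical address; for a level-0 (4 KiB page) PTE that means taking the page offset from the virtual address and the physical page base from PTE bits 31:8 shifted left by 4. A minimal stand-alone sketch of that arithmetic follows (not kernel code); the LEON_PTE_SH value of 12 is the usual SRMMU 4 KiB split and, like the sample virtual address and PTE, is an assumption made for illustration.

/* Sketch of the lvl == 0 case at leon_swprobe()'s "ready:" label. */
#include <stdint.h>
#include <stdio.h>

#define LEON_PTE_SH 12	/* assumed: 4 KiB page offset bits */

int main(void)
{
	uint32_t vaddr = 0xf0003abcU;	/* illustrative virtual address */
	uint32_t pte   = 0x1fe40286U;	/* illustrative level-0 SRMMU PTE */

	/* page offset from vaddr, physical page base from pte bits 31:8 << 4 */
	uint64_t paddr = (vaddr & ~(~0u << LEON_PTE_SH))
		       | ((uint64_t)(pte & ~0xffu) << 4);

	printf("vaddr 0x%08x with pte 0x%08x -> paddr 0x%09llx\n",
	       vaddr, pte, (unsigned long long)paddr);
	return 0;
}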
// SPDX-License-Identifier: GPL-2.0 /* * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1995,2002 Pete Zaitcev ([email protected]) * Copyright (C) 1996 Eddie C. Dost ([email protected]) * Copyright (C) 1997,1998 Jakub Jelinek ([email protected]) * Copyright (C) 1999,2000 Anton Blanchard ([email protected]) */ #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/memblock.h> #include <linux/pagemap.h> #include <linux/vmalloc.h> #include <linux/kdebug.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> #include <linux/gfp.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/io-unit.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/bitext.h> #include <asm/vaddrs.h> #include <asm/cache.h> #include <asm/traps.h> #include <asm/oplib.h> #include <asm/mbus.h> #include <asm/page.h> #include <asm/asi.h> #include <asm/smp.h> #include <asm/io.h> /* Now the cpu specific definitions. */ #include <asm/turbosparc.h> #include <asm/tsunami.h> #include <asm/viking.h> #include <asm/swift.h> #include <asm/leon.h> #include <asm/mxcc.h> #include <asm/ross.h> #include "mm_32.h" enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; EXPORT_SYMBOL(vac_cache_size); int vac_line_size; extern struct resource sparc_iomap; extern unsigned long last_valid_pfn; static pgd_t *srmmu_swapper_pg_dir; const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops; EXPORT_SYMBOL(sparc32_cachetlb_ops); #ifdef CONFIG_SMP const struct sparc32_cachetlb_ops *local_ops; #define FLUSH_BEGIN(mm) #define FLUSH_END #else #define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) { #define FLUSH_END } #endif int flush_page_for_dma_global = 1; char *srmmu_name; ctxd_t *srmmu_ctx_table_phys; static ctxd_t *srmmu_context_table; int viking_mxcc_present; static DEFINE_SPINLOCK(srmmu_context_spinlock); static int is_hypersparc; static int srmmu_cache_pagetables; /* these will be initialized in srmmu_nocache_calcsize() */ static unsigned long srmmu_nocache_size; static unsigned long srmmu_nocache_end; /* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */ #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4) /* The context table is a nocache user with the biggest alignment needs. */ #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS) void *srmmu_nocache_pool; static struct bit_map srmmu_nocache_map; static inline int srmmu_pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & 0xFFFFFFF); } /* XXX should we hyper_flush_whole_icache here - Anton */ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) { pte_t pte; pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4))); set_pte((pte_t *)ctxp, pte); } /* * Locations of MSI Registers. */ #define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */ /* * Useful bits in the MSI Registers. */ #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ static void msi_set_sync(void) { __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" "andn %%g3, %2, %%g3\n\t" "sta %%g3, [%0] %1\n\t" : : "r" (MSI_MBUS_ARBEN), "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3"); } void pmd_set(pmd_t *pmdp, pte_t *ptep) { unsigned long ptp = __nocache_pa(ptep) >> 4; set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp)); } /* * size: bytes to allocate in the nocache area. * align: bytes, number to align at. 
* Returns the virtual address of the allocated area. */ static void *__srmmu_get_nocache(int size, int align) { int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT; unsigned long addr; if (size < minsz) { printk(KERN_ERR "Size 0x%x too small for nocache request\n", size); size = minsz; } if (size & (minsz - 1)) { printk(KERN_ERR "Size 0x%x unaligned in nocache request\n", size); size += minsz - 1; } BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); offset = bit_map_string_get(&srmmu_nocache_map, size >> SRMMU_NOCACHE_BITMAP_SHIFT, align >> SRMMU_NOCACHE_BITMAP_SHIFT); if (offset == -1) { printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n", size, (int) srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); return NULL; } addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT); return (void *)addr; } void *srmmu_get_nocache(int size, int align) { void *tmp; tmp = __srmmu_get_nocache(size, align); if (tmp) memset(tmp, 0, size); return tmp; } void srmmu_free_nocache(void *addr, int size) { unsigned long vaddr; int offset; vaddr = (unsigned long)addr; if (vaddr < SRMMU_NOCACHE_VADDR) { printk("Vaddr %lx is smaller than nocache base 0x%lx\n", vaddr, (unsigned long)SRMMU_NOCACHE_VADDR); BUG(); } if (vaddr + size > srmmu_nocache_end) { printk("Vaddr %lx is bigger than nocache end 0x%lx\n", vaddr, srmmu_nocache_end); BUG(); } if (!is_power_of_2(size)) { printk("Size 0x%x is not a power of 2\n", size); BUG(); } if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { printk("Size 0x%x is too small\n", size); BUG(); } if (vaddr & (size - 1)) { printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size); BUG(); } offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT; size = size >> SRMMU_NOCACHE_BITMAP_SHIFT; bit_map_clear(&srmmu_nocache_map, offset, size); } static void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end); /* Return how much physical memory we have. */ static unsigned long __init probe_memory(void) { unsigned long total = 0; int i; for (i = 0; sp_banks[i].num_bytes; i++) total += sp_banks[i].num_bytes; return total; } /* * Reserve nocache dynamically proportionally to the amount of * system RAM. -- Tomas Szepe <[email protected]>, June 2002 */ static void __init srmmu_nocache_calcsize(void) { unsigned long sysmemavail = probe_memory() / 1024; int srmmu_nocache_npages; srmmu_nocache_npages = sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256; /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. 
*/ // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256; if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES; /* anything above 1280 blows up */ if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES) srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES; srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE; srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size; } static void __init srmmu_nocache_init(void) { void *srmmu_nocache_bitmap; unsigned int bitmap_bits; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long paddr, vaddr; unsigned long pteval; bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); if (!srmmu_nocache_pool) panic("%s: Failed to allocate %lu bytes align=0x%x\n", __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long), SMP_CACHE_BYTES); if (!srmmu_nocache_bitmap) panic("%s: Failed to allocate %zu bytes\n", __func__, BITS_TO_LONGS(bitmap_bits) * sizeof(long)); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE); init_mm.pgd = srmmu_swapper_pg_dir; srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end); paddr = __pa((unsigned long)srmmu_nocache_pool); vaddr = SRMMU_NOCACHE_VADDR; while (vaddr < srmmu_nocache_end) { pgd = pgd_offset_k(vaddr); p4d = p4d_offset(pgd, vaddr); pud = pud_offset(p4d, vaddr); pmd = pmd_offset(__nocache_fix(pud), vaddr); pte = pte_offset_kernel(__nocache_fix(pmd), vaddr); pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV); if (srmmu_cache_pagetables) pteval |= SRMMU_CACHE; set_pte(__nocache_fix(pte), __pte(pteval)); vaddr += PAGE_SIZE; paddr += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); } pgd_t *get_pgd_fast(void) { pgd_t *pgd = NULL; pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); if (pgd) { pgd_t *init = pgd_offset_k(0); memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); } return pgd; } /* * Hardware needs alignment to 256 only, but we align to whole page size * to reduce fragmentation problems due to the buddy principle. * XXX Provide actual fragmentation statistics in /proc. * * Alignments up to the page size are the same for physical and virtual * addresses of the nocache area. 
*/ pgtable_t pte_alloc_one(struct mm_struct *mm) { pte_t *ptep; struct page *page; if (!(ptep = pte_alloc_one_kernel(mm))) return NULL; page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); if (page_ref_inc_return(page) == 2 && !pagetable_pte_ctor(page_ptdesc(page))) { page_ref_dec(page); ptep = NULL; } spin_unlock(&mm->page_table_lock); return ptep; } void pte_free(struct mm_struct *mm, pgtable_t ptep) { struct page *page; page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); if (page_ref_dec_return(page) == 1) pagetable_pte_dtor(page_ptdesc(page)); spin_unlock(&mm->page_table_lock); srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE); } /* context handling - a dynamically sized pool is used */ #define NO_CONTEXT -1 struct ctx_list { struct ctx_list *next; struct ctx_list *prev; unsigned int ctx_number; struct mm_struct *ctx_mm; }; static struct ctx_list *ctx_list_pool; static struct ctx_list ctx_free; static struct ctx_list ctx_used; /* At boot time we determine the number of contexts */ static int num_contexts; static inline void remove_from_ctx_list(struct ctx_list *entry) { entry->next->prev = entry->prev; entry->prev->next = entry->next; } static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) { entry->next = head; (entry->prev = head->prev)->next = entry; head->prev = entry; } #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ctx_list *ctxp; ctxp = ctx_free.next; if (ctxp != &ctx_free) { remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); mm->context = ctxp->ctx_number; ctxp->ctx_mm = mm; return; } ctxp = ctx_used.next; if (ctxp->ctx_mm == old_mm) ctxp = ctxp->next; if (ctxp == &ctx_used) panic("out of mmu contexts"); flush_cache_mm(ctxp->ctx_mm); flush_tlb_mm(ctxp->ctx_mm); remove_from_ctx_list(ctxp); add_to_used_ctxlist(ctxp); ctxp->ctx_mm->context = NO_CONTEXT; ctxp->ctx_mm = mm; mm->context = ctxp->ctx_number; } static inline void free_context(int context) { struct ctx_list *ctx_old; ctx_old = ctx_list_pool + context; remove_from_ctx_list(ctx_old); add_to_free_ctxlist(ctx_old); } static void __init sparc_context_init(int numctx) { int ctx; unsigned long size; size = numctx * sizeof(struct ctx_list); ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES); if (!ctx_list_pool) panic("%s: Failed to allocate %lu bytes\n", __func__, size); for (ctx = 0; ctx < numctx; ctx++) { struct ctx_list *clist; clist = (ctx_list_pool + ctx); clist->ctx_number = ctx; clist->ctx_mm = NULL; } ctx_free.next = ctx_free.prev = &ctx_free; ctx_used.next = ctx_used.prev = &ctx_used; for (ctx = 0; ctx < numctx; ctx++) add_to_free_ctxlist(ctx_list_pool + ctx); } void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) { unsigned long flags; if (mm->context == NO_CONTEXT) { spin_lock_irqsave(&srmmu_context_spinlock, flags); alloc_context(old_mm, mm); spin_unlock_irqrestore(&srmmu_context_spinlock, flags); srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd); } if (sparc_cpu_model == sparc_leon) leon_switch_mm(); if (is_hypersparc) hyper_flush_whole_icache(); srmmu_set_context(mm->context); } /* Low level IO area allocation on the SRMMU. 
*/ static inline void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; unsigned long tmp; physaddr &= PAGE_MASK; pgdp = pgd_offset_k(virt_addr); p4dp = p4d_offset(pgdp, virt_addr); pudp = pud_offset(p4dp, virt_addr); pmdp = pmd_offset(pudp, virt_addr); ptep = pte_offset_kernel(pmdp, virt_addr); tmp = (physaddr >> 4) | SRMMU_ET_PTE; /* I need to test whether this is consistent over all * sun4m's. The bus_type represents the upper 4 bits of * 36-bit physical address on the I/O space lines... */ tmp |= (bus_type << 28); tmp |= SRMMU_PRIV; __flush_page_to_ram(virt_addr); set_pte(ptep, __pte(tmp)); } void srmmu_mapiorange(unsigned int bus, unsigned long xpa, unsigned long xva, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_mapioaddr(xpa, xva, bus); xva += PAGE_SIZE; xpa += PAGE_SIZE; } flush_tlb_all(); } static inline void srmmu_unmapioaddr(unsigned long virt_addr) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; pgdp = pgd_offset_k(virt_addr); p4dp = p4d_offset(pgdp, virt_addr); pudp = pud_offset(p4dp, virt_addr); pmdp = pmd_offset(pudp, virt_addr); ptep = pte_offset_kernel(pmdp, virt_addr); /* No need to flush uncacheable page. */ __pte_clear(ptep); } void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len) { while (len != 0) { len -= PAGE_SIZE; srmmu_unmapioaddr(virt_addr); virt_addr += PAGE_SIZE; } flush_tlb_all(); } /* tsunami.S */ extern void tsunami_flush_cache_all(void); extern void tsunami_flush_cache_mm(struct mm_struct *mm); extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_flush_page_to_ram(unsigned long page); extern void tsunami_flush_page_for_dma(unsigned long page); extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void tsunami_flush_tlb_all(void); extern void tsunami_flush_tlb_mm(struct mm_struct *mm); extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void tsunami_setup_blockops(void); /* swift.S */ extern void swift_flush_cache_all(void); extern void swift_flush_cache_mm(struct mm_struct *mm); extern void swift_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void swift_flush_page_to_ram(unsigned long page); extern void swift_flush_page_for_dma(unsigned long page); extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void swift_flush_tlb_all(void); extern void swift_flush_tlb_mm(struct mm_struct *mm); extern void swift_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); #if 0 /* P3: deadwood to debug precise flushes on Swift. */ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cctx, ctx1; page &= PAGE_MASK; if ((ctx1 = vma->vm_mm->context) != -1) { cctx = srmmu_get_context(); /* Is context # ever different from current context? 
P3 */ if (cctx != ctx1) { printk("flush ctx %02x curr %02x\n", ctx1, cctx); srmmu_set_context(ctx1); swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); srmmu_set_context(cctx); } else { /* Rm. prot. bits from virt. c. */ /* swift_flush_cache_all(); */ /* swift_flush_cache_page(vma, page); */ swift_flush_page(page); __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PROBE)); /* same as above: srmmu_flush_tlb_page() */ } } } #endif /* * The following are all MBUS based SRMMU modules, and therefore could * be found in a multiprocessor configuration. On the whole, these * chips seems to be much more touchy about DVMA and page tables * with respect to cache coherency. */ /* viking.S */ extern void viking_flush_cache_all(void); extern void viking_flush_cache_mm(struct mm_struct *mm); extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void viking_flush_page_to_ram(unsigned long page); extern void viking_flush_page_for_dma(unsigned long page); extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); extern void viking_flush_page(unsigned long page); extern void viking_mxcc_flush_page(unsigned long page); extern void viking_flush_tlb_all(void); extern void viking_flush_tlb_mm(struct mm_struct *mm); extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void sun4dsmp_flush_tlb_all(void); extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm); extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); /* hypersparc.S */ extern void hypersparc_flush_cache_all(void); extern void hypersparc_flush_cache_mm(struct mm_struct *mm); extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page); extern void hypersparc_flush_page_to_ram(unsigned long page); extern void hypersparc_flush_page_for_dma(unsigned long page); extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); extern void hypersparc_flush_tlb_all(void); extern void hypersparc_flush_tlb_mm(struct mm_struct *mm); extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void hypersparc_setup_blockops(void); /* * NOTE: All of this startup code assumes the low 16mb (approx.) of * kernel mappings are done with one single contiguous chunk of * ram. On small ram machines (classics mainly) we only get * around 8mb mapped for us. 
*/ static void __init early_pgtable_allocfail(char *type) { prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); prom_halt(); } static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; while (start < end) { pgdp = pgd_offset_k(start); p4dp = p4d_offset(pgdp, start); pudp = pud_offset(p4dp, start); if (pud_none(*__nocache_fix(pudp))) { pmdp = __srmmu_get_nocache( SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); if (pmdp == NULL) early_pgtable_allocfail("pmd"); memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); pud_set(__nocache_fix(pudp), pmdp); } pmdp = pmd_offset(__nocache_fix(pudp), start); if (srmmu_pmd_none(*__nocache_fix(pmdp))) { ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); if (ptep == NULL) early_pgtable_allocfail("pte"); memset(__nocache_fix(ptep), 0, PTE_SIZE); pmd_set(__nocache_fix(pmdp), ptep); } if (start > (0xffffffffUL - PMD_SIZE)) break; start = (start + PMD_SIZE) & PMD_MASK; } } static void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end) { pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; while (start < end) { pgdp = pgd_offset_k(start); p4dp = p4d_offset(pgdp, start); pudp = pud_offset(p4dp, start); if (pud_none(*pudp)) { pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); if (pmdp == NULL) early_pgtable_allocfail("pmd"); memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); pud_set((pud_t *)pgdp, pmdp); } pmdp = pmd_offset(pudp, start); if (srmmu_pmd_none(*pmdp)) { ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); if (ptep == NULL) early_pgtable_allocfail("pte"); memset(ptep, 0, PTE_SIZE); pmd_set(pmdp, ptep); } if (start > (0xffffffffUL - PMD_SIZE)) break; start = (start + PMD_SIZE) & PMD_MASK; } } /* These flush types are not available on all chips... */ static inline unsigned long srmmu_probe(unsigned long vaddr) { unsigned long retval; if (sparc_cpu_model != sparc_leon) { vaddr &= PAGE_MASK; __asm__ __volatile__("lda [%1] %2, %0\n\t" : "=r" (retval) : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE)); } else { retval = leon_swprobe(vaddr, NULL); } return retval; } /* * This is much cleaner than poking around physical address space * looking at the prom's page table directly which is what most * other OS's do. Yuck... this is much better. */ static void __init srmmu_inherit_prom_mappings(unsigned long start, unsigned long end) { unsigned long probed; unsigned long addr; pgd_t *pgdp; p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ while (start <= end) { if (start == 0) break; /* probably wrap around */ if (start == 0xfef00000) start = KADB_DEBUGGER_BEGVM; probed = srmmu_probe(start); if (!probed) { /* continue probing until we find an entry */ start += PAGE_SIZE; continue; } /* A red snapper, see what it really is. 
*/ what = 0; addr = start - PAGE_SIZE; if (!(start & ~(PMD_MASK))) { if (srmmu_probe(addr + PMD_SIZE) == probed) what = 1; } if (!(start & ~(PGDIR_MASK))) { if (srmmu_probe(addr + PGDIR_SIZE) == probed) what = 2; } pgdp = pgd_offset_k(start); p4dp = p4d_offset(pgdp, start); pudp = pud_offset(p4dp, start); if (what == 2) { *__nocache_fix(pgdp) = __pgd(probed); start += PGDIR_SIZE; continue; } if (pud_none(*__nocache_fix(pudp))) { pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); if (pmdp == NULL) early_pgtable_allocfail("pmd"); memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); pud_set(__nocache_fix(pudp), pmdp); } pmdp = pmd_offset(__nocache_fix(pudp), start); if (what == 1) { *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed); start += PMD_SIZE; continue; } if (srmmu_pmd_none(*__nocache_fix(pmdp))) { ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); if (ptep == NULL) early_pgtable_allocfail("pte"); memset(__nocache_fix(ptep), 0, PTE_SIZE); pmd_set(__nocache_fix(pmdp), ptep); } ptep = pte_offset_kernel(__nocache_fix(pmdp), start); *__nocache_fix(ptep) = __pte(probed); start += PAGE_SIZE; } } #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID) /* Create a third-level SRMMU 16MB page mapping. */ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base) { pgd_t *pgdp = pgd_offset_k(vaddr); unsigned long big_pte; big_pte = KERNEL_PTE(phys_base >> 4); *__nocache_fix(pgdp) = __pgd(big_pte); } /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) { unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK); unsigned long vstart = (vbase & PGDIR_MASK); unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); /* Map "low" memory only */ const unsigned long min_vaddr = PAGE_OFFSET; const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; if (vstart < min_vaddr || vstart >= max_vaddr) return vstart; if (vend > max_vaddr || vend < min_vaddr) vend = max_vaddr; while (vstart < vend) { do_large_mapping(vstart, pstart); vstart += PGDIR_SIZE; pstart += PGDIR_SIZE; } return vstart; } static void __init map_kernel(void) { int i; if (phys_base > 0) { do_large_mapping(PAGE_OFFSET, phys_base); } for (i = 0; sp_banks[i].num_bytes != 0; i++) { map_spbank((unsigned long)__va(sp_banks[i].base_addr), i); } } void (*poke_srmmu)(void) = NULL; void __init srmmu_paging_init(void) { int i; phandle cpunode; char node_str[128]; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long pages_avail; init_mm.context = (unsigned long) NO_CONTEXT; sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */ if (sparc_cpu_model == sun4d) num_contexts = 65536; /* We know it is Viking */ else { /* Find the number of contexts on the srmmu. 
*/ cpunode = prom_getchild(prom_root_node); num_contexts = 0; while (cpunode != 0) { prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); if (!strcmp(node_str, "cpu")) { num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8); break; } cpunode = prom_getsibling(cpunode); } } if (!num_contexts) { prom_printf("Something wrong, can't find cpu node in paging_init.\n"); prom_halt(); } pages_avail = 0; last_valid_pfn = bootmem_init(&pages_avail); srmmu_nocache_calcsize(); srmmu_nocache_init(); srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE)); map_kernel(); /* ctx table has to be physically aligned to its size */ srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t)); srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table); for (i = 0; i < num_contexts; i++) srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir); flush_cache_all(); srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys); #ifdef CONFIG_SMP /* Stop from hanging here... */ local_ops->tlb_all(); #else flush_tlb_all(); #endif poke_srmmu(); srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END); srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END); srmmu_allocate_ptable_skeleton( __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP); srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); pgd = pgd_offset_k(PKMAP_BASE); p4d = p4d_offset(pgd, PKMAP_BASE); pud = pud_offset(p4d, PKMAP_BASE); pmd = pmd_offset(pud, PKMAP_BASE); pte = pte_offset_kernel(pmd, PKMAP_BASE); pkmap_page_table = pte; flush_cache_all(); flush_tlb_all(); sparc_context_init(num_contexts); { unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; max_zone_pfn[ZONE_DMA] = max_low_pfn; max_zone_pfn[ZONE_NORMAL] = max_low_pfn; max_zone_pfn[ZONE_HIGHMEM] = highend_pfn; free_area_init(max_zone_pfn); } } void mmu_info(struct seq_file *m) { seq_printf(m, "MMU type\t: %s\n" "contexts\t: %d\n" "nocache total\t: %ld\n" "nocache used\t: %d\n", srmmu_name, num_contexts, srmmu_nocache_size, srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); } int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context = NO_CONTEXT; return 0; } void destroy_context(struct mm_struct *mm) { unsigned long flags; if (mm->context != NO_CONTEXT) { flush_cache_mm(mm); srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); flush_tlb_mm(mm); spin_lock_irqsave(&srmmu_context_spinlock, flags); free_context(mm->context); spin_unlock_irqrestore(&srmmu_context_spinlock, flags); mm->context = NO_CONTEXT; } } /* Init various srmmu chip types. 
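 * Each init_*() routine below recognizes one SRMMU implementation,
 * sets srmmu_name and srmmu_modtype, installs the matching cache/TLB
 * ops and hooks up its poke_*() register setup routine.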
*/ static void __init srmmu_is_bad(void) { prom_printf("Could not determine SRMMU chip type.\n"); prom_halt(); } static void __init init_vac_layout(void) { phandle nd; int cache_lines; char node_str[128]; #ifdef CONFIG_SMP int cpu = 0; unsigned long max_size = 0; unsigned long min_line_size = 0x10000000; #endif nd = prom_getchild(prom_root_node); while ((nd = prom_getsibling(nd)) != 0) { prom_getstring(nd, "device_type", node_str, sizeof(node_str)); if (!strcmp(node_str, "cpu")) { vac_line_size = prom_getint(nd, "cache-line-size"); if (vac_line_size == -1) { prom_printf("can't determine cache-line-size, halting.\n"); prom_halt(); } cache_lines = prom_getint(nd, "cache-nlines"); if (cache_lines == -1) { prom_printf("can't determine cache-nlines, halting.\n"); prom_halt(); } vac_cache_size = cache_lines * vac_line_size; #ifdef CONFIG_SMP if (vac_cache_size > max_size) max_size = vac_cache_size; if (vac_line_size < min_line_size) min_line_size = vac_line_size; //FIXME: cpus not contiguous!! cpu++; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) break; #else break; #endif } } if (nd == 0) { prom_printf("No CPU nodes found, halting.\n"); prom_halt(); } #ifdef CONFIG_SMP vac_cache_size = max_size; vac_line_size = min_line_size; #endif printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n", (int)vac_cache_size, (int)vac_line_size); } static void poke_hypersparc(void) { volatile unsigned long clear; unsigned long mreg = srmmu_get_mmureg(); hyper_flush_unconditional_combined(); mreg &= ~(HYPERSPARC_CWENABLE); mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE); mreg |= (HYPERSPARC_CMODE); srmmu_set_mmureg(mreg); #if 0 /* XXX I think this is bad news... -DaveM */ hyper_clear_all_tags(); #endif put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE); hyper_flush_whole_icache(); clear = srmmu_get_faddr(); clear = srmmu_get_fstatus(); } static const struct sparc32_cachetlb_ops hypersparc_ops = { .cache_all = hypersparc_flush_cache_all, .cache_mm = hypersparc_flush_cache_mm, .cache_page = hypersparc_flush_cache_page, .cache_range = hypersparc_flush_cache_range, .tlb_all = hypersparc_flush_tlb_all, .tlb_mm = hypersparc_flush_tlb_mm, .tlb_page = hypersparc_flush_tlb_page, .tlb_range = hypersparc_flush_tlb_range, .page_to_ram = hypersparc_flush_page_to_ram, .sig_insns = hypersparc_flush_sig_insns, .page_for_dma = hypersparc_flush_page_for_dma, }; static void __init init_hypersparc(void) { srmmu_name = "ROSS HyperSparc"; srmmu_modtype = HyperSparc; init_vac_layout(); is_hypersparc = 1; sparc32_cachetlb_ops = &hypersparc_ops; poke_srmmu = poke_hypersparc; hypersparc_setup_blockops(); } static void poke_swift(void) { unsigned long mreg; /* Clear any crap from the cache or else... */ swift_flush_cache_all(); /* Enable I & D caches */ mreg = srmmu_get_mmureg(); mreg |= (SWIFT_IE | SWIFT_DE); /* * The Swift branch folding logic is completely broken. At * trap time, if things are just right, if can mistakenly * think that a trap is coming from kernel mode when in fact * it is coming from user mode (it mis-executes the branch in * the trap code). So you see things like crashme completely * hosing your machine which is completely unacceptable. Turn * this shit off... nice job Fujitsu. 
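 * (Hence SWIFT_BF is cleared below before the MMU register is
 * written back.)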
*/ mreg &= ~(SWIFT_BF); srmmu_set_mmureg(mreg); } static const struct sparc32_cachetlb_ops swift_ops = { .cache_all = swift_flush_cache_all, .cache_mm = swift_flush_cache_mm, .cache_page = swift_flush_cache_page, .cache_range = swift_flush_cache_range, .tlb_all = swift_flush_tlb_all, .tlb_mm = swift_flush_tlb_mm, .tlb_page = swift_flush_tlb_page, .tlb_range = swift_flush_tlb_range, .page_to_ram = swift_flush_page_to_ram, .sig_insns = swift_flush_sig_insns, .page_for_dma = swift_flush_page_for_dma, }; #define SWIFT_MASKID_ADDR 0x10003018 static void __init init_swift(void) { unsigned long swift_rev; __asm__ __volatile__("lda [%1] %2, %0\n\t" "srl %0, 0x18, %0\n\t" : "=r" (swift_rev) : "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); srmmu_name = "Fujitsu Swift"; switch (swift_rev) { case 0x11: case 0x20: case 0x23: case 0x30: srmmu_modtype = Swift_lots_o_bugs; hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN); /* * Gee george, I wonder why Sun is so hush hush about * this hardware bug... really braindamage stuff going * on here. However I think we can find a way to avoid * all of the workaround overhead under Linux. Basically, * any page fault can cause kernel pages to become user * accessible (the mmu gets confused and clears some of * the ACC bits in kernel ptes). Aha, sounds pretty * horrible eh? But wait, after extensive testing it appears * that if you use pgd_t level large kernel pte's (like the * 4MB pages on the Pentium) the bug does not get tripped * at all. This avoids almost all of the major overhead. * Welcome to a world where your vendor tells you to, * "apply this kernel patch" instead of "sorry for the * broken hardware, send it back and we'll give you * properly functioning parts" */ break; case 0x25: case 0x31: srmmu_modtype = Swift_bad_c; hwbug_bitmask |= HWBUG_KERN_CBITBROKEN; /* * You see Sun allude to this hardware bug but never * admit things directly, they'll say things like, * "the Swift chip cache problems" or similar. */ break; default: srmmu_modtype = Swift_ok; break; } sparc32_cachetlb_ops = &swift_ops; flush_page_for_dma_global = 0; /* * Are you now convinced that the Swift is one of the * biggest VLSI abortions of all time? Bravo Fujitsu! * Fujitsu, the !#?!%$'d up processor people. I bet if * you examined the microcode of the Swift you'd find * XXX's all over the place. */ poke_srmmu = poke_swift; } static void turbosparc_flush_cache_all(void) { flush_user_windows(); turbosparc_idflash_clear(); } static void turbosparc_flush_cache_mm(struct mm_struct *mm) { FLUSH_BEGIN(mm) flush_user_windows(); turbosparc_idflash_clear(); FLUSH_END } static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { FLUSH_BEGIN(vma->vm_mm) flush_user_windows(); turbosparc_idflash_clear(); FLUSH_END } static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { FLUSH_BEGIN(vma->vm_mm) flush_user_windows(); if (vma->vm_flags & VM_EXEC) turbosparc_flush_icache(); turbosparc_flush_dcache(); FLUSH_END } /* TurboSparc is copy-back, if we turn it on, but this does not work. 
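 * The copy-back variant is kept under the TURBOSPARC_WRITEBACK
 * define; without it, poke_turbosparc() configures the D-cache as
 * write-through and this flush is effectively a no-op.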
*/ static void turbosparc_flush_page_to_ram(unsigned long page) { #ifdef TURBOSPARC_WRITEBACK volatile unsigned long clear; if (srmmu_probe(page)) turbosparc_flush_page_cache(page); clear = srmmu_get_fstatus(); #endif } static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) { } static void turbosparc_flush_page_for_dma(unsigned long page) { turbosparc_flush_dcache(); } static void turbosparc_flush_tlb_all(void) { srmmu_flush_whole_tlb(); } static void turbosparc_flush_tlb_mm(struct mm_struct *mm) { FLUSH_BEGIN(mm) srmmu_flush_whole_tlb(); FLUSH_END } static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { FLUSH_BEGIN(vma->vm_mm) srmmu_flush_whole_tlb(); FLUSH_END } static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { FLUSH_BEGIN(vma->vm_mm) srmmu_flush_whole_tlb(); FLUSH_END } static void poke_turbosparc(void) { unsigned long mreg = srmmu_get_mmureg(); unsigned long ccreg; /* Clear any crap from the cache or else... */ turbosparc_flush_cache_all(); /* Temporarily disable I & D caches */ mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ srmmu_set_mmureg(mreg); ccreg = turbosparc_get_ccreg(); #ifdef TURBOSPARC_WRITEBACK ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */ ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE); /* Write-back D-cache, emulate VLSI * abortion number three, not number one */ #else /* For now let's play safe, optimize later */ ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE); /* Do DVMA snooping in Dcache, Write-thru D-cache */ ccreg &= ~(TURBOSPARC_uS2); /* Emulate VLSI abortion number three, not number one */ #endif switch (ccreg & 7) { case 0: /* No SE cache */ case 7: /* Test mode */ break; default: ccreg |= (TURBOSPARC_SCENABLE); } turbosparc_set_ccreg(ccreg); mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ srmmu_set_mmureg(mreg); } static const struct sparc32_cachetlb_ops turbosparc_ops = { .cache_all = turbosparc_flush_cache_all, .cache_mm = turbosparc_flush_cache_mm, .cache_page = turbosparc_flush_cache_page, .cache_range = turbosparc_flush_cache_range, .tlb_all = turbosparc_flush_tlb_all, .tlb_mm = turbosparc_flush_tlb_mm, .tlb_page = turbosparc_flush_tlb_page, .tlb_range = turbosparc_flush_tlb_range, .page_to_ram = turbosparc_flush_page_to_ram, .sig_insns = turbosparc_flush_sig_insns, .page_for_dma = turbosparc_flush_page_for_dma, }; static void __init init_turbosparc(void) { srmmu_name = "Fujitsu TurboSparc"; srmmu_modtype = TurboSparc; sparc32_cachetlb_ops = &turbosparc_ops; poke_srmmu = poke_turbosparc; } static void poke_tsunami(void) { unsigned long mreg = srmmu_get_mmureg(); tsunami_flush_icache(); tsunami_flush_dcache(); mreg &= ~TSUNAMI_ITD; mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB); srmmu_set_mmureg(mreg); } static const struct sparc32_cachetlb_ops tsunami_ops = { .cache_all = tsunami_flush_cache_all, .cache_mm = tsunami_flush_cache_mm, .cache_page = tsunami_flush_cache_page, .cache_range = tsunami_flush_cache_range, .tlb_all = tsunami_flush_tlb_all, .tlb_mm = tsunami_flush_tlb_mm, .tlb_page = tsunami_flush_tlb_page, .tlb_range = tsunami_flush_tlb_range, .page_to_ram = tsunami_flush_page_to_ram, .sig_insns = tsunami_flush_sig_insns, .page_for_dma = tsunami_flush_page_for_dma, }; static void __init init_tsunami(void) { /* * Tsunami's pretty sane, Sun and TI actually got it * 
somewhat right this time. Fujitsu should have * taken some lessons from them. */ srmmu_name = "TI Tsunami"; srmmu_modtype = Tsunami; sparc32_cachetlb_ops = &tsunami_ops; poke_srmmu = poke_tsunami; tsunami_setup_blockops(); } static void poke_viking(void) { unsigned long mreg = srmmu_get_mmureg(); static int smp_catch; if (viking_mxcc_present) { unsigned long mxcc_control = mxcc_get_creg(); mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); mxcc_control &= ~(MXCC_CTL_RRC); mxcc_set_creg(mxcc_control); /* * We don't need memory parity checks. * XXX This is a mess, have to dig out later. ecd. viking_mxcc_turn_off_parity(&mreg, &mxcc_control); */ /* We do cache ptables on MXCC. */ mreg |= VIKING_TCENABLE; } else { unsigned long bpreg; mreg &= ~(VIKING_TCENABLE); if (smp_catch++) { /* Must disable mixed-cmd mode here for other cpu's. */ bpreg = viking_get_bpreg(); bpreg &= ~(VIKING_ACTION_MIX); viking_set_bpreg(bpreg); /* Just in case PROM does something funny. */ msi_set_sync(); } } mreg |= VIKING_SPENABLE; mreg |= (VIKING_ICENABLE | VIKING_DCENABLE); mreg |= VIKING_SBENABLE; mreg &= ~(VIKING_ACENABLE); srmmu_set_mmureg(mreg); } static struct sparc32_cachetlb_ops viking_ops __ro_after_init = { .cache_all = viking_flush_cache_all, .cache_mm = viking_flush_cache_mm, .cache_page = viking_flush_cache_page, .cache_range = viking_flush_cache_range, .tlb_all = viking_flush_tlb_all, .tlb_mm = viking_flush_tlb_mm, .tlb_page = viking_flush_tlb_page, .tlb_range = viking_flush_tlb_range, .page_to_ram = viking_flush_page_to_ram, .sig_insns = viking_flush_sig_insns, .page_for_dma = viking_flush_page_for_dma, }; #ifdef CONFIG_SMP /* On sun4d the cpu broadcasts local TLB flushes, so we can just * perform the local TLB flush and all the other cpus will see it. * But, unfortunately, there is a bug in the sun4d XBUS backplane * that requires that we add some synchronization to these flushes. * * The bug is that the fifo which keeps track of all the pending TLB * broadcasts in the system is an entry or two too small, so if we * have too many going at once we'll overflow that fifo and lose a TLB * flush resulting in corruption. * * Our workaround is to take a global spinlock around the TLB flushes, * which guarentees we won't ever have too many pending. It's a big * hammer, but a semaphore like system to make sure we only have N TLB * flushes going at once will require SMP locking anyways so there's * no real value in trying any harder than this. */ static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = { .cache_all = viking_flush_cache_all, .cache_mm = viking_flush_cache_mm, .cache_page = viking_flush_cache_page, .cache_range = viking_flush_cache_range, .tlb_all = sun4dsmp_flush_tlb_all, .tlb_mm = sun4dsmp_flush_tlb_mm, .tlb_page = sun4dsmp_flush_tlb_page, .tlb_range = sun4dsmp_flush_tlb_range, .page_to_ram = viking_flush_page_to_ram, .sig_insns = viking_flush_sig_insns, .page_for_dma = viking_flush_page_for_dma, }; #endif static void __init init_viking(void) { unsigned long mreg = srmmu_get_mmureg(); /* Ahhh, the viking. SRMMU VLSI abortion number two... */ if (mreg & VIKING_MMODE) { srmmu_name = "TI Viking"; viking_mxcc_present = 0; msi_set_sync(); /* * We need this to make sure old viking takes no hits * on it's cache for dma snoops to workaround the * "load from non-cacheable memory" interrupt bug. * This is only necessary because of the new way in * which we use the IOMMU. 
*/ viking_ops.page_for_dma = viking_flush_page; #ifdef CONFIG_SMP viking_sun4d_smp_ops.page_for_dma = viking_flush_page; #endif flush_page_for_dma_global = 0; } else { srmmu_name = "TI Viking/MXCC"; viking_mxcc_present = 1; srmmu_cache_pagetables = 1; } sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *) &viking_ops; #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *) &viking_sun4d_smp_ops; #endif poke_srmmu = poke_viking; } /* Probe for the srmmu chip version. */ static void __init get_srmmu_type(void) { unsigned long mreg, psr; unsigned long mod_typ, mod_rev, psr_typ, psr_vers; srmmu_modtype = SRMMU_INVAL_MOD; hwbug_bitmask = 0; mreg = srmmu_get_mmureg(); psr = get_psr(); mod_typ = (mreg & 0xf0000000) >> 28; mod_rev = (mreg & 0x0f000000) >> 24; psr_typ = (psr >> 28) & 0xf; psr_vers = (psr >> 24) & 0xf; /* First, check for sparc-leon. */ if (sparc_cpu_model == sparc_leon) { init_leon(); return; } /* Second, check for HyperSparc or Cypress. */ if (mod_typ == 1) { switch (mod_rev) { case 7: /* UP or MP Hypersparc */ init_hypersparc(); break; case 0: case 2: case 10: case 11: case 12: case 13: case 14: case 15: default: prom_printf("Sparc-Linux Cypress support does not longer exit.\n"); prom_halt(); break; } return; } /* Now Fujitsu TurboSparc. It might happen that it is * in Swift emulation mode, so we will check later... */ if (psr_typ == 0 && psr_vers == 5) { init_turbosparc(); return; } /* Next check for Fujitsu Swift. */ if (psr_typ == 0 && psr_vers == 4) { phandle cpunode; char node_str[128]; /* Look if it is not a TurboSparc emulating Swift... */ cpunode = prom_getchild(prom_root_node); while ((cpunode = prom_getsibling(cpunode)) != 0) { prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); if (!strcmp(node_str, "cpu")) { if (!prom_getintdefault(cpunode, "psr-implementation", 1) && prom_getintdefault(cpunode, "psr-version", 1) == 5) { init_turbosparc(); return; } break; } } init_swift(); return; } /* Now the Viking family of srmmu. */ if (psr_typ == 4 && ((psr_vers == 0) || ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { init_viking(); return; } /* Finally the Tsunami. */ if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { init_tsunami(); return; } /* Oh well */ srmmu_is_bad(); } #ifdef CONFIG_SMP /* Local cross-calls. 
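 * Each smp_flush_*() wrapper below cross-calls the chip-local flush
 * on the other CPUs that need it via xc0()..xc3() (the digit is the
 * number of arguments passed along) and then runs the same flush
 * locally.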
*/ static void smp_flush_page_for_dma(unsigned long page) { xc1(local_ops->page_for_dma, page); local_ops->page_for_dma(page); } static void smp_flush_cache_all(void) { xc0(local_ops->cache_all); local_ops->cache_all(); } static void smp_flush_tlb_all(void) { xc0(local_ops->tlb_all); local_ops->tlb_all(); } static void smp_flush_cache_mm(struct mm_struct *mm) { if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc1(local_ops->cache_mm, (unsigned long)mm); local_ops->cache_mm(mm); } } static void smp_flush_tlb_mm(struct mm_struct *mm) { if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) { xc1(local_ops->tlb_mm, (unsigned long)mm); if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); } local_ops->tlb_mm(mm); } } static void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc3(local_ops->cache_range, (unsigned long)vma, start, end); local_ops->cache_range(vma, start, end); } } static void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc3(local_ops->tlb_range, (unsigned long)vma, start, end); local_ops->tlb_range(vma, start, end); } } static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) { struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc2(local_ops->cache_page, (unsigned long)vma, page); local_ops->cache_page(vma, page); } } static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { struct mm_struct *mm = vma->vm_mm; if (mm->context != NO_CONTEXT) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc2(local_ops->tlb_page, (unsigned long)vma, page); local_ops->tlb_page(vma, page); } } static void smp_flush_page_to_ram(unsigned long page) { /* Current theory is that those who call this are the one's * who have just dirtied their cache with the pages contents * in kernel space, therefore we only run this on local cpu. * * XXX This experiment failed, research further... 
-DaveM */ #if 1 xc1(local_ops->page_to_ram, page); #endif local_ops->page_to_ram(page); } static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) { cpumask_t cpu_mask; cpumask_copy(&cpu_mask, mm_cpumask(mm)); cpumask_clear_cpu(smp_processor_id(), &cpu_mask); if (!cpumask_empty(&cpu_mask)) xc2(local_ops->sig_insns, (unsigned long)mm, insn_addr); local_ops->sig_insns(mm, insn_addr); } static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = { .cache_all = smp_flush_cache_all, .cache_mm = smp_flush_cache_mm, .cache_page = smp_flush_cache_page, .cache_range = smp_flush_cache_range, .tlb_all = smp_flush_tlb_all, .tlb_mm = smp_flush_tlb_mm, .tlb_page = smp_flush_tlb_page, .tlb_range = smp_flush_tlb_range, .page_to_ram = smp_flush_page_to_ram, .sig_insns = smp_flush_sig_insns, .page_for_dma = smp_flush_page_for_dma, }; #endif /* Load up routines and constants for sun4m and sun4d mmu */ void __init load_mmu(void) { /* Functions */ get_srmmu_type(); #ifdef CONFIG_SMP /* El switcheroo... */ local_ops = sparc32_cachetlb_ops; if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) { smp_cachetlb_ops.tlb_all = local_ops->tlb_all; smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm; smp_cachetlb_ops.tlb_range = local_ops->tlb_range; smp_cachetlb_ops.tlb_page = local_ops->tlb_page; } if (poke_srmmu == poke_viking) { /* Avoid unnecessary cross calls. */ smp_cachetlb_ops.cache_all = local_ops->cache_all; smp_cachetlb_ops.cache_mm = local_ops->cache_mm; smp_cachetlb_ops.cache_range = local_ops->cache_range; smp_cachetlb_ops.cache_page = local_ops->cache_page; smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram; smp_cachetlb_ops.sig_insns = local_ops->sig_insns; smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma; } /* It really is const after this point. */ sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *) &smp_cachetlb_ops; #endif if (sparc_cpu_model != sun4d) ld_mmu_iommu(); #ifdef CONFIG_SMP if (sparc_cpu_model == sun4d) sun4d_init_smp(); else if (sparc_cpu_model == sparc_leon) leon_init_smp(); else sun4m_init_smp(); #endif }
linux-master
arch/sparc/mm/srmmu.c
// SPDX-License-Identifier: GPL-2.0 /* * iommu.c: IOMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1995,2002 Pete Zaitcev ([email protected]) * Copyright (C) 1996 Eddie C. Dost ([email protected]) * Copyright (C) 1997,1998 Jakub Jelinek ([email protected]) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/mxcc.h> #include <asm/mbus.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/bitext.h> #include <asm/iommu.h> #include <asm/dma.h> #include "mm_32.h" /* * This can be sized dynamically, but we will do this * only when we have a guidance about actual I/O pressures. */ #define IOMMU_RNGE IOMMU_RNGE_256MB #define IOMMU_START 0xF0000000 #define IOMMU_WINSIZE (256*1024*1024U) #define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ static int viking_flush; /* viking.S */ extern void viking_flush_page(unsigned long page); extern void viking_mxcc_flush_page(unsigned long page); /* * Values precomputed according to CPU type. */ static unsigned int ioperm_noc; /* Consistent mapping iopte flags */ static pgprot_t dvma_prot; /* Consistent mapping pte flags */ #define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID) #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ) static const struct dma_map_ops sbus_iommu_dma_gflush_ops; static const struct dma_map_ops sbus_iommu_dma_pflush_ops; static void __init sbus_iommu_init(struct platform_device *op) { struct iommu_struct *iommu; unsigned int impl, vers; unsigned long *bitmap; unsigned long control; unsigned long base; unsigned long tmp; iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL); if (!iommu) { prom_printf("Unable to allocate iommu structure\n"); prom_halt(); } iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3, "iommu_regs"); if (!iommu->regs) { prom_printf("Cannot map IOMMU registers\n"); prom_halt(); } control = sbus_readl(&iommu->regs->control); impl = (control & IOMMU_CTRL_IMPL) >> 28; vers = (control & IOMMU_CTRL_VERS) >> 24; control &= ~(IOMMU_CTRL_RNGE); control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB); sbus_writel(control, &iommu->regs->control); iommu_invalidate(iommu->regs); iommu->start = IOMMU_START; iommu->end = 0xffffffff; /* Allocate IOMMU page table */ /* Stupid alignment constraints give me a headache. We need 256K or 512K or 1M or 2M area aligned to its size and current gfp will fortunately give it to us. */ tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER); if (!tmp) { prom_printf("Unable to allocate iommu table [0x%lx]\n", IOMMU_NPTES * sizeof(iopte_t)); prom_halt(); } iommu->page_table = (iopte_t *)tmp; /* Initialize new table. */ memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t)); flush_cache_all(); flush_tlb_all(); base = __pa((unsigned long)iommu->page_table) >> 4; sbus_writel(base, &iommu->regs->base); iommu_invalidate(iommu->regs); bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL); if (!bitmap) { prom_printf("Unable to allocate iommu bitmap [%d]\n", (int)(IOMMU_NPTES>>3)); prom_halt(); } bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES); /* To be coherent on HyperSparc, the page color of DVMA * and physical addresses must match. 
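 * The bitmap allocator is therefore given one color per VAC page
 * color, so bit_map_string_get() can hand out IOMMU slots whose DVMA
 * address matches the color of the backing physical page.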
*/ if (srmmu_modtype == HyperSparc) iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT; else iommu->usemap.num_colors = 1; printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n", impl, vers, iommu->page_table, (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES); op->dev.archdata.iommu = iommu; if (flush_page_for_dma_global) op->dev.dma_ops = &sbus_iommu_dma_gflush_ops; else op->dev.dma_ops = &sbus_iommu_dma_pflush_ops; } static int __init iommu_init(void) { struct device_node *dp; for_each_node_by_name(dp, "iommu") { struct platform_device *op = of_find_device_by_node(dp); sbus_iommu_init(op); of_propagate_archdata(op); } return 0; } subsys_initcall(iommu_init); /* Flush the iotlb entries to ram. */ /* This could be better if we didn't have to flush whole pages. */ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte) { unsigned long start; unsigned long end; start = (unsigned long)iopte; end = PAGE_ALIGN(start + niopte*sizeof(iopte_t)); start &= PAGE_MASK; if (viking_mxcc_present) { while(start < end) { viking_mxcc_flush_page(start); start += PAGE_SIZE; } } else if (viking_flush) { while(start < end) { viking_flush_page(start); start += PAGE_SIZE; } } else { while(start < end) { __flush_page_to_ram(start); start += PAGE_SIZE; } } } static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t len, bool per_page_flush) { struct iommu_struct *iommu = dev->archdata.iommu; phys_addr_t paddr = page_to_phys(page) + offset; unsigned long off = paddr & ~PAGE_MASK; unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long pfn = __phys_to_pfn(paddr); unsigned int busa, busa0; iopte_t *iopte, *iopte0; int ioptex, i; /* XXX So what is maxphys for us and how do drivers know it? */ if (!len || len > 256 * 1024) return DMA_MAPPING_ERROR; /* * We expect unmapped highmem pages to be not in the cache. * XXX Is this a good assumption? * XXX What if someone else unmaps it here and races us? 
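 * For lowmem pages, when per-page flushing is in use, every CPU page
 * backing the buffer is flushed below before the mapping is set up.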
*/ if (per_page_flush && !PageHighMem(page)) { unsigned long vaddr, p; vaddr = (unsigned long)page_address(page) + offset; for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE) flush_page_for_dma(p); } /* page color = pfn of page */ ioptex = bit_map_string_get(&iommu->usemap, npages, pfn); if (ioptex < 0) panic("iommu out"); busa0 = iommu->start + (ioptex << PAGE_SHIFT); iopte0 = &iommu->page_table[ioptex]; busa = busa0; iopte = iopte0; for (i = 0; i < npages; i++) { iopte_val(*iopte) = MKIOPTE(pfn, IOPERM); iommu_invalidate_page(iommu->regs, busa); busa += PAGE_SIZE; iopte++; pfn++; } iommu_flush_iotlb(iopte0, npages); return busa0 + off; } static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev, struct page *page, unsigned long offset, size_t len, enum dma_data_direction dir, unsigned long attrs) { flush_page_for_dma(0); return __sbus_iommu_map_page(dev, page, offset, len, false); } static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev, struct page *page, unsigned long offset, size_t len, enum dma_data_direction dir, unsigned long attrs) { return __sbus_iommu_map_page(dev, page, offset, len, true); } static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs, bool per_page_flush) { struct scatterlist *sg; int j; for_each_sg(sgl, sg, nents, j) { sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg), sg->offset, sg->length, per_page_flush); if (sg->dma_address == DMA_MAPPING_ERROR) return -EIO; sg->dma_length = sg->length; } return nents; } static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { flush_page_for_dma(0); return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false); } static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true); } static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t len, enum dma_data_direction dir, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; unsigned int busa = dma_addr & PAGE_MASK; unsigned long off = dma_addr & ~PAGE_MASK; unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT; unsigned int i; BUG_ON(busa < iommu->start); for (i = 0; i < npages; i++) { iopte_val(iommu->page_table[ioptex + i]) = 0; iommu_invalidate_page(iommu->regs, busa); busa += PAGE_SIZE; } bit_map_clear(&iommu->usemap, ioptex, npages); } static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nents, i) { sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir, attrs); sg->dma_address = 0x21212121; } } #ifdef CONFIG_SBUS static void *sbus_iommu_alloc(struct device *dev, size_t len, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; unsigned long va, addr, page, end, ret; iopte_t *iopte = iommu->page_table; iopte_t *first; int ioptex; /* XXX So what is maxphys for us and how do drivers know it? 
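 * (For now anything larger than 256 KB is simply refused.)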
*/ if (!len || len > 256 * 1024) return NULL; len = PAGE_ALIGN(len); va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); if (va == 0) return NULL; addr = ret = sparc_dma_alloc_resource(dev, len); if (!addr) goto out_free_pages; BUG_ON((va & ~PAGE_MASK) != 0); BUG_ON((addr & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); /* page color = physical address */ ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT, addr >> PAGE_SHIFT); if (ioptex < 0) panic("iommu out"); iopte += ioptex; first = iopte; end = addr + len; while(addr < end) { page = va; { pmd_t *pmdp; pte_t *ptep; if (viking_mxcc_present) viking_mxcc_flush_page(page); else if (viking_flush) viking_flush_page(page); else __flush_page_to_ram(page); pmdp = pmd_off_k(addr); ptep = pte_offset_kernel(pmdp, addr); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); } iopte_val(*iopte++) = MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc); addr += PAGE_SIZE; va += PAGE_SIZE; } /* P3: why do we need this? * * DAVEM: Because there are several aspects, none of which * are handled by a single interface. Some cpus are * completely not I/O DMA coherent, and some have * virtually indexed caches. The driver DMA flushing * methods handle the former case, but here during * IOMMU page table modifications, and usage of non-cacheable * cpu mappings of pages potentially in the cpu caches, we have * to handle the latter case as well. */ flush_cache_all(); iommu_flush_iotlb(first, len >> PAGE_SHIFT); flush_tlb_all(); iommu_invalidate(iommu->regs); *dma_handle = iommu->start + (ioptex << PAGE_SHIFT); return (void *)ret; out_free_pages: free_pages(va, get_order(len)); return NULL; } static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr, dma_addr_t busa, unsigned long attrs) { struct iommu_struct *iommu = dev->archdata.iommu; iopte_t *iopte = iommu->page_table; struct page *page = virt_to_page(cpu_addr); int ioptex = (busa - iommu->start) >> PAGE_SHIFT; unsigned long end; if (!sparc_dma_free_resource(cpu_addr, len)) return; BUG_ON((busa & ~PAGE_MASK) != 0); BUG_ON((len & ~PAGE_MASK) != 0); iopte += ioptex; end = busa + len; while (busa < end) { iopte_val(*iopte++) = 0; busa += PAGE_SIZE; } flush_tlb_all(); iommu_invalidate(iommu->regs); bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT); __free_pages(page, get_order(len)); } #endif static const struct dma_map_ops sbus_iommu_dma_gflush_ops = { #ifdef CONFIG_SBUS .alloc = sbus_iommu_alloc, .free = sbus_iommu_free, #endif .map_page = sbus_iommu_map_page_gflush, .unmap_page = sbus_iommu_unmap_page, .map_sg = sbus_iommu_map_sg_gflush, .unmap_sg = sbus_iommu_unmap_sg, }; static const struct dma_map_ops sbus_iommu_dma_pflush_ops = { #ifdef CONFIG_SBUS .alloc = sbus_iommu_alloc, .free = sbus_iommu_free, #endif .map_page = sbus_iommu_map_page_pflush, .unmap_page = sbus_iommu_unmap_page, .map_sg = sbus_iommu_map_sg_pflush, .unmap_sg = sbus_iommu_unmap_sg, }; void __init ld_mmu_iommu(void) { if (viking_mxcc_present || srmmu_modtype == HyperSparc) { dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID; } else { dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV); ioperm_noc = IOPTE_WRITE | IOPTE_VALID; } }
linux-master
arch/sparc/mm/iommu.c
// SPDX-License-Identifier: GPL-2.0 /* * io-unit.c: IO-UNIT specific routines for memory management. * * Copyright (C) 1997,1998 Jakub Jelinek ([email protected]) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/dma-map-ops.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <asm/io.h> #include <asm/io-unit.h> #include <asm/mxcc.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/oplib.h> #include "mm_32.h" /* #define IOUNIT_DEBUG */ #ifdef IOUNIT_DEBUG #define IOD(x) printk(x) #else #define IOD(x) do { } while (0) #endif #define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID) #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM) static const struct dma_map_ops iounit_dma_ops; static void __init iounit_iommu_init(struct platform_device *op) { struct iounit_struct *iounit; iopte_t __iomem *xpt; iopte_t __iomem *xptend; iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC); if (!iounit) { prom_printf("SUN4D: Cannot alloc iounit, halting.\n"); prom_halt(); } iounit->limit[0] = IOUNIT_BMAP1_START; iounit->limit[1] = IOUNIT_BMAP2_START; iounit->limit[2] = IOUNIT_BMAPM_START; iounit->limit[3] = IOUNIT_BMAPM_END; iounit->rotor[1] = IOUNIT_BMAP2_START; iounit->rotor[2] = IOUNIT_BMAPM_START; xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT"); if (!xpt) { prom_printf("SUN4D: Cannot map External Page Table."); prom_halt(); } op->dev.archdata.iommu = iounit; iounit->page_table = xpt; spin_lock_init(&iounit->lock); xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); for (; xpt < xptend; xpt++) sbus_writel(0, xpt); op->dev.dma_ops = &iounit_dma_ops; } static int __init iounit_init(void) { extern void sun4d_init_sbi_irq(void); struct device_node *dp; for_each_node_by_name(dp, "sbi") { struct platform_device *op = of_find_device_by_node(dp); iounit_iommu_init(op); of_propagate_archdata(op); } sun4d_init_sbi_irq(); return 0; } subsys_initcall(iounit_init); /* One has to hold iounit->lock to call this */ static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size) { int i, j, k, npages; unsigned long rotor, scan, limit; iopte_t iopte; npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; /* A tiny bit of magic ingredience :) */ switch (npages) { case 1: i = 0x0231; break; case 2: i = 0x0132; break; default: i = 0x0213; break; } IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); next: j = (i & 15); rotor = iounit->rotor[j - 1]; limit = iounit->limit[j]; scan = rotor; nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan); if (scan + npages > limit) { if (limit != rotor) { limit = rotor; scan = iounit->limit[j - 1]; goto nexti; } i >>= 4; if (!(i & 15)) panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size); goto next; } for (k = 1, scan++; k < npages; k++) if (test_bit(scan++, iounit->bmap)) goto nexti; iounit->rotor[j - 1] = (scan < limit) ? 
scan : iounit->limit[j - 1]; scan -= npages; iopte = MKIOPTE(__pa(vaddr & PAGE_MASK)); vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK); for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { set_bit(scan, iounit->bmap); sbus_writel(iopte_val(iopte), &iounit->page_table[scan]); } IOD(("%08lx\n", vaddr)); return vaddr; } static dma_addr_t iounit_map_page(struct device *dev, struct page *page, unsigned long offset, size_t len, enum dma_data_direction dir, unsigned long attrs) { void *vaddr = page_address(page) + offset; struct iounit_struct *iounit = dev->archdata.iommu; unsigned long ret, flags; /* XXX So what is maxphys for us and how do drivers know it? */ if (!len || len > 256 * 1024) return DMA_MAPPING_ERROR; spin_lock_irqsave(&iounit->lock, flags); ret = iounit_get_area(iounit, (unsigned long)vaddr, len); spin_unlock_irqrestore(&iounit->lock, flags); return ret; } static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; struct scatterlist *sg; unsigned long flags; int i; /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ spin_lock_irqsave(&iounit->lock, flags); for_each_sg(sgl, sg, nents, i) { sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); sg->dma_length = sg->length; } spin_unlock_irqrestore(&iounit->lock, flags); return nents; } static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags; spin_lock_irqsave(&iounit->lock, flags); len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT; vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); spin_unlock_irqrestore(&iounit->lock, flags); } static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long flags, vaddr, len; struct scatterlist *sg; int i; spin_lock_irqsave(&iounit->lock, flags); for_each_sg(sgl, sg, nents, i) { len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr)); for (len += vaddr; vaddr < len; vaddr++) clear_bit(vaddr, iounit->bmap); } spin_unlock_irqrestore(&iounit->lock, flags); } #ifdef CONFIG_SBUS static void *iounit_alloc(struct device *dev, size_t len, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { struct iounit_struct *iounit = dev->archdata.iommu; unsigned long va, addr, page, end, ret; pgprot_t dvma_prot; iopte_t __iomem *iopte; /* XXX So what is maxphys for us and how do drivers know it? 
*/ if (!len || len > 256 * 1024) return NULL; len = PAGE_ALIGN(len); va = __get_free_pages(gfp | __GFP_ZERO, get_order(len)); if (!va) return NULL; addr = ret = sparc_dma_alloc_resource(dev, len); if (!addr) goto out_free_pages; *dma_handle = addr; dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); end = PAGE_ALIGN((addr + len)); while(addr < end) { page = va; { pmd_t *pmdp; pte_t *ptep; long i; pmdp = pmd_off_k(addr); ptep = pte_offset_kernel(pmdp, addr); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); iopte = iounit->page_table + i; sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte); } addr += PAGE_SIZE; va += PAGE_SIZE; } flush_cache_all(); flush_tlb_all(); return (void *)ret; out_free_pages: free_pages(va, get_order(len)); return NULL; } static void iounit_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { /* XXX Somebody please fill this in */ } #endif static const struct dma_map_ops iounit_dma_ops = { #ifdef CONFIG_SBUS .alloc = iounit_alloc, .free = iounit_free, #endif .map_page = iounit_map_page, .unmap_page = iounit_unmap_page, .map_sg = iounit_map_sg, .unmap_sg = iounit_unmap_sg, };
linux-master
arch/sparc/mm/io-unit.c
// SPDX-License-Identifier: GPL-2.0 /* * fault.c: Page fault handlers for the Sparc. * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1996 Eddie C. Dost ([email protected]) * Copyright (C) 1997 Jakub Jelinek ([email protected]) */ #include <asm/head.h> #include <linux/string.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/threads.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/interrupt.h> #include <linux/kdebug.h> #include <linux/uaccess.h> #include <linux/extable.h> #include <asm/page.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/traps.h> #include "mm_32.h" int show_unhandled_signals = 1; static void __noreturn unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs) { if ((unsigned long) address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n"); } else { printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n", address); } printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n", (tsk->mm ? tsk->mm->context : tsk->active_mm->context)); printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n", (tsk->mm ? (unsigned long) tsk->mm->pgd : (unsigned long) tsk->active_mm->pgd)); die_if_kernel("Oops", regs); } static inline void show_signal_msg(struct pt_regs *regs, int sig, int code, unsigned long address, struct task_struct *tsk) { if (!unhandled_signal(tsk, sig)) return; if (!printk_ratelimit()) return; printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x", task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), address, (void *)regs->pc, (void *)regs->u_regs[UREG_I7], (void *)regs->u_regs[UREG_FP], code); print_vma_addr(KERN_CONT " in ", regs->pc); printk(KERN_CONT "\n"); } static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs, unsigned long addr) { if (unlikely(show_unhandled_signals)) show_signal_msg(regs, sig, code, addr, current); force_sig_fault(sig, code, (void __user *) addr); } static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) { unsigned int insn; if (text_fault) return regs->pc; if (regs->psr & PSR_PS) insn = *(unsigned int *) regs->pc; else __get_user(insn, (unsigned int *) regs->pc); return safe_compute_effective_address(regs, insn); } static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, int text_fault) { unsigned long addr = compute_si_addr(regs, text_fault); __do_fault_siginfo(code, sig, regs, addr); } asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; int from_user = !(regs->psr & PSR_PS); int code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; if (text_fault) address = regs->pc; /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. */ code = SEGV_MAPERR; if (address >= TASK_SIZE) goto vmalloc_fault; /* * If we're in an interrupt or have no user * context, we must not take the fault.. 
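 * A NULL mm means a kernel thread with no user address space;
 * pagefault_disabled() is true inside pagefault_disable() sections.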
*/ if (pagefault_disabled() || !mm) goto no_context; if (!from_user && address >= PAGE_OFFSET) goto no_context; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } if (from_user) flags |= FAULT_FLAG_USER; if (write) flags |= FAULT_FLAG_WRITE; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!from_user) goto no_context; return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (from_user) { do_fault_siginfo(code, SIGSEGV, regs, text_fault); return; } /* Is this in ex_table? */ no_context: if (!from_user) { const struct exception_table_entry *entry; entry = search_exception_tables(regs->pc); #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address); printk("EX_TABLE: insn<%08lx> fixup<%08x>\n", regs->pc, entry->fixup); #endif regs->pc = entry->fixup; regs->npc = regs->pc + 4; return; } unhandled_fault(address, tsk, regs); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: mmap_read_unlock(mm); if (from_user) { pagefault_out_of_memory(); return; } goto no_context; do_sigbus: mmap_read_unlock(mm); do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault); if (!from_user) goto no_context; vmalloc_fault: { /* * Synchronize this task's top level page-table * with the 'reference' page table. */ int offset = pgd_index(address); pgd_t *pgd, *pgd_k; p4d_t *p4d, *p4d_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pgd = tsk->active_mm->pgd + offset; pgd_k = init_mm.pgd + offset; if (!pgd_present(*pgd)) { if (!pgd_present(*pgd_k)) goto bad_area_nosemaphore; pgd_val(*pgd) = pgd_val(*pgd_k); return; } p4d = p4d_offset(pgd, address); pud = pud_offset(p4d, address); pmd = pmd_offset(pud, address); p4d_k = p4d_offset(pgd_k, address); pud_k = pud_offset(p4d_k, address); pmd_k = pmd_offset(pud_k, address); if (pmd_present(*pmd) || !pmd_present(*pmd_k)) goto bad_area_nosemaphore; *pmd = *pmd_k; return; } } /* This always deals with user addresses. 
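 * It is used by the register-window fault handlers below to fault in
 * the user stack pages that a window spill or fill needs.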
*/ static void force_user_fault(unsigned long address, int write) { struct vm_area_struct *vma; struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; unsigned int flags = FAULT_FLAG_USER; int code; code = SEGV_MAPERR; vma = lock_mm_and_find_vma(mm, address, NULL); if (!vma) goto bad_area_nosemaphore; code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } else { if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } switch (handle_mm_fault(vma, address, flags, NULL)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; } mmap_read_unlock(mm); return; bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address); return; do_sigbus: mmap_read_unlock(mm); __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address); } static void check_stack_aligned(unsigned long sp) { if (sp & 0x7UL) force_sig(SIGILL); } void window_overflow_fault(void) { unsigned long sp; sp = current_thread_info()->rwbuf_stkptrs[0]; if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 1); force_user_fault(sp, 1); check_stack_aligned(sp); } void window_underflow_fault(unsigned long sp) { if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); check_stack_aligned(sp); } void window_ret_fault(struct pt_regs *regs) { unsigned long sp; sp = regs->u_regs[UREG_FP]; if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK)) force_user_fault(sp + 0x38, 0); force_user_fault(sp, 0); check_stack_aligned(sp); }
linux-master
arch/sparc/mm/fault_32.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/sparc/mm/init.c * * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1995 Eddie C. Dost ([email protected]) * Copyright (C) 1998 Jakub Jelinek ([email protected]) * Copyright (C) 2000 Anton Blanchard ([email protected]) */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/initrd.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/pagemap.h> #include <linux/poison.h> #include <linux/gfp.h> #include <asm/sections.h> #include <asm/page.h> #include <asm/vaddrs.h> #include <asm/setup.h> #include <asm/tlb.h> #include <asm/prom.h> #include <asm/leon.h> #include "mm_32.h" static unsigned long *sparc_valid_addr_bitmap; unsigned long phys_base; EXPORT_SYMBOL(phys_base); unsigned long pfn_base; EXPORT_SYMBOL(pfn_base); struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; /* Initial ramdisk setup */ extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; unsigned long highstart_pfn, highend_pfn; unsigned long last_valid_pfn; unsigned long calc_highpages(void) { int i; int nr = 0; for (i = 0; sp_banks[i].num_bytes != 0; i++) { unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; if (end_pfn <= max_low_pfn) continue; if (start_pfn < max_low_pfn) start_pfn = max_low_pfn; nr += end_pfn - start_pfn; } return nr; } static unsigned long calc_max_low_pfn(void) { int i; unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); unsigned long curr_pfn, last_pfn; last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT; for (i = 1; sp_banks[i].num_bytes != 0; i++) { curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; if (curr_pfn >= tmp) { if (last_pfn < tmp) tmp = last_pfn; break; } last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; } return tmp; } static void __init find_ramdisk(unsigned long end_of_phys_memory) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long size; /* Now have to check initial ramdisk, so that it won't pass * the end of memory */ if (sparc_ramdisk_image) { if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE) sparc_ramdisk_image -= KERNBASE; initrd_start = sparc_ramdisk_image + phys_base; initrd_end = initrd_start + sparc_ramdisk_size; if (initrd_end > end_of_phys_memory) { printk(KERN_CRIT "initrd extends beyond end of memory " "(0x%016lx > 0x%016lx)\ndisabling initrd\n", initrd_end, end_of_phys_memory); initrd_start = 0; } else { /* Reserve the initrd image area. 
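 * Reserve the physical range with memblock, then convert
 * initrd_start/initrd_end to kernel virtual addresses.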
*/ size = initrd_end - initrd_start; memblock_reserve(initrd_start, size); initrd_start = (initrd_start - phys_base) + PAGE_OFFSET; initrd_end = (initrd_end - phys_base) + PAGE_OFFSET; } } #endif } unsigned long __init bootmem_init(unsigned long *pages_avail) { unsigned long start_pfn, bytes_avail, size; unsigned long end_of_phys_memory = 0; unsigned long high_pages = 0; int i; memblock_set_bottom_up(true); memblock_allow_resize(); bytes_avail = 0UL; for (i = 0; sp_banks[i].num_bytes != 0; i++) { end_of_phys_memory = sp_banks[i].base_addr + sp_banks[i].num_bytes; bytes_avail += sp_banks[i].num_bytes; if (cmdline_memory_size) { if (bytes_avail > cmdline_memory_size) { unsigned long slack = bytes_avail - cmdline_memory_size; bytes_avail -= slack; end_of_phys_memory -= slack; sp_banks[i].num_bytes -= slack; if (sp_banks[i].num_bytes == 0) { sp_banks[i].base_addr = 0xdeadbeef; } else { memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes); sp_banks[i+1].num_bytes = 0; sp_banks[i+1].base_addr = 0xdeadbeef; } break; } } memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes); } /* Start with page aligned address of last symbol in kernel * image. */ start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end)); /* Now shift down to get the real physical page frame number. */ start_pfn >>= PAGE_SHIFT; max_pfn = end_of_phys_memory >> PAGE_SHIFT; max_low_pfn = max_pfn; highstart_pfn = highend_pfn = max_pfn; if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) { highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT); max_low_pfn = calc_max_low_pfn(); high_pages = calc_highpages(); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", high_pages >> (20 - PAGE_SHIFT)); } find_ramdisk(end_of_phys_memory); /* Reserve the kernel text/data/bss. */ size = (start_pfn << PAGE_SHIFT) - phys_base; memblock_reserve(phys_base, size); memblock_add(phys_base, size); size = memblock_phys_mem_size() - memblock_reserved_size(); *pages_avail = (size >> PAGE_SHIFT) - high_pages; /* Only allow low memory to be allocated via memblock allocation */ memblock_set_current_limit(max_low_pfn << PAGE_SHIFT); return max_pfn; } /* * paging_init() sets up the page tables: We call the MMU specific * init routine based upon the Sun model type on the Sparc. * */ void __init paging_init(void) { srmmu_paging_init(); prom_build_devicetree(); of_fill_in_cpu_data(); device_scan(); } static void __init taint_real_pages(void) { int i; for (i = 0; sp_banks[i].num_bytes; i++) { unsigned long start, end; start = sp_banks[i].base_addr; end = start + sp_banks[i].num_bytes; while (start < end) { set_bit(start >> 20, sparc_valid_addr_bitmap); start += PAGE_SIZE; } } } static void map_high_region(unsigned long start_pfn, unsigned long end_pfn) { unsigned long tmp; #ifdef CONFIG_DEBUG_HIGHMEM printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn); #endif for (tmp = start_pfn; tmp < end_pfn; tmp++) free_highmem_page(pfn_to_page(tmp)); } void __init mem_init(void) { int i; if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) { prom_printf("BUG: fixmap and pkmap areas overlap\n"); prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n", PKMAP_BASE, (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START); prom_printf("Please mail [email protected].\n"); prom_halt(); } /* Saves us work later. 
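 * (Zero empty_zero_page once up front so later users can rely on it
 * already being cleared.)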
*/ memset((void *)empty_zero_page, 0, PAGE_SIZE); i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); i += 1; sparc_valid_addr_bitmap = (unsigned long *) memblock_alloc(i << 2, SMP_CACHE_BYTES); if (sparc_valid_addr_bitmap == NULL) { prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); prom_halt(); } memset(sparc_valid_addr_bitmap, 0, i << 2); taint_real_pages(); max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); for (i = 0; sp_banks[i].num_bytes != 0; i++) { unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT; unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT; if (end_pfn <= highstart_pfn) continue; if (start_pfn < highstart_pfn) start_pfn = highstart_pfn; map_high_region(start_pfn, end_pfn); } } void sparc_flush_page_to_ram(struct page *page) { unsigned long vaddr = (unsigned long)page_address(page); __flush_page_to_ram(vaddr); } EXPORT_SYMBOL(sparc_flush_page_to_ram); void sparc_flush_folio_to_ram(struct folio *folio) { unsigned long vaddr = (unsigned long)folio_address(folio); unsigned int i, nr = folio_nr_pages(folio); for (i = 0; i < nr; i++) __flush_page_to_ram(vaddr + i * PAGE_SIZE); } EXPORT_SYMBOL(sparc_flush_folio_to_ram); static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY, [VM_WRITE] = PAGE_COPY, [VM_WRITE | VM_READ] = PAGE_COPY, [VM_EXEC] = PAGE_READONLY, [VM_EXEC | VM_READ] = PAGE_READONLY, [VM_EXEC | VM_WRITE] = PAGE_COPY, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READONLY, [VM_SHARED | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, [VM_SHARED | VM_EXEC] = PAGE_READONLY, [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY, [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED }; DECLARE_VM_GET_PAGE_PROT
linux-master
arch/sparc/mm/init_32.c
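The bank walk in calc_highpages() above is easiest to see with concrete numbers. Below is a small userspace sketch (illustration only, not kernel code) that repeats the same arithmetic over a made-up two-bank layout; the bank addresses and the 256 MB low-memory cap are invented values, and PAGE_SHIFT is fixed at 12 as on sparc32.

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KB pages, as on sparc32 */

struct bank { unsigned long base, bytes; };

/* Invented example layout: 256 MB of low memory plus 128 MB above the cap. */
static struct bank banks[] = {
	{ 0x00000000UL, 0x10000000UL },
	{ 0x20000000UL, 0x08000000UL },
	{ 0, 0 },			/* terminator, like sp_banks[] */
};

/* Same walk as calc_highpages(): count page frames above max_low_pfn. */
static unsigned long count_highpages(unsigned long max_low_pfn)
{
	unsigned long nr = 0;
	int i;

	for (i = 0; banks[i].bytes != 0; i++) {
		unsigned long start_pfn = banks[i].base >> PAGE_SHIFT;
		unsigned long end_pfn = (banks[i].base + banks[i].bytes) >> PAGE_SHIFT;

		if (end_pfn <= max_low_pfn)
			continue;
		if (start_pfn < max_low_pfn)
			start_pfn = max_low_pfn;
		nr += end_pfn - start_pfn;
	}
	return nr;
}

int main(void)
{
	/* Pretend only the first 256 MB can be mapped directly. */
	unsigned long max_low_pfn = 0x10000000UL >> PAGE_SHIFT;

	printf("high page frames: %lu\n", count_highpages(max_low_pfn));
	return 0;
}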
// SPDX-License-Identifier: GPL-2.0 /* arch/sparc64/mm/tlb.c * * Copyright (C) 2004 David S. Miller <[email protected]> */ #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/preempt.h> #include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/tlb.h> /* Heavily inspired by the ppc64 code. */ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); struct mm_struct *mm = tb->mm; if (!tb->tlb_nr) goto out; flush_tsb_user(tb); if (CTX_VALID(mm->context)) { if (tb->tlb_nr == 1) { global_flush_tlb_page(mm, tb->vaddrs[0]); } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); #else __flush_tlb_pending(CTX_HWBITS(tb->mm->context), tb->tlb_nr, &tb->vaddrs[0]); #endif } } tb->tlb_nr = 0; out: put_cpu_var(tlb_batch); } void arch_enter_lazy_mmu_mode(void) { struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } void arch_leave_lazy_mmu_mode(void) { struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, bool exec, unsigned int hugepage_shift) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); unsigned long nr; vaddr &= PAGE_MASK; if (exec) vaddr |= 0x1UL; nr = tb->tlb_nr; if (unlikely(nr != 0 && mm != tb->mm)) { flush_tlb_pending(); nr = 0; } if (!tb->active) { flush_tsb_user_page(mm, vaddr, hugepage_shift); global_flush_tlb_page(mm, vaddr); goto out; } if (nr == 0) { tb->mm = mm; tb->hugepage_shift = hugepage_shift; } if (tb->hugepage_shift != hugepage_shift) { flush_tlb_pending(); tb->hugepage_shift = hugepage_shift; nr = 0; } tb->vaddrs[nr] = vaddr; tb->tlb_nr = ++nr; if (nr >= TLB_BATCH_NR) flush_tlb_pending(); out: put_cpu_var(tlb_batch); } void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm, unsigned int hugepage_shift) { if (tlb_type != hypervisor && pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); struct address_space *mapping; struct page *page; struct folio *folio; if (!pfn_valid(pfn)) goto no_cache_flush; page = pfn_to_page(pfn); if (PageReserved(page)) goto no_cache_flush; /* A real file page? */ folio = page_folio(page); mapping = folio_flush_mapping(folio); if (!mapping) goto no_cache_flush; paddr = (unsigned long) page_address(page); if ((paddr ^ vaddr) & (1 << 13)) flush_dcache_folio_all(mm, folio); } no_cache_flush: if (!fullmm) tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, pmd_t pmd) { unsigned long end; pte_t *pte; pte = pte_offset_map(&pmd, vaddr); if (!pte) return; end = vaddr + HPAGE_SIZE; while (vaddr < end) { if (pte_val(*pte) & _PAGE_VALID) { bool exec = pte_exec(*pte); tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT); } pte++; vaddr += PAGE_SIZE; } pte_unmap(pte); } static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr, pmd_t orig, pmd_t pmd) { if (mm == &init_mm) return; if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { /* * Note that this routine only sets pmds for THP pages. * Hugetlb pages are handled elsewhere. We need to check * for huge zero page. Huge zero pages are like hugetlb * pages in that there is no RSS, but there is the need * for TSB entries. So, huge zero page counts go into * hugetlb_pte_count. 
*/ if (pmd_val(pmd) & _PAGE_PMD_HUGE) { if (is_huge_zero_page(pmd_page(pmd))) mm->context.hugetlb_pte_count++; else mm->context.thp_pte_count++; } else { if (is_huge_zero_page(pmd_page(orig))) mm->context.hugetlb_pte_count--; else mm->context.thp_pte_count--; } /* Do not try to allocate the TSB hash table if we * don't have one already. We have various locks held * and thus we'll end up doing a GFP_KERNEL allocation * in an atomic context. * * Instead, we let the first TLB miss on a hugepage * take care of this. */ } if (!pmd_none(orig)) { addr &= HPAGE_MASK; if (pmd_trans_huge(orig)) { pte_t orig_pte = __pte(pmd_val(orig)); bool exec = pte_exec(orig_pte); tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT); tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, REAL_HPAGE_SHIFT); } else { tlb_batch_pmd_scan(mm, addr, orig); } } } void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { pmd_t orig = *pmdp; *pmdp = pmd; __set_pmd_acct(mm, addr, orig, pmd); } static inline pmd_t pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) { pmd_t old; do { old = *pmdp; } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd); __set_pmd_acct(vma->vm_mm, address, old, pmd); return old; } /* * This routine is only called when splitting a THP */ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t old, entry; entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID); old = pmdp_establish(vma, address, pmdp, entry); flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); /* * set_pmd_at() will not be called in a way to decrement * thp_pte_count when splitting a THP, so do it now. * Sanity check pmd before doing the actual decrement. */ if ((pmd_val(entry) & _PAGE_PMD_HUGE) && !is_huge_zero_page(pmd_page(entry))) (vma->vm_mm)->context.thp_pte_count--; return old; } void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { struct list_head *lh = (struct list_head *) pgtable; assert_spin_locked(&mm->page_table_lock); /* FIFO */ if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(lh); else list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); pmd_huge_pte(mm, pmdp) = pgtable; } pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { struct list_head *lh; pgtable_t pgtable; assert_spin_locked(&mm->page_table_lock); /* FIFO */ pgtable = pmd_huge_pte(mm, pmdp); lh = (struct list_head *) pgtable; if (list_empty(lh)) pmd_huge_pte(mm, pmdp) = NULL; else { pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; list_del(lh); } pte_val(pgtable[0]) = 0; pte_val(pgtable[1]) = 0; return pgtable; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
linux-master
arch/sparc/mm/tlb.c
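flush_tlb_pending() and tlb_batch_add_one() above implement a deferred-flush pattern: virtual addresses are queued per mm and drained when the batch fills, the owning mm changes, or lazy-MMU mode ends. The userspace sketch below shows only that queuing logic, with printf standing in for the real flush; the batch size and the integer "mm" handles are invented for illustration and smaller than the kernel's.

#include <stdio.h>

#define TLB_BATCH_NR 8		/* small example batch, not the kernel's value */

struct batch {
	int mm;				/* owning context handle */
	unsigned long nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

/* Stand-in for the real flush: just report what would be flushed. */
static void flush_pending(struct batch *b)
{
	unsigned long i;

	for (i = 0; i < b->nr; i++)
		printf("flush mm %d vaddr %#lx\n", b->mm, b->vaddrs[i]);
	b->nr = 0;
}

/* Same shape as tlb_batch_add_one(): drain on context change or full batch. */
static void batch_add(struct batch *b, int mm, unsigned long vaddr)
{
	if (b->nr && b->mm != mm)
		flush_pending(b);
	if (b->nr == 0)
		b->mm = mm;
	b->vaddrs[b->nr++] = vaddr;
	if (b->nr >= TLB_BATCH_NR)
		flush_pending(b);
}

int main(void)
{
	struct batch b = { 0, 0, { 0 } };
	unsigned long a;

	for (a = 0x1000; a < 0xb000; a += 0x1000)	/* ten pages from one mm */
		batch_add(&b, 1, a);
	batch_add(&b, 2, 0x80000000UL);			/* new mm forces a drain */
	flush_pending(&b);				/* leaving lazy mode */
	return 0;
}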
// SPDX-License-Identifier: GPL-2.0 #include <linux/spinlock.h> #include <linux/hardirq.h> #include <linux/ftrace.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/list.h> #include <trace/syscall.h> #include <asm/ftrace.h> #ifdef CONFIG_DYNAMIC_FTRACE static const u32 ftrace_nop = 0x01000000; static u32 ftrace_call_replace(unsigned long ip, unsigned long addr) { u32 call; s32 off; off = ((s32)addr - (s32)ip); call = 0x40000000 | ((u32)off >> 2); return call; } static int ftrace_modify_code(unsigned long ip, u32 old, u32 new) { u32 replaced; int faulted; __asm__ __volatile__( "1: cas [%[ip]], %[old], %[new]\n" " flush %[ip]\n" " mov 0, %[faulted]\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %[faulted]\n" " jmpl %[faulted] + %%lo(2b), %%g0\n" " mov 1, %[faulted]\n" " .previous\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" : "=r" (replaced), [faulted] "=r" (faulted) : [new] "0" (new), [old] "r" (old), [ip] "r" (ip) : "memory"); if (replaced != old && replaced != new) faulted = 2; return faulted; } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; u32 old, new; old = ftrace_call_replace(ip, addr); new = ftrace_nop; return ftrace_modify_code(ip, old, new); } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; u32 old, new; old = ftrace_nop; new = ftrace_call_replace(ip, addr); return ftrace_modify_code(ip, old, new); } int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); u32 old, new; old = *(u32 *) &ftrace_call; new = ftrace_call_replace(ip, (unsigned long)func); return ftrace_modify_code(ip, old, new); } #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); int ftrace_enable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); u32 old, new; old = *(u32 *) &ftrace_graph_call; new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller); return ftrace_modify_code(ip, old, new); } int ftrace_disable_ftrace_graph_caller(void) { unsigned long ip = (unsigned long)(&ftrace_graph_call); u32 old, new; old = *(u32 *) &ftrace_graph_call; new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub); return ftrace_modify_code(ip, old, new); } #endif /* !CONFIG_DYNAMIC_FTRACE */ /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long self_addr, unsigned long frame_pointer) { unsigned long return_hooker = (unsigned long) &return_to_handler; if (unlikely(atomic_read(&current->tracing_graph_pause))) return parent + 8UL; if (function_graph_enter(parent, self_addr, frame_pointer, NULL)) return parent + 8UL; return return_hooker; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
linux-master
arch/sparc/kernel/ftrace.c
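ftrace_call_replace() above builds a SPARC "call" instruction: opcode bit 0x40000000 plus the signed PC-relative offset, counted in 32-bit words, in the low 30 bits. The userspace sketch below encodes an instruction the same way and decodes it back to check the round trip; the addresses used are arbitrary example values, not real kernel symbols.

#include <stdio.h>
#include <stdint.h>

/* Same encoding as ftrace_call_replace(): 0x40000000 | (word offset). */
static uint32_t call_replace(uint32_t ip, uint32_t addr)
{
	int32_t off = (int32_t)addr - (int32_t)ip;

	return 0x40000000u | ((uint32_t)off >> 2);
}

/* Decode the 30-bit signed word offset back into an absolute target. */
static uint32_t call_target(uint32_t ip, uint32_t insn)
{
	int64_t off = insn & 0x3fffffffu;

	if (off & 0x20000000)		/* sign bit of the 30-bit field */
		off -= 0x40000000;
	return ip + (uint32_t)(off * 4);
}

int main(void)
{
	uint32_t ip = 0x00401000u, target = 0x00400100u;	/* backward call */
	uint32_t insn = call_replace(ip, target);

	printf("insn %08x -> target %08x (expected %08x)\n",
	       (unsigned)insn, (unsigned)call_target(ip, insn), (unsigned)target);
	return 0;
}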
// SPDX-License-Identifier: GPL-2.0 /* pci_common.c: PCI controller common support. * * Copyright (C) 1999, 2007 David S. Miller ([email protected]) */ #include <linux/string.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/of.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/oplib.h> #include "pci_impl.h" #include "pci_sun4v.h" static int config_out_of_range(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { if (bus < pbm->pci_first_busno || bus > pbm->pci_last_busno) return 1; return 0; } static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm, unsigned long bus, unsigned long devfn, unsigned long reg) { unsigned long rbits = pbm->config_space_reg_bits; if (config_out_of_range(pbm, bus, devfn, reg)) return NULL; reg = (reg & ((1 << rbits) - 1)); devfn <<= rbits; bus <<= rbits + 8; return (void *) (pbm->config_space | bus | devfn | reg); } /* At least on Sabre, it is necessary to access all PCI host controller * registers at their natural size, otherwise zeros are returned. * Strange but true, and I see no language in the UltraSPARC-IIi * programmer's manual that mentions this even indirectly. */ static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 *value) { u32 tmp32, *addr; u16 tmp16; u8 tmp8; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = (unsigned long) addr; align &= ~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) *value = tmp16 >> 8; else *value = tmp16 & 0xff; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; } break; case 2: if (where < 8) { pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; } else { pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; pci_config_read8(((u8 *)addr) + 1, &tmp8); *value |= ((u32) tmp8) << 8; } break; case 4: tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where, 2, &tmp32); *value = tmp32; tmp32 = 0xffffffff; sun4u_read_pci_cfg_host(pbm, bus, devfn, where + 2, 2, &tmp32); *value |= tmp32 << 16; break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; u16 tmp16; u8 tmp8; switch (size) { case 1: *value = 0xff; break; case 2: *value = 0xffff; break; case 4: *value = 0xffffffff; break; } if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_read_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_read8((u8 *)addr, &tmp8); *value = (u32) tmp8; break; case 2: if (where & 0x01) { printk("pci_read_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read16((u16 *)addr, &tmp16); *value = (u32) tmp16; break; case 4: if (where & 0x03) { printk("pci_read_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_read32(addr, value); break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm, unsigned char bus, unsigned int devfn, int where, int size, u32 value) { u32 *addr; addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: if (where < 8) { unsigned long align = 
(unsigned long) addr; u16 tmp16; align &= ~1; pci_config_read16((u16 *)align, &tmp16); if (where & 1) { tmp16 &= 0x00ff; tmp16 |= value << 8; } else { tmp16 &= 0xff00; tmp16 |= value; } pci_config_write16((u16 *)align, tmp16); } else pci_config_write8((u8 *)addr, value); break; case 2: if (where < 8) { pci_config_write16((u16 *)addr, value); } else { pci_config_write8((u8 *)addr, value & 0xff); pci_config_write8(((u8 *)addr) + 1, value >> 8); } break; case 4: sun4u_write_pci_cfg_host(pbm, bus, devfn, where, 2, value & 0xffff); sun4u_write_pci_cfg_host(pbm, bus, devfn, where + 2, 2, value >> 16); break; } return PCIBIOS_SUCCESSFUL; } static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; unsigned char bus = bus_dev->number; u32 *addr; if (!bus_dev->number && !PCI_SLOT(devfn)) return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, size, value); addr = sun4u_config_mkaddr(pbm, bus, devfn, where); if (!addr) return PCIBIOS_SUCCESSFUL; switch (size) { case 1: pci_config_write8((u8 *)addr, value); break; case 2: if (where & 0x01) { printk("pci_write_config_word: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write16((u16 *)addr, value); break; case 4: if (where & 0x03) { printk("pci_write_config_dword: misaligned reg [%x]\n", where); return PCIBIOS_SUCCESSFUL; } pci_config_write32(addr, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4u_pci_ops = { .read = sun4u_read_pci_cfg, .write = sun4u_write_pci_cfg, }; static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 *value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); unsigned long ret; if (config_out_of_range(pbm, bus, devfn, where)) { ret = ~0UL; } else { ret = pci_sun4v_config_get(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size); } switch (size) { case 1: *value = ret & 0xff; break; case 2: *value = ret & 0xffff; break; case 4: *value = ret & 0xffffffff; break; } return PCIBIOS_SUCCESSFUL; } static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, int where, int size, u32 value) { struct pci_pbm_info *pbm = bus_dev->sysdata; u32 devhandle = pbm->devhandle; unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); if (config_out_of_range(pbm, bus, devfn, where)) { /* Do nothing. */ } else { /* We don't check for hypervisor errors here, but perhaps * we should and influence our return value depending upon * what kind of error is thrown. 
*/ pci_sun4v_config_put(devhandle, HV_PCI_DEVICE_BUILD(bus, device, func), where, size, value); } return PCIBIOS_SUCCESSFUL; } struct pci_ops sun4v_pci_ops = { .read = sun4v_read_pci_cfg, .write = sun4v_write_pci_cfg, }; void pci_get_pbm_props(struct pci_pbm_info *pbm) { const u32 *val = of_get_property(pbm->op->dev.of_node, "bus-range", NULL); pbm->pci_first_busno = val[0]; pbm->pci_last_busno = val[1]; val = of_get_property(pbm->op->dev.of_node, "ino-bitmap", NULL); if (val) { pbm->ino_bitmap = (((u64)val[1] << 32UL) | ((u64)val[0] << 0UL)); } } static void pci_register_iommu_region(struct pci_pbm_info *pbm) { const u32 *vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL); if (vdma) { struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL); if (!rp) { pr_info("%s: Cannot allocate IOMMU resource.\n", pbm->name); return; } rp->name = "IOMMU"; rp->start = pbm->mem_space.start + (unsigned long) vdma[0]; rp->end = rp->start + (unsigned long) vdma[1] - 1UL; rp->flags = IORESOURCE_BUSY; if (request_resource(&pbm->mem_space, rp)) { pr_info("%s: Unable to request IOMMU resource.\n", pbm->name); kfree(rp); } } } void pci_determine_mem_io_space(struct pci_pbm_info *pbm) { const struct linux_prom_pci_ranges *pbm_ranges; int i, saw_mem, saw_io; int num_pbm_ranges; /* Corresponding generic code in of_pci_get_host_bridge_resources() */ saw_mem = saw_io = 0; pbm_ranges = of_get_property(pbm->op->dev.of_node, "ranges", &i); if (!pbm_ranges) { prom_printf("PCI: Fatal error, missing PBM ranges property " " for %s\n", pbm->name); prom_halt(); } num_pbm_ranges = i / sizeof(*pbm_ranges); memset(&pbm->mem64_space, 0, sizeof(struct resource)); for (i = 0; i < num_pbm_ranges; i++) { const struct linux_prom_pci_ranges *pr = &pbm_ranges[i]; unsigned long a, size, region_a; u32 parent_phys_hi, parent_phys_lo; u32 child_phys_mid, child_phys_lo; u32 size_hi, size_lo; int type; parent_phys_hi = pr->parent_phys_hi; parent_phys_lo = pr->parent_phys_lo; child_phys_mid = pr->child_phys_mid; child_phys_lo = pr->child_phys_lo; if (tlb_type == hypervisor) parent_phys_hi &= 0x0fffffff; size_hi = pr->size_hi; size_lo = pr->size_lo; type = (pr->child_phys_hi >> 24) & 0x3; a = (((unsigned long)parent_phys_hi << 32UL) | ((unsigned long)parent_phys_lo << 0UL)); region_a = (((unsigned long)child_phys_mid << 32UL) | ((unsigned long)child_phys_lo << 0UL)); size = (((unsigned long)size_hi << 32UL) | ((unsigned long)size_lo << 0UL)); switch (type) { case 0: /* PCI config space, 16MB */ pbm->config_space = a; break; case 1: /* 16-bit IO space, 16MB */ pbm->io_space.start = a; pbm->io_space.end = a + size - 1UL; pbm->io_space.flags = IORESOURCE_IO; pbm->io_offset = a - region_a; saw_io = 1; break; case 2: /* 32-bit MEM space, 2GB */ pbm->mem_space.start = a; pbm->mem_space.end = a + size - 1UL; pbm->mem_space.flags = IORESOURCE_MEM; pbm->mem_offset = a - region_a; saw_mem = 1; break; case 3: /* 64-bit MEM handling */ pbm->mem64_space.start = a; pbm->mem64_space.end = a + size - 1UL; pbm->mem64_space.flags = IORESOURCE_MEM; pbm->mem64_offset = a - region_a; saw_mem = 1; break; default: break; } } if (!saw_io || !saw_mem) { prom_printf("%s: Fatal error, missing %s PBM range.\n", pbm->name, (!saw_io ? 
"IO" : "MEM")); prom_halt(); } if (pbm->io_space.flags) printk("%s: PCI IO %pR offset %llx\n", pbm->name, &pbm->io_space, pbm->io_offset); if (pbm->mem_space.flags) printk("%s: PCI MEM %pR offset %llx\n", pbm->name, &pbm->mem_space, pbm->mem_offset); if (pbm->mem64_space.flags && pbm->mem_space.flags) { if (pbm->mem64_space.start <= pbm->mem_space.end) pbm->mem64_space.start = pbm->mem_space.end + 1; if (pbm->mem64_space.start > pbm->mem64_space.end) pbm->mem64_space.flags = 0; } if (pbm->mem64_space.flags) printk("%s: PCI MEM64 %pR offset %llx\n", pbm->name, &pbm->mem64_space, pbm->mem64_offset); pbm->io_space.name = pbm->mem_space.name = pbm->name; pbm->mem64_space.name = pbm->name; request_resource(&ioport_resource, &pbm->io_space); request_resource(&iomem_resource, &pbm->mem_space); if (pbm->mem64_space.flags) request_resource(&iomem_resource, &pbm->mem64_space); pci_register_iommu_region(pbm); } /* Generic helper routines for PCI error reporting. */ void pci_scan_for_target_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); pci_info(pdev, "%s: Device saw Target Abort [%016x]\n", pbm->name, status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_target_abort(pbm, bus); } void pci_scan_for_master_abort(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_REC_MASTER_ABORT)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); pci_info(pdev, "%s: Device received Master Abort " "[%016x]\n", pbm->name, status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_master_abort(pbm, bus); } void pci_scan_for_parity_error(struct pci_pbm_info *pbm, struct pci_bus *pbus) { struct pci_dev *pdev; struct pci_bus *bus; list_for_each_entry(pdev, &pbus->devices, bus_list) { u16 status, error_bits; pci_read_config_word(pdev, PCI_STATUS, &status); error_bits = (status & (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY)); if (error_bits) { pci_write_config_word(pdev, PCI_STATUS, error_bits); pci_info(pdev, "%s: Device saw Parity Error [%016x]\n", pbm->name, status); } } list_for_each_entry(bus, &pbus->children, node) pci_scan_for_parity_error(pbm, bus); }
linux-master
arch/sparc/kernel/pci_common.c
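sun4u_config_mkaddr() above packs the bus number, devfn and register offset into a single config-space address, with the register field width taken from config_space_reg_bits. The userspace sketch below shows just that bit packing; the base address, field width and device numbers are invented example values, not a real controller layout.

#include <stdio.h>

/* Same packing as sun4u_config_mkaddr(): reg | devfn << rbits | bus << (rbits + 8). */
static unsigned long config_mkaddr(unsigned long config_space, unsigned long rbits,
				   unsigned long bus, unsigned long devfn,
				   unsigned long reg)
{
	reg &= (1UL << rbits) - 1;	/* register offset within the function */
	devfn <<= rbits;		/* 8-bit device/function number above it */
	bus <<= rbits + 8;		/* bus number above the devfn field */
	return config_space | bus | devfn | reg;
}

int main(void)
{
	/* Invented values: base 0xf0000000, 8 register bits, bus 1, dev 2 fn 0. */
	unsigned long devfn = (2UL << 3) | 0;	/* PCI_DEVFN(2, 0) */
	unsigned long addr = config_mkaddr(0xf0000000UL, 8, 1, devfn, 0x10);

	printf("config address: %#lx\n", addr);
	return 0;
}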
// SPDX-License-Identifier: GPL-2.0 /* * leon_pci.c: LEON Host PCI support * * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom * * Code is partially derived from pcic.c */ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/export.h> #include <asm/leon.h> #include <asm/leon_pci.h> /* The LEON architecture does not rely on a BIOS or bootloader to setup * PCI for us. The Linux generic routines are used to setup resources, * reset values of configuration-space register settings are preserved. * * PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is * accessed through a Window which is translated to low 64KB in PCI space, the * first 4KB is not used so 60KB is available. */ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) { LIST_HEAD(resources); struct pci_bus *root_bus; struct pci_host_bridge *bridge; int ret; bridge = pci_alloc_host_bridge(0); if (!bridge) return; pci_add_resource_offset(&resources, &info->io_space, info->io_space.start - 0x1000); pci_add_resource(&resources, &info->mem_space); info->busn.flags = IORESOURCE_BUS; pci_add_resource(&resources, &info->busn); list_splice_init(&resources, &bridge->windows); bridge->dev.parent = &ofdev->dev; bridge->sysdata = info; bridge->busnr = 0; bridge->ops = info->ops; bridge->swizzle_irq = pci_common_swizzle; bridge->map_irq = info->map_irq; ret = pci_scan_root_bus_bridge(bridge); if (ret) { pci_free_host_bridge(bridge); return; } root_bus = bridge->bus; /* Assign devices with resources */ pci_assign_unassigned_resources(); pci_bus_add_devices(root_bus); } int pcibios_enable_device(struct pci_dev *dev, int mask) { struct resource *res; u16 cmd, oldcmd; int i; pci_read_config_word(dev, PCI_COMMAND, &cmd); oldcmd = cmd; pci_dev_for_each_resource(dev, res, i) { /* Only set up the requested stuff */ if (!(mask & (1<<i))) continue; if (res->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (res->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != oldcmd) { pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; }
linux-master
arch/sparc/kernel/leon_pci.c
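pcibios_enable_device() above only turns on the I/O and memory decode bits for the BARs the caller asked to enable. The userspace sketch below repeats that loop over a fake three-resource device; the flag and command-bit constants are simplified stand-ins for the real PCI ones.

#include <stdio.h>

/* Simplified stand-ins for the resource flags and PCI command bits. */
#define RES_IO	0x1
#define RES_MEM	0x2
#define CMD_IO	0x0001
#define CMD_MEM	0x0002

int main(void)
{
	unsigned int res_flags[3] = { RES_IO, RES_MEM, RES_MEM };	/* fake BARs */
	unsigned int mask = (1u << 0) | (1u << 2);	/* caller enables BAR0 and BAR2 */
	unsigned int cmd = 0, oldcmd = 0;
	int i;

	/* Same loop shape as pcibios_enable_device(): only requested BARs count. */
	for (i = 0; i < 3; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (res_flags[i] & RES_IO)
			cmd |= CMD_IO;
		if (res_flags[i] & RES_MEM)
			cmd |= CMD_MEM;
	}
	if (cmd != oldcmd)
		printf("enabling device (%04x -> %04x)\n", oldcmd, cmd);
	return 0;
}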
// SPDX-License-Identifier: GPL-2.0 /* * unaligned.c: Unaligned load/store trap handling with special * cases for the kernel to do them more quickly. * * Copyright (C) 1996 David S. Miller ([email protected]) * Copyright (C) 1996 Jakub Jelinek ([email protected]) */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/mm.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/perf_event.h> #include <linux/extable.h> #include <asm/setup.h> #include "kernel.h" enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ both, /* Swap, ldstub, etc. */ fpload, fpstore, invalid, }; static inline enum direction decode_direction(unsigned int insn) { unsigned long tmp = (insn >> 21) & 1; if(!tmp) return load; else { if(((insn>>19)&0x3f) == 15) return both; else return store; } } /* 8 = double-word, 4 = word, 2 = half-word */ static inline int decode_access_size(unsigned int insn) { insn = (insn >> 19) & 3; if(!insn) return 4; else if(insn == 3) return 8; else if(insn == 2) return 2; else { printk("Impossible unaligned trap. insn=%08x\n", insn); die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs); return 4; /* just to keep gcc happy. */ } } /* 0x400000 = signed, 0 = unsigned */ static inline int decode_signedness(unsigned int insn) { return (insn & 0x400000); } static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd) { if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { /* Wheee... */ __asm__ __volatile__("save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "save %sp, -0x40, %sp\n\t" "restore; restore; restore; restore;\n\t" "restore; restore; restore;\n\t"); } } static inline int sign_extend_imm13(int imm) { return imm << 19 >> 19; } static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return (!reg ? 0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. */ win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return win->locals[reg - 16]; /* yes, I know what this does... */ } static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs) { struct reg_window32 __user *win; unsigned long ret; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); /* Ho hum, the slightly complicated case. 
*/ win = (struct reg_window32 __user *) regs->u_regs[UREG_FP]; if ((unsigned long)win & 3) return -1; if (get_user(ret, &win->locals[reg - 16])) return -1; return ret; } static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { struct reg_window32 *win; if(reg < 16) return &regs->u_regs[reg]; win = (struct reg_window32 *) regs->u_regs[UREG_FP]; return &win->locals[reg - 16]; } static unsigned long compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } } unsigned long safe_compute_effective_address(struct pt_regs *regs, unsigned int insn) { unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; unsigned int rd = (insn >> 25) & 0x1f; if(insn & 0x2000) { maybe_flush_windows(rs1, 0, rd); return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd); return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs)); } } /* This is just to make gcc think panic does return... */ static void unaligned_panic(char *str) { panic("%s", str); } /* una_asm.S */ extern int do_int_load(unsigned long *dest_reg, int size, unsigned long *saddr, int is_signed); extern int __do_int_store(unsigned long *dst_addr, int size, unsigned long *src_val); static int do_int_store(int reg_num, int size, unsigned long *dst_addr, struct pt_regs *regs) { unsigned long zero[2] = { 0, 0 }; unsigned long *src_val; if (reg_num) src_val = fetch_reg_addr(reg_num, regs); else { src_val = &zero[0]; if (size == 8) zero[1] = fetch_reg(1, regs); } return __do_int_store(dst_addr, size, src_val); } extern void smp_capture(void); extern void smp_release(void); static inline void advance(struct pt_regs *regs) { regs->pc = regs->npc; regs->npc += 4; } static inline int floating_point_load_or_store_p(unsigned int insn) { return (insn >> 24) & 1; } static inline int ok_for_kernel(unsigned int insn) { return !floating_point_load_or_store_p(insn); } static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) { const struct exception_table_entry *entry; entry = search_exception_tables(regs->pc); if (!entry) { unsigned long address = compute_effective_address(regs, insn); if(address < PAGE_SIZE) { printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); } else printk(KERN_ALERT "Unable to handle kernel paging request in mna handler"); printk(KERN_ALERT " at virtual address %08lx\n",address); printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n", (current->mm ? current->mm->context : current->active_mm->context)); printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n", (current->mm ? (unsigned long) current->mm->pgd : (unsigned long) current->active_mm->pgd)); die_if_kernel("Oops", regs); /* Not reached */ } regs->pc = entry->fixup; regs->npc = regs->pc + 4; } asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) { enum direction dir = decode_direction(insn); int size = decode_access_size(insn); if(!ok_for_kernel(insn) || dir == both) { printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", regs->pc); unaligned_panic("Wheee. 
Kernel does fpu/atomic unaligned load/store."); } else { unsigned long addr = compute_effective_address(regs, insn); int err; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), size, (unsigned long *) addr, decode_signedness(insn)); break; case store: err = do_int_store(((insn>>25)&0x1f), size, (unsigned long *) addr, regs); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... */ } if (err) kernel_mna_trap_fault(regs, insn); else advance(regs); } } asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) { send_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)safe_compute_effective_address(regs, insn), current); }
linux-master
arch/sparc/kernel/unaligned_32.c
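compute_effective_address() above pulls rs1, rs2, rd and the 13-bit signed immediate out of the faulting instruction word. The userspace sketch below decodes a hand-built load instruction using the same field masks; the encoded instruction is an invented example, and the sign extension is written portably where the kernel helper uses a shift pair.

#include <stdio.h>
#include <stdint.h>

/* Portable version of the 13-bit sign extension; the kernel helper above
 * does the same thing with "imm << 19 >> 19". */
static int sign_extend_imm13(unsigned int imm)
{
	imm &= 0x1fff;
	return (int)(imm ^ 0x1000) - 0x1000;
}

int main(void)
{
	/* Hand-built example: ld [%o0 + -4], %o1 with the immediate (i) bit set.
	 * Field layout follows the masks used in compute_effective_address(). */
	uint32_t insn = (3u << 30)	/* op: load/store group */
		      | (9u << 25)	/* rd  = %o1 */
		      | (8u << 14)	/* rs1 = %o0 */
		      | (1u << 13)	/* i bit: use simm13, not rs2 */
		      | 0x1ffcu;	/* simm13 = -4 */

	unsigned int rd  = (insn >> 25) & 0x1f;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	int imm = sign_extend_imm13(insn & 0x1fff);

	printf("rd=%u rs1=%u imm=%d (i bit %s)\n",
	       rd, rs1, imm, (insn & 0x2000) ? "set" : "clear");
	return 0;
}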
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009 Daniel Hellstrom ([email protected]) Aeroflex Gaisler AB * Copyright (C) 2009 Konrad Eisele ([email protected]) Aeroflex Gaisler AB */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/interrupt.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <asm/oplib.h> #include <asm/timer.h> #include <asm/prom.h> #include <asm/leon.h> #include <asm/leon_amba.h> #include <asm/traps.h> #include <asm/cacheflush.h> #include <asm/smp.h> #include <asm/setup.h> #include "kernel.h" #include "prom.h" #include "irq.h" struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */ struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */ int leondebug_irq_disable; int leon_debug_irqout; static volatile u32 dummy_master_l10_counter; unsigned long amba_system_id; static DEFINE_SPINLOCK(leon_irq_lock); static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */ unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ unsigned int sparc_leon_eirq; #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) #define LEON_IACK (&leon3_irqctrl_regs->iclear) #define LEON_DO_ACK_HW 1 /* Return the last ACKed IRQ by the Extended IRQ controller. It has already * been (automatically) ACKed when the CPU takes the trap. */ static inline unsigned int leon_eirq_get(int cpu) { return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; } /* Handle one or multiple IRQs from the extended interrupt controller */ static void leon_handle_ext_irq(struct irq_desc *desc) { unsigned int eirq; struct irq_bucket *p; int cpu = sparc_leon3_cpuid(); eirq = leon_eirq_get(cpu); p = irq_map[eirq]; if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ generic_handle_irq(p->irq); } /* The extended IRQ controller has been found, this function registers it */ static void leon_eirq_setup(unsigned int eirq) { unsigned long mask, oldmask; unsigned int veirq; if (eirq < 1 || eirq > 0xf) { printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq); return; } veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0); /* * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ * controller have a mask-bit of their own, so this is safe. 
*/ irq_link(veirq); mask = 1 << eirq; oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id)); LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask)); sparc_leon_eirq = eirq; } unsigned long leon_get_irqmask(unsigned int irq) { unsigned long mask; if (!irq || ((irq > 0xf) && !sparc_leon_eirq) || ((irq > 0x1f) && sparc_leon_eirq)) { printk(KERN_ERR "leon_get_irqmask: false irq number: %d\n", irq); mask = 0; } else { mask = LEON_HARD_INT(irq); } return mask; } #ifdef CONFIG_SMP static int irq_choose_cpu(const struct cpumask *affinity) { cpumask_t mask; cpumask_and(&mask, cpu_online_mask, affinity); if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask)) return boot_cpu_id; else return cpumask_first(&mask); } #else #define irq_choose_cpu(affinity) boot_cpu_id #endif static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned long mask, oldmask, flags; int oldcpu, newcpu; mask = (unsigned long)data->chip_data; oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); newcpu = irq_choose_cpu(dest); if (oldcpu == newcpu) goto out; /* unmask on old CPU first before enabling on the selected CPU */ spin_lock_irqsave(&leon_irq_lock, flags); oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu)); LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask)); oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu)); LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask)); spin_unlock_irqrestore(&leon_irq_lock, flags); out: return IRQ_SET_MASK_OK; } static void leon_unmask_irq(struct irq_data *data) { unsigned long mask, oldmask, flags; int cpu; mask = (unsigned long)data->chip_data; cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); spin_lock_irqsave(&leon_irq_lock, flags); oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask)); spin_unlock_irqrestore(&leon_irq_lock, flags); } static void leon_mask_irq(struct irq_data *data) { unsigned long mask, oldmask, flags; int cpu; mask = (unsigned long)data->chip_data; cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); spin_lock_irqsave(&leon_irq_lock, flags); oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask)); spin_unlock_irqrestore(&leon_irq_lock, flags); } static unsigned int leon_startup_irq(struct irq_data *data) { irq_link(data->irq); leon_unmask_irq(data); return 0; } static void leon_shutdown_irq(struct irq_data *data) { leon_mask_irq(data); irq_unlink(data->irq); } /* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */ static void leon_eoi_irq(struct irq_data *data) { unsigned long mask = (unsigned long)data->chip_data; if (mask & LEON_DO_ACK_HW) LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW); } static struct irq_chip leon_irq = { .name = "leon", .irq_startup = leon_startup_irq, .irq_shutdown = leon_shutdown_irq, .irq_mask = leon_mask_irq, .irq_unmask = leon_unmask_irq, .irq_eoi = leon_eoi_irq, .irq_set_affinity = leon_set_affinity, }; /* * Build a LEON IRQ for the edge triggered LEON IRQ controller: * Edge (normal) IRQ - handle_simple_irq, ack=DON'T-CARE, never ack * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR * Per-CPU Edge - handle_percpu_irq, ack=0 */ unsigned int leon_build_device_irq(unsigned int real_irq, irq_flow_handler_t flow_handler, const char *name, int do_ack) { unsigned int irq; unsigned long mask; struct irq_desc *desc; irq = 0; mask = leon_get_irqmask(real_irq); if (mask == 0) goto out; irq = 
irq_alloc(real_irq, real_irq); if (irq == 0) goto out; if (do_ack) mask |= LEON_DO_ACK_HW; desc = irq_to_desc(irq); if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) { irq_set_chip_and_handler_name(irq, &leon_irq, flow_handler, name); irq_set_chip_data(irq, (void *)mask); } out: return irq; } static unsigned int _leon_build_device_irq(struct platform_device *op, unsigned int real_irq) { return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); } void leon_update_virq_handling(unsigned int virq, irq_flow_handler_t flow_handler, const char *name, int do_ack) { unsigned long mask = (unsigned long)irq_get_chip_data(virq); mask &= ~LEON_DO_ACK_HW; if (do_ack) mask |= LEON_DO_ACK_HW; irq_set_chip_and_handler_name(virq, &leon_irq, flow_handler, name); irq_set_chip_data(virq, (void *)mask); } static u32 leon_cycles_offset(void) { u32 rld, val, ctrl, off; rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld); val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val); ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) { val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val); off = 2 * rld - val; } else { off = rld - val; } return off; } #ifdef CONFIG_SMP /* smp clockevent irq */ static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) { struct clock_event_device *ce; int cpu = smp_processor_id(); leon_clear_profile_irq(cpu); if (cpu == boot_cpu_id) timer_interrupt(irq, NULL); ce = &per_cpu(sparc32_clockevent, cpu); irq_enter(); if (ce->event_handler) ce->event_handler(ce); irq_exit(); return IRQ_HANDLED; } #endif /* CONFIG_SMP */ void __init leon_init_timers(void) { int irq, eirq; struct device_node *rootnp, *np, *nnp; struct property *pp; int len; int icsel; int ampopts; int err; u32 config; u32 ctrl; sparc_config.get_cycles_offset = leon_cycles_offset; sparc_config.cs_period = 1000000 / HZ; sparc_config.features |= FEAT_L10_CLOCKSOURCE; #ifndef CONFIG_SMP sparc_config.features |= FEAT_L10_CLOCKEVENT; #endif leondebug_irq_disable = 0; leon_debug_irqout = 0; master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter; dummy_master_l10_counter = 0; rootnp = of_find_node_by_path("/ambapp0"); if (!rootnp) goto bad; /* Find System ID: GRLIB build ID and optional CHIP ID */ pp = of_find_property(rootnp, "systemid", &len); if (pp) amba_system_id = *(unsigned long *)pp->value; /* Find IRQMP IRQ Controller Registers base adr otherwise bail out */ np = of_find_node_by_name(rootnp, "GAISLER_IRQMP"); if (!np) { np = of_find_node_by_name(rootnp, "01_00d"); if (!np) goto bad; } pp = of_find_property(np, "reg", &len); if (!pp) goto bad; leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value; /* Find GPTIMER Timer Registers base address otherwise bail out. */ nnp = rootnp; retry: np = of_find_node_by_name(nnp, "GAISLER_GPTIMER"); if (!np) { np = of_find_node_by_name(nnp, "01_011"); if (!np) goto bad; } ampopts = 0; pp = of_find_property(np, "ampopts", &len); if (pp) { ampopts = *(int *)pp->value; if (ampopts == 0) { /* Skip this instance, resource already * allocated by other OS */ nnp = np; goto retry; } } /* Select Timer-Instance on Timer Core. 
Default is zero */ leon3_gptimer_idx = ampopts & 0x7; pp = of_find_property(np, "reg", &len); if (pp) leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **) pp->value; pp = of_find_property(np, "interrupts", &len); if (pp) leon3_gptimer_irq = *(unsigned int *)pp->value; if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq)) goto bad; ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, ctrl | LEON3_GPTIMER_CTRL_PENDING); ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0) leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING; else leon3_gptimer_ackmask = ~0; LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld, (((1000000 / HZ) - 1))); LEON3_BYPASS_STORE_PA( &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); /* * The IRQ controller may (if implemented) consist of multiple * IRQ controllers, each mapped on a 4Kb boundary. * Each CPU may be routed to different IRQCTRLs, however * we assume that all CPUs (in SMP system) is routed to the * same IRQ Controller, and for non-SMP only one IRQCTRL is * accessed anyway. * In AMP systems, Linux must run on CPU0 for the time being. */ icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]); icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf; leon3_irqctrl_regs += icsel; /* Mask all IRQs on boot-cpu IRQ controller */ LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0); /* Probe extended IRQ controller */ eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus) >> 16) & 0xf; if (eirq != 0) leon_eirq_setup(eirq); #ifdef CONFIG_SMP { unsigned long flags; /* * In SMP, sun4m adds a IPI handler to IRQ trap handler that * LEON never must take, sun4d and LEON overwrites the branch * with a NOP. 
*/ local_irq_save(flags); patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ local_ops->cache_all(); local_irq_restore(flags); } #endif config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); if (config & (1 << LEON3_GPTIMER_SEPIRQ)) leon3_gptimer_irq += leon3_gptimer_idx; else if ((config & LEON3_GPTIMER_TIMERS) > 1) pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); #ifdef CONFIG_SMP /* Install per-cpu IRQ handler for broadcasted ticker */ irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, "per-cpu", 0); err = request_irq(irq, leon_percpu_timer_ce_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL); #else irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); #endif if (err) { pr_err("Unable to attach timer IRQ%d\n", irq); prom_halt(); } LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, LEON3_GPTIMER_EN | LEON3_GPTIMER_RL | LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); return; bad: printk(KERN_ERR "No Timer/irqctrl found\n"); BUG(); return; } static void leon_clear_clock_irq(void) { u32 ctrl; ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, ctrl & leon3_gptimer_ackmask); } static void leon_load_profile_irq(int cpu, unsigned int limit) { } #ifdef CONFIG_SMP void leon_clear_profile_irq(int cpu) { } void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu) { unsigned long mask, flags, *addr; mask = leon_get_irqmask(irq_nr); spin_lock_irqsave(&leon_irq_lock, flags); addr = (unsigned long *)LEON_IMASK(cpu); LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask)); spin_unlock_irqrestore(&leon_irq_lock, flags); } #endif void __init leon_init_IRQ(void) { sparc_config.init_timers = leon_init_timers; sparc_config.build_device_irq = _leon_build_device_irq; sparc_config.clock_rate = 1000000; sparc_config.clear_clock_irq = leon_clear_clock_irq; sparc_config.load_profile_irq = leon_load_profile_irq; }
linux-master
arch/sparc/kernel/leon_kernel.c
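leon_mask_irq(), leon_unmask_irq() and leon_set_affinity() above all reduce to setting or clearing one bit in a per-CPU mask register, clearing it on the old CPU before setting it on the new one. The userspace sketch below models that with a plain array in place of the IRQMP registers; no locking is shown and the IRQ number is an arbitrary example.

#include <stdio.h>

static unsigned long imask[2];	/* plain memory standing in for LEON_IMASK(cpu) */

static void unmask_irq(int cpu, unsigned long mask) { imask[cpu] |= mask;  }
static void mask_irq(int cpu, unsigned long mask)   { imask[cpu] &= ~mask; }

/* Same order as leon_set_affinity(): clear on the old CPU, then set on the new. */
static void set_affinity(int oldcpu, int newcpu, unsigned long mask)
{
	mask_irq(oldcpu, mask);
	unmask_irq(newcpu, mask);
}

int main(void)
{
	unsigned long irq8 = 1UL << 8;	/* bit for hardware interrupt 8 */

	unmask_irq(0, irq8);		/* enable on CPU 0 */
	set_affinity(0, 1, irq8);	/* move it to CPU 1 */
	printf("cpu0 mask %#lx, cpu1 mask %#lx\n", imask[0], imask[1]);
	return 0;
}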
// SPDX-License-Identifier: GPL-2.0 /* linux/arch/sparc/kernel/signal.c * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 1995 David S. Miller ([email protected]) * Copyright (C) 1996 Miguel de Icaza ([email protected]) * Copyright (C) 1997 Eddie C. Dost ([email protected]) */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/smp.h> #include <linux/binfmts.h> /* do_coredum */ #include <linux/bitops.h> #include <linux/resume_user_mode.h> #include <linux/uaccess.h> #include <asm/ptrace.h> #include <asm/cacheflush.h> /* flush_sig_insns */ #include <asm/switch_to.h> #include "sigutil.h" #include "kernel.h" extern void fpsave(unsigned long *fpregs, unsigned long *fsr, void *fpqueue, unsigned long *fpqdepth); extern void fpload(unsigned long *fpregs, unsigned long *fsr); struct signal_frame { struct sparc_stackf ss; __siginfo32_t info; __siginfo_fpu_t __user *fpu_save; unsigned long insns[2] __attribute__ ((aligned (8))); unsigned int extramask[_NSIG_WORDS - 1]; unsigned int extra_size; /* Should be 0 */ __siginfo_rwin_t __user *rwin_save; } __attribute__((aligned(8))); struct rt_signal_frame { struct sparc_stackf ss; siginfo_t info; struct pt_regs regs; sigset_t mask; __siginfo_fpu_t __user *fpu_save; unsigned int insns[2]; stack_t stack; unsigned int extra_size; /* Should be 0 */ __siginfo_rwin_t __user *rwin_save; } __attribute__((aligned(8))); /* Align macros */ #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) /* Checks if the fp is valid. We always build signal frames which are * 16-byte aligned, therefore we can always enforce that the restore * frame has that property as well. */ static inline bool invalid_frame_pointer(void __user *fp, int fplen) { if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen)) return true; return false; } asmlinkage void do_sigreturn(struct pt_regs *regs) { unsigned long up_psr, pc, npc, ufp; struct signal_frame __user *sf; sigset_t set; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; int err; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; synchronize_user_stack(); sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv_and_exit; if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) goto segv_and_exit; if (ufp & 0x7) goto segv_and_exit; err = __get_user(pc, &sf->info.si_regs.pc); err |= __get_user(npc, &sf->info.si_regs.npc); if ((pc | npc) & 3) goto segv_and_exit; /* 2. Restore the state */ up_psr = regs->psr; err |= __copy_from_user(regs, &sf->info.si_regs, sizeof(struct pt_regs)); /* User can only change condition codes and FPU enabling in %psr. */ regs->psr = (up_psr & ~(PSR_ICC | PSR_EF)) | (regs->psr & (PSR_ICC | PSR_EF)); /* Prevent syscall restart. */ pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); if (fpu_save) err |= restore_fpu_state(regs, fpu_save); err |= __get_user(rwin_save, &sf->rwin_save); if (rwin_save) err |= restore_rwin_state(rwin_save); /* This is pretty much atomic, no amount locking would prevent * the races which exist anyways. 
*/ err |= __get_user(set.sig[0], &sf->info.si_mask); err |= __copy_from_user(&set.sig[1], &sf->extramask, (_NSIG_WORDS-1) * sizeof(unsigned int)); if (err) goto segv_and_exit; set_current_blocked(&set); return; segv_and_exit: force_sig(SIGSEGV); } asmlinkage void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; unsigned int psr, pc, npc, ufp; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; int err; synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) goto segv; if (ufp & 0x7) goto segv; err = __get_user(pc, &sf->regs.pc); err |= __get_user(npc, &sf->regs.npc); err |= ((pc | npc) & 0x03); err |= __get_user(regs->y, &sf->regs.y); err |= __get_user(psr, &sf->regs.psr); err |= __copy_from_user(&regs->u_regs[UREG_G1], &sf->regs.u_regs[UREG_G1], 15 * sizeof(u32)); regs->psr = (regs->psr & ~PSR_ICC) | (psr & PSR_ICC); /* Prevent syscall restart. */ pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); if (!err && fpu_save) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); err |= restore_altstack(&sf->stack); if (err) goto segv; regs->pc = pc; regs->npc = npc; err |= __get_user(rwin_save, &sf->rwin_save); if (!err && rwin_save) { if (restore_rwin_state(rwin_save)) goto segv; } set_current_blocked(&set); return; segv: force_sig(SIGSEGV); } static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP]; /* * If we are on the alternate signal stack and would overflow it, don't. * Return an always-bogus address instead so we will die with SIGSEGV. */ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize))) return (void __user *) -1L; /* This is the X/Open sanctioned signal stack switching. */ sp = sigsp(sp, ksig) - framesize; /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ sp &= ~15UL; return (void __user *) sp; } static int setup_frame(struct ksignal *ksig, struct pt_regs *regs, sigset_t *oldset) { struct signal_frame __user *sf; int sigframe_size, err, wsaved; void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); wsaved = current_thread_info()->w_saved; sigframe_size = sizeof(*sf); if (used_math()) sigframe_size += sizeof(__siginfo_fpu_t); if (wsaved) sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct signal_frame __user *) get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { force_exit_sig(SIGILL); return -EINVAL; } tail = sf + 1; /* 2. 
Save the current process state */ err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); err |= __put_user(0, &sf->extra_size); if (used_math()) { __siginfo_fpu_t __user *fp = tail; tail += sizeof(*fp); err |= save_fpu_state(regs, fp); err |= __put_user(fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } if (wsaved) { __siginfo_rwin_t __user *rwp = tail; tail += sizeof(*rwp); err |= save_rwin_state(wsaved, rwp); err |= __put_user(rwp, &sf->rwin_save); } else { err |= __put_user(0, &sf->rwin_save); } err |= __put_user(oldset->sig[0], &sf->info.si_mask); err |= __copy_to_user(sf->extramask, &oldset->sig[1], (_NSIG_WORDS - 1) * sizeof(unsigned int)); if (!wsaved) { err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], sizeof(struct reg_window32)); } else { struct reg_window32 *rp; rp = &current_thread_info()->reg_window[wsaved - 1]; err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); } if (err) return err; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = (unsigned long) sf; regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->info; /* 4. signal handler */ regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->npc = (regs->pc + 4); /* 5. return to kernel instructions */ if (ksig->ka.ka_restorer) regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; else { regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2); /* mov __NR_sigreturn, %g1 */ err |= __put_user(0x821020d8, &sf->insns[0]); /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); if (err) return err; /* Flush instruction space. */ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } return 0; } static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, sigset_t *oldset) { struct rt_signal_frame __user *sf; int sigframe_size, wsaved; void __user *tail; unsigned int psr; int err; synchronize_user_stack(); wsaved = current_thread_info()->w_saved; sigframe_size = sizeof(*sf); if (used_math()) sigframe_size += sizeof(__siginfo_fpu_t); if (wsaved) sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) get_sigframe(ksig, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) { force_exit_sig(SIGILL); return -EINVAL; } tail = sf + 1; err = __put_user(regs->pc, &sf->regs.pc); err |= __put_user(regs->npc, &sf->regs.npc); err |= __put_user(regs->y, &sf->regs.y); psr = regs->psr; if (used_math()) psr |= PSR_EF; err |= __put_user(psr, &sf->regs.psr); err |= __copy_to_user(&sf->regs.u_regs, regs->u_regs, sizeof(regs->u_regs)); err |= __put_user(0, &sf->extra_size); if (psr & PSR_EF) { __siginfo_fpu_t __user *fp = tail; tail += sizeof(*fp); err |= save_fpu_state(regs, fp); err |= __put_user(fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } if (wsaved) { __siginfo_rwin_t __user *rwp = tail; tail += sizeof(*rwp); err |= save_rwin_state(wsaved, rwp); err |= __put_user(rwp, &sf->rwin_save); } else { err |= __put_user(0, &sf->rwin_save); } err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); /* Setup sigaltstack */ err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]); if (!wsaved) { err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], sizeof(struct reg_window32)); } else { struct reg_window32 *rp; rp = &current_thread_info()->reg_window[wsaved - 1]; err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); } err |= copy_siginfo_to_user(&sf->info, &ksig->info); if (err) return 
err; regs->u_regs[UREG_FP] = (unsigned long) sf; regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->regs; regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->npc = (regs->pc + 4); if (ksig->ka.ka_restorer) regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; else { regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2); /* mov __NR_rt_sigreturn, %g1 */ err |= __put_user(0x82102065, &sf->insns[0]); /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); if (err) return err; /* Flush instruction space. */ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } return 0; } static inline void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int err; if (ksig->ka.sa.sa_flags & SA_SIGINFO) err = setup_rt_frame(ksig, regs, oldset); else err = setup_frame(ksig, regs, oldset); signal_setup_done(err, ksig, 0); } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, struct sigaction *sa) { switch(regs->u_regs[UREG_I0]) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: no_system_call_restart: regs->u_regs[UREG_I0] = EINTR; regs->psr |= PSR_C; break; case ERESTARTSYS: if (!(sa->sa_flags & SA_RESTART)) goto no_system_call_restart; fallthrough; case ERESTARTNOINTR: regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; regs->npc -= 4; } } /* Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { struct ksignal ksig; int restart_syscall; bool has_handler; /* It's a lot of work and synchronization to add a new ptrace * register for GDB to save and restore in order to get * orig_i0 correct for syscall restarts when debugging. * * Although it should be the case that most of the global * registers are volatile across a system call, glibc already * depends upon that fact that we preserve them. So we can't * just use any global register to save away the orig_i0 value. * * In particular %g2, %g3, %g4, and %g5 are all assumed to be * preserved across a system call trap by various pieces of * code in glibc. * * %g7 is used as the "thread register". %g6 is not used in * any fixed manner. %g6 is used as a scratch register and * a compiler temporary, but it's value is never used across * a system call. Therefore %g6 is usable for orig_i0 storage. */ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) regs->u_regs[UREG_G6] = orig_i0; has_handler = get_signal(&ksig); /* If the debugger messes with the program counter, it clears * the software "in syscall" bit, directing us to not perform * a syscall restart. 
*/ restart_syscall = 0; if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) { restart_syscall = 1; orig_i0 = regs->u_regs[UREG_G6]; } if (has_handler) { if (restart_syscall) syscall_restart(orig_i0, regs, &ksig.ka.sa); handle_signal(&ksig, regs); } else { if (restart_syscall) { switch (regs->u_regs[UREG_I0]) { case ERESTARTNOHAND: case ERESTARTSYS: case ERESTARTNOINTR: /* replay the system call when we are done */ regs->u_regs[UREG_I0] = orig_i0; regs->pc -= 4; regs->npc -= 4; pt_regs_clear_syscall(regs); fallthrough; case ERESTART_RESTARTBLOCK: regs->u_regs[UREG_G1] = __NR_restart_syscall; regs->pc -= 4; regs->npc -= 4; pt_regs_clear_syscall(regs); } } restore_saved_sigmask(); } } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) resume_user_mode_work(regs); } asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr, unsigned long sp) { int ret = -EFAULT; /* First see if old state is wanted. */ if (ossptr) { if (put_user(current->sas_ss_sp + current->sas_ss_size, &ossptr->the_stack) || __put_user(on_sig_stack(sp), &ossptr->cur_status)) goto out; } /* Now see if we want to update the new state. */ if (ssptr) { char *ss_sp; if (get_user(ss_sp, &ssptr->the_stack)) goto out; /* If the current stack was set with sigaltstack, don't swap stacks while we are on it. */ ret = -EPERM; if (current->sas_ss_sp && on_sig_stack(sp)) goto out; /* Since we don't know the extent of the stack, and we don't track onstack-ness, but rather calculate it, we must presume a size. Ho hum this interface is lossy. */ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current->sas_ss_size = SIGSTKSZ; } ret = 0; out: return ret; }
linux-master
arch/sparc/kernel/signal_32.c
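The two trampoline words written by setup_frame() and setup_rt_frame() above are a "mov imm, %g1" (really "or %g0, imm, %g1", i.e. 0x82102000 | imm) followed by "t 0x10". The userspace sketch below rebuilds those constants from the immediates 216 and 101 that appear in the frames above; it only prints the encodings, nothing is executed.

#include <stdio.h>

/* "mov imm, %g1" is encoded as "or %g0, imm, %g1": 0x82102000 | simm13. */
static unsigned int mov_imm_to_g1(unsigned int imm)
{
	return 0x82102000u | (imm & 0x1fffu);
}

int main(void)
{
	/* 216 and 101 are the immediates used in the signal frames above;
	 * 0x91d02010 is the fixed "t 0x10" trap word. */
	printf("sigreturn trampoline:    %08x %08x\n", mov_imm_to_g1(216), 0x91d02010u);
	printf("rt_sigreturn trampoline: %08x %08x\n", mov_imm_to_g1(101), 0x91d02010u);
	return 0;
}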