Columns: python_code (string, lengths 0 to 1.8M), repo_name (string, 7 distinct values), file_path (string, lengths 5 to 99).
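A minimal sketch of loading and inspecting rows with this schema, assuming the data is available as Parquet shards readable by the Hugging Face datasets library; the data_files path below is a hypothetical placeholder, not the dataset's actual location:

    from datasets import load_dataset

    # Hypothetical local path; substitute the real Parquet shards for this dataset.
    ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

    # Each row pairs one source file's full text with its repository and path.
    for row in ds.select(range(3)):
        print(row["repo_name"], row["file_path"], len(row["python_code"]))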
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PDC early console support - use PDC firmware to dump text via boot console
 *
 * Copyright (C) 2001-2022 Helge Deller <[email protected]>
 */

#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_core.h>
#include <linux/kgdb.h>
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for iodc_call() proto and friends */

static void pdc_console_write(struct console *co, const char *s, unsigned count)
{
	int i = 0;

	do {
		i += pdc_iodc_print(s + i, count - i);
	} while (i < count);
}

#ifdef CONFIG_KGDB
static int kgdb_pdc_read_char(void)
{
	int c = pdc_iodc_getc();

	return (c <= 0) ? NO_POLL_CHAR : c;
}

static void kgdb_pdc_write_char(u8 chr)
{
	/* no need to print char as it's shown on standard console */
	/* pdc_iodc_print(&chr, 1); */
}

static struct kgdb_io kgdb_pdc_io_ops = {
	.name = "kgdb_pdc",
	.read_char = kgdb_pdc_read_char,
	.write_char = kgdb_pdc_write_char,
};
#endif

static int __init pdc_earlycon_setup(struct earlycon_device *device, const char *opt)
{
	struct console *earlycon_console;

	/* If the console is duplex then copy the COUT parameters to CIN. */
	if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
		memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));

	earlycon_console = device->con;
	earlycon_console->write = pdc_console_write;
	device->port.iotype = UPIO_MEM32BE;

#ifdef CONFIG_KGDB
	kgdb_register_io_module(&kgdb_pdc_io_ops);
#endif

	return 0;
}

EARLYCON_DECLARE(pdc, pdc_earlycon_setup);
linux-master
arch/parisc/kernel/pdc_cons.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/reboot.h>

#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/ldcw.h>
#include <asm/processor.h>

static unsigned int __aligned(16) toc_lock = 1;
DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack) __visible;

static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc)
{
	int i;

	regs->gr[0] = (unsigned long)toc->cr[22];

	for (i = 1; i < 32; i++)
		regs->gr[i] = (unsigned long)toc->gr[i];

	for (i = 0; i < 8; i++)
		regs->sr[i] = (unsigned long)toc->sr[i];

	regs->iasq[0] = (unsigned long)toc->cr[17];
	regs->iasq[1] = (unsigned long)toc->iasq_back;
	regs->iaoq[0] = (unsigned long)toc->cr[18];
	regs->iaoq[1] = (unsigned long)toc->iaoq_back;

	regs->sar = (unsigned long)toc->cr[11];
	regs->iir = (unsigned long)toc->cr[19];
	regs->isr = (unsigned long)toc->cr[20];
	regs->ior = (unsigned long)toc->cr[21];
}

static void toc11_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_11 *toc)
{
	int i;

	regs->gr[0] = toc->cr[22];

	for (i = 1; i < 32; i++)
		regs->gr[i] = toc->gr[i];

	for (i = 0; i < 8; i++)
		regs->sr[i] = toc->sr[i];

	regs->iasq[0] = toc->cr[17];
	regs->iasq[1] = toc->iasq_back;
	regs->iaoq[0] = toc->cr[18];
	regs->iaoq[1] = toc->iaoq_back;

	regs->sar = toc->cr[11];
	regs->iir = toc->cr[19];
	regs->isr = toc->cr[20];
	regs->ior = toc->cr[21];
}

void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
{
	struct pdc_toc_pim_20 pim_data20;
	struct pdc_toc_pim_11 pim_data11;

	/* verify we wrote regs to the correct stack */
	BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id()));

	if (boot_cpu_data.cpu_type >= pcxu) {
		if (pdc_pim_toc20(&pim_data20))
			panic("Failed to get PIM data");
		toc20_to_pt_regs(regs, &pim_data20);
	} else {
		if (pdc_pim_toc11(&pim_data11))
			panic("Failed to get PIM data");
		toc11_to_pt_regs(regs, &pim_data11);
	}

#ifdef CONFIG_KGDB
	nmi_enter();

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(raw_smp_processor_id(), regs);
	kgdb_handle_exception(9, SIGTRAP, 0, regs);
#endif

	/* serialize output, otherwise all CPUs write backtrace at once */
	while (__ldcw(&toc_lock) == 0)
		; /* wait */
	show_regs(regs);
	toc_lock = 1; /* release lock for next CPU */

	if (raw_smp_processor_id() != 0)
		while (1) ; /* all but monarch CPU will wait endless. */

	/* give other CPUs time to show their backtrace */
	mdelay(2000);

	machine_restart("TOC");

	/* should never reach this */
	panic("TOC");
}

static __init int setup_toc(void)
{
	unsigned int csum = 0;
	unsigned long toc_code = (unsigned long)dereference_function_descriptor(toc_handler);
	int i;

	PAGE0->vec_toc = __pa(toc_code) & 0xffffffff;
#ifdef CONFIG_64BIT
	PAGE0->vec_toc_hi = __pa(toc_code) >> 32;
#endif
	PAGE0->vec_toclen = toc_handler_size;

	for (i = 0; i < toc_handler_size/4; i++)
		csum += ((u32 *)toc_code)[i];
	toc_handler_csum = -csum;
	pr_info("TOC handler registered\n");
	return 0;
}
early_initcall(setup_toc);
linux-master
arch/parisc/kernel/toc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Signal support for 32-bit kernel builds * * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org> * * Code was mostly borrowed from kernel/signal.c. * See kernel/signal.c for additional Copyrights. */ #include <linux/compat.h> #include <linux/module.h> #include <linux/unistd.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/uaccess.h> #include "signal32.h" #define DEBUG_COMPAT_SIG 0 #define DEBUG_COMPAT_SIG_LEVEL 2 #if DEBUG_COMPAT_SIG #define DBG(LEVEL, ...) \ ((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \ ? printk(__VA_ARGS__) : (void) 0) #else #define DBG(LEVEL, ...) #endif long restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf, struct pt_regs *regs) { long err = 0; compat_uint_t compat_reg; compat_uint_t compat_regt; int regn; /* When loading 32-bit values into 64-bit registers make sure to clear the upper 32-bits */ DBG(2,"restore_sigcontext32: PER_LINUX32 process\n"); DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs); DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc)); for(regn=0; regn < 32; regn++){ err |= __get_user(compat_reg,&sc->sc_gr[regn]); regs->gr[regn] = compat_reg; /* Load upper half */ err |= __get_user(compat_regt,&rf->rf_gr[regn]); regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n", regn, regs->gr[regn], compat_regt, compat_reg); } DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr)); /* XXX: BE WARNED FR's are 64-BIT! */ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr)); /* Better safe than sorry, pass __get_user two things of the same size and let gcc do the upward conversion to 64-bits */ err |= __get_user(compat_reg, &sc->sc_iaoq[0]); /* Load upper half */ err |= __get_user(compat_regt, &rf->rf_iaoq[0]); regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n", &sc->sc_iaoq[0], compat_reg); err |= __get_user(compat_reg, &sc->sc_iaoq[1]); /* Load upper half */ err |= __get_user(compat_regt, &rf->rf_iaoq[1]); regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n", &sc->sc_iaoq[1],compat_reg); DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n", regs->iaoq[0],regs->iaoq[1]); err |= __get_user(compat_reg, &sc->sc_iasq[0]); /* Load the upper half for iasq */ err |= __get_user(compat_regt, &rf->rf_iasq[0]); regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt); err |= __get_user(compat_reg, &sc->sc_iasq[1]); /* Load the upper half for iasq */ err |= __get_user(compat_regt, &rf->rf_iasq[1]); regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n", regs->iasq[0],regs->iasq[1]); err |= __get_user(compat_reg, &sc->sc_sar); /* Load the upper half for sar */ err |= __get_user(compat_regt, &rf->rf_sar); regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: 
upper_half & sar = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar); DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]); return err; } /* * Set up the sigcontext structure for this process. * This is not an easy task if the kernel is 64-bit, it will require * that we examine the process personality to determine if we need to * truncate for a 32-bit userspace. */ long setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf, struct pt_regs *regs, int in_syscall) { compat_int_t flags = 0; long err = 0; compat_uint_t compat_reg; compat_uint_t compat_regb; int regn; if (on_sig_stack((unsigned long) sc)) flags |= PARISC_SC_FLAG_ONSTACK; if (in_syscall) { DBG(1,"setup_sigcontext32: in_syscall\n"); flags |= PARISC_SC_FLAG_IN_SYSCALL; /* Truncate gr31 */ compat_reg = (compat_uint_t)(regs->gr[31]); /* regs->iaoq is undefined in the syscall return path */ err |= __put_user(compat_reg, &sc->sc_iaoq[0]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n", &sc->sc_iaoq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->gr[31] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[0]); DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->gr[31]+4); err |= __put_user(compat_reg, &sc->sc_iaoq[1]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", &sc->sc_iaoq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[1]); DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); /* Truncate sr3 */ compat_reg = (compat_uint_t)(regs->sr[3]); err |= __put_user(compat_reg, &sc->sc_iasq[0]); err |= __put_user(compat_reg, &sc->sc_iasq[1]); /* Store upper half */ compat_reg = (compat_uint_t)(regs->sr[3] >> 32); err |= __put_user(compat_reg, &rf->rf_iasq[0]); err |= __put_user(compat_reg, &rf->rf_iasq[1]); DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg); DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg); DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n", regs->gr[31], regs->gr[31]+4); } else { compat_reg = (compat_uint_t)(regs->iaoq[0]); err |= __put_user(compat_reg, &sc->sc_iaoq[0]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n", &sc->sc_iaoq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[0]); DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iaoq[1]); err |= __put_user(compat_reg, &sc->sc_iaoq[1]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", &sc->sc_iaoq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[1]); DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iasq[0]); err |= __put_user(compat_reg, &sc->sc_iasq[0]); DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n", &sc->sc_iasq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iasq[0] >> 32); err |= __put_user(compat_reg, &rf->rf_iasq[0]); DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iasq[1]); err |= __put_user(compat_reg, &sc->sc_iasq[1]); DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n", &sc->sc_iasq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iasq[1] >> 32); 
err |= __put_user(compat_reg, &rf->rf_iasq[1]); DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg); /* Print out the IAOQ for debugging */ DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n", regs->iaoq[0], regs->iaoq[1]); } err |= __put_user(flags, &sc->sc_flags); DBG(1,"setup_sigcontext32: Truncating general registers.\n"); for(regn=0; regn < 32; regn++){ /* Truncate a general register */ compat_reg = (compat_uint_t)(regs->gr[regn]); err |= __put_user(compat_reg, &sc->sc_gr[regn]); /* Store upper half */ compat_regb = (compat_uint_t)(regs->gr[regn] >> 32); err |= __put_user(compat_regb, &rf->rf_gr[regn]); /* DEBUG: Write out the "upper / lower" register data */ DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn, compat_regb, compat_reg); } /* Copy the floating point registers (same size) XXX: BE WARNED FR's are 64-BIT! */ DBG(1,"setup_sigcontext32: Copying from regs to sc, " "sc->sc_fr size = %#lx, regs->fr size = %#lx\n", sizeof(regs->fr), sizeof(sc->sc_fr)); err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr)); compat_reg = (compat_uint_t)(regs->sar); err |= __put_user(compat_reg, &sc->sc_sar); DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->sar >> 32); err |= __put_user(compat_reg, &rf->rf_sar); DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg); DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]); return err; }
linux-master
arch/parisc/kernel/signal32.c
// SPDX-License-Identifier: GPL-2.0-only /* * pci.c - Low-Level PCI Access in IA-64 * * Derived from bios32.c of i386 tree. * * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P. * David Mosberger-Tang <[email protected]> * Bjorn Helgaas <[email protected]> * Copyright (C) 2004 Silicon Graphics, Inc. * * Note: Above list of copyright holders is incomplete... */ #include <linux/acpi.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/memblock.h> #include <linux/export.h> #include <asm/page.h> #include <asm/io.h> #include <asm/sal.h> #include <asm/smp.h> #include <asm/irq.h> #include <asm/hw_irq.h> /* * Low-level SAL-based PCI configuration access functions. Note that SAL * calls are already serialized (via sal_lock), so we don't need another * synchronization mechanism here. */ #define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \ (((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg)) /* SAL 3.2 adds support for extended config space. */ #define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \ (((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg)) int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 *value) { u64 addr, data = 0; int mode, result; if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095)) return -EINVAL; if ((seg | reg) <= 255) { addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); mode = 0; } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); mode = 1; } else { return -EINVAL; } result = ia64_sal_pci_config_read(addr, mode, len, &data); if (result != 0) return -EINVAL; *value = (u32) data; return 0; } int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn, int reg, int len, u32 value) { u64 addr; int mode, result; if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095)) return -EINVAL; if ((seg | reg) <= 255) { addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg); mode = 0; } else if (sal_revision >= SAL_VERSION_CODE(3,2)) { addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg); mode = 1; } else { return -EINVAL; } result = ia64_sal_pci_config_write(addr, mode, len, value); if (result != 0) return -EINVAL; return 0; } static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { return raw_pci_read(pci_domain_nr(bus), bus->number, devfn, where, size, value); } static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { return raw_pci_write(pci_domain_nr(bus), bus->number, devfn, where, size, value); } struct pci_ops pci_root_ops = { .read = pci_read, .write = pci_write, }; struct pci_root_info { struct acpi_pci_root_info common; struct pci_controller controller; struct list_head io_resources; }; static unsigned int new_space(u64 phys_base, int sparse) { u64 mmio_base; int i; if (phys_base == 0) return 0; /* legacy I/O port space */ mmio_base = (u64) ioremap(phys_base, 0); for (i = 0; i < num_io_spaces; i++) if (io_space[i].mmio_base == mmio_base && io_space[i].sparse == sparse) return i; if (num_io_spaces == MAX_IO_SPACES) { pr_err("PCI: Too many IO port spaces " "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES); return ~0; } i = num_io_spaces++; io_space[i].mmio_base = mmio_base; io_space[i].sparse = sparse; return i; } static int add_io_space(struct device *dev, struct pci_root_info *info, struct resource_entry *entry) { 
struct resource_entry *iospace; struct resource *resource, *res = entry->res; char *name; unsigned long base, min, max, base_port; unsigned int sparse = 0, space_nr, len; len = strlen(info->common.name) + 32; iospace = resource_list_create_entry(NULL, len); if (!iospace) { dev_err(dev, "PCI: No memory for %s I/O port space\n", info->common.name); return -ENOMEM; } if (res->flags & IORESOURCE_IO_SPARSE) sparse = 1; space_nr = new_space(entry->offset, sparse); if (space_nr == ~0) goto free_resource; name = (char *)(iospace + 1); min = res->start - entry->offset; max = res->end - entry->offset; base = __pa(io_space[space_nr].mmio_base); base_port = IO_SPACE_BASE(space_nr); snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name, base_port + min, base_port + max); /* * The SDM guarantees the legacy 0-64K space is sparse, but if the * mapping is done by the processor (not the bridge), ACPI may not * mark it as sparse. */ if (space_nr == 0) sparse = 1; resource = iospace->res; resource->name = name; resource->flags = IORESOURCE_MEM; resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min); resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max); if (insert_resource(&iomem_resource, resource)) { dev_err(dev, "can't allocate host bridge io space resource %pR\n", resource); goto free_resource; } entry->offset = base_port; res->start = min + base_port; res->end = max + base_port; resource_list_add_tail(iospace, &info->io_resources); return 0; free_resource: resource_list_free_entry(iospace); return -ENOSPC; } /* * An IO port or MMIO resource assigned to a PCI host bridge may be * consumed by the host bridge itself or available to its child * bus/devices. The ACPI specification defines a bit (Producer/Consumer) * to tell whether the resource is consumed by the host bridge itself, * but firmware hasn't used that bit consistently, so we can't rely on it. * * On x86 and IA64 platforms, all IO port and MMIO resources are assumed * to be available to child bus/devices except one special case: * IO port [0xCF8-0xCFF] is consumed by the host bridge itself * to access PCI configuration space. * * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF]. */ static bool resource_is_pcicfg_ioport(struct resource *res) { return (res->flags & IORESOURCE_IO) && res->start == 0xCF8 && res->end == 0xCFF; } static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) { struct device *dev = &ci->bridge->dev; struct pci_root_info *info; struct resource *res; struct resource_entry *entry, *tmp; int status; status = acpi_pci_probe_root_resources(ci); if (status > 0) { info = container_of(ci, struct pci_root_info, common); resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { res = entry->res; if (res->flags & IORESOURCE_MEM) { /* * HP's firmware has a hack to work around a * Windows bug. Ignore these tiny memory ranges. 
*/ if (resource_size(res) <= 16) { resource_list_del(entry); insert_resource(&iomem_resource, entry->res); resource_list_add_tail(entry, &info->io_resources); } } else if (res->flags & IORESOURCE_IO) { if (resource_is_pcicfg_ioport(entry->res)) resource_list_destroy_entry(entry); else if (add_io_space(dev, info, entry)) resource_list_destroy_entry(entry); } } } return status; } static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci) { struct pci_root_info *info; struct resource_entry *entry, *tmp; info = container_of(ci, struct pci_root_info, common); resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) { release_resource(entry->res); resource_list_destroy_entry(entry); } kfree(info); } static struct acpi_pci_root_ops pci_acpi_root_ops = { .pci_ops = &pci_root_ops, .release_info = pci_acpi_root_release_info, .prepare_resources = pci_acpi_root_prepare_resources, }; struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { struct acpi_device *device = root->device; struct pci_root_info *info; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { dev_err(&device->dev, "pci_bus %04x:%02x: ignored (out of memory)\n", root->segment, (int)root->secondary.start); return NULL; } info->controller.segment = root->segment; info->controller.companion = device; info->controller.node = acpi_get_node(device->handle); INIT_LIST_HEAD(&info->io_resources); return acpi_pci_root_create(root, &pci_acpi_root_ops, &info->common, &info->controller); } int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { /* * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL * here, pci_create_root_bus() has been called by someone else and * sysdata is likely to be different from what we expect. Let it go in * that case. */ if (!bridge->dev.parent) { struct pci_controller *controller = bridge->bus->sysdata; ACPI_COMPANION_SET(&bridge->dev, controller->companion); } return 0; } void pcibios_fixup_device_resources(struct pci_dev *dev) { int idx; if (!dev->bus) return; for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) { struct resource *r = &dev->resource[idx]; if (!r->flags || r->parent || !r->start) continue; pci_claim_resource(dev, idx); } } EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources); static void pcibios_fixup_bridge_resources(struct pci_dev *dev) { int idx; if (!dev->bus) return; for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) { struct resource *r = &dev->resource[idx]; if (!r->flags || r->parent || !r->start) continue; pci_claim_bridge_resource(dev, idx); } } /* * Called after each bus is probed, but before its children are examined. 
*/ void pcibios_fixup_bus(struct pci_bus *b) { struct pci_dev *dev; if (b->self) { pci_read_bridge_bases(b); pcibios_fixup_bridge_resources(b->self); } list_for_each_entry(dev, &b->devices, bus_list) pcibios_fixup_device_resources(dev); } void pcibios_add_bus(struct pci_bus *bus) { acpi_pci_add_bus(bus); } void pcibios_remove_bus(struct pci_bus *bus) { acpi_pci_remove_bus(bus); } void pcibios_set_master (struct pci_dev *dev) { /* No special bus mastering setup handling */ } int pcibios_enable_device (struct pci_dev *dev, int mask) { int ret; ret = pci_enable_resources(dev, mask); if (ret < 0) return ret; if (!pci_dev_msi_enabled(dev)) return acpi_pci_irq_enable(dev); return 0; } void pcibios_disable_device (struct pci_dev *dev) { BUG_ON(atomic_read(&dev->enable_cnt)); if (!pci_dev_msi_enabled(dev)) acpi_pci_irq_disable(dev); } /** * pci_get_legacy_mem - generic legacy mem routine * @bus: bus to get legacy memory base address for * * Find the base of legacy memory for @bus. This is typically the first * megabyte of bus address space for @bus or is simply 0 on platforms whose * chipsets support legacy I/O and memory routing. Returns the base address * or an error pointer if an error occurred. * * This is the ia64 generic version of this routine. Other platforms * are free to override it with a machine vector. */ char *pci_get_legacy_mem(struct pci_bus *bus) { return (char *)__IA64_UNCACHED_OFFSET; } /** * pci_mmap_legacy_page_range - map legacy memory space to userland * @bus: bus whose legacy space we're mapping * @vma: vma passed in by mmap * * Map legacy memory space for this device back to userspace using a machine * vector to get the base address. */ int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, enum pci_mmap_state mmap_state) { unsigned long size = vma->vm_end - vma->vm_start; pgprot_t prot; char *addr; /* We only support mmap'ing of legacy memory space */ if (mmap_state != pci_mmap_mem) return -ENOSYS; /* * Avoid attribute aliasing. See Documentation/arch/ia64/aliasing.rst * for more details. */ if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) return -EINVAL; prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size, vma->vm_page_prot); addr = pci_get_legacy_mem(bus); if (IS_ERR(addr)) return PTR_ERR(addr); vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT; vma->vm_page_prot = prot; if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, vma->vm_page_prot)) return -EAGAIN; return 0; } /** * pci_legacy_read - read from legacy I/O space * @bus: bus to read * @port: legacy port value * @val: caller allocated storage for returned value * @size: number of bytes to read * * Simply reads @size bytes from @port and puts the result in @val. * * Again, this (and the write routine) are generic versions that can be * overridden by the platform. This is necessary on platforms that don't * support legacy I/O routing or that hard fail on legacy I/O timeouts. */ int pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size) { int ret = size; switch (size) { case 1: *val = inb(port); break; case 2: *val = inw(port); break; case 4: *val = inl(port); break; default: ret = -EINVAL; break; } return ret; } /** * pci_legacy_write - perform a legacy I/O write * @bus: bus pointer * @port: port to write * @val: value to write * @size: number of bytes to write from @val * * Simply writes @size bytes of @val to @port. 
*/ int pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size) { int ret = size; switch (size) { case 1: outb(val, port); break; case 2: outw(val, port); break; case 4: outl(val, port); break; default: ret = -EINVAL; break; } return ret; } /** * set_pci_cacheline_size - determine cacheline size for PCI devices * * We want to use the line-size of the outer-most cache. We assume * that this line-size is the same for all CPUs. * * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). */ static void __init set_pci_dfl_cacheline_size(void) { unsigned long levels, unique_caches; long status; pal_cache_config_info_t cci; status = ia64_pal_cache_summary(&levels, &unique_caches); if (status != 0) { pr_err("%s: ia64_pal_cache_summary() failed " "(status=%ld)\n", __func__, status); return; } status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2, &cci); if (status != 0) { pr_err("%s: ia64_pal_cache_config_info() failed " "(status=%ld)\n", __func__, status); return; } pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4; } static int __init pcibios_init(void) { set_pci_dfl_cacheline_size(); return 0; } subsys_initcall(pcibios_init);
linux-master
arch/ia64/pci/pci.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 * Derived from fixup.c of i386 tree.
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/vgaarb.h>
#include <linux/screen_info.h>

#include <asm/uv/uv.h>

/*
 * Fixup to mark boot BIOS video selected by BIOS before it changes
 *
 * From information provided by "Jon Smirl" <[email protected]>
 *
 * The standard boot ROM sequence for an x86 machine uses the BIOS
 * to select an initial video card for boot display. This boot video
 * card will have its BIOS copied to 0xC0000 in system RAM.
 * IORESOURCE_ROM_SHADOW is used to associate the boot video
 * card with this copy. On laptops this copy has to be used since
 * the main ROM may be compressed or combined with another image.
 * See pci_map_rom() for use of this flag. Before marking the device
 * with IORESOURCE_ROM_SHADOW check if a vga_default_device is already set
 * by either arch code or vga-arbitration; if so only apply the fixup to this
 * already-determined primary video card.
 */

static void pci_fixup_video(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	struct pci_bus *bus;
	u16 config;
	struct resource *res;

	if (is_uv_system())
		return;
	/* Maybe, this machine supports legacy memory map. */

	/* Is VGA routed to us? */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;

		/*
		 * From information provided by
		 * "David Miller" <[email protected]>
		 * The bridge control register is valid for PCI header
		 * type BRIDGE, or CARDBUS. Host to PCI controllers use
		 * PCI header type NORMAL.
		 */
		if (bridge && (pci_is_bridge(bridge))) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config);
			if (!(config & PCI_BRIDGE_CTL_VGA))
				return;
		}
		bus = bus->parent;
	}
	if (!vga_default_device() || pdev == vga_default_device()) {
		pci_read_config_word(pdev, PCI_COMMAND, &config);
		if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			res = &pdev->resource[PCI_ROM_RESOURCE];

			pci_disable_rom(pdev);
			if (res->parent)
				release_resource(res);

			res->start = 0xC0000;
			res->end = res->start + 0x20000 - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
				     IORESOURCE_PCI_FIXED;
			dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
linux-master
arch/ia64/pci/fixup.c
// SPDX-License-Identifier: GPL-2.0-or-later /* ** IA64 System Bus Adapter (SBA) I/O MMU manager ** ** (c) Copyright 2002-2005 Alex Williamson ** (c) Copyright 2002-2003 Grant Grundler ** (c) Copyright 2002-2005 Hewlett-Packard Company ** ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) ** ** ** ** This module initializes the IOC (I/O Controller) found on HP ** McKinley machines and their successors. ** */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/acpi.h> #include <linux/efi.h> #include <linux/nodemask.h> #include <linux/bitops.h> /* hweight64() */ #include <linux/crash_dump.h> #include <linux/iommu-helper.h> #include <linux/dma-map-ops.h> #include <linux/prefetch.h> #include <linux/swiotlb.h> #include <asm/delay.h> /* ia64_get_itc() */ #include <asm/io.h> #include <asm/page.h> /* PAGE_OFFSET */ #include <asm/dma.h> #include <asm/acpi-ext.h> #define PFX "IOC: " /* ** Enabling timing search of the pdir resource map. Output in /proc. ** Disabled by default to optimize performance. */ #undef PDIR_SEARCH_TIMING /* ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If ** not defined, all DMA will be 32bit and go through the TLB. ** There's potentially a conflict in the bio merge code with us ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing ** appears to give more performance than bio-level virtual merging, we'll ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to ** completely restrict DMA to the IOMMU. */ #define ALLOW_IOV_BYPASS /* ** This option specifically allows/disallows bypassing scatterlists with ** multiple entries. Coalescing these entries can allow better DMA streaming ** and in some cases shows better performance than entirely bypassing the ** IOMMU. Performance increase on the order of 1-2% sequential output/input ** using bonnie++ on a RAID0 MD device (sym2 & mpt). */ #undef ALLOW_IOV_BYPASS_SG /* ** If a device prefetches beyond the end of a valid pdir entry, it will cause ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should ** disconnect on 4k boundaries and prevent such issues. If the device is ** particularly aggressive, this option will keep the entire pdir valid such ** that prefetching will hit a valid address. This could severely impact ** error containment, and is therefore off by default. The page that is ** used for spill-over is poisoned, so that should help debugging somewhat. */ #undef FULL_VALID_PDIR #define ENABLE_MARK_CLEAN /* ** The number of debug flags is a clue - this code is fragile. NOTE: since ** tightening the use of res_lock the resource bitmap and actual pdir are no ** longer guaranteed to stay in sync. The sanity checking code isn't going to ** like that. */ #undef DEBUG_SBA_INIT #undef DEBUG_SBA_RUN #undef DEBUG_SBA_RUN_SG #undef DEBUG_SBA_RESOURCE #undef ASSERT_PDIR_SANITY #undef DEBUG_LARGE_SG_ENTRIES #undef DEBUG_BYPASS #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY) #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive #endif #define SBA_INLINE __inline__ /* #define SBA_INLINE */ #ifdef DEBUG_SBA_INIT #define DBG_INIT(x...) printk(x) #else #define DBG_INIT(x...) #endif #ifdef DEBUG_SBA_RUN #define DBG_RUN(x...) 
printk(x) #else #define DBG_RUN(x...) #endif #ifdef DEBUG_SBA_RUN_SG #define DBG_RUN_SG(x...) printk(x) #else #define DBG_RUN_SG(x...) #endif #ifdef DEBUG_SBA_RESOURCE #define DBG_RES(x...) printk(x) #else #define DBG_RES(x...) #endif #ifdef DEBUG_BYPASS #define DBG_BYPASS(x...) printk(x) #else #define DBG_BYPASS(x...) #endif #ifdef ASSERT_PDIR_SANITY #define ASSERT(expr) \ if(!(expr)) { \ printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \ panic(#expr); \ } #else #define ASSERT(expr) #endif /* ** The number of pdir entries to "free" before issuing ** a read to PCOM register to flush out PCOM writes. ** Interacts with allocation granularity (ie 4 or 8 entries ** allocated and free'd/purged at a time might make this ** less interesting). */ #define DELAYED_RESOURCE_CNT 64 #define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) #define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP) #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ #define IOC_FUNC_ID 0x000 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */ #define IOC_IBASE 0x300 /* IO TLB */ #define IOC_IMASK 0x308 #define IOC_PCOM 0x310 #define IOC_TCNFG 0x318 #define IOC_PDIR_BASE 0x320 #define IOC_ROPE0_CFG 0x500 #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ /* AGP GART driver looks for this */ #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL /* ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register) ** ** Some IOCs (sx1000) can run at the above pages sizes, but are ** really only supported using the IOC at a 4k page size. ** ** iovp_size could only be greater than PAGE_SIZE if we are ** confident the drivers really only touch the next physical ** page iff that driver instance owns it. */ static unsigned long iovp_size; static unsigned long iovp_shift; static unsigned long iovp_mask; struct ioc { void __iomem *ioc_hpa; /* I/O MMU base address */ char *res_map; /* resource map, bit == pdir entry */ u64 *pdir_base; /* physical base address */ unsigned long ibase; /* pdir IOV Space base */ unsigned long imask; /* pdir IOV Space mask */ unsigned long *res_hint; /* next avail IOVP - circular search */ unsigned long dma_mask; spinlock_t res_lock; /* protects the resource bitmap, but must be held when */ /* clearing pdir to prevent races with allocations. */ unsigned int res_bitshift; /* from the RIGHT! */ unsigned int res_size; /* size of resource map in bytes */ #ifdef CONFIG_NUMA unsigned int node; /* node where this IOC lives */ #endif #if DELAYED_RESOURCE_CNT > 0 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */ /* than res_lock for bigger systems. 
*/ int saved_cnt; struct sba_dma_pair { dma_addr_t iova; size_t size; } saved[DELAYED_RESOURCE_CNT]; #endif #ifdef PDIR_SEARCH_TIMING #define SBA_SEARCH_SAMPLE 0x100 unsigned long avg_search[SBA_SEARCH_SAMPLE]; unsigned long avg_idx; /* current index into avg_search */ #endif /* Stuff we don't need in performance path */ struct ioc *next; /* list of IOC's in system */ acpi_handle handle; /* for multiple IOC's */ const char *name; unsigned int func_id; unsigned int rev; /* HW revision of chip */ u32 iov_size; unsigned int pdir_size; /* in bytes, determined by IOV Space size */ struct pci_dev *sac_only_dev; }; static struct ioc *ioc_list, *ioc_found; static int reserve_sba_gart = 1; static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t); #define sba_sg_address(sg) sg_virt((sg)) #ifdef FULL_VALID_PDIR static u64 prefetch_spill_page; #endif #define GET_IOC(dev) ((dev_is_pci(dev)) \ ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) /* ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up ** (or rather not merge) DMAs into manageable chunks. ** On parisc, this is more of the software/tuning constraint ** rather than the HW. I/O MMU allocation algorithms can be ** faster with smaller sizes (to some degree). */ #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size) #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) /************************************ ** SBA register read and write support ** ** BE WARNED: register writes are posted. ** (ie follow writes which must reach HW with a read) ** */ #define READ_REG(addr) __raw_readq(addr) #define WRITE_REG(val, addr) __raw_writeq(val, addr) #ifdef DEBUG_SBA_INIT /** * sba_dump_tlb - debugging only - print IOMMU operating parameters * @hpa: base address of the IOMMU * * Print the size/location of the IO MMU PDIR. */ static void sba_dump_tlb(char *hpa) { DBG_INIT("IO TLB at 0x%p\n", (void *)hpa); DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE)); DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK)); DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG)); DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE)); DBG_INIT("\n"); } #endif #ifdef ASSERT_PDIR_SANITY /** * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry * @ioc: IO MMU structure which owns the pdir we are interested in. * @msg: text to print ont the output line. * @pide: pdir index. * * Print one entry of the IO MMU PDIR in human readable form. */ static void sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) { /* start printing from lowest pde in rval */ u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; uint rcnt; printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", msg, rptr, pide & (BITS_PER_LONG - 1), *rptr); rcnt = 0; while (rcnt < BITS_PER_LONG) { printk(KERN_DEBUG "%s %2d %p %016Lx\n", (rcnt == (pide & (BITS_PER_LONG - 1))) ? " -->" : " ", rcnt, ptr, (unsigned long long) *ptr ); rcnt++; ptr++; } printk(KERN_DEBUG "%s", msg); } /** * sba_check_pdir - debugging only - consistency checker * @ioc: IO MMU structure which owns the pdir we are interested in. * @msg: text to print ont the output line. 
* * Verify the resource map and pdir state is consistent */ static int sba_check_pdir(struct ioc *ioc, char *msg) { u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ u64 *pptr = ioc->pdir_base; /* pdir ptr */ uint pide = 0; while (rptr < rptr_end) { u64 rval; int rcnt; /* number of bits we might check */ rval = *rptr; rcnt = 64; while (rcnt) { /* Get last byte and highest bit from that */ u32 pde = ((u32)((*pptr >> (63)) & 0x1)); if ((rval & 0x1) ^ pde) { /* ** BUMMER! -- res_map != pdir -- ** Dump rval and matching pdir entries */ sba_dump_pdir_entry(ioc, msg, pide); return(1); } rcnt--; rval >>= 1; /* try the next bit */ pptr++; pide++; } rptr++; /* look at next word of res_map */ } /* It'd be nice if we always got here :^) */ return 0; } /** * sba_dump_sg - debugging only - print Scatter-Gather list * @ioc: IO MMU structure which owns the pdir we are interested in. * @startsg: head of the SG list * @nents: number of entries in SG list * * print the SG list so we can verify it's correct by hand. */ static void sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) { while (nents-- > 0) { printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, startsg->dma_address, startsg->dma_length, sba_sg_address(startsg)); startsg = sg_next(startsg); } } static void sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) { struct scatterlist *the_sg = startsg; int the_nents = nents; while (the_nents-- > 0) { if (sba_sg_address(the_sg) == 0x0UL) sba_dump_sg(NULL, startsg, nents); the_sg = sg_next(the_sg); } } #endif /* ASSERT_PDIR_SANITY */ /************************************************************** * * I/O Pdir Resource Management * * Bits set in the resource map are in use. * Each bit can represent a number of pages. * LSbs represent lower addresses (IOVA's). * ***************************************************************/ #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ /* Convert from IOVP to IOVA and vice versa. */ #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset)) #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) #define PDIR_ENTRY_SIZE sizeof(u64) #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift) #define RESMAP_MASK(n) ~(~0UL << (n)) #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) /** * For most cases the normal get_order is sufficient, however it limits us * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity. * It only incurs about 1 clock cycle to use this one with the static variable * and makes the code more intuitive. */ static SBA_INLINE int get_iovp_order (unsigned long size) { long double d = size - 1; long order; order = ia64_getf_exp(d); order = order - iovp_shift - 0xffff + 1; if (order < 0) order = 0; return order; } static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, unsigned int bitshiftcnt) { return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) + bitshiftcnt; } /** * sba_search_bitmap - find free space in IO PDIR resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. * @bits_wanted: number of entries we need. * @use_hint: use res_hint to indicate where to start looking * * Find consecutive free bits in resource bitmap. * Each bit represents one entry in the IO Pdir. * Cool perf optimization: search for log2(size) bits at a time. 
*/ static SBA_INLINE unsigned long sba_search_bitmap(struct ioc *ioc, struct device *dev, unsigned long bits_wanted, int use_hint) { unsigned long *res_ptr; unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); unsigned long flags, pide = ~0UL, tpide; unsigned long boundary_size; unsigned long shift; int ret; ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); ASSERT(res_ptr < res_end); boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift); BUG_ON(ioc->ibase & ~iovp_mask); shift = ioc->ibase >> iovp_shift; spin_lock_irqsave(&ioc->res_lock, flags); /* Allow caller to force a search through the entire resource space */ if (likely(use_hint)) { res_ptr = ioc->res_hint; } else { res_ptr = (ulong *)ioc->res_map; ioc->res_bitshift = 0; } /* * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts * if a TLB entry is purged while in use. sba_mark_invalid() * purges IOTLB entries in power-of-two sizes, so we also * allocate IOVA space in power-of-two sizes. */ bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift); if (likely(bits_wanted == 1)) { unsigned int bitshiftcnt; for(; res_ptr < res_end ; res_ptr++) { if (likely(*res_ptr != ~0UL)) { bitshiftcnt = ffz(*res_ptr); *res_ptr |= (1UL << bitshiftcnt); pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); ioc->res_bitshift = bitshiftcnt + bits_wanted; goto found_it; } } goto not_found; } if (likely(bits_wanted <= BITS_PER_LONG/2)) { /* ** Search the resource bit map on well-aligned values. ** "o" is the alignment. ** We need the alignment to invalidate I/O TLB using ** SBA HW features in the unmap path. */ unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift); uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); unsigned long mask, base_mask; base_mask = RESMAP_MASK(bits_wanted); mask = base_mask << bitshiftcnt; DBG_RES("%s() o %ld %p", __func__, o, res_ptr); for(; res_ptr < res_end ; res_ptr++) { DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); ASSERT(0 != mask); for (; mask ; mask <<= o, bitshiftcnt += o) { tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); ret = iommu_is_span_boundary(tpide, bits_wanted, shift, boundary_size); if ((0 == ((*res_ptr) & mask)) && !ret) { *res_ptr |= mask; /* mark resources busy! */ pide = tpide; ioc->res_bitshift = bitshiftcnt + bits_wanted; goto found_it; } } bitshiftcnt = 0; mask = base_mask; } } else { int qwords, bits, i; unsigned long *end; qwords = bits_wanted >> 6; /* /64 */ bits = bits_wanted - (qwords * BITS_PER_LONG); end = res_end - qwords; for (; res_ptr < end; res_ptr++) { tpide = ptr_to_pide(ioc, res_ptr, 0); ret = iommu_is_span_boundary(tpide, bits_wanted, shift, boundary_size); if (ret) goto next_ptr; for (i = 0 ; i < qwords ; i++) { if (res_ptr[i] != 0) goto next_ptr; } if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits)) continue; /* Found it, mark it */ for (i = 0 ; i < qwords ; i++) res_ptr[i] = ~0UL; res_ptr[i] |= RESMAP_MASK(bits); pide = tpide; res_ptr += qwords; ioc->res_bitshift = bits; goto found_it; next_ptr: ; } } not_found: prefetch(ioc->res_map); ioc->res_hint = (unsigned long *) ioc->res_map; ioc->res_bitshift = 0; spin_unlock_irqrestore(&ioc->res_lock, flags); return (pide); found_it: ioc->res_hint = res_ptr; spin_unlock_irqrestore(&ioc->res_lock, flags); return (pide); } /** * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. 
* @size: number of bytes to create a mapping for * * Given a size, find consecutive unmarked and then mark those bits in the * resource bit map. */ static int sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) { unsigned int pages_needed = size >> iovp_shift; #ifdef PDIR_SEARCH_TIMING unsigned long itc_start; #endif unsigned long pide; ASSERT(pages_needed); ASSERT(0 == (size & ~iovp_mask)); #ifdef PDIR_SEARCH_TIMING itc_start = ia64_get_itc(); #endif /* ** "seek and ye shall find"...praying never hurts either... */ pide = sba_search_bitmap(ioc, dev, pages_needed, 1); if (unlikely(pide >= (ioc->res_size << 3))) { pide = sba_search_bitmap(ioc, dev, pages_needed, 0); if (unlikely(pide >= (ioc->res_size << 3))) { #if DELAYED_RESOURCE_CNT > 0 unsigned long flags; /* ** With delayed resource freeing, we can give this one more shot. We're ** getting close to being in trouble here, so do what we can to make this ** one count. */ spin_lock_irqsave(&ioc->saved_lock, flags); if (ioc->saved_cnt > 0) { struct sba_dma_pair *d; int cnt = ioc->saved_cnt; d = &(ioc->saved[ioc->saved_cnt - 1]); spin_lock(&ioc->res_lock); while (cnt--) { sba_mark_invalid(ioc, d->iova, d->size); sba_free_range(ioc, d->iova, d->size); d--; } ioc->saved_cnt = 0; READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ spin_unlock(&ioc->res_lock); } spin_unlock_irqrestore(&ioc->saved_lock, flags); pide = sba_search_bitmap(ioc, dev, pages_needed, 0); if (unlikely(pide >= (ioc->res_size << 3))) { printk(KERN_WARNING "%s: I/O MMU @ %p is" "out of mapping resources, %u %u %lx\n", __func__, ioc->ioc_hpa, ioc->res_size, pages_needed, dma_get_seg_boundary(dev)); return -1; } #else printk(KERN_WARNING "%s: I/O MMU @ %p is" "out of mapping resources, %u %u %lx\n", __func__, ioc->ioc_hpa, ioc->res_size, pages_needed, dma_get_seg_boundary(dev)); return -1; #endif } } #ifdef PDIR_SEARCH_TIMING ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed; ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; #endif prefetchw(&(ioc->pdir_base[pide])); #ifdef ASSERT_PDIR_SANITY /* verify the first enable bit is clear */ if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) { sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); } #endif DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", __func__, size, pages_needed, pide, (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), ioc->res_bitshift ); return (pide); } /** * sba_free_range - unmark bits in IO PDIR resource bitmap * @ioc: IO MMU structure which owns the pdir we are interested in. * @iova: IO virtual address which was previously allocated. 
* @size: number of bytes to create a mapping for * * clear bits in the ioc's resource map */ static SBA_INLINE void sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) { unsigned long iovp = SBA_IOVP(ioc, iova); unsigned int pide = PDIR_INDEX(iovp); unsigned int ridx = pide >> 3; /* convert bit to byte address */ unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); int bits_not_wanted = size >> iovp_shift; unsigned long m; /* Round up to power-of-two size: see AR2305 note above */ bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift); for (; bits_not_wanted > 0 ; res_ptr++) { if (unlikely(bits_not_wanted > BITS_PER_LONG)) { /* these mappings start 64bit aligned */ *res_ptr = 0UL; bits_not_wanted -= BITS_PER_LONG; pide += BITS_PER_LONG; } else { /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); bits_not_wanted = 0; DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size, bits_not_wanted, m, pide, res_ptr, *res_ptr); ASSERT(m != 0); ASSERT(bits_not_wanted); ASSERT((*res_ptr & m) == m); /* verify same bits are set */ *res_ptr &= ~m; } } } /************************************************************** * * "Dynamic DMA Mapping" support (aka "Coherent I/O") * ***************************************************************/ /** * sba_io_pdir_entry - fill in one IO PDIR entry * @pdir_ptr: pointer to IO PDIR entry * @vba: Virtual CPU address of buffer to map * * SBA Mapping Routine * * Given a virtual address (vba, arg1) sba_io_pdir_entry() * loads the I/O PDIR entry pointed to by pdir_ptr (arg0). * Each IO Pdir entry consists of 8 bytes as shown below * (LSB == bit 0): * * 63 40 11 7 0 * +-+---------------------+----------------------------------+----+--------+ * |V| U | PPN[39:12] | U | FF | * +-+---------------------+----------------------------------+----+--------+ * * V == Valid Bit * U == Unused * PPN == Physical Page Number * * The physical address fields are filled with the results of virt_to_phys() * on the vba. */ #if 1 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \ | 0x8000000000000000ULL) #else void SBA_INLINE sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) { *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); } #endif #ifdef ENABLE_MARK_CLEAN /* * Since DMA is i-cache coherent, any (complete) pages that were written via * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to * flush them when they get mapped into an executable vm-area. */ static void mark_clean(void *addr, size_t size) { struct folio *folio = virt_to_folio(addr); ssize_t left = size; size_t offset = offset_in_folio(folio, addr); if (offset) { left -= folio_size(folio) - offset; if (left <= 0) return; folio = folio_next(folio); } while (left >= folio_size(folio)) { left -= folio_size(folio); set_bit(PG_arch_1, &folio->flags); if (!left) break; folio = folio_next(folio); } } #endif /** * sba_mark_invalid - invalidate one or more IO PDIR entries * @ioc: IO MMU structure which owns the pdir we are interested in. * @iova: IO Virtual Address mapped earlier * @byte_cnt: number of bytes this mapping covers. * * Marking the IO PDIR entry(ies) as Invalid and invalidate * corresponding IO TLB entry. The PCOM (Purge Command Register) * is to purge stale entries in the IO TLB when unmapping entries. 
* * The PCOM register supports purging of multiple pages, with a minium * of 1 page and a maximum of 2GB. Hardware requires the address be * aligned to the size of the range being purged. The size of the range * must be a power of 2. The "Cool perf optimization" in the * allocation routine helps keep that true. */ static SBA_INLINE void sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) { u32 iovp = (u32) SBA_IOVP(ioc,iova); int off = PDIR_INDEX(iovp); /* Must be non-zero and rounded up */ ASSERT(byte_cnt > 0); ASSERT(0 == (byte_cnt & ~iovp_mask)); #ifdef ASSERT_PDIR_SANITY /* Assert first pdir entry is set */ if (!(ioc->pdir_base[off] >> 60)) { sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); } #endif if (byte_cnt <= iovp_size) { ASSERT(off < ioc->pdir_size); iovp |= iovp_shift; /* set "size" field for PCOM */ #ifndef FULL_VALID_PDIR /* ** clear I/O PDIR entry "valid" bit ** Do NOT clear the rest - save it for debugging. ** We should only clear bits that have previously ** been enabled. */ ioc->pdir_base[off] &= ~(0x80000000000000FFULL); #else /* ** If we want to maintain the PDIR as valid, put in ** the spill page so devices prefetching won't ** cause a hard fail. */ ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); #endif } else { u32 t = get_iovp_order(byte_cnt) + iovp_shift; iovp |= t; ASSERT(t <= 31); /* 2GB! Max value of "size" field */ do { /* verify this pdir entry is enabled */ ASSERT(ioc->pdir_base[off] >> 63); #ifndef FULL_VALID_PDIR /* clear I/O Pdir entry "valid" bit first */ ioc->pdir_base[off] &= ~(0x80000000000000FFULL); #else ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); #endif off++; byte_cnt -= iovp_size; } while (byte_cnt > 0); } WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); } /** * sba_map_page - map one buffer and return IOVA for DMA * @dev: instance of PCI owned by the driver that's asking. * @page: page to map * @poff: offset into page * @size: number of bytes to map * @dir: dma direction * @attrs: optional dma attributes * * See Documentation/core-api/dma-api-howto.rst */ static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long poff, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct ioc *ioc; void *addr = page_address(page) + poff; dma_addr_t iovp; dma_addr_t offset; u64 *pdir_start; int pide; #ifdef ASSERT_PDIR_SANITY unsigned long flags; #endif #ifdef ALLOW_IOV_BYPASS unsigned long pci_addr = virt_to_phys(addr); #endif #ifdef ALLOW_IOV_BYPASS ASSERT(to_pci_dev(dev)->dma_mask); /* ** Check if the PCI device can DMA to ptr... if so, just return ptr */ if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) { /* ** Device is bit capable of DMA'ing to the buffer... 
** just return the PCI address of ptr */ DBG_BYPASS("sba_map_page() bypass mask/addr: " "0x%lx/0x%lx\n", to_pci_dev(dev)->dma_mask, pci_addr); return pci_addr; } #endif ioc = GET_IOC(dev); ASSERT(ioc); prefetch(ioc->res_hint); ASSERT(size > 0); ASSERT(size <= DMA_CHUNK_SIZE); /* save offset bits */ offset = ((dma_addr_t) (long) addr) & ~iovp_mask; /* round up to nearest iovp_size */ size = (size + offset + ~iovp_mask) & iovp_mask; #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); if (sba_check_pdir(ioc,"Check before sba_map_page()")) panic("Sanity check failed"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif pide = sba_alloc_range(ioc, dev, size); if (pide < 0) return DMA_MAPPING_ERROR; iovp = (dma_addr_t) pide << iovp_shift; DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset); pdir_start = &(ioc->pdir_base[pide]); while (size > 0) { ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ sba_io_pdir_entry(pdir_start, (unsigned long) addr); DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); addr += iovp_size; size -= iovp_size; pdir_start++; } /* force pdir update */ wmb(); /* form complete address */ #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); sba_check_pdir(ioc,"Check after sba_map_page()"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif return SBA_IOVA(ioc, iovp, offset); } #ifdef ENABLE_MARK_CLEAN static SBA_INLINE void sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) { u32 iovp = (u32) SBA_IOVP(ioc,iova); int off = PDIR_INDEX(iovp); void *addr; if (size <= iovp_size) { addr = phys_to_virt(ioc->pdir_base[off] & ~0xE000000000000FFFULL); mark_clean(addr, size); } else { do { addr = phys_to_virt(ioc->pdir_base[off] & ~0xE000000000000FFFULL); mark_clean(addr, min(size, iovp_size)); off++; size -= iovp_size; } while (size > 0); } } #endif /** * sba_unmap_page - unmap one IOVA and free resources * @dev: instance of PCI owned by the driver that's asking. * @iova: IOVA of driver buffer previously mapped. * @size: number of bytes mapped in driver buffer. * @dir: R/W or both. 
* @attrs: optional dma attributes * * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir, unsigned long attrs) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 struct sba_dma_pair *d; #endif unsigned long flags; dma_addr_t offset; ioc = GET_IOC(dev); ASSERT(ioc); #ifdef ALLOW_IOV_BYPASS if (likely((iova & ioc->imask) != ioc->ibase)) { /* ** Address does not fall w/in IOVA, must be bypassing */ DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n", iova); #ifdef ENABLE_MARK_CLEAN if (dir == DMA_FROM_DEVICE) { mark_clean(phys_to_virt(iova), size); } #endif return; } #endif offset = iova & ~iovp_mask; DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); iova ^= offset; /* clear offset bits */ size += offset; size = ROUNDUP(size, iovp_size); #ifdef ENABLE_MARK_CLEAN if (dir == DMA_FROM_DEVICE) sba_mark_clean(ioc, iova, size); #endif #if DELAYED_RESOURCE_CNT > 0 spin_lock_irqsave(&ioc->saved_lock, flags); d = &(ioc->saved[ioc->saved_cnt]); d->iova = iova; d->size = size; if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) { int cnt = ioc->saved_cnt; spin_lock(&ioc->res_lock); while (cnt--) { sba_mark_invalid(ioc, d->iova, d->size); sba_free_range(ioc, d->iova, d->size); d--; } ioc->saved_cnt = 0; READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ spin_unlock(&ioc->res_lock); } spin_unlock_irqrestore(&ioc->saved_lock, flags); #else /* DELAYED_RESOURCE_CNT == 0 */ spin_lock_irqsave(&ioc->res_lock, flags); sba_mark_invalid(ioc, iova, size); sba_free_range(ioc, iova, size); READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ spin_unlock_irqrestore(&ioc->res_lock, flags); #endif /* DELAYED_RESOURCE_CNT == 0 */ } /** * sba_alloc_coherent - allocate/map shared mem for DMA * @dev: instance of PCI owned by the driver that's asking. * @size: number of bytes mapped in driver buffer. * @dma_handle: IOVA of new buffer. * * See Documentation/core-api/dma-api-howto.rst */ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { struct page *page; struct ioc *ioc; int node = -1; void *addr; ioc = GET_IOC(dev); ASSERT(ioc); #ifdef CONFIG_NUMA node = ioc->node; #endif page = alloc_pages_node(node, flags, get_order(size)); if (unlikely(!page)) return NULL; addr = page_address(page); memset(addr, 0, size); *dma_handle = page_to_phys(page); #ifdef ALLOW_IOV_BYPASS ASSERT(dev->coherent_dma_mask); /* ** Check if the PCI device can DMA to ptr... if so, just return ptr */ if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) { DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n", dev->coherent_dma_mask, *dma_handle); return addr; } #endif /* * If device can't bypass or bypass is disabled, pass the 32bit fake * device to map single to get an iova mapping. */ *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size, DMA_BIDIRECTIONAL, 0); if (dma_mapping_error(dev, *dma_handle)) return NULL; return addr; } /** * sba_free_coherent - free/unmap shared mem for DMA * @dev: instance of PCI owned by the driver that's asking. * @size: number of bytes mapped in driver buffer. * @vaddr: virtual address IOVA of "consistent" buffer. * @dma_handler: IO virtual address of "consistent" buffer. 
* * See Documentation/core-api/dma-api-howto.rst */ static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { sba_unmap_page(dev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } /* ** Since 0 is a valid pdir_base index value, can't use that ** to determine if a value is valid or not. Use a flag to indicate ** the SG list entry contains a valid pdir index. */ #define PIDE_FLAG 0x1UL #ifdef DEBUG_LARGE_SG_ENTRIES int dump_run_sg = 0; #endif /** * sba_fill_pdir - write allocated SG entries into IO PDIR * @ioc: IO MMU structure which owns the pdir we are interested in. * @startsg: list of IOVA/size pairs * @nents: number of entries in startsg list * * Take preprocessed SG list and write corresponding entries * in the IO PDIR. */ static SBA_INLINE int sba_fill_pdir( struct ioc *ioc, struct scatterlist *startsg, int nents) { struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ int n_mappings = 0; u64 *pdirp = NULL; unsigned long dma_offset = 0; while (nents-- > 0) { int cnt = startsg->dma_length; startsg->dma_length = 0; #ifdef DEBUG_LARGE_SG_ENTRIES if (dump_run_sg) printk(" %2d : %08lx/%05x %p\n", nents, startsg->dma_address, cnt, sba_sg_address(startsg)); #else DBG_RUN_SG(" %d : %08lx/%05x %p\n", nents, startsg->dma_address, cnt, sba_sg_address(startsg)); #endif /* ** Look for the start of a new DMA stream */ if (startsg->dma_address & PIDE_FLAG) { u32 pide = startsg->dma_address & ~PIDE_FLAG; dma_offset = (unsigned long) pide & ~iovp_mask; startsg->dma_address = 0; if (n_mappings) dma_sg = sg_next(dma_sg); dma_sg->dma_address = pide | ioc->ibase; pdirp = &(ioc->pdir_base[pide >> iovp_shift]); n_mappings++; } /* ** Look for a VCONTIG chunk */ if (cnt) { unsigned long vaddr = (unsigned long) sba_sg_address(startsg); ASSERT(pdirp); /* Since multiple Vcontig blocks could make up ** one DMA stream, *add* cnt to dma_len. */ dma_sg->dma_length += cnt; cnt += dma_offset; dma_offset=0; /* only want offset on first chunk */ cnt = ROUNDUP(cnt, iovp_size); do { sba_io_pdir_entry(pdirp, vaddr); vaddr += iovp_size; cnt -= iovp_size; pdirp++; } while (cnt > 0); } startsg = sg_next(startsg); } /* force pdir update */ wmb(); #ifdef DEBUG_LARGE_SG_ENTRIES dump_run_sg = 0; #endif return(n_mappings); } /* ** Two address ranges are DMA contiguous *iff* "end of prev" and ** "start of next" are both on an IOV page boundary. ** ** (shift left is a quick trick to mask off upper bits) */ #define DMA_CONTIG(__X, __Y) \ (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL) /** * sba_coalesce_chunks - preprocess the SG list * @ioc: IO MMU structure which owns the pdir we are interested in. * @startsg: list of IOVA/size pairs * @nents: number of entries in startsg list * * First pass is to walk the SG list and determine where the breaks are * in the DMA stream. Allocates PDIR entries but does not fill them. * Returns the number of DMA chunks. * * Doing the fill separate from the coalescing/allocation keeps the * code simpler. Future enhancement could make one pass through * the sglist do both. 
*/ static SBA_INLINE int sba_coalesce_chunks(struct ioc *ioc, struct device *dev, struct scatterlist *startsg, int nents) { struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ unsigned long vcontig_len; /* len of VCONTIG chunk */ unsigned long vcontig_end; struct scatterlist *dma_sg; /* next DMA stream head */ unsigned long dma_offset, dma_len; /* start/len of DMA stream */ int n_mappings = 0; unsigned int max_seg_size = dma_get_max_seg_size(dev); int idx; while (nents > 0) { unsigned long vaddr = (unsigned long) sba_sg_address(startsg); /* ** Prepare for first/next DMA stream */ dma_sg = vcontig_sg = startsg; dma_len = vcontig_len = vcontig_end = startsg->length; vcontig_end += vaddr; dma_offset = vaddr & ~iovp_mask; /* PARANOID: clear entries */ startsg->dma_address = startsg->dma_length = 0; /* ** This loop terminates one iteration "early" since ** it's always looking one "ahead". */ while (--nents > 0) { unsigned long vaddr; /* tmp */ startsg = sg_next(startsg); /* PARANOID */ startsg->dma_address = startsg->dma_length = 0; /* catch brokenness in SCSI layer */ ASSERT(startsg->length <= DMA_CHUNK_SIZE); /* ** First make sure current dma stream won't ** exceed DMA_CHUNK_SIZE if we coalesce the ** next entry. */ if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask) > DMA_CHUNK_SIZE) break; if (dma_len + startsg->length > max_seg_size) break; /* ** Then look for virtually contiguous blocks. ** ** append the next transaction? */ vaddr = (unsigned long) sba_sg_address(startsg); if (vcontig_end == vaddr) { vcontig_len += startsg->length; vcontig_end += startsg->length; dma_len += startsg->length; continue; } #ifdef DEBUG_LARGE_SG_ENTRIES dump_run_sg = (vcontig_len > iovp_size); #endif /* ** Not virtually contiguous. ** Terminate prev chunk. ** Start a new chunk. ** ** Once we start a new VCONTIG chunk, dma_offset ** can't change. And we need the offset from the first ** chunk - not the last one. Ergo Successive chunks ** must start on page boundaries and dove tail ** with it's predecessor. */ vcontig_sg->dma_length = vcontig_len; vcontig_sg = startsg; vcontig_len = startsg->length; /* ** 3) do the entries end/start on page boundaries? ** Don't update vcontig_end until we've checked. */ if (DMA_CONTIG(vcontig_end, vaddr)) { vcontig_end = vcontig_len + vaddr; dma_len += vcontig_len; continue; } else { break; } } /* ** End of DMA Stream ** Terminate last VCONTIG block. ** Allocate space for DMA stream. */ vcontig_sg->dma_length = vcontig_len; dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; ASSERT(dma_len <= DMA_CHUNK_SIZE); idx = sba_alloc_range(ioc, dev, dma_len); if (idx < 0) { dma_sg->dma_length = 0; return -1; } dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift) | dma_offset); n_mappings++; } return n_mappings; } static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs); /** * sba_map_sg - map Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. * @sglist: array of buffer/length pairs * @nents: number of entries in list * @dir: R/W or both. 
* @attrs: optional dma attributes * * See Documentation/core-api/dma-api-howto.rst */ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; #ifdef ASSERT_PDIR_SANITY unsigned long flags; #endif #ifdef ALLOW_IOV_BYPASS_SG struct scatterlist *sg; #endif DBG_RUN_SG("%s() START %d entries\n", __func__, nents); ioc = GET_IOC(dev); ASSERT(ioc); #ifdef ALLOW_IOV_BYPASS_SG ASSERT(to_pci_dev(dev)->dma_mask); if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { for_each_sg(sglist, sg, nents, filled) { sg->dma_length = sg->length; sg->dma_address = virt_to_phys(sba_sg_address(sg)); } return filled; } #endif /* Fast path single entry scatterlists. */ if (nents == 1) { sglist->dma_length = sglist->length; sglist->dma_address = sba_map_page(dev, sg_page(sglist), sglist->offset, sglist->length, dir, attrs); if (dma_mapping_error(dev, sglist->dma_address)) return -EIO; return 1; } #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()")) { sba_dump_sg(ioc, sglist, nents); panic("Check before sba_map_sg_attrs()"); } spin_unlock_irqrestore(&ioc->res_lock, flags); #endif prefetch(ioc->res_hint); /* ** First coalesce the chunks and allocate I/O pdir space ** ** If this is one DMA stream, we can properly map using the ** correct virtual address associated with each DMA page. ** w/o this association, we wouldn't have coherent DMA! ** Access to the virtual address is what forces a two pass algorithm. */ coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); if (coalesced < 0) { sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs); return -ENOMEM; } /* ** Program the I/O Pdir ** ** map the virtual addresses to the I/O Pdir ** o dma_address will contain the pdir index ** o dma_len will contain the number of bytes to map ** o address contains the virtual address. */ filled = sba_fill_pdir(ioc, sglist, nents); #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()")) { sba_dump_sg(ioc, sglist, nents); panic("Check after sba_map_sg_attrs()\n"); } spin_unlock_irqrestore(&ioc->res_lock, flags); #endif ASSERT(coalesced == filled); DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled); return filled; } /** * sba_unmap_sg_attrs - unmap Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. * @sglist: array of buffer/length pairs * @nents: number of entries in list * @dir: R/W or both. 
* @attrs: optional dma attributes * * See Documentation/core-api/dma-api-howto.rst */ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, unsigned long attrs) { #ifdef ASSERT_PDIR_SANITY struct ioc *ioc; unsigned long flags; #endif DBG_RUN_SG("%s() START %d entries, %p,%x\n", __func__, nents, sba_sg_address(sglist), sglist->length); #ifdef ASSERT_PDIR_SANITY ioc = GET_IOC(dev); ASSERT(ioc); spin_lock_irqsave(&ioc->res_lock, flags); sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif while (nents && sglist->dma_length) { sba_unmap_page(dev, sglist->dma_address, sglist->dma_length, dir, attrs); sglist = sg_next(sglist); nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); #ifdef ASSERT_PDIR_SANITY spin_lock_irqsave(&ioc->res_lock, flags); sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()"); spin_unlock_irqrestore(&ioc->res_lock, flags); #endif } /************************************************************** * * Initialization and claim * ***************************************************************/ static void ioc_iova_init(struct ioc *ioc) { int tcnfg; int agp_found = 0; struct pci_dev *device = NULL; #ifdef FULL_VALID_PDIR unsigned long index; #endif /* ** Firmware programs the base and size of a "safe IOVA space" ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. */ ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL; ioc->iov_size = ~ioc->imask + 1; DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask, ioc->iov_size >> 20); switch (iovp_size) { case 4*1024: tcnfg = 0; break; case 8*1024: tcnfg = 1; break; case 16*1024: tcnfg = 2; break; case 64*1024: tcnfg = 3; break; default: panic(PFX "Unsupported IOTLB page size %ldK", iovp_size >> 10); break; } WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE; ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, get_order(ioc->pdir_size)); if (!ioc->pdir_base) panic(PFX "Couldn't allocate I/O Page Table\n"); memset(ioc->pdir_base, 0, ioc->pdir_size); DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__, iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); /* ** If an AGP device is present, only use half of the IOV space ** for PCI DMA. Unfortunately we can't know ahead of time ** whether GART support will actually be used, for now we ** can just key on an AGP device found in the system. ** We program the next pdir index after we stop w/ a key for ** the GART code to handshake on. */ for_each_pci_dev(device) agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); if (agp_found && reserve_sba_gart) { printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n", ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2); ioc->pdir_size /= 2; ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; } #ifdef FULL_VALID_PDIR /* ** Check to see if the spill page has been allocated, we don't need more than ** one across multiple SBAs. 
*/ if (!prefetch_spill_page) { char *spill_poison = "SBAIOMMU POISON"; int poison_size = 16; void *poison_addr, *addr; addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size)); if (!addr) panic(PFX "Couldn't allocate PDIR spill page\n"); poison_addr = addr; for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size) memcpy(poison_addr, spill_poison, poison_size); prefetch_spill_page = virt_to_phys(addr); DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page); } /* ** Set all the PDIR entries valid w/ the spill page as the target */ for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++) ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); #endif /* Clear I/O TLB of any possible entries */ WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); READ_REG(ioc->ioc_hpa + IOC_PCOM); /* Enable IOVA translation */ WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); READ_REG(ioc->ioc_hpa + IOC_IBASE); } static void __init ioc_resource_init(struct ioc *ioc) { spin_lock_init(&ioc->res_lock); #if DELAYED_RESOURCE_CNT > 0 spin_lock_init(&ioc->saved_lock); #endif /* resource map size dictated by pdir_size */ ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ ioc->res_size >>= 3; /* convert bit count to byte count */ DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size); ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(ioc->res_size)); if (!ioc->res_map) panic(PFX "Couldn't allocate resource map\n"); memset(ioc->res_map, 0, ioc->res_size); /* next available IOVP - circular search */ ioc->res_hint = (unsigned long *) ioc->res_map; #ifdef ASSERT_PDIR_SANITY /* Mark first bit busy - ie no IOVA 0 */ ioc->res_map[0] = 0x1; ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; #endif #ifdef FULL_VALID_PDIR /* Mark the last resource used so we don't prefetch beyond IOVA space */ ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF | prefetch_spill_page); #endif DBG_INIT("%s() res_map %x %p\n", __func__, ioc->res_size, (void *) ioc->res_map); } static void __init ioc_sac_init(struct ioc *ioc) { struct pci_dev *sac = NULL; struct pci_controller *controller = NULL; /* * pci_alloc_coherent() must return a DMA address which is * SAC (single address cycle) addressable, so allocate a * pseudo-device to enforce that. */ sac = kzalloc(sizeof(*sac), GFP_KERNEL); if (!sac) panic(PFX "Couldn't allocate struct pci_dev"); controller = kzalloc(sizeof(*controller), GFP_KERNEL); if (!controller) panic(PFX "Couldn't allocate struct pci_controller"); controller->iommu = ioc; sac->sysdata = controller; sac->dma_mask = 0xFFFFFFFFUL; sac->dev.bus = &pci_bus_type; ioc->sac_only_dev = sac; } static void __init ioc_zx1_init(struct ioc *ioc) { unsigned long rope_config; unsigned int i; if (ioc->rev < 0x20) panic(PFX "IOC 2.0 or later required for IOMMU support\n"); /* 38 bit memory controller + extra bit for range displaced by MMIO */ ioc->dma_mask = (0x1UL << 39) - 1; /* ** Clear ROPE(N)_CONFIG AO bit. ** Disables "NT Ordering" (~= !"Relaxed Ordering") ** Overrides bit 1 in DMA Hint Sets. ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701. 
*/ for (i=0; i<(8*8); i+=8) { rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i); rope_config &= ~IOC_ROPE_AO; WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i); } } typedef void (initfunc)(struct ioc *); struct ioc_iommu { u32 func_id; char *name; initfunc *init; }; static struct ioc_iommu ioc_iommu_info[] __initdata = { { ZX1_IOC_ID, "zx1", ioc_zx1_init }, { ZX2_IOC_ID, "zx2", NULL }, { SX1000_IOC_ID, "sx1000", NULL }, { SX2000_IOC_ID, "sx2000", NULL }, }; static void __init ioc_init(unsigned long hpa, struct ioc *ioc) { struct ioc_iommu *info; ioc->next = ioc_list; ioc_list = ioc; ioc->ioc_hpa = ioremap(hpa, 0x1000); ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) { if (ioc->func_id == info->func_id) { ioc->name = info->name; if (info->init) (info->init)(ioc); } } iovp_size = (1 << iovp_shift); iovp_mask = ~(iovp_size - 1); DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__, PAGE_SIZE >> 10, iovp_size >> 10); if (!ioc->name) { ioc->name = kmalloc(24, GFP_KERNEL); if (ioc->name) sprintf((char *) ioc->name, "Unknown (%04x:%04x)", ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); else ioc->name = "Unknown"; } ioc_iova_init(ioc); ioc_resource_init(ioc); ioc_sac_init(ioc); printk(KERN_INFO PFX "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n", ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, hpa, ioc->iov_size >> 20, ioc->ibase); } /************************************************************************** ** ** SBA initialization code (HW and SW) ** ** o identify SBA chip itself ** o FIXME: initialize DMA hints for reasonable defaults ** **************************************************************************/ #ifdef CONFIG_PROC_FS static void * ioc_start(struct seq_file *s, loff_t *pos) { struct ioc *ioc; loff_t n = *pos; for (ioc = ioc_list; ioc; ioc = ioc->next) if (!n--) return ioc; return NULL; } static void * ioc_next(struct seq_file *s, void *v, loff_t *pos) { struct ioc *ioc = v; ++*pos; return ioc->next; } static void ioc_stop(struct seq_file *s, void *v) { } static int ioc_show(struct seq_file *s, void *v) { struct ioc *ioc = v; unsigned long *res_ptr = (unsigned long *)ioc->res_map; int i, used = 0; seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n", ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); #ifdef CONFIG_NUMA if (ioc->node != NUMA_NO_NODE) seq_printf(s, "NUMA node : %d\n", ioc->node); #endif seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024)); seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024); for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) used += hweight64(*res_ptr); seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3); seq_printf(s, "PDIR used : %d entries\n", used); #ifdef PDIR_SEARCH_TIMING { unsigned long i = 0, avg = 0, min, max; min = max = ioc->avg_search[0]; for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { avg += ioc->avg_search[i]; if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; } avg /= SBA_SEARCH_SAMPLE; seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n", min, avg, max); } #endif #ifndef ALLOW_IOV_BYPASS seq_printf(s, "IOVA bypass disabled\n"); #endif return 0; } static const struct seq_operations ioc_seq_ops = { .start = ioc_start, .next = ioc_next, .stop = 
ioc_stop, .show = ioc_show }; static void __init ioc_proc_init(void) { struct proc_dir_entry *dir; dir = proc_mkdir("bus/mckinley", NULL); if (!dir) return; proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops); } #endif static void sba_connect_bus(struct pci_bus *bus) { acpi_handle handle, parent; acpi_status status; struct ioc *ioc; if (!PCI_CONTROLLER(bus)) panic(PFX "no sysdata on bus %d!\n", bus->number); if (PCI_CONTROLLER(bus)->iommu) return; handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion); if (!handle) return; /* * The IOC scope encloses PCI root bridges in the ACPI * namespace, so work our way out until we find an IOC we * claimed previously. */ do { for (ioc = ioc_list; ioc; ioc = ioc->next) if (ioc->handle == handle) { PCI_CONTROLLER(bus)->iommu = ioc; return; } status = acpi_get_parent(handle, &parent); handle = parent; } while (ACPI_SUCCESS(status)); printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number); } static void __init sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) { #ifdef CONFIG_NUMA unsigned int node; node = acpi_get_node(handle); if (node != NUMA_NO_NODE && !node_online(node)) node = NUMA_NO_NODE; ioc->node = node; #endif } static void __init acpi_sba_ioc_add(struct ioc *ioc) { acpi_handle handle = ioc->handle; acpi_status status; u64 hpa, length; struct acpi_device_info *adi; ioc_found = ioc->next; status = hp_acpi_csr_space(handle, &hpa, &length); if (ACPI_FAILURE(status)) goto err; status = acpi_get_object_info(handle, &adi); if (ACPI_FAILURE(status)) goto err; /* * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI * root bridges, and its CSR space includes the IOC function. */ if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) { hpa += ZX1_IOC_OFFSET; /* zx1 based systems default to kernel page size iommu pages */ if (!iovp_shift) iovp_shift = min(PAGE_SHIFT, 16); } kfree(adi); /* * default anything not caught above or specified on cmdline to 4k * iommu page size */ if (!iovp_shift) iovp_shift = 12; ioc_init(hpa, ioc); /* setup NUMA node association */ sba_map_ioc_to_node(ioc, handle); return; err: kfree(ioc); } static const struct acpi_device_id hp_ioc_iommu_device_ids[] = { {"HWP0001", 0}, {"HWP0004", 0}, {"", 0}, }; static int acpi_sba_ioc_attach(struct acpi_device *device, const struct acpi_device_id *not_used) { struct ioc *ioc; ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) return -ENOMEM; ioc->next = ioc_found; ioc_found = ioc; ioc->handle = device->handle; return 1; } static struct acpi_scan_handler acpi_sba_ioc_handler = { .ids = hp_ioc_iommu_device_ids, .attach = acpi_sba_ioc_attach, }; static int __init acpi_sba_ioc_init_acpi(void) { return acpi_scan_add_handler(&acpi_sba_ioc_handler); } /* This has to run before acpi_scan_init(). 
*/ arch_initcall(acpi_sba_ioc_init_acpi); static int sba_dma_supported (struct device *dev, u64 mask) { /* make sure it's at least 32bit capable */ return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); } static const struct dma_map_ops sba_dma_ops = { .alloc = sba_alloc_coherent, .free = sba_free_coherent, .map_page = sba_map_page, .unmap_page = sba_unmap_page, .map_sg = sba_map_sg_attrs, .unmap_sg = sba_unmap_sg_attrs, .dma_supported = sba_dma_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, .alloc_pages = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; static int __init sba_init(void) { /* * If we are booting a kdump kernel, the sba_iommu will cause devices * that were not shutdown properly to MCA as soon as they are turned * back on. Our only option for a successful kdump kernel boot is to * use swiotlb. */ if (is_kdump_kernel()) return 0; /* * ioc_found should be populated by the acpi_sba_ioc_handler's .attach() * routine, but that only happens if acpi_scan_init() has already run. */ while (ioc_found) acpi_sba_ioc_add(ioc_found); if (!ioc_list) return 0; { struct pci_bus *b = NULL; while ((b = pci_find_next_bus(b)) != NULL) sba_connect_bus(b); } /* no need for swiotlb with the iommu */ swiotlb_exit(); dma_ops = &sba_dma_ops; #ifdef CONFIG_PROC_FS ioc_proc_init(); #endif return 0; } subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */ static int __init nosbagart(char *str) { reserve_sba_gart = 0; return 1; } __setup("nosbagart", nosbagart); static int __init sba_page_override(char *str) { unsigned long page_size; page_size = memparse(str, &str); switch (page_size) { case 4096: case 8192: case 16384: case 65536: iovp_shift = ffs(page_size) - 1; break; default: printk("%s: unknown/unsupported iommu page size %ld\n", __func__, page_size); } return 1; } __setup("sbapagesize=",sba_page_override);
linux-master
arch/ia64/hp/common/sba_iommu.c
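Editor's illustrative sketch, not part of the kernel tree: the page-rounding arithmetic that sba_map_page() applies before allocating pdir entries, recreated standalone. The 4 KB iovp_shift and the sample address/length are assumptions picked for the demo, not values read from a real IOC.

/* demo_sba_round.c - standalone sketch of the IOVA sizing arithmetic */
#include <stdio.h>

int main(void)
{
	unsigned long iovp_shift = 12;                /* assume 4 KB I/O pages */
	unsigned long iovp_size  = 1UL << iovp_shift;
	unsigned long iovp_mask  = ~(iovp_size - 1);

	unsigned long addr = 0x12345678UL;            /* arbitrary CPU address */
	unsigned long len  = 3000;                    /* arbitrary byte count  */

	/* keep the sub-page offset, as sba_map_page() does */
	unsigned long offset = addr & ~iovp_mask;

	/* round (len + offset) up to a whole number of I/O pages */
	unsigned long span = (len + offset + ~iovp_mask) & iovp_mask;

	printf("offset=0x%lx span=0x%lx -> %lu pdir entries\n",
	       offset, span, span >> iovp_shift);
	return 0;
}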
// SPDX-License-Identifier: GPL-2.0-only /* * OpRegion handler to allow AML to call native firmware * * (c) Copyright 2007 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <[email protected]> * * This driver implements HP Open Source Review Board proposal 1842, * which was approved on 9/20/2006. * * For technical documentation, see the HP SPPA Firmware EAS, Appendix F. * * ACPI does not define a mechanism for AML methods to call native firmware * interfaces such as PAL or SAL. This OpRegion handler adds such a mechanism. * After the handler is installed, an AML method can call native firmware by * storing the arguments and firmware entry point to specific offsets in the * OpRegion. When AML reads the "return value" offset from the OpRegion, this * handler loads up the arguments, makes the firmware call, and returns the * result. */ #include <linux/module.h> #include <linux/acpi.h> #include <asm/sal.h> MODULE_AUTHOR("Bjorn Helgaas <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ACPI opregion handler for native firmware calls"); static bool force_register; module_param_named(force, force_register, bool, 0); MODULE_PARM_DESC(force, "Install opregion handler even without HPQ5001 device"); #define AML_NFW_SPACE 0xA1 struct ia64_pdesc { void *ip; void *gp; }; /* * N.B. The layout of this structure is defined in the HP SPPA FW EAS, and * the member offsets are embedded in AML methods. */ struct ia64_nfw_context { u64 arg[8]; struct ia64_sal_retval ret; u64 ip; u64 gp; u64 pad[2]; }; static void *virt_map(u64 address) { if (address & (1UL << 63)) return (void *) (__IA64_UNCACHED_OFFSET | address); return __va(address); } static void aml_nfw_execute(struct ia64_nfw_context *c) { struct ia64_pdesc virt_entry; ia64_sal_handler entry; virt_entry.ip = virt_map(c->ip); virt_entry.gp = virt_map(c->gp); entry = (ia64_sal_handler) &virt_entry; IA64_FW_CALL(entry, c->ret, c->arg[0], c->arg[1], c->arg[2], c->arg[3], c->arg[4], c->arg[5], c->arg[6], c->arg[7]); } static void aml_nfw_read_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: *value = *(u8 *)offset; break; case 16: *value = *(u16 *)offset; break; case 32: *value = *(u32 *)offset; break; case 64: *value = *(u64 *)offset; break; } } static void aml_nfw_write_arg(u8 *offset, u32 bit_width, u64 *value) { switch (bit_width) { case 8: *(u8 *) offset = *value; break; case 16: *(u16 *) offset = *value; break; case 32: *(u32 *) offset = *value; break; case 64: *(u64 *) offset = *value; break; } } static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address, u32 bit_width, u64 *value, void *handler_context, void *region_context) { struct ia64_nfw_context *context = handler_context; u8 *offset = (u8 *) context + address; if (bit_width != 8 && bit_width != 16 && bit_width != 32 && bit_width != 64) return AE_BAD_PARAMETER; if (address + (bit_width >> 3) > sizeof(struct ia64_nfw_context)) return AE_BAD_PARAMETER; switch (function) { case ACPI_READ: if (address == offsetof(struct ia64_nfw_context, ret)) aml_nfw_execute(context); aml_nfw_read_arg(offset, bit_width, value); break; case ACPI_WRITE: aml_nfw_write_arg(offset, bit_width, value); break; } return AE_OK; } static struct ia64_nfw_context global_context; static int global_handler_registered; static int aml_nfw_add_global_handler(void) { acpi_status status; if (global_handler_registered) return 0; status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, AML_NFW_SPACE, aml_nfw_handler, NULL, &global_context); if (ACPI_FAILURE(status)) 
return -ENODEV; global_handler_registered = 1; printk(KERN_INFO "Global 0x%02X opregion handler registered\n", AML_NFW_SPACE); return 0; } static int aml_nfw_remove_global_handler(void) { acpi_status status; if (!global_handler_registered) return 0; status = acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, AML_NFW_SPACE, aml_nfw_handler); if (ACPI_FAILURE(status)) return -ENODEV; global_handler_registered = 0; printk(KERN_INFO "Global 0x%02X opregion handler removed\n", AML_NFW_SPACE); return 0; } static int aml_nfw_add(struct acpi_device *device) { /* * We would normally allocate a new context structure and install * the address space handler for the specific device we found. * But the HP-UX implementation shares a single global context * and always puts the handler at the root, so we'll do the same. */ return aml_nfw_add_global_handler(); } static void aml_nfw_remove(struct acpi_device *device) { aml_nfw_remove_global_handler(); } static const struct acpi_device_id aml_nfw_ids[] = { {"HPQ5001", 0}, {"", 0} }; static struct acpi_driver acpi_aml_nfw_driver = { .name = "native firmware", .ids = aml_nfw_ids, .ops = { .add = aml_nfw_add, .remove = aml_nfw_remove, }, }; static int __init aml_nfw_init(void) { int result; if (force_register) aml_nfw_add_global_handler(); result = acpi_bus_register_driver(&acpi_aml_nfw_driver); if (result < 0) { aml_nfw_remove_global_handler(); return result; } return 0; } static void __exit aml_nfw_exit(void) { acpi_bus_unregister_driver(&acpi_aml_nfw_driver); aml_nfw_remove_global_handler(); } module_init(aml_nfw_init); module_exit(aml_nfw_exit);
linux-master
arch/ia64/hp/common/aml_nfw.c
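A minimal user-space sketch (added by the editor, not from the kernel) of the offset/bit-width dispatch that aml_nfw_handler() uses to expose its context structure to AML. struct demo_context and demo_read() are made-up stand-ins; no ACPI interfaces are touched.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct demo_context {
	uint64_t arg[8];
	uint64_t ret;
};

/* width-dispatched read with bounds check, mirroring aml_nfw_read_arg() */
static int demo_read(struct demo_context *c, size_t address,
		     unsigned bit_width, uint64_t *value)
{
	uint8_t *offset = (uint8_t *)c + address;

	if (bit_width != 8 && bit_width != 16 &&
	    bit_width != 32 && bit_width != 64)
		return -1;
	if (address + (bit_width >> 3) > sizeof(*c))
		return -1;

	switch (bit_width) {
	case 8:  *value = *(uint8_t  *)offset; break;
	case 16: *value = *(uint16_t *)offset; break;
	case 32: *value = *(uint32_t *)offset; break;
	case 64: *value = *(uint64_t *)offset; break;
	}
	return 0;
}

int main(void)
{
	struct demo_context c;
	uint64_t v;

	memset(&c, 0, sizeof(c));
	c.arg[2] = 0xdeadbeef;

	/* 32-bit read returns the low word of arg[2] on a little-endian host */
	if (!demo_read(&c, offsetof(struct demo_context, arg[2]), 32, &v))
		printf("arg[2] low word = 0x%llx\n", (unsigned long long)v);
	return 0;
}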
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV Core Functions * * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ #include <linux/acpi.h> #include <linux/efi.h> #include <linux/module.h> #include <linux/percpu.h> #include <asm/uv/uv.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> bool ia64_is_uv; EXPORT_SYMBOL_GPL(ia64_is_uv); DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); struct redir_addr { unsigned long redirect; unsigned long alias; }; #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT static __initdata struct redir_addr redir_addrs[] = { {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, }; static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) { union uvh_si_alias0_overlay_config_u alias; union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; int i; for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { alias.v = uv_read_local_mmr(redir_addrs[i].alias); if (alias.s.base == 0) { *size = (1UL << alias.s.m_alias); redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; return; } } BUG(); } void __init uv_probe_system_type(void) { struct acpi_table_rsdp *rsdp; struct acpi_table_xsdt *xsdt; if (efi.acpi20 == EFI_INVALID_TABLE_ADDR) { pr_err("ACPI 2.0 RSDP not found.\n"); return; } rsdp = (struct acpi_table_rsdp *)__va(efi.acpi20); if (strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1)) { pr_err("ACPI 2.0 RSDP signature incorrect.\n"); return; } xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address); if (strncmp(xsdt->header.signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1)) { pr_err("ACPI 2.0 XSDT signature incorrect.\n"); return; } if (!strcmp(xsdt->header.oem_id, "SGI") && !strcmp(xsdt->header.oem_table_id + 4, "UV")) ia64_is_uv = true; } void __init uv_setup(char **cmdline_p) { union uvh_si_addr_map_config_u m_n_config; union uvh_node_id_u node_id; unsigned long gnode_upper; int nid, cpu, m_val, n_val; unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size; get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); node_id.v = uv_read_local_mmr(UVH_NODE_ID); m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE; m_val = m_n_config.s.m_skt; n_val = m_n_config.s.n_skt; printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); gnode_upper = (((unsigned long)node_id.s.node_id) & ~((1 << n_val) - 1)) << m_val; for_each_present_cpu(cpu) { nid = cpu_to_node(cpu); uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_base + lowmem_redir_size; uv_cpu_hub_info(cpu)->m_val = m_val; uv_cpu_hub_info(cpu)->n_val = n_val; uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1; uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid); } }
linux-master
arch/ia64/uv/kernel/setup.c
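Editor-added sketch of the mask arithmetic uv_setup() derives from the m_skt/n_skt fields; the m_val, n_val and node_id values below are invented for illustration rather than read from UV MMRs.

#include <stdio.h>

int main(void)
{
	int m_val = 28, n_val = 6;                    /* made-up socket address bits */
	unsigned long node_id = 0x6b;                 /* made-up node id */

	unsigned long pnode_mask  = (1UL << n_val) - 1;
	unsigned long gpa_mask    = (1UL << (m_val + n_val)) - 1;
	unsigned long gnode_upper = (node_id & ~((1UL << n_val) - 1)) << m_val;

	printf("pnode_mask=0x%lx gpa_mask=0x%lx gnode_upper=0x%lx\n",
	       pnode_mask, gpa_mask, gnode_upper);
	return 0;
}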
int __attribute__ ((__model__ (__small__))) x;
linux-master
arch/ia64/scripts/check-model.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/types.h> #include <asm/io.h> /* * Copy data from IO memory space to "real" memory space. * This needs to be optimized. */ void memcpy_fromio(void *to, const volatile void __iomem *from, long count) { char *dst = to; while (count) { count--; *dst++ = readb(from++); } } EXPORT_SYMBOL(memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. * This needs to be optimized. */ void memcpy_toio(volatile void __iomem *to, const void *from, long count) { const char *src = from; while (count) { count--; writeb(*src++, to++); } } EXPORT_SYMBOL(memcpy_toio); /* * "memset" on IO memory space. * This needs to be optimized. */ void memset_io(volatile void __iomem *dst, int c, long count) { unsigned char ch = (char)(c & 0xff); while (count) { count--; writeb(ch, dst); dst++; } } EXPORT_SYMBOL(memset_io);
linux-master
arch/ia64/lib/io.c
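A small user-space sketch (editor addition) of the same byte-at-a-time pattern memcpy_fromio() uses; demo_readb() and the fake_mmio buffer are stand-ins for readb() and a real device window, chosen so the snippet runs without any hardware.

#include <stdio.h>

/* user-space stand-in for readb(): one byte at a time through a volatile pointer */
static unsigned char demo_readb(const volatile unsigned char *p)
{
	return *p;
}

static void demo_memcpy_fromio(void *to, const volatile unsigned char *from, long count)
{
	char *dst = to;

	while (count) {
		count--;
		*dst++ = demo_readb(from++);
	}
}

int main(void)
{
	volatile unsigned char fake_mmio[8] = { 'i', 'a', '6', '4', 0, 0, 0, 0 };
	char buf[8];

	demo_memcpy_fromio(buf, fake_mmio, sizeof(buf));
	printf("%.4s\n", buf);
	return 0;
}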
// SPDX-License-Identifier: GPL-2.0 /* * Network Checksum & Copy routine * * Copyright (C) 1999, 2003-2004 Hewlett-Packard Co * Stephane Eranian <[email protected]> * * Most of the code has been imported from Linux/Alpha */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <net/checksum.h> /* * XXX Fixme: those 2 inlines are meant for debugging and will go away */ static inline unsigned short from64to16(unsigned long x) { /* add up 32-bit words for 33 bits */ x = (x & 0xffffffff) + (x >> 32); /* add up 16-bit and 17-bit words for 17+c bits */ x = (x & 0xffff) + (x >> 16); /* add up 16-bit and 2-bit for 16+c bit */ x = (x & 0xffff) + (x >> 16); /* add up carry.. */ x = (x & 0xffff) + (x >> 16); return x; } static inline unsigned long do_csum_c(const unsigned char * buff, int len, unsigned int psum) { int odd, count; unsigned long result = (unsigned long)psum; if (len <= 0) goto out; odd = 1 & (unsigned long) buff; if (odd) { result = *buff << 8; len--; buff++; } count = len >> 1; /* nr of 16-bit words.. */ if (count) { if (2 & (unsigned long) buff) { result += *(unsigned short *) buff; count--; len -= 2; buff += 2; } count >>= 1; /* nr of 32-bit words.. */ if (count) { if (4 & (unsigned long) buff) { result += *(unsigned int *) buff; count--; len -= 4; buff += 4; } count >>= 1; /* nr of 64-bit words.. */ if (count) { unsigned long carry = 0; do { unsigned long w = *(unsigned long *) buff; count--; buff += 8; result += carry; result += w; carry = (w > result); } while (count); result += carry; result = (result & 0xffffffff) + (result >> 32); } if (len & 4) { result += *(unsigned int *) buff; buff += 4; } } if (len & 2) { result += *(unsigned short *) buff; buff += 2; } } if (len & 1) result += *buff; result = from64to16(result); if (odd) result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); out: return result; }
linux-master
arch/ia64/lib/csum_partial_copy.c
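Editor's sketch of the overflow-carry trick in the do_csum_c() inner loop (carry = (w > result)) together with the 64-to-16-bit fold. The input words are arbitrary test values, and the carry is accumulated separately here and added once at the end, a slight variant of the loop above.

#include <stdio.h>
#include <stdint.h>

/* ones'-complement style accumulation, as in the do_csum_c() inner loop */
static uint64_t add64_with_carry(uint64_t sum, uint64_t w, uint64_t *carry)
{
	sum += w;
	*carry += (w > sum);   /* unsigned overflow detected, folded back in later */
	return sum;
}

/* same reduction as from64to16() */
static uint16_t fold64to16(uint64_t x)
{
	x = (x & 0xffffffff) + (x >> 32);
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	x = (x & 0xffff) + (x >> 16);
	return (uint16_t)x;
}

int main(void)
{
	uint64_t words[] = { 0xffffffffffffffffULL, 0x1ULL, 0x1234ULL };
	uint64_t sum = 0, carry = 0;

	for (unsigned i = 0; i < 3; i++)
		sum = add64_with_carry(sum, words[i], &carry);
	sum += carry;

	printf("folded checksum = 0x%04x\n", (unsigned)fold64to16(sum));
	return 0;
}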
// SPDX-License-Identifier: GPL-2.0 /* * Network checksum routines * * Copyright (C) 1999, 2003 Hewlett-Packard Co * Stephane Eranian <[email protected]> * * Most of the code coming from arch/alpha/lib/checksum.c * * This file contains network checksum routines that are better done * in an architecture-specific manner due to speed.. */ #include <linux/module.h> #include <linux/string.h> #include <asm/byteorder.h> static inline unsigned short from64to16 (unsigned long x) { /* add up 32-bit words for 33 bits */ x = (x & 0xffffffff) + (x >> 32); /* add up 16-bit and 17-bit words for 17+c bits */ x = (x & 0xffff) + (x >> 16); /* add up 16-bit and 2-bit for 16+c bit */ x = (x & 0xffff) + (x >> 16); /* add up carry.. */ x = (x & 0xffff) + (x >> 16); return x; } /* * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented. */ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { return (__force __sum16)~from64to16( (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8)); } EXPORT_SYMBOL(csum_tcpudp_magic); __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum sum) { unsigned long result; result = (__force u64)saddr + (__force u64)daddr + (__force u64)sum + ((len + proto) << 8); /* Fold down to 32-bits so we don't lose in the typedef-less network stack. */ /* 64 to 33 */ result = (result & 0xffffffff) + (result >> 32); /* 33 to 32 */ result = (result & 0xffffffff) + (result >> 32); return (__force __wsum)result; } EXPORT_SYMBOL(csum_tcpudp_nofold); extern unsigned long do_csum (const unsigned char *, long); /* * computes the checksum of a memory block at buff, length len, * and adds in "sum" (32-bit) * * returns a 32-bit number suitable for feeding into itself * or csum_tcpudp_magic * * this function must be called with even lengths, except * for the last fragment, which may be odd * * it's best to have buff aligned on a 32-bit boundary */ __wsum csum_partial(const void *buff, int len, __wsum sum) { u64 result = do_csum(buff, len); /* add in old sum, and carry.. */ result += (__force u32)sum; /* 32+c bits -> 32 bits */ result = (result & 0xffffffff) + (result >> 32); return (__force __wsum)result; } EXPORT_SYMBOL(csum_partial); /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ __sum16 ip_compute_csum (const void *buff, int len) { return (__force __sum16)~do_csum(buff,len); } EXPORT_SYMBOL(ip_compute_csum);
linux-master
arch/ia64/lib/checksum.c
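An editor-added, self-contained illustration of the Internet-checksum property these helpers rely on: storing the complemented sum in the packet makes a re-check fold to 0xffff. The 16-bit word loop here is a plain RFC 1071 style reference, not the ia64 do_csum(), and the packet bytes are made up.

#include <stdio.h>
#include <stdint.h>

/* straightforward 16-bit ones'-complement sum over big-endian words */
static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {
		sum += (buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += buf[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd, 0x00, 0x00 };
	uint16_t c = ~csum16(pkt, sizeof(pkt));       /* complement goes on the wire */

	pkt[6] = c >> 8;                              /* store it in the checksum field */
	pkt[7] = c & 0xff;
	printf("verify = 0x%04x (0xffff means OK)\n", csum16(pkt, sizeof(pkt)));
	return 0;
}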
// SPDX-License-Identifier: GPL-2.0 /* * IA-64 Huge TLB Page Support for Kernel. * * Copyright (C) 2002-2004 Rohit Seth <[email protected]> * Copyright (C) 2003-2004 Ken Chen <[email protected]> * * Sep, 2003: add numa support * Feb, 2004: dynamic hugetlb page size via boot parameter */ #include <linux/init.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/module.h> #include <linux/sysctl.h> #include <linux/log2.h> #include <asm/mman.h> #include <asm/tlb.h> #include <asm/tlbflush.h> unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, taddr); p4d = p4d_offset(pgd, taddr); pud = pud_alloc(mm, p4d, taddr); if (pud) { pmd = pmd_alloc(mm, pud, taddr); if (pmd) pte = pte_alloc_huge(mm, pmd, taddr); } return pte; } pte_t * huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte = NULL; pgd = pgd_offset(mm, taddr); if (pgd_present(*pgd)) { p4d = p4d_offset(pgd, taddr); if (p4d_present(*p4d)) { pud = pud_offset(p4d, taddr); if (pud_present(*pud)) { pmd = pmd_offset(pud, taddr); if (pmd_present(*pmd)) pte = pte_offset_huge(pmd, taddr); } } } return pte; } #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; } /* * Don't actually need to do any preparation, but need to make sure * the address is in the right region. */ int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { if (len & ~HPAGE_MASK) return -EINVAL; if (addr & ~HPAGE_MASK) return -EINVAL; if (REGION_NUMBER(addr) != RGN_HPAGE) return -EINVAL; return 0; } int pmd_huge(pmd_t pmd) { return 0; } int pud_huge(pud_t pud) { return 0; } void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) { /* * This is called to free hugetlb page tables. * * The offset of these addresses from the base of the hugetlb * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that * the standard free_pgd_range will free the right page tables. * * If floor and ceiling are also in the hugetlb region, they * must likewise be scaled down; but if outside, left unchanged. */ addr = htlbpage_to_page(addr); end = htlbpage_to_page(end); if (REGION_NUMBER(floor) == RGN_HPAGE) floor = htlbpage_to_page(floor); if (REGION_NUMBER(ceiling) == RGN_HPAGE) ceiling = htlbpage_to_page(ceiling); free_pgd_range(tlb, addr, end, floor, ceiling); } unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_unmapped_area_info info; if (len > RGN_MAP_LIMIT) return -ENOMEM; if (len & ~HPAGE_MASK) return -EINVAL; /* Handle MAP_FIXED */ if (flags & MAP_FIXED) { if (prepare_hugepage_range(file, addr, len)) return -EINVAL; return addr; } /* This code assumes that RGN_HPAGE != 0. 
*/ if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1))) addr = HPAGE_REGION_BASE; info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); info.align_offset = 0; return vm_unmapped_area(&info); } static int __init hugetlb_setup_sz(char *str) { u64 tr_pages; unsigned long long size; if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0) /* * shouldn't happen, but just in case. */ tr_pages = 0x15557000UL; size = memparse(str, &str); if (*str || !is_power_of_2(size) || !(tr_pages & size) || size <= PAGE_SIZE || size > (1UL << PAGE_SHIFT << MAX_ORDER)) { printk(KERN_WARNING "Invalid huge page size specified\n"); return 1; } hpage_shift = __ffs(size); /* * boot cpu already executed ia64_mmu_init, and has HPAGE_SHIFT_DEFAULT * override here with new page shift. */ ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2); return 0; } early_param("hugepagesz", hugetlb_setup_sz);
linux-master
arch/ia64/mm/hugetlbpage.c
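Editor's sketch of the size validation hugetlb_setup_sz() performs: reject anything that is not a power of two, then take its log2 as the new page shift. The candidate sizes are arbitrary and the PAL-supported-TR-page cross-check is deliberately omitted.

#include <stdio.h>
#include <stdbool.h>

static bool is_pow2(unsigned long x)
{
	return x && !(x & (x - 1));
}

/* count trailing zero bits, i.e. log2 of a power of two (like __ffs) */
static unsigned long log2_pow2(unsigned long x)
{
	unsigned long s = 0;

	while (!(x & 1)) {
		x >>= 1;
		s++;
	}
	return s;
}

int main(void)
{
	unsigned long sizes[] = { 4096, 65536, 3 << 20, 256 << 20 };

	for (int i = 0; i < 4; i++) {
		unsigned long sz = sizes[i];

		if (is_pow2(sz))
			printf("%lu bytes -> page shift %lu\n", sz, log2_pow2(sz));
		else
			printf("%lu bytes rejected (not a power of two)\n", sz);
	}
	return 0;
}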
// SPDX-License-Identifier: GPL-2.0 /* * Initialize MMU support. * * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/dma-map-ops.h> #include <linux/dmar.h> #include <linux/efi.h> #include <linux/elf.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/mmzone.h> #include <linux/module.h> #include <linux/personality.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/swap.h> #include <linux/proc_fs.h> #include <linux/bitops.h> #include <linux/kexec.h> #include <linux/swiotlb.h> #include <asm/dma.h> #include <asm/efi.h> #include <asm/io.h> #include <asm/numa.h> #include <asm/patch.h> #include <asm/pgalloc.h> #include <asm/sal.h> #include <asm/sections.h> #include <asm/tlb.h> #include <linux/uaccess.h> #include <asm/unistd.h> #include <asm/mca.h> extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; struct page *zero_page_memmap_ptr; /* map entry for zero page */ EXPORT_SYMBOL(zero_page_memmap_ptr); void __ia64_sync_icache_dcache (pte_t pte) { unsigned long addr; struct folio *folio; folio = page_folio(pte_page(pte)); addr = (unsigned long)folio_address(folio); if (test_bit(PG_arch_1, &folio->flags)) return; /* i-cache is already coherent with d-cache */ flush_icache_range(addr, addr + folio_size(folio)); set_bit(PG_arch_1, &folio->flags); /* mark page as clean */ } /* * Since DMA is i-cache coherent, any (complete) folios that were written via * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to * flush them when they get mapped into an executable vm-area. */ void arch_dma_mark_clean(phys_addr_t paddr, size_t size) { unsigned long pfn = PHYS_PFN(paddr); struct folio *folio = page_folio(pfn_to_page(pfn)); ssize_t left = size; size_t offset = offset_in_folio(folio, paddr); if (offset) { left -= folio_size(folio) - offset; if (left <= 0) return; folio = folio_next(folio); } while (left >= (ssize_t)folio_size(folio)) { left -= folio_size(folio); set_bit(PG_arch_1, &pfn_to_page(pfn)->flags); if (!left) break; folio = folio_next(folio); } } inline void ia64_set_rbs_bot (void) { unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16; if (stack_size > MAX_USER_STACK_SIZE) stack_size = MAX_USER_STACK_SIZE; current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size); } /* * This performs some platform-dependent address space initialization. * On IA-64, we want to setup the VM area for the register backing * store (which grows upwards) and install the gateway page which is * used for signal trampolines, etc. */ void ia64_init_addr_space (void) { struct vm_area_struct *vma; ia64_set_rbs_bot(); /* * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore * the problem. When the process attempts to write to the register backing store * for the first time, it will get a SEGFAULT in this case. 
*/ vma = vm_area_alloc(current->mm); if (vma) { vma_set_anonymous(vma); vma->vm_start = current->thread.rbs_bot & PAGE_MASK; vma->vm_end = vma->vm_start + PAGE_SIZE; vm_flags_init(vma, VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); mmap_write_lock(current->mm); if (insert_vm_struct(current->mm, vma)) { mmap_write_unlock(current->mm); vm_area_free(vma); return; } mmap_write_unlock(current->mm); } /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */ if (!(current->personality & MMAP_PAGE_ZERO)) { vma = vm_area_alloc(current->mm); if (vma) { vma_set_anonymous(vma); vma->vm_end = PAGE_SIZE; vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT); vm_flags_init(vma, VM_READ | VM_MAYREAD | VM_IO | VM_DONTEXPAND | VM_DONTDUMP); mmap_write_lock(current->mm); if (insert_vm_struct(current->mm, vma)) { mmap_write_unlock(current->mm); vm_area_free(vma); return; } mmap_write_unlock(current->mm); } } } void free_initmem (void) { free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end), -1, "unused kernel"); } void __init free_initrd_mem (unsigned long start, unsigned long end) { /* * EFI uses 4KB pages while the kernel can use 4KB or bigger. * Thus EFI and the kernel may have different page sizes. It is * therefore possible to have the initrd share the same page as * the end of the kernel (given current setup). * * To avoid freeing/using the wrong page (kernel sized) we: * - align up the beginning of initrd * - align down the end of initrd * * | | * |=============| a000 * | | * | | * | | 9000 * |/////////////| * |/////////////| * |=============| 8000 * |///INITRD////| * |/////////////| * |/////////////| 7000 * | | * |KKKKKKKKKKKKK| * |=============| 6000 * |KKKKKKKKKKKKK| * |KKKKKKKKKKKKK| * K=kernel using 8KB pages * * In this example, we must free page 8000 ONLY. So we must align up * initrd_start and keep initrd_end as is. */ start = PAGE_ALIGN(start); end = end & PAGE_MASK; if (start < end) printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10); for (; start < end; start += PAGE_SIZE) { if (!virt_addr_valid(start)) continue; free_reserved_page(virt_to_page(start)); } } /* * This installs a clean page in the kernel's page table. */ static struct page * __init put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */ { p4d = p4d_alloc(&init_mm, pgd, address); if (!p4d) goto out; pud = pud_alloc(&init_mm, p4d, address); if (!pud) goto out; pmd = pmd_alloc(&init_mm, pud, address); if (!pmd) goto out; pte = pte_alloc_kernel(pmd, address); if (!pte) goto out; if (!pte_none(*pte)) goto out; set_pte(pte, mk_pte(page, pgprot)); } out: /* no need for flush_tlb */ return page; } static void __init setup_gate (void) { struct page *page; /* * Map the gate page twice: once read-only to export the ELF * headers etc. 
and once execute-only page to enable * privilege-promotion via "epc": */ page = virt_to_page(ia64_imva(__start_gate_section)); put_kernel_page(page, GATE_ADDR, PAGE_READONLY); #ifdef HAVE_BUGGY_SEGREL page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); #else put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); /* Fill in the holes (if any) with read-only zero pages: */ { unsigned long addr; for (addr = GATE_ADDR + PAGE_SIZE; addr < GATE_ADDR + PERCPU_PAGE_SIZE; addr += PAGE_SIZE) { put_kernel_page(ZERO_PAGE(0), addr, PAGE_READONLY); put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE, PAGE_READONLY); } } #endif ia64_patch_gate(); } static struct vm_area_struct gate_vma; static int __init gate_vma_init(void) { vma_init(&gate_vma, NULL); gate_vma.vm_start = FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; vm_flags_init(&gate_vma, VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC); gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX); return 0; } __initcall(gate_vma_init); struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return &gate_vma; } int in_gate_area_no_mm(unsigned long addr) { if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) return 1; return 0; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { return in_gate_area_no_mm(addr); } void ia64_mmu_init(void *my_cpu_data) { unsigned long pta, impl_va_bits; extern void tlb_init(void); #ifdef CONFIG_DISABLE_VHPT # define VHPT_ENABLE_BIT 0 #else # define VHPT_ENABLE_BIT 1 #endif /* * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped * address space. The IA-64 architecture guarantees that at least 50 bits of * virtual address space are implemented but if we pick a large enough page size * (e.g., 64KB), the mapped address space is big enough that it will overlap with * VMLPT. I assume that once we run on machines big enough to warrant 64KB pages, * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a * problem in practice. Alternatively, we could truncate the top of the mapped * address space to not permit mappings that would overlap with the VMLPT. * --davidm 00/12/06 */ # define pte_bits 3 # define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT) /* * The virtual page table has to cover the entire implemented address space within * a region even though not all of this space may be mappable. The reason for * this is that the Access bit and Dirty bit fault handlers perform * non-speculative accesses to the virtual page table, so the address range of the * virtual page table itself needs to be covered by virtual page table. */ # define vmlpt_bits (impl_va_bits - PAGE_SHIFT + pte_bits) # define POW2(n) (1ULL << (n)) impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61))); if (impl_va_bits < 51 || impl_va_bits > 61) panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1); /* * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need, * which must fit into "vmlpt_bits - pte_bits" slots. Second half of * the test makes sure that our mapped space doesn't overlap the * unimplemented hole in the middle of the region. 
*/ if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) || (mapped_space_bits > impl_va_bits - 1)) panic("Cannot build a big enough virtual-linear page table" " to cover mapped address space.\n" " Try using a smaller page size.\n"); /* place the VMLPT at the end of each page-table mapped region: */ pta = POW2(61) - POW2(vmlpt_bits); /* * Set the (virtually mapped linear) page table address. Bit * 8 selects between the short and long format, bits 2-7 the * size of the table, and bit 0 whether the VHPT walker is * enabled. */ ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT); ia64_tlb_init(); #ifdef CONFIG_HUGETLB_PAGE ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2); ia64_srlz_d(); #endif } int __init register_active_ranges(u64 start, u64 len, int nid) { u64 end = start + len; #ifdef CONFIG_KEXEC if (start > crashk_res.start && start < crashk_res.end) start = crashk_res.end; if (end > crashk_res.start && end < crashk_res.end) end = crashk_res.start; #endif if (start < end) memblock_add_node(__pa(start), end - start, nid, MEMBLOCK_NONE); return 0; } int find_max_min_low_pfn (u64 start, u64 end, void *arg) { unsigned long pfn_start, pfn_end; #ifdef CONFIG_FLATMEM pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT; pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT; #else pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT; pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT; #endif min_low_pfn = min(min_low_pfn, pfn_start); max_low_pfn = max(max_low_pfn, pfn_end); return 0; } /* * Boot command-line option "nolwsys" can be used to disable the use of any light-weight * system call handler. When this option is in effect, all fsyscalls will end up bubbling * down into the kernel and calling the normal (heavy-weight) syscall handler. This is * useful for performance testing, but conceivably could also come in handy for debugging * purposes. */ static int nolwsys __initdata; static int __init nolwsys_setup (char *s) { nolwsys = 1; return 1; } __setup("nolwsys", nolwsys_setup); void __init mem_init (void) { int i; BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE); /* * This needs to be called _after_ the command line has been parsed but * _before_ any drivers that may need the PCI DMA interface are * initialized or bootmem has been freed. */ do { #ifdef CONFIG_INTEL_IOMMU detect_intel_iommu(); if (iommu_detected) break; #endif swiotlb_init(true, SWIOTLB_VERBOSE); } while (0); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); /* * For fsyscall entrypoints with no light-weight handler, use the ordinary * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry * code can tell them apart. 
*/ for (i = 0; i < NR_syscalls; ++i) { extern unsigned long fsyscall_table[NR_syscalls]; extern unsigned long sys_call_table[NR_syscalls]; if (!fsyscall_table[i] || nolwsys) fsyscall_table[i] = sys_call_table[i] | 1; } setup_gate(); } #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot)) return -EINVAL; ret = __add_pages(nid, start_pfn, nr_pages, params); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", __func__, ret); return ret; } void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; __remove_pages(start_pfn, nr_pages, altmap); } #endif static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY, [VM_WRITE] = PAGE_READONLY, [VM_WRITE | VM_READ] = PAGE_READONLY, [VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX), [VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX), [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READONLY, [VM_SHARED | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, [VM_SHARED | VM_EXEC] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX), [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX), [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX), [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) }; DECLARE_VM_GET_PAGE_PROT
linux-master
arch/ia64/mm/init.c
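A note on the entry above: ia64_mmu_init() sizes and places the virtually mapped linear page table (VMLPT) from PAGE_SHIFT and the implemented VA width before programming the PTA register. The following standalone sketch, which is not part of the kernel tree, just reproduces that arithmetic for assumed values of PAGE_SHIFT and impl_va_bits so the overflow checks can be seen in isolation.

/*
 * Standalone sketch (not kernel code): the VMLPT sizing arithmetic from
 * ia64_mmu_init(), for an assumed configuration.
 */
#include <stdio.h>

#define PAGE_SHIFT        14      /* assumption: 16KB pages */
#define pte_bits          3       /* log2(sizeof(pte_t)) */
#define mapped_space_bits (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
#define POW2(n)           (1ULL << (n))

int main(void)
{
	unsigned long impl_va_bits = 51;  /* assumption: architectural minimum */
	unsigned long vmlpt_bits = impl_va_bits - PAGE_SHIFT + pte_bits;
	unsigned long long pta = POW2(61) - POW2(vmlpt_bits);

	/* same sanity checks the kernel applies before ia64_set_pta() */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		printf("VMLPT cannot cover the mapped address space\n");
	else
		printf("mapped_space_bits=%d vmlpt_bits=%lu pta=%#llx\n",
		       mapped_space_bits, vmlpt_bits, pta);
	return 0;
}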
// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel exception handling table support.  Derived from arch/alpha/mm/extable.c.
 *
 * Copyright (C) 1998, 1999, 2001-2002, 2004 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */

#include <asm/ptrace.h>
#include <asm/extable.h>
#include <asm/errno.h>
#include <asm/processor.h>

void
ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e)
{
	long fix = (u64) &e->fixup + e->fixup;

	regs->r8 = -EFAULT;
	if (fix & 4)
		regs->r9 = 0;
	regs->cr_iip = fix & ~0xf;
	ia64_psr(regs)->ri = fix & 0x3;	/* set continuation slot number */
}
linux-master
arch/ia64/mm/extable.c
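For the entry above: the fixup value computed in ia64_handle_exception() packs a bundle address, a slot number, and a "clear r9" flag into one word. The sketch below, not kernel code, decodes an assumed example value the same way the handler does.

/*
 * Standalone sketch (not kernel code): decoding a fixup value as
 * ia64_handle_exception() does.  The sample value is an assumption.
 */
#include <stdio.h>

int main(void)
{
	unsigned long fix = 0xa000000100234567UL;  /* assumed example */

	unsigned long bundle = fix & ~0xfUL;   /* 16-byte bundle address -> cr.iip */
	unsigned int  slot   = fix & 0x3;      /* continuation slot number -> psr.ri */
	int clear_r9         = (fix & 4) != 0; /* bit 2: also zero r9 */

	printf("bundle=%#lx slot=%u clear_r9=%d\n", bundle, slot, clear_r9);
	return 0;
}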
// SPDX-License-Identifier: GPL-2.0-only /* * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <[email protected]> */ #include <linux/compiler.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <asm/io.h> #include <asm/meminit.h> static inline void __iomem * __ioremap_uc(unsigned long phys_addr) { return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size) { u64 attr; attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return (void __iomem *) phys_to_virt(phys_addr); return __ioremap_uc(phys_addr); } void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, unsigned long flags) { u64 attr; unsigned long gran_base, gran_size; unsigned long page_base; /* * For things in kern_memmap, we must use the same attribute * as the rest of the kernel. For more details, see * Documentation/arch/ia64/aliasing.rst. */ attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return (void __iomem *) phys_to_virt(phys_addr); else if (attr & EFI_MEMORY_UC) return __ioremap_uc(phys_addr); /* * Some chipsets don't support UC access to memory. If * WB is supported for the whole granule, we prefer that. */ gran_base = GRANULEROUNDDOWN(phys_addr); gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base; if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB) return (void __iomem *) phys_to_virt(phys_addr); /* * WB is not supported for the whole granule, so we can't use * the region 7 identity mapping. If we can safely cover the * area with kernel page table mappings, we can use those * instead. */ page_base = phys_addr & PAGE_MASK; size = PAGE_ALIGN(phys_addr + size) - page_base; if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) return generic_ioremap_prot(phys_addr, size, __pgprot(flags)); return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap_prot); void __iomem * ioremap_uc(unsigned long phys_addr, unsigned long size) { if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) return NULL; return __ioremap_uc(phys_addr); } EXPORT_SYMBOL(ioremap_uc); void early_iounmap (volatile void __iomem *addr, unsigned long size) { } void iounmap(volatile void __iomem *addr) { if (REGION_NUMBER(addr) == RGN_GATE) vunmap((void *) ((unsigned long) addr & PAGE_MASK)); } EXPORT_SYMBOL(iounmap);
linux-master
arch/ia64/mm/ioremap.c
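For the entry above: ioremap_prot() picks a mapping strategy from the EFI memory attributes, preferring a cacheable identity mapping, then the uncached alias, then granule or page-table fallbacks. The sketch below is a simplified userspace model of that decision chain; the attribute lookup is a stub and the EFI flag values are restated here as assumptions.

/*
 * Standalone sketch (not kernel code): a simplified model of the
 * attribute decision chain in ioremap_prot().
 */
#include <stdio.h>

#define EFI_MEMORY_UC 0x1ULL   /* assumed flag values for the demo */
#define EFI_MEMORY_WB 0x8ULL

/* stand-in for kern_mem_attribute(): pretend low memory is write-back */
static unsigned long long attr_of(unsigned long long phys)
{
	return phys < 0x100000000ULL ? EFI_MEMORY_WB : EFI_MEMORY_UC;
}

static const char *map_kind(unsigned long long phys)
{
	unsigned long long attr = attr_of(phys);

	if (attr & EFI_MEMORY_WB)
		return "cacheable identity mapping (phys_to_virt)";
	if (attr & EFI_MEMORY_UC)
		return "uncached alias (__ioremap_uc)";
	return "granule/page-table fallback";
}

int main(void)
{
	printf("%#llx -> %s\n", 0x1000ULL, map_kind(0x1000ULL));
	printf("%#llx -> %s\n", 0x200000000ULL, map_kind(0x200000000ULL));
	return 0;
}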
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Stephane Eranian <[email protected]> * Copyright (C) 2000, Rohit Seth <[email protected]> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <[email protected]> * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved. * * Routines used by ia64 machines with contiguous (or virtually contiguous) * memory. */ #include <linux/efi.h> #include <linux/memblock.h> #include <linux/mm.h> #include <linux/nmi.h> #include <linux/swap.h> #include <linux/sizes.h> #include <asm/efi.h> #include <asm/meminit.h> #include <asm/sections.h> #include <asm/mca.h> /* physical address where the bootmem map is located */ unsigned long bootmap_start; #ifdef CONFIG_SMP static void *cpu_data; /** * per_cpu_init - setup per-cpu variables * * Allocate and setup per-cpu data areas. */ void *per_cpu_init(void) { static bool first_time = true; void *cpu0_data = __cpu0_per_cpu; unsigned int cpu; if (!first_time) goto skip; first_time = false; /* * get_free_pages() cannot be used before cpu_init() done. * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs * to avoid that AP calls get_zeroed_page(). */ for_each_possible_cpu(cpu) { void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start; memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start); __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start; per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; /* * percpu area for cpu0 is moved from the __init area * which is setup by head.S and used till this point. * Update ar.k3. This move is ensures that percpu * area for cpu0 is on the correct node and its * virtual address isn't insanely far from other * percpu areas which is important for congruent * percpu allocator. */ if (cpu == 0) ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) - (unsigned long)__per_cpu_start); cpu_data += PERCPU_PAGE_SIZE; } skip: return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; } static inline __init void alloc_per_cpu_data(void) { size_t size = PERCPU_PAGE_SIZE * num_possible_cpus(); cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!cpu_data) panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); } /** * setup_per_cpu_areas - setup percpu areas * * Arch code has already allocated and initialized percpu areas. All * this function has to do is to teach the determined layout to the * dynamic percpu allocator, which happens to be more complex than * creating whole new ones using helpers. 
*/ void __init setup_per_cpu_areas(void) { struct pcpu_alloc_info *ai; struct pcpu_group_info *gi; unsigned int cpu; ssize_t static_size, reserved_size, dyn_size; ai = pcpu_alloc_alloc_info(1, num_possible_cpus()); if (!ai) panic("failed to allocate pcpu_alloc_info"); gi = &ai->groups[0]; /* units are assigned consecutively to possible cpus */ for_each_possible_cpu(cpu) gi->cpu_map[gi->nr_units++] = cpu; /* set parameters */ static_size = __per_cpu_end - __per_cpu_start; reserved_size = PERCPU_MODULE_RESERVE; dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; if (dyn_size < 0) panic("percpu area overflow static=%zd reserved=%zd\n", static_size, reserved_size); ai->static_size = static_size; ai->reserved_size = reserved_size; ai->dyn_size = dyn_size; ai->unit_size = PERCPU_PAGE_SIZE; ai->atom_size = PAGE_SIZE; ai->alloc_size = PERCPU_PAGE_SIZE; pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]); pcpu_free_alloc_info(ai); } #else #define alloc_per_cpu_data() do { } while (0) #endif /* CONFIG_SMP */ /** * find_memory - setup memory map * * Walk the EFI memory map and find usable memory for the system, taking * into account reserved areas. */ void __init find_memory (void) { reserve_memory(); /* first find highest page frame number */ min_low_pfn = ~0UL; max_low_pfn = 0; efi_memmap_walk(find_max_min_low_pfn, NULL); max_pfn = max_low_pfn; memblock_add_node(0, PFN_PHYS(max_low_pfn), 0, MEMBLOCK_NONE); find_initrd(); alloc_per_cpu_data(); } static int __init find_largest_hole(u64 start, u64 end, void *arg) { u64 *max_gap = arg; static u64 last_end = PAGE_OFFSET; /* NOTE: this algorithm assumes efi memmap table is ordered */ if (*max_gap < (start - last_end)) *max_gap = start - last_end; last_end = end; return 0; } static void __init verify_gap_absence(void) { unsigned long max_gap; /* Forbid FLATMEM if hole is > than 1G */ efi_memmap_walk(find_largest_hole, (u64 *)&max_gap); if (max_gap >= SZ_1G) panic("Cannot use FLATMEM with %ldMB hole\n" "Please switch over to SPARSEMEM\n", (max_gap >> 20)); } /* * Set up the page tables. */ void __init paging_init (void) { unsigned long max_dma; unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; max_zone_pfns[ZONE_DMA32] = max_dma; max_zone_pfns[ZONE_NORMAL] = max_low_pfn; verify_gap_absence(); free_area_init(max_zone_pfns); zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); }
linux-master
arch/ia64/mm/contig.c
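For the entry above: setup_per_cpu_areas() carves each PERCPU_PAGE_SIZE unit into static, reserved, and dynamic regions and panics if the static plus reserved parts overflow the unit. The sketch below replays that sizing check in userspace; all of the sizes are assumptions chosen only to make the arithmetic visible.

/*
 * Standalone sketch (not kernel code): the per-cpu first-chunk sizing
 * check from setup_per_cpu_areas().  All sizes below are assumptions.
 */
#include <stdio.h>

int main(void)
{
	long percpu_page_size = 64 * 1024;   /* assumption: PERCPU_PAGE_SIZE */
	long static_size      = 24 * 1024;   /* assumption: __per_cpu_end - __per_cpu_start */
	long reserved_size    = 8 * 1024;    /* assumption: PERCPU_MODULE_RESERVE */
	long dyn_size         = percpu_page_size - static_size - reserved_size;

	if (dyn_size < 0)
		printf("percpu area overflow: static=%ld reserved=%ld\n",
		       static_size, reserved_size);
	else
		printf("dynamic percpu space per unit: %ld bytes\n", dyn_size);
	return 0;
}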
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * This file contains NUMA specific variables and functions which are used on * NUMA machines with contiguous memory. * * 2002/08/07 Erich Focht <[email protected]> */ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/node.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/module.h> #include <asm/mmzone.h> #include <asm/numa.h> /* * The following structures are usually initialized by ACPI or * similar mechanisms and describe the NUMA characteristics of the machine. */ int num_node_memblks; struct node_memblk_s node_memblk[NR_NODE_MEMBLKS]; struct node_cpuid_s node_cpuid[NR_CPUS] = { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } }; /* * This is a matrix with "distances" between nodes, they should be * proportional to the memory access latency ratios. */ u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; int __node_distance(int from, int to) { return slit_distance(from, to); } EXPORT_SYMBOL(__node_distance); /* Identify which cnode a physical address resides on */ int paddr_to_nid(unsigned long paddr) { int i; for (i = 0; i < num_node_memblks; i++) if (paddr >= node_memblk[i].start_paddr && paddr < node_memblk[i].start_paddr + node_memblk[i].size) break; return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0); } EXPORT_SYMBOL(paddr_to_nid); #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA) void numa_clear_node(int cpu) { unmap_cpu_from_node(cpu, NUMA_NO_NODE); } #ifdef CONFIG_MEMORY_HOTPLUG /* * SRAT information is stored in node_memblk[], then we can use SRAT * information at memory-hot-add if necessary. */ int memory_add_physaddr_to_nid(u64 addr) { int nid = paddr_to_nid(addr); if (nid < 0) return 0; return nid; } EXPORT_SYMBOL(memory_add_physaddr_to_nid); #endif #endif
linux-master
arch/ia64/mm/numa.c
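For the entry above: paddr_to_nid() is a plain linear search of the SRAT-derived node_memblk[] table. The sketch below mirrors that lookup in userspace against a made-up two-node table; the ranges are assumptions for the demo only.

/*
 * Standalone sketch (not kernel code): the linear lookup performed by
 * paddr_to_nid().  The memory-block table below is invented for the demo.
 */
#include <stdio.h>

struct memblk { unsigned long start; unsigned long size; int nid; };

static const struct memblk blks[] = {
	{ 0x00000000UL, 0x40000000UL, 0 },   /* assumed node 0 range */
	{ 0x80000000UL, 0x40000000UL, 1 },   /* assumed node 1 range */
};

static int demo_paddr_to_nid(unsigned long paddr)
{
	unsigned int i;

	for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++)
		if (paddr >= blks[i].start &&
		    paddr < blks[i].start + blks[i].size)
			return blks[i].nid;
	return -1;   /* the kernel returns 0 instead when no SRAT data exists */
}

int main(void)
{
	printf("paddr 0x1000     -> node %d\n", demo_paddr_to_nid(0x1000UL));
	printf("paddr 0x90000000 -> node %d\n", demo_paddr_to_nid(0x90000000UL));
	printf("paddr 0xf0000000 -> node %d\n", demo_paddr_to_nid(0xf0000000UL));
	return 0;
}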
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Tony Luck <[email protected]> * Copyright (c) 2002 NEC Corp. * Copyright (c) 2002 Kimio Suganuma <[email protected]> * Copyright (c) 2004 Silicon Graphics, Inc * Russ Anderson <[email protected]> * Jesse Barnes <[email protected]> * Jack Steiner <[email protected]> */ /* * Platform initialization for Discontig Memory */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/nmi.h> #include <linux/swap.h> #include <linux/memblock.h> #include <linux/acpi.h> #include <linux/efi.h> #include <linux/nodemask.h> #include <linux/slab.h> #include <asm/efi.h> #include <asm/tlb.h> #include <asm/meminit.h> #include <asm/numa.h> #include <asm/sections.h> /* * Track per-node information needed to setup the boot memory allocator, the * per-node areas, and the real VM. */ struct early_node_data { struct ia64_node_data *node_data; unsigned long pernode_addr; unsigned long pernode_size; unsigned long min_pfn; unsigned long max_pfn; }; static struct early_node_data mem_data[MAX_NUMNODES] __initdata; static nodemask_t memory_less_mask __initdata; pg_data_t *pgdat_list[MAX_NUMNODES]; /* * To prevent cache aliasing effects, align per-node structures so that they * start at addresses that are strided by node number. */ #define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024) #define NODEDATA_ALIGN(addr, node) \ ((((addr) + 1024*1024-1) & ~(1024*1024-1)) + \ (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1))) /** * build_node_maps - callback to setup mem_data structs for each node * @start: physical start of range * @len: length of range * @node: node where this range resides * * Detect extents of each piece of memory that we wish to * treat as a virtually contiguous block (i.e. each node). Each such block * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down * if necessary. Any non-existent pages will simply be part of the virtual * memmap. */ static int __init build_node_maps(unsigned long start, unsigned long len, int node) { unsigned long spfn, epfn, end = start + len; epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT; spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT; if (!mem_data[node].min_pfn) { mem_data[node].min_pfn = spfn; mem_data[node].max_pfn = epfn; } else { mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn); mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn); } return 0; } /** * early_nr_cpus_node - return number of cpus on a given node * @node: node to check * * Count the number of cpus on @node. We can't use nr_cpus_node() yet because * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ static int early_nr_cpus_node(int node) { int cpu, n = 0; for_each_possible_early_cpu(cpu) if (node == node_cpuid[cpu].nid) n++; return n; } /** * compute_pernodesize - compute size of pernode data * @node: the node id. 
*/ static unsigned long compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; cpus = early_nr_cpus_node(node); pernodesize += PERCPU_PAGE_SIZE * cpus; pernodesize += node * L1_CACHE_BYTES; pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t)); pernodesize = PAGE_ALIGN(pernodesize); return pernodesize; } /** * per_cpu_node_setup - setup per-cpu areas on each node * @cpu_data: per-cpu area on this node * @node: node to setup * * Copy the static per-cpu data into the region we just set aside and then * setup __per_cpu_offset for each CPU on this node. Return a pointer to * the end of the area. */ static void *per_cpu_node_setup(void *cpu_data, int node) { #ifdef CONFIG_SMP int cpu; for_each_possible_early_cpu(cpu) { void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start; if (node != node_cpuid[cpu].nid) continue; memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start); __per_cpu_offset[cpu] = (char *)__va(cpu_data) - __per_cpu_start; /* * percpu area for cpu0 is moved from the __init area * which is setup by head.S and used till this point. * Update ar.k3. This move is ensures that percpu * area for cpu0 is on the correct node and its * virtual address isn't insanely far from other * percpu areas which is important for congruent * percpu allocator. */ if (cpu == 0) ia64_set_kr(IA64_KR_PER_CPU_DATA, (unsigned long)cpu_data - (unsigned long)__per_cpu_start); cpu_data += PERCPU_PAGE_SIZE; } #endif return cpu_data; } #ifdef CONFIG_SMP /** * setup_per_cpu_areas - setup percpu areas * * Arch code has already allocated and initialized percpu areas. All * this function has to do is to teach the determined layout to the * dynamic percpu allocator, which happens to be more complex than * creating whole new ones using helpers. */ void __init setup_per_cpu_areas(void) { struct pcpu_alloc_info *ai; struct pcpu_group_info *gi; unsigned int *cpu_map; void *base; unsigned long base_offset; unsigned int cpu; ssize_t static_size, reserved_size, dyn_size; int node, prev_node, unit, nr_units; ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids); if (!ai) panic("failed to allocate pcpu_alloc_info"); cpu_map = ai->groups[0].cpu_map; /* determine base */ base = (void *)ULONG_MAX; for_each_possible_cpu(cpu) base = min(base, (void *)(__per_cpu_offset[cpu] + __per_cpu_start)); base_offset = (void *)__per_cpu_start - base; /* build cpu_map, units are grouped by node */ unit = 0; for_each_node(node) for_each_possible_cpu(cpu) if (node == node_cpuid[cpu].nid) cpu_map[unit++] = cpu; nr_units = unit; /* set basic parameters */ static_size = __per_cpu_end - __per_cpu_start; reserved_size = PERCPU_MODULE_RESERVE; dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size; if (dyn_size < 0) panic("percpu area overflow static=%zd reserved=%zd\n", static_size, reserved_size); ai->static_size = static_size; ai->reserved_size = reserved_size; ai->dyn_size = dyn_size; ai->unit_size = PERCPU_PAGE_SIZE; ai->atom_size = PAGE_SIZE; ai->alloc_size = PERCPU_PAGE_SIZE; /* * CPUs are put into groups according to node. Walk cpu_map * and create new groups at node boundaries. 
*/ prev_node = NUMA_NO_NODE; ai->nr_groups = 0; for (unit = 0; unit < nr_units; unit++) { cpu = cpu_map[unit]; node = node_cpuid[cpu].nid; if (node == prev_node) { gi->nr_units++; continue; } prev_node = node; gi = &ai->groups[ai->nr_groups++]; gi->nr_units = 1; gi->base_offset = __per_cpu_offset[cpu] + base_offset; gi->cpu_map = &cpu_map[unit]; } pcpu_setup_first_chunk(ai, base); pcpu_free_alloc_info(ai); } #endif /** * fill_pernode - initialize pernode data. * @node: the node id. * @pernode: physical address of pernode data * @pernodesize: size of the pernode data */ static void __init fill_pernode(int node, unsigned long pernode, unsigned long pernodesize) { void *cpu_data; int cpus = early_nr_cpus_node(node); mem_data[node].pernode_addr = pernode; mem_data[node].pernode_size = pernodesize; memset(__va(pernode), 0, pernodesize); cpu_data = (void *)pernode; pernode += PERCPU_PAGE_SIZE * cpus; pernode += node * L1_CACHE_BYTES; pgdat_list[node] = __va(pernode); pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); mem_data[node].node_data = __va(pernode); pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data)); pernode += L1_CACHE_ALIGN(sizeof(pg_data_t)); cpu_data = per_cpu_node_setup(cpu_data, node); return; } /** * find_pernode_space - allocate memory for memory map and per-node structures * @start: physical start of range * @len: length of range * @node: node where this range resides * * This routine reserves space for the per-cpu data struct, the list of * pg_data_ts and the per-node data struct. Each node will have something like * the following in the first chunk of addr. space large enough to hold it. * * ________________________ * | | * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first * | PERCPU_PAGE_SIZE * | start and length big enough * | cpus_on_this_node | Node 0 will also have entries for all non-existent cpus. * |------------------------| * | local pg_data_t * | * |------------------------| * | local ia64_node_data | * |------------------------| * | ??? | * |________________________| * * Once this space has been set aside, the bootmem maps are initialized. We * could probably move the allocation of the per-cpu and ia64_node_data space * outside of this function and use alloc_bootmem_node(), but doing it here * is straightforward and we get the alignments we want so... */ static int __init find_pernode_space(unsigned long start, unsigned long len, int node) { unsigned long spfn, epfn; unsigned long pernodesize = 0, pernode; spfn = start >> PAGE_SHIFT; epfn = (start + len) >> PAGE_SHIFT; /* * Make sure this memory falls within this node's usable memory * since we may have thrown some away in build_maps(). */ if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn) return 0; /* Don't setup this node's local space twice... */ if (mem_data[node].pernode_addr) return 0; /* * Calculate total size needed, incl. what's necessary * for good alignment and alias prevention. */ pernodesize = compute_pernodesize(node); pernode = NODEDATA_ALIGN(start, node); /* Is this range big enough for what we want to store here? */ if (start + len > (pernode + pernodesize)) fill_pernode(node, pernode, pernodesize); return 0; } /** * reserve_pernode_space - reserve memory for per-node space * * Reserve the space used by the bootmem maps & per-node space in the boot * allocator so that when we actually create the real mem maps we don't * use their memory. 
*/ static void __init reserve_pernode_space(void) { unsigned long base, size; int node; for_each_online_node(node) { if (node_isset(node, memory_less_mask)) continue; /* Now the per-node space */ size = mem_data[node].pernode_size; base = __pa(mem_data[node].pernode_addr); memblock_reserve(base, size); } } static void scatter_node_data(void) { pg_data_t **dst; int node; /* * for_each_online_node() can't be used at here. * node_online_map is not set for hot-added nodes at this time, * because we are halfway through initialization of the new node's * structures. If for_each_online_node() is used, a new node's * pg_data_ptrs will be not initialized. Instead of using it, * pgdat_list[] is checked. */ for_each_node(node) { if (pgdat_list[node]) { dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs; memcpy(dst, pgdat_list, sizeof(pgdat_list)); } } } /** * initialize_pernode_data - fixup per-cpu & per-node pointers * * Each node's per-node area has a copy of the global pg_data_t list, so * we copy that to each node here, as well as setting the per-cpu pointer * to the local node data structure. */ static void __init initialize_pernode_data(void) { int cpu, node; scatter_node_data(); #ifdef CONFIG_SMP /* Set the node_data pointer for each per-cpu struct */ for_each_possible_early_cpu(cpu) { node = node_cpuid[cpu].nid; per_cpu(ia64_cpu_info, cpu).node_data = mem_data[node].node_data; } #else { struct cpuinfo_ia64 *cpu0_cpu_info; cpu = 0; node = node_cpuid[cpu].nid; cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start + ((char *)&ia64_cpu_info - __per_cpu_start)); cpu0_cpu_info->node_data = mem_data[node].node_data; } #endif /* CONFIG_SMP */ } /** * memory_less_node_alloc - * attempt to allocate memory on the best NUMA slit * node but fall back to any other node when __alloc_bootmem_node fails * for best. * @nid: node id * @pernodesize: size of this node's pernode data */ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) { void *ptr = NULL; u8 best = 0xff; int bestnode = NUMA_NO_NODE, node, anynode = 0; for_each_online_node(node) { if (node_isset(node, memory_less_mask)) continue; else if (node_distance(nid, node) < best) { best = node_distance(nid, node); bestnode = node; } anynode = node; } if (bestnode == NUMA_NO_NODE) bestnode = anynode; ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, bestnode); if (!ptr) panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n", __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode, __pa(MAX_DMA_ADDRESS)); return ptr; } /** * memory_less_nodes - allocate and initialize CPU only nodes pernode * information. */ static void __init memory_less_nodes(void) { unsigned long pernodesize; void *pernode; int node; for_each_node_mask(node, memory_less_mask) { pernodesize = compute_pernodesize(node); pernode = memory_less_node_alloc(node, pernodesize); fill_pernode(node, __pa(pernode), pernodesize); } return; } /** * find_memory - walk the EFI memory map and setup the bootmem allocator * * Called early in boot to setup the bootmem allocator, and to * allocate the per-cpu and per-node structures. 
*/ void __init find_memory(void) { int node; reserve_memory(); efi_memmap_walk(filter_memory, register_active_ranges); if (num_online_nodes() == 0) { printk(KERN_ERR "node info missing!\n"); node_set_online(0); } nodes_or(memory_less_mask, memory_less_mask, node_online_map); min_low_pfn = -1; max_low_pfn = 0; /* These actually end up getting called by call_pernode_memory() */ efi_memmap_walk(filter_rsvd_memory, build_node_maps); efi_memmap_walk(filter_rsvd_memory, find_pernode_space); efi_memmap_walk(find_max_min_low_pfn, NULL); for_each_online_node(node) if (mem_data[node].min_pfn) node_clear(node, memory_less_mask); reserve_pernode_space(); memory_less_nodes(); initialize_pernode_data(); max_pfn = max_low_pfn; find_initrd(); } #ifdef CONFIG_SMP /** * per_cpu_init - setup per-cpu variables * * find_pernode_space() does most of this already, we just need to set * local_per_cpu_offset */ void *per_cpu_init(void) { int cpu; static int first_time = 1; if (first_time) { first_time = 0; for_each_possible_early_cpu(cpu) per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu]; } return __per_cpu_start + __per_cpu_offset[smp_processor_id()]; } #endif /* CONFIG_SMP */ /** * call_pernode_memory - use SRAT to call callback functions with node info * @start: physical start of range * @len: length of range * @arg: function to call for each range * * efi_memmap_walk() knows nothing about layout of memory across nodes. Find * out to which node a block of memory belongs. Ignore memory that we cannot * identify, and split blocks that run across multiple nodes. * * Take this opportunity to round the start address up and the end address * down to page boundaries. */ void call_pernode_memory(unsigned long start, unsigned long len, void *arg) { unsigned long rs, re, end = start + len; void (*func)(unsigned long, unsigned long, int); int i; start = PAGE_ALIGN(start); end &= PAGE_MASK; if (start >= end) return; func = arg; if (!num_node_memblks) { /* No SRAT table, so assume one node (node 0) */ if (start < end) (*func)(start, end - start, 0); return; } for (i = 0; i < num_node_memblks; i++) { rs = max(start, node_memblk[i].start_paddr); re = min(end, node_memblk[i].start_paddr + node_memblk[i].size); if (rs < re) (*func)(rs, re - rs, node_memblk[i].nid); if (re == end) break; } } /** * paging_init - setup page tables * * paging_init() sets up the page tables for each node of the system and frees * the bootmem allocator memory for general use. */ void __init paging_init(void) { unsigned long max_dma; unsigned long max_zone_pfns[MAX_NR_ZONES]; max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; sparse_init(); memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA32] = max_dma; max_zone_pfns[ZONE_NORMAL] = max_low_pfn; free_area_init(max_zone_pfns); zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } pg_data_t * __init arch_alloc_nodedata(int nid) { unsigned long size = compute_pernodesize(nid); return memblock_alloc(size, SMP_CACHE_BYTES); } void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat) { pgdat_list[update_node] = update_pgdat; scatter_node_data(); } #ifdef CONFIG_SPARSEMEM_VMEMMAP int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { return vmemmap_populate_basepages(start, end, node, NULL); } void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { } #endif
linux-master
arch/ia64/mm/discontig.c
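For the entry above: find_pernode_space() uses NODEDATA_ALIGN() to round a candidate address up to 1MB and then stride it by node number, so per-node structures on different nodes do not alias in the cache. The sketch below evaluates the same macro in userspace; PERCPU_PAGE_SIZE and the start address are assumptions.

/*
 * Standalone sketch (not kernel code): the NODEDATA_ALIGN() arithmetic
 * used by find_pernode_space().  PERCPU_PAGE_SIZE here is an assumption.
 */
#include <stdio.h>

#define PERCPU_PAGE_SIZE      (64 * 1024)        /* assumption */
#define MAX_NODE_ALIGN_OFFSET (32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

int main(void)
{
	unsigned long start = 0x4000000UL;  /* assumed start of a node's memory */
	int node;

	for (node = 0; node < 4; node++)
		printf("node %d: pernode data at %#lx\n",
		       node, NODEDATA_ALIGN(start, node));
	return 0;
}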
// SPDX-License-Identifier: GPL-2.0 /* * MMU fault handling support. * * Copyright (C) 1998-2002 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> */ #include <linux/sched/signal.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/extable.h> #include <linux/interrupt.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prefetch.h> #include <linux/uaccess.h> #include <linux/perf_event.h> #include <asm/processor.h> #include <asm/exception.h> extern int die(char *, struct pt_regs *, long); /* * Return TRUE if ADDRESS points at a page in the kernel's mapped segment * (inside region 5, on ia64) and that page is present. */ static int mapped_kernel_page_is_present (unsigned long address) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; pgd = pgd_offset_k(address); if (pgd_none(*pgd) || pgd_bad(*pgd)) return 0; p4d = p4d_offset(pgd, address); if (p4d_none(*p4d) || p4d_bad(*p4d)) return 0; pud = pud_offset(p4d, address); if (pud_none(*pud) || pud_bad(*pud)) return 0; pmd = pmd_offset(pud, address); if (pmd_none(*pmd) || pmd_bad(*pmd)) return 0; ptep = pte_offset_kernel(pmd, address); if (!ptep) return 0; pte = *ptep; return pte_present(pte); } # define VM_READ_BIT 0 # define VM_WRITE_BIT 1 # define VM_EXEC_BIT 2 void __kprobes ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { int signal = SIGSEGV, code = SEGV_MAPERR; struct vm_area_struct *vma, *prev_vma; struct mm_struct *mm = current->mm; unsigned long mask; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); /* mmap_lock is performance critical.... */ prefetchw(&mm->mmap_lock); /* * If we're in an interrupt or have no user context, we must not take the fault.. */ if (faulthandler_disabled() || !mm) goto no_context; /* * This is to handle the kprobes on user space access instructions */ if (kprobe_page_fault(regs, TRAP_BRKPT)) return; if (user_mode(regs)) flags |= FAULT_FLAG_USER; if (mask & VM_WRITE) flags |= FAULT_FLAG_WRITE; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); if (!vma && !prev_vma ) goto bad_area; /* * find_vma_prev() returns vma such that address < vma->vm_end or NULL * * May find no vma, but could be that the last vm area is the * register backing store that needs to expand upwards, in * this case vma will be null, but prev_vma will ne non-null */ if (( !vma && prev_vma ) || (address < vma->vm_start) ) { vma = expand_stack(mm, address); if (!vma) goto bad_area_nosemaphore; } code = SEGV_ACCERR; /* OK, we've got a good vm_area for this memory area. Check the access permissions: */ # if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \ || (1 << VM_EXEC_BIT) != VM_EXEC) # error File is out of sync with <linux/mm.h>. Please update. # endif if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) goto bad_area; if ((vma->vm_flags & mask) != mask) goto bad_area; /* * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the * fault. 
*/ fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) goto no_context; return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { /* * We ran out of memory, or some other thing happened * to us that made us unable to handle the page fault * gracefully. */ if (fault & VM_FAULT_OOM) { goto out_of_memory; } else if (fault & VM_FAULT_SIGSEGV) { goto bad_area; } else if (fault & VM_FAULT_SIGBUS) { signal = SIGBUS; goto bad_area; } BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); return; bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to a speculative load or lfetch.fault, set the "ed" * bit in the psr to ensure forward progress. (Target register will get a * NaT for ld.s, lfetch will be canceled.) */ ia64_psr(regs)->ed = 1; return; } if (user_mode(regs)) { force_sig_fault(signal, code, (void __user *) address, 0, __ISR_VALID, isr); return; } no_context: if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to a speculative load or lfetch.fault, set the "ed" * bit in the psr to ensure forward progress. (Target register will get a * NaT for ld.s, lfetch will be canceled.) */ ia64_psr(regs)->ed = 1; return; } /* * Since we have no vma's for region 5, we might get here even if the address is * valid, due to the VHPT walker inserting a non present translation that becomes * stale. If that happens, the non present fault handler already purged the stale * translation, which fixed the problem. So, we check to see if the translation is * valid, and return if it is. */ if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address)) return; if (ia64_done_with_exception(regs)) return; /* * Oops. The kernel tried to access some bad page. We'll have to terminate things * with extreme prejudice. */ bust_spinlocks(1); if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address); else printk(KERN_ALERT "Unable to handle kernel paging request at " "virtual address %016lx\n", address); if (die("Oops", regs, isr)) regs = NULL; bust_spinlocks(0); if (regs) make_task_dead(SIGKILL); return; out_of_memory: mmap_read_unlock(mm); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); }
linux-master
arch/ia64/mm/fault.c
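For the entry above: ia64_do_page_fault() derives the required vm_flags mask from the execute and write bits of the ISR before comparing it against the VMA. The sketch below shows that bit shuffle in isolation; the ISR bit positions match the kernel headers as far as I know but are restated here as assumptions.

/*
 * Standalone sketch (not kernel code): building the access mask the way
 * ia64_do_page_fault() does.  ISR bit positions are assumptions here.
 */
#include <stdio.h>

#define IA64_ISR_X_BIT 32   /* assumed: execute access bit */
#define IA64_ISR_W_BIT 33   /* assumed: write access bit */

#define VM_READ_BIT  0
#define VM_WRITE_BIT 1
#define VM_EXEC_BIT  2

int main(void)
{
	unsigned long isr = 1UL << IA64_ISR_W_BIT;   /* assumed: write fault */
	unsigned long mask;

	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) |
		(((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	printf("isr=%#lx -> access mask=%#lx (VM_WRITE=%#x VM_EXEC=%#x)\n",
	       isr, mask, 1 << VM_WRITE_BIT, 1 << VM_EXEC_BIT);
	return 0;
}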
// SPDX-License-Identifier: GPL-2.0-only /* * TLB support routines. * * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * * 08/02/00 A. Mallick <[email protected]> * Modified RID allocation for SMP * Goutham Rao <[email protected]> * IPI based ptc implementation and A-step IPI implementation. * Rohit Seth <[email protected]> * Ken Chen <[email protected]> * Christophe de Dinechin <[email protected]>: Avoid ptc.e on memory allocation * Copyright (C) 2007 Intel Corp * Fenghua Yu <[email protected]> * Add multiple ptc.g/ptc.ga instruction support in global tlb purge. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/slab.h> #include <asm/delay.h> #include <asm/mmu_context.h> #include <asm/pal.h> #include <asm/tlbflush.h> #include <asm/dma.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/tlb.h> static struct { u64 mask; /* mask of supported purge page-sizes */ unsigned long max_bits; /* log2 of largest supported purge page-size */ } purge; struct ia64_ctx ia64_ctx = { .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock), .next = 1, .max_ctx = ~0U }; DEFINE_PER_CPU(u8, ia64_need_tlb_flush); DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/ DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/ struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; /* * Initializes the ia64_ctx.bitmap array based on max_ctx+1. * Called after cpu_init() has setup ia64_ctx.max_ctx based on * maximum RID that is supported by boot CPU. */ void __init mmu_context_init (void) { ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, SMP_CACHE_BYTES); if (!ia64_ctx.bitmap) panic("%s: Failed to allocate %u bytes\n", __func__, (ia64_ctx.max_ctx + 1) >> 3); ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, SMP_CACHE_BYTES); if (!ia64_ctx.flushmap) panic("%s: Failed to allocate %u bytes\n", __func__, (ia64_ctx.max_ctx + 1) >> 3); } /* * Acquire the ia64_ctx.lock before calling this function! */ void wrap_mmu_context (struct mm_struct *mm) { int i, cpu; unsigned long flush_bit; for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) { flush_bit = xchg(&ia64_ctx.flushmap[i], 0); ia64_ctx.bitmap[i] ^= flush_bit; } /* use offset at 300 to skip daemons */ ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, ia64_ctx.max_ctx, 300); ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap, ia64_ctx.max_ctx, ia64_ctx.next); /* * can't call flush_tlb_all() here because of race condition * with O(1) scheduler [EF] */ cpu = get_cpu(); /* prevent preemption/migration */ for_each_online_cpu(i) if (i != cpu) per_cpu(ia64_need_tlb_flush, i) = 1; put_cpu(); local_flush_tlb_all(); } /* * Implement "spinaphores" ... like counting semaphores, but they * spin instead of sleeping. If there are ever any other users for * this primitive it can be moved up to a spinaphore.h header. 
*/ struct spinaphore { unsigned long ticket; unsigned long serve; }; static inline void spinaphore_init(struct spinaphore *ss, int val) { ss->ticket = 0; ss->serve = val; } static inline void down_spin(struct spinaphore *ss) { unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve; if (time_before(t, ss->serve)) return; ia64_invala(); for (;;) { asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory"); if (time_before(t, serve)) return; cpu_relax(); } } static inline void up_spin(struct spinaphore *ss) { ia64_fetchadd(1, &ss->serve, rel); } static struct spinaphore ptcg_sem; static u16 nptcg = 1; static int need_ptcg_sem = 1; static int toolatetochangeptcgsem = 0; /* * Kernel parameter "nptcg=" overrides max number of concurrent global TLB * purges which is reported from either PAL or SAL PALO. * * We don't have sanity checking for nptcg value. It's the user's responsibility * for valid nptcg value on the platform. Otherwise, kernel may hang in some * cases. */ static int __init set_nptcg(char *str) { int value = 0; get_option(&str, &value); setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER); return 1; } __setup("nptcg=", set_nptcg); /* * Maximum number of simultaneous ptc.g purges in the system can * be defined by PAL_VM_SUMMARY (in which case we should take * the smallest value for any cpu in the system) or by the PAL * override table (in which case we should ignore the value from * PAL_VM_SUMMARY). * * Kernel parameter "nptcg=" overrides maximum number of simultaneous ptc.g * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case, * we should ignore the value from either PAL_VM_SUMMARY or PAL override table. * * Complicating the logic here is the fact that num_possible_cpus() * isn't fully setup until we start bringing cpus online. */ void setup_ptcg_sem(int max_purges, int nptcg_from) { static int kp_override; static int palo_override; static int firstcpu = 1; if (toolatetochangeptcgsem) { if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0) BUG_ON(1 < nptcg); else BUG_ON(max_purges < nptcg); return; } if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) { kp_override = 1; nptcg = max_purges; goto resetsema; } if (kp_override) { need_ptcg_sem = num_possible_cpus() > nptcg; return; } if (nptcg_from == NPTCG_FROM_PALO) { palo_override = 1; /* In PALO max_purges == 0 really means it! */ if (max_purges == 0) panic("Whoa! Platform does not support global TLB purges.\n"); nptcg = max_purges; if (nptcg == PALO_MAX_TLB_PURGES) { need_ptcg_sem = 0; return; } goto resetsema; } if (palo_override) { if (nptcg != PALO_MAX_TLB_PURGES) need_ptcg_sem = (num_possible_cpus() > nptcg); return; } /* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */ if (max_purges == 0) max_purges = 1; if (firstcpu) { nptcg = max_purges; firstcpu = 0; } if (max_purges < nptcg) nptcg = max_purges; if (nptcg == PAL_MAX_PURGES) { need_ptcg_sem = 0; return; } else need_ptcg_sem = (num_possible_cpus() > nptcg); resetsema: spinaphore_init(&ptcg_sem, max_purges); } #ifdef CONFIG_SMP static void ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits) { struct mm_struct *active_mm = current->active_mm; toolatetochangeptcgsem = 1; if (mm != active_mm) { /* Restore region IDs for mm */ if (mm && active_mm) { activate_context(mm); } else { flush_tlb_all(); return; } } if (need_ptcg_sem) down_spin(&ptcg_sem); do { /* * Flush ALAT entries also. 
*/ ia64_ptcga(start, (nbits << 2)); ia64_srlz_i(); start += (1UL << nbits); } while (start < end); if (need_ptcg_sem) up_spin(&ptcg_sem); if (mm != active_mm) { activate_context(active_mm); } } #endif /* CONFIG_SMP */ void local_flush_tlb_all (void) { unsigned long i, j, flags, count0, count1, stride0, stride1, addr; addr = local_cpu_data->ptce_base; count0 = local_cpu_data->ptce_count[0]; count1 = local_cpu_data->ptce_count[1]; stride0 = local_cpu_data->ptce_stride[0]; stride1 = local_cpu_data->ptce_stride[1]; local_irq_save(flags); for (i = 0; i < count0; ++i) { for (j = 0; j < count1; ++j) { ia64_ptce(addr); addr += stride1; } addr += stride0; } local_irq_restore(flags); ia64_srlz_i(); /* srlz.i implies srlz.d */ } static void __flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned long size = end - start; unsigned long nbits; #ifndef CONFIG_SMP if (mm != current->active_mm) { mm->context = 0; return; } #endif nbits = ia64_fls(size + 0xfff); while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits)) ++nbits; if (nbits > purge.max_bits) nbits = purge.max_bits; start &= ~((1UL << nbits) - 1); preempt_disable(); #ifdef CONFIG_SMP if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { ia64_global_tlb_purge(mm, start, end, nbits); preempt_enable(); return; } #endif do { ia64_ptcl(start, (nbits<<2)); start += (1UL << nbits); } while (start < end); preempt_enable(); ia64_srlz_i(); /* srlz.i implies srlz.d */ } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { if (unlikely(end - start >= 1024*1024*1024*1024UL || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) { /* * If we flush more than a tera-byte or across regions, we're * probably better off just flushing the entire TLB(s). This * should be very rare and is not worth optimizing for. */ flush_tlb_all(); } else { /* flush the address range from the tlb */ __flush_tlb_range(vma, start, end); /* flush the virt. page-table area mapping the addr range */ __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end)); } } EXPORT_SYMBOL(flush_tlb_range); void ia64_tlb_init(void) { ia64_ptce_info_t ptce_info; u64 tr_pgbits; long status; pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; int cpu = smp_processor_id(); if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) { printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; " "defaulting to architected purge page-sizes.\n", status); purge.mask = 0x115557000UL; } purge.max_bits = ia64_fls(purge.mask); ia64_get_ptce(&ptce_info); local_cpu_data->ptce_base = ptce_info.base; local_cpu_data->ptce_count[0] = ptce_info.count[0]; local_cpu_data->ptce_count[1] = ptce_info.count[1]; local_cpu_data->ptce_stride[0] = ptce_info.stride[0]; local_cpu_data->ptce_stride[1] = ptce_info.stride[1]; local_flush_tlb_all(); /* nuke left overs from bootstrapping... 
*/ status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2); if (status) { printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); per_cpu(ia64_tr_num, cpu) = 8; return; } per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; if (per_cpu(ia64_tr_num, cpu) > (vm_info_1.pal_vm_info_1_s.max_dtr_entry+1)) per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { static int justonce = 1; per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; if (justonce) { justonce = 0; printk(KERN_DEBUG "TR register number exceeds " "IA64_TR_ALLOC_MAX!\n"); } } } /* * is_tr_overlap * * Check overlap with inserted TRs. */ static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size) { u64 tr_log_size; u64 tr_end; u64 va_rr = ia64_get_rr(va); u64 va_rid = RR_TO_RID(va_rr); u64 va_end = va + (1<<log_size) - 1; if (va_rid != RR_TO_RID(p->rr)) return 0; tr_log_size = (p->itir & 0xff) >> 2; tr_end = p->ifa + (1<<tr_log_size) - 1; if (va > tr_end || p->ifa > va_end) return 0; return 1; } /* * ia64_insert_tr in virtual mode. Allocate a TR slot * * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr * * va : virtual address. * pte : pte entries inserted. * log_size: range to be covered. * * Return value: <0 : error No. * * >=0 : slot number allocated for TR. * Must be called with preemption disabled. */ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size) { int i, r; unsigned long psr; struct ia64_tr_entry *p; int cpu = smp_processor_id(); if (!ia64_idtrs[cpu]) { ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX, sizeof(struct ia64_tr_entry), GFP_KERNEL); if (!ia64_idtrs[cpu]) return -ENOMEM; } r = -EINVAL; /*Check overlap with existing TR entries*/ if (target_mask & 0x1) { p = ia64_idtrs[cpu]; for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); i++, p++) { if (p->pte & 0x1) if (is_tr_overlap(p, va, log_size)) { printk(KERN_DEBUG "Overlapped Entry" "Inserted for TR Register!!\n"); goto out; } } } if (target_mask & 0x2) { p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX; for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); i++, p++) { if (p->pte & 0x1) if (is_tr_overlap(p, va, log_size)) { printk(KERN_DEBUG "Overlapped Entry" "Inserted for TR Register!!\n"); goto out; } } } for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { switch (target_mask & 0x3) { case 1: if (!((ia64_idtrs[cpu] + i)->pte & 0x1)) goto found; continue; case 2: if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1)) goto found; continue; case 3: if (!((ia64_idtrs[cpu] + i)->pte & 0x1) && !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1)) goto found; continue; default: r = -EINVAL; goto out; } } found: if (i >= per_cpu(ia64_tr_num, cpu)) return -EBUSY; /*Record tr info for mca handler use!*/ if (i > per_cpu(ia64_tr_used, cpu)) per_cpu(ia64_tr_used, cpu) = i; psr = ia64_clear_ic(); if (target_mask & 0x1) { ia64_itr(0x1, i, va, pte, log_size); ia64_srlz_i(); p = ia64_idtrs[cpu] + i; p->ifa = va; p->pte = pte; p->itir = log_size << 2; p->rr = ia64_get_rr(va); } if (target_mask & 0x2) { ia64_itr(0x2, i, va, pte, log_size); ia64_srlz_i(); p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i; p->ifa = va; p->pte = pte; p->itir = log_size << 2; p->rr = ia64_get_rr(va); } ia64_set_psr(psr); r = i; out: return r; } EXPORT_SYMBOL_GPL(ia64_itr_entry); /* * ia64_purge_tr * * target_mask: 0x1: purge itr, 0x2 : purge dtr, 0x3 purge idtr. * slot: slot number to be freed. * * Must be called with preemption disabled. 
*/ void ia64_ptr_entry(u64 target_mask, int slot) { int cpu = smp_processor_id(); int i; struct ia64_tr_entry *p; if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu)) return; if (target_mask & 0x1) { p = ia64_idtrs[cpu] + slot; if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { p->pte = 0; ia64_ptr(0x1, p->ifa, p->itir>>2); ia64_srlz_i(); } } if (target_mask & 0x2) { p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot; if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) { p->pte = 0; ia64_ptr(0x2, p->ifa, p->itir>>2); ia64_srlz_i(); } } for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) { if (((ia64_idtrs[cpu] + i)->pte & 0x1) || ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1)) break; } per_cpu(ia64_tr_used, cpu) = i; } EXPORT_SYMBOL_GPL(ia64_ptr_entry);
linux-master
arch/ia64/mm/tlb.c
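For the entry above: the "spinaphore" in tlb.c is a counting semaphore whose waiters spin, with a ticket counter and a serve counter compared wrap-safely. The sketch below is a userspace analogue built on C11 atomics rather than the ia64 fetchadd and ld8.c.nc primitives, so it illustrates the protocol but not the kernel's speculative-load trick.

/*
 * Standalone sketch (not kernel code): a userspace analogue of the
 * spinaphore, using C11 atomics.  The wrap-safe comparison mirrors the
 * kernel's time_before() usage.
 */
#include <stdatomic.h>
#include <stdio.h>

struct spinaphore {
	atomic_ulong ticket;
	atomic_ulong serve;
};

static void spinaphore_init(struct spinaphore *ss, unsigned long val)
{
	atomic_init(&ss->ticket, 0);
	atomic_init(&ss->serve, val);   /* val holders may be inside at once */
}

static void down_spin(struct spinaphore *ss)
{
	unsigned long t = atomic_fetch_add(&ss->ticket, 1);

	/* proceed once our ticket is "before" serve (wrap-safe compare) */
	while ((long)(t - atomic_load(&ss->serve)) >= 0)
		;   /* spin */
}

static void up_spin(struct spinaphore *ss)
{
	atomic_fetch_add(&ss->serve, 1);
}

int main(void)
{
	struct spinaphore sem;

	spinaphore_init(&sem, 2);   /* e.g. two concurrent global purges allowed */
	down_spin(&sem);
	/* ... work analogous to the ptc.ga loop would go here ... */
	up_spin(&sem);
	printf("acquired and released\n");
	return 0;
}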
/* * Dynamic function tracing support. * * Copyright (C) 2008 Shaohua Li <[email protected]> * * For licencing details, see COPYING. * * Defines low-level handling of mcount calls when the kernel * is compiled with the -pg flag. When using dynamic ftrace, the * mcount call-sites get patched lazily with NOP till they are * enabled. All code mutation routines here take effect atomically. */ #include <linux/uaccess.h> #include <linux/ftrace.h> #include <asm/cacheflush.h> #include <asm/patch.h> /* In IA64, each function will be added below two bundles with -pg option */ static unsigned char __attribute__((aligned(8))) ftrace_orig_code[MCOUNT_INSN_SIZE] = { 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */ 0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */ 0x05, 0x00, 0xc4, 0x00, /* mov r42=b0 */ 0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */ 0x08, 0x00, 0x00, 0x50 /* br.call.sptk.many b0 = _mcount;; */ }; struct ftrace_orig_insn { u64 dummy1, dummy2, dummy3; u64 dummy4:64-41+13; u64 imm20:20; u64 dummy5:3; u64 sign:1; u64 dummy6:4; }; /* mcount stub will be converted below for nop */ static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */ 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */ 0x00, 0x00, 0x04, 0x00 }; static unsigned char *ftrace_nop_replace(void) { return ftrace_nop_code; } /* * mcount stub will be converted below for call * Note: Just the last instruction is changed against nop * */ static unsigned char __attribute__((aligned(8))) ftrace_call_code[MCOUNT_INSN_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */ 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/ 0xf8, 0xff, 0xff, 0xc8 }; struct ftrace_call_insn { u64 dummy1, dummy2; u64 dummy3:48; u64 imm39_l:16; u64 imm39_h:23; u64 dummy4:13; u64 imm20:20; u64 dummy5:3; u64 i:1; u64 dummy6:4; }; static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { struct ftrace_call_insn *code = (void *)ftrace_call_code; unsigned long offset = addr - (ip + 0x10); code->imm39_l = offset >> 24; code->imm39_h = offset >> 40; code->imm20 = offset >> 4; code->i = offset >> 63; return ftrace_call_code; } static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code, int do_check) { unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: * We are paranoid about modifying text, as if a bug was to happen, it * could cause us to read or write to someplace that could cause harm. * Carefully read and modify the code with probe_kernel_*(), and make * sure what we read is what we expected it to be before modifying it. 
*/ if (!do_check) goto skip_check; /* read the text we want to modify */ if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; /* Make sure it is what we expect it to be */ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; skip_check: /* replace the text with the new text */ if (copy_to_kernel_nofault(((void *)ip), new_code, MCOUNT_INSN_SIZE)) return -EPERM; flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); return 0; } static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr) { unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE]; unsigned long ip = rec->ip; if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; if (rec->flags & FTRACE_FL_CONVERTED) { struct ftrace_call_insn *call_insn, *tmp_call; call_insn = (void *)ftrace_call_code; tmp_call = (void *)replaced; call_insn->imm39_l = tmp_call->imm39_l; call_insn->imm39_h = tmp_call->imm39_h; call_insn->imm20 = tmp_call->imm20; call_insn->i = tmp_call->i; if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; return 0; } else { struct ftrace_orig_insn *call_insn, *tmp_call; call_insn = (void *)ftrace_orig_code; tmp_call = (void *)replaced; call_insn->sign = tmp_call->sign; call_insn->imm20 = tmp_call->imm20; if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; return 0; } } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { int ret; char *new; ret = ftrace_make_nop_check(rec, addr); if (ret) return ret; new = ftrace_nop_replace(); return ftrace_modify_code(rec->ip, NULL, new, 0); } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; unsigned char *old, *new; old= ftrace_nop_replace(); new = ftrace_call_replace(ip, addr); return ftrace_modify_code(ip, old, new, 1); } /* in IA64, _mcount can't directly call ftrace_stub. Only jump is ok */ int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip; unsigned long addr = ((struct fnptr *)ftrace_call)->ip; if (func == ftrace_stub) return 0; ip = ((struct fnptr *)func)->ip; ia64_patch_imm64(addr + 2, ip); flush_icache_range(addr, addr + 16); return 0; }
linux-master
arch/ia64/kernel/ftrace.c
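For the entry above: ftrace_call_replace() scatters the IP-relative branch offset across the imm20, imm39 and sign fields of a brl.many bundle. The sketch below splits an offset the same way and reassembles it as a round-trip check; the sample addresses are assumptions and the field widths follow the bitfield declarations in the file.

/*
 * Standalone sketch (not kernel code): splitting a brl.many offset into
 * instruction fields the way ftrace_call_replace() does, then rebuilding
 * it as a check.  Sample addresses are assumptions.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ip   = 0xa000000100010000ULL;   /* assumed mcount call site */
	uint64_t addr = 0xa000000100200000ULL;   /* assumed branch target */
	uint64_t offset = addr - (ip + 0x10);    /* branch sits in the 2nd bundle */

	uint64_t imm39_l = (offset >> 24) & 0xffffULL;    /* 16 bits */
	uint64_t imm39_h = (offset >> 40) & 0x7fffffULL;  /* 23 bits */
	uint64_t imm20   = (offset >>  4) & 0xfffffULL;   /* 20 bits */
	uint64_t i       = (offset >> 63) & 0x1ULL;       /* sign bit */

	uint64_t rebuilt = (i << 63) | (imm39_h << 40) | (imm39_l << 24) |
			   (imm20 << 4);

	printf("offset=%#llx rebuilt=%#llx %s\n",
	       (unsigned long long)offset, (unsigned long long)rebuilt,
	       offset == rebuilt ? "(round-trip ok)" : "(mismatch)");
	return 0;
}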
/* * err_inject.c - * 1.) Inject errors to a processor. * 2.) Query error injection capabilities. * This driver along with user space code can be acting as an error * injection tool. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Written by: Fenghua Yu <[email protected]>, Intel Corporation * Copyright (C) 2006, Intel Corp. All rights reserved. * */ #include <linux/device.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/cpu.h> #include <linux/module.h> #define ERR_INJ_DEBUG #define ERR_DATA_BUFFER_SIZE 3 // Three 8-byte; #define define_one_ro(name) \ static DEVICE_ATTR(name, 0444, show_##name, NULL) #define define_one_rw(name) \ static DEVICE_ATTR(name, 0644, show_##name, store_##name) static u64 call_start[NR_CPUS]; static u64 phys_addr[NR_CPUS]; static u64 err_type_info[NR_CPUS]; static u64 err_struct_info[NR_CPUS]; static struct { u64 data1; u64 data2; u64 data3; } __attribute__((__aligned__(16))) err_data_buffer[NR_CPUS]; static s64 status[NR_CPUS]; static u64 capabilities[NR_CPUS]; static u64 resources[NR_CPUS]; #define show(name) \ static ssize_t \ show_##name(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ u32 cpu=dev->id; \ return sprintf(buf, "%llx\n", name[cpu]); \ } #define store(name) \ static ssize_t \ store_##name(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t size) \ { \ unsigned int cpu=dev->id; \ name[cpu] = simple_strtoull(buf, NULL, 16); \ return size; \ } show(call_start) /* It's user's responsibility to call the PAL procedure on a specific * processor. The cpu number in driver is only used for storing data. */ static ssize_t store_call_start(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; unsigned long call_start = simple_strtoull(buf, NULL, 16); #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu); printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]); printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]); printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); #endif switch (call_start) { case 0: /* Do nothing. */ break; case 1: /* Call pal_mc_error_inject in physical mode. */ status[cpu]=ia64_pal_mc_error_inject_phys(err_type_info[cpu], err_struct_info[cpu], ia64_tpa(&err_data_buffer[cpu]), &capabilities[cpu], &resources[cpu]); break; case 2: /* Call pal_mc_error_inject in virtual mode. 
*/ status[cpu]=ia64_pal_mc_error_inject_virt(err_type_info[cpu], err_struct_info[cpu], ia64_tpa(&err_data_buffer[cpu]), &capabilities[cpu], &resources[cpu]); break; default: status[cpu] = -EINVAL; break; } #ifdef ERR_INJ_DEBUG printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]); printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]); printk(KERN_DEBUG "resources=%llx\n", resources[cpu]); #endif return size; } show(err_type_info) store(err_type_info) static ssize_t show_virtual_to_phys(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; return sprintf(buf, "%llx\n", phys_addr[cpu]); } static ssize_t store_virtual_to_phys(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; u64 virt_addr=simple_strtoull(buf, NULL, 16); int ret; ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL); if (ret<=0) { #ifdef ERR_INJ_DEBUG printk("Virtual address %llx is not existing.\n", virt_addr); #endif return -EINVAL; } phys_addr[cpu] = ia64_tpa(virt_addr); return size; } show(err_struct_info) store(err_struct_info) static ssize_t show_err_data_buffer(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int cpu=dev->id; return sprintf(buf, "%llx, %llx, %llx\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3); } static ssize_t store_err_data_buffer(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { unsigned int cpu=dev->id; int ret; #ifdef ERR_INJ_DEBUG printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n", err_data_buffer[cpu].data1, err_data_buffer[cpu].data2, err_data_buffer[cpu].data3, cpu); #endif ret = sscanf(buf, "%llx, %llx, %llx", &err_data_buffer[cpu].data1, &err_data_buffer[cpu].data2, &err_data_buffer[cpu].data3); if (ret!=ERR_DATA_BUFFER_SIZE) return -EINVAL; return size; } show(status) show(capabilities) show(resources) define_one_rw(call_start); define_one_rw(err_type_info); define_one_rw(err_struct_info); define_one_rw(err_data_buffer); define_one_rw(virtual_to_phys); define_one_ro(status); define_one_ro(capabilities); define_one_ro(resources); static struct attribute *default_attrs[] = { &dev_attr_call_start.attr, &dev_attr_virtual_to_phys.attr, &dev_attr_err_type_info.attr, &dev_attr_err_struct_info.attr, &dev_attr_err_data_buffer.attr, &dev_attr_status.attr, &dev_attr_capabilities.attr, &dev_attr_resources.attr, NULL }; static struct attribute_group err_inject_attr_group = { .attrs = default_attrs, .name = "err_inject" }; /* Add/Remove err_inject interface for CPU device */ static int err_inject_add_dev(unsigned int cpu) { struct device *sys_dev = get_cpu_device(cpu); return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group); } static int err_inject_remove_dev(unsigned int cpu) { struct device *sys_dev = get_cpu_device(cpu); sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group); return 0; } static enum cpuhp_state hp_online; static int __init err_inject_init(void) { int ret; #ifdef ERR_INJ_DEBUG printk(KERN_INFO "Enter error injection driver.\n"); #endif ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/err_inj:online", err_inject_add_dev, err_inject_remove_dev); if (ret >= 0) { hp_online = ret; ret = 0; } return ret; } static void __exit err_inject_exit(void) { #ifdef ERR_INJ_DEBUG printk(KERN_INFO "Exit error injection driver.\n"); #endif cpuhp_remove_state(hp_online); } module_init(err_inject_init); module_exit(err_inject_exit); MODULE_AUTHOR("Fenghua Yu <[email 
protected]>");
MODULE_DESCRIPTION("MC error injection kernel sysfs interface");
MODULE_LICENSE("GPL");
linux-master
arch/ia64/kernel/err_inject.c
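The err_inject.c record above is driven entirely through per-CPU sysfs attributes: user space writes hex values into err_type_info/err_struct_info/err_data_buffer and then writes 1 (physical mode) or 2 (virtual mode) into call_start to trigger the PAL call. A minimal user-space sketch of that sequence follows; the sysfs directory used here is an assumption about where the "err_inject" group lands for cpu0, and the injected value is a placeholder, not a meaningful PAL error type.

#include <stdio.h>

/* Write a single string value into one sysfs attribute file. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* assumed location of the per-CPU "err_inject" attribute group */
	const char *dir = "/sys/devices/system/cpu/cpu0/err_inject";
	char path[256];

	snprintf(path, sizeof(path), "%s/err_type_info", dir);
	write_attr(path, "4101");	/* placeholder hex value */
	snprintf(path, sizeof(path), "%s/call_start", dir);
	write_attr(path, "1");		/* 1 = pal_mc_error_inject in physical mode */
	return 0;
}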
// SPDX-License-Identifier: GPL-2.0
/*
 * LSAPIC Interrupt Controller
 *
 * This takes care of interrupts that are generated by the CPU's
 * internal Streamlined Advanced Programmable Interrupt Controller
 * (LSAPIC), such as the ITC and IPI interrupts.
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <[email protected]>
 * Copyright (C) 2000 Hewlett-Packard Co
 * Copyright (C) 2000 David Mosberger-Tang <[email protected]>
 */

#include <linux/sched.h>
#include <linux/irq.h>

static unsigned int
lsapic_noop_startup (struct irq_data *data)
{
	return 0;
}

static void
lsapic_noop (struct irq_data *data)
{
	/* nothing to do... */
}

static int lsapic_retrigger(struct irq_data *data)
{
	ia64_resend_irq(data->irq);

	return 1;
}

struct irq_chip irq_type_ia64_lsapic = {
	.name =			"LSAPIC",
	.irq_startup =		lsapic_noop_startup,
	.irq_shutdown =		lsapic_noop,
	.irq_enable =		lsapic_noop,
	.irq_disable =		lsapic_noop,
	.irq_ack =		lsapic_noop,
	.irq_retrigger =	lsapic_retrigger,
};
linux-master
arch/ia64/kernel/irq_lsapic.c
// SPDX-License-Identifier: GPL-2.0 /* * This file contains various system calls that have different calling * conventions on different platforms. * * Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/task_stack.h> #include <linux/shm.h> #include <linux/file.h> /* doh, must come after sched.h... */ #include <linux/smp.h> #include <linux/syscalls.h> #include <linux/highuid.h> #include <linux/hugetlb.h> #include <asm/shmparam.h> #include <linux/uaccess.h> unsigned long arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { long map_shared = (flags & MAP_SHARED); unsigned long align_mask = 0; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; if (len > RGN_MAP_LIMIT) return -ENOMEM; /* handle fixed mapping: prevent overlap with huge pages */ if (flags & MAP_FIXED) { if (is_hugepage_only_range(mm, addr, len)) return -EINVAL; return addr; } #ifdef CONFIG_HUGETLB_PAGE if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; #endif if (!addr) addr = TASK_UNMAPPED_BASE; if (map_shared && (TASK_SIZE > 0xfffffffful)) /* * For 64-bit tasks, align shared segments to 1MB to avoid potential * performance penalty due to virtual aliasing (see ASDM). For 32-bit * tasks, we prefer to avoid exhausting the address space too quickly by * limiting alignment to a single page. */ align_mask = PAGE_MASK & (SHMLBA - 1); info.flags = 0; info.length = len; info.low_limit = addr; info.high_limit = TASK_SIZE; info.align_mask = align_mask; info.align_offset = pgoff << PAGE_SHIFT; return vm_unmapped_area(&info); } asmlinkage long ia64_getpriority (int which, int who) { long prio; prio = sys_getpriority(which, who); if (prio >= 0) { force_successful_syscall_return(); prio = 20 - prio; } return prio; } /* XXX obsolete, but leave it here until the old libc is gone... */ asmlinkage unsigned long sys_getpagesize (void) { return PAGE_SIZE; } asmlinkage unsigned long ia64_brk (unsigned long brk) { unsigned long retval = sys_brk(brk); force_successful_syscall_return(); return retval; } /* * On IA-64, we return the two file descriptors in ret0 and ret1 (r8 * and r9) as this is faster than doing a copy_to_user(). */ asmlinkage long sys_ia64_pipe (void) { struct pt_regs *regs = task_pt_regs(current); int fd[2]; int retval; retval = do_pipe_flags(fd, 0); if (retval) goto out; retval = fd[0]; regs->r9 = fd[1]; out: return retval; } int ia64_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { unsigned long roff; /* * Don't permit mappings into unmapped space, the virtual page table * of a region, or across a region boundary. Note: RGN_MAP_LIMIT is * equal to 2^n-PAGE_SIZE (for some integer n <= 61) and len > 0. */ roff = REGION_OFFSET(addr); if ((len > RGN_MAP_LIMIT) || (roff > (RGN_MAP_LIMIT - len))) return -EINVAL; return 0; } /* * mmap2() is like mmap() except that the offset is expressed in units * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces * of) files that are larger than the address space of the CPU. 
*/ asmlinkage unsigned long sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) { addr = ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); if (!IS_ERR_VALUE(addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, long off) { if (offset_in_page(off) != 0) return -EINVAL; addr = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); if (!IS_ERR_VALUE(addr)) force_successful_syscall_return(); return addr; } asmlinkage unsigned long ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) { addr = sys_mremap(addr, old_len, new_len, flags, new_addr); if (!IS_ERR_VALUE(addr)) force_successful_syscall_return(); return addr; } asmlinkage long ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp) { struct timespec64 rtn_tp; s64 tick_ns; /* * ia64's clock_gettime() syscall is implemented as a vdso call * fsys_clock_gettime(). Currently it handles only * CLOCK_REALTIME and CLOCK_MONOTONIC. Both are based on * 'ar.itc' counter which gets incremented at a constant * frequency. It's usually 400MHz, ~2.5x times slower than CPU * clock frequency. Which is almost a 1ns hrtimer, but not quite. * * Let's special-case these timers to report correct precision * based on ITC frequency and not HZ frequency for supported * clocks. */ switch (which_clock) { case CLOCK_REALTIME: case CLOCK_MONOTONIC: tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq); rtn_tp = ns_to_timespec64(tick_ns); return put_timespec64(&rtn_tp, tp); } return sys_clock_getres(which_clock, tp); }
linux-master
arch/ia64/kernel/sys_ia64.c
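One detail worth calling out from sys_ia64.c above: ia64_clock_getres() reports the resolution of CLOCK_REALTIME/CLOCK_MONOTONIC from the ITC frequency rather than from HZ. The small stand-alone program below just reworks that arithmetic for the 400 MHz figure mentioned in the source comment; DIV_ROUND_UP is restated as a plain macro for the example and the frequency is illustrative.

#include <stdio.h>

#define NSEC_PER_SEC		1000000000ULL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long itc_freq = 400000000ULL;	/* example: 400 MHz ITC */
	unsigned long long tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, itc_freq);

	/* ceil(10^9 / (4 * 10^8)) = 3, so the reported resolution is 3 ns */
	printf("reported clock resolution: %llu ns\n", tick_ns);
	return 0;
}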
// SPDX-License-Identifier: GPL-2.0-only /* * File: mca.c * Purpose: Generic MCA handling layer * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * * Copyright (C) 2002 Dell Inc. * Copyright (C) Matt Domsch <[email protected]> * * Copyright (C) 2002 Intel * Copyright (C) Jenna Hall <[email protected]> * * Copyright (C) 2001 Intel * Copyright (C) Fred Lewis <[email protected]> * * Copyright (C) 2000 Intel * Copyright (C) Chuck Fleckenstein <[email protected]> * * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc. * Copyright (C) Vijay Chander <[email protected]> * * Copyright (C) 2006 FUJITSU LIMITED * Copyright (C) Hidetoshi Seto <[email protected]> * * 2000-03-29 Chuck Fleckenstein <[email protected]> * Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, * added min save state dump, added INIT handler. * * 2001-01-03 Fred Lewis <[email protected]> * Added setup of CMCI and CPEI IRQs, logging of corrected platform * errors, completed code for logging of corrected & uncorrected * machine check errors, and updated for conformance with Nov. 2000 * revision of the SAL 3.0 spec. * * 2002-01-04 Jenna Hall <[email protected]> * Aligned MCA stack to 16 bytes, added platform vs. CPU error flag, * set SAL default return values, changed error record structure to * linked list, added init call to sal_get_state_info_size(). * * 2002-03-25 Matt Domsch <[email protected]> * GUID cleanups. * * 2003-04-15 David Mosberger-Tang <[email protected]> * Added INIT backtrace support. * * 2003-12-08 Keith Owens <[email protected]> * smp_call_function() must not be called from interrupt context * (can deadlock on tasklist_lock). * Use keventd to call smp_call_function(). * * 2004-02-01 Keith Owens <[email protected]> * Avoid deadlock when using printk() for MCA and INIT records. * Delete all record printing code, moved to salinfo_decode in user * space. Mark variables and functions static where possible. * Delete dead variables and functions. Reorder to remove the need * for forward declarations and to consolidate related code. * * 2005-08-12 Keith Owens <[email protected]> * Convert MCA/INIT handlers to use per event stacks and SAL/OS * state. * * 2005-10-07 Keith Owens <[email protected]> * Add notify_die() hooks. * * 2006-09-15 Hidetoshi Seto <[email protected]> * Add printing support for MCA/INIT. * * 2007-04-27 Russ Anderson <[email protected]> * Support multiple cpus going through OS_MCA in the same event. */ #include <linux/jiffies.h> #include <linux/types.h> #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/memblock.h> #include <linux/acpi.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/workqueue.h> #include <linux/cpumask.h> #include <linux/kdebug.h> #include <linux/cpu.h> #include <linux/gfp.h> #include <asm/delay.h> #include <asm/efi.h> #include <asm/meminit.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/mca.h> #include <asm/mca_asm.h> #include <asm/kexec.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include <asm/tlb.h> #include "mca_drv.h" #include "entry.h" #include "irq.h" #if defined(IA64_MCA_DEBUG_INFO) # define IA64_MCA_DEBUG(fmt...) printk(fmt) #else # define IA64_MCA_DEBUG(fmt...) 
do {} while (0) #endif #define NOTIFY_INIT(event, regs, arg, spin) \ do { \ if ((notify_die((event), "INIT", (regs), (arg), 0, 0) \ == NOTIFY_STOP) && ((spin) == 1)) \ ia64_mca_spin(__func__); \ } while (0) #define NOTIFY_MCA(event, regs, arg, spin) \ do { \ if ((notify_die((event), "MCA", (regs), (arg), 0, 0) \ == NOTIFY_STOP) && ((spin) == 1)) \ ia64_mca_spin(__func__); \ } while (0) /* Used by mca_asm.S */ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */ unsigned long __per_cpu_mca[NR_CPUS]; /* In mca_asm.S */ extern void ia64_os_init_dispatch_monarch (void); extern void ia64_os_init_dispatch_slave (void); static int monarch_cpu = -1; static ia64_mc_info_t ia64_mc_info; #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */ #define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */ #define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */ #define CPE_HISTORY_LENGTH 5 #define CMC_HISTORY_LENGTH 5 static struct timer_list cpe_poll_timer; static struct timer_list cmc_poll_timer; /* * This variable tells whether we are currently in polling mode. * Start with this in the wrong state so we won't play w/ timers * before the system is ready. */ static int cmc_polling_enabled = 1; /* * Clearing this variable prevents CPE polling from getting activated * in mca_late_init. Use it if your system doesn't provide a CPEI, * but encounters problems retrieving CPE logs. This should only be * necessary for debugging. */ static int cpe_poll_enabled = 1; extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); static int mca_init __initdata; /* * limited & delayed printing support for MCA/INIT handler */ #define mprintk(fmt...) ia64_mca_printk(fmt) #define MLOGBUF_SIZE (512+256*NR_CPUS) #define MLOGBUF_MSGMAX 256 static char mlogbuf[MLOGBUF_SIZE]; static DEFINE_SPINLOCK(mlogbuf_wlock); /* mca context only */ static DEFINE_SPINLOCK(mlogbuf_rlock); /* normal context only */ static unsigned long mlogbuf_start; static unsigned long mlogbuf_end; static unsigned int mlogbuf_finished = 0; static unsigned long mlogbuf_timestamp = 0; static int loglevel_save = -1; #define BREAK_LOGLEVEL(__console_loglevel) \ oops_in_progress = 1; \ if (loglevel_save < 0) \ loglevel_save = __console_loglevel; \ __console_loglevel = 15; #define RESTORE_LOGLEVEL(__console_loglevel) \ if (loglevel_save >= 0) { \ __console_loglevel = loglevel_save; \ loglevel_save = -1; \ } \ mlogbuf_finished = 0; \ oops_in_progress = 0; /* * Push messages into buffer, print them later if not urgent. */ void ia64_mca_printk(const char *fmt, ...) { va_list args; int printed_len; char temp_buf[MLOGBUF_MSGMAX]; char *p; va_start(args, fmt); printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args); va_end(args); /* Copy the output into mlogbuf */ if (oops_in_progress) { /* mlogbuf was abandoned, use printk directly instead. */ printk("%s", temp_buf); } else { spin_lock(&mlogbuf_wlock); for (p = temp_buf; *p; p++) { unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE; if (next != mlogbuf_start) { mlogbuf[mlogbuf_end] = *p; mlogbuf_end = next; } else { /* buffer full */ break; } } mlogbuf[mlogbuf_end] = '\0'; spin_unlock(&mlogbuf_wlock); } } EXPORT_SYMBOL(ia64_mca_printk); /* * Print buffered messages. 
* NOTE: call this after returning normal context. (ex. from salinfod) */ void ia64_mlogbuf_dump(void) { char temp_buf[MLOGBUF_MSGMAX]; char *p; unsigned long index; unsigned long flags; unsigned int printed_len; /* Get output from mlogbuf */ while (mlogbuf_start != mlogbuf_end) { temp_buf[0] = '\0'; p = temp_buf; printed_len = 0; spin_lock_irqsave(&mlogbuf_rlock, flags); index = mlogbuf_start; while (index != mlogbuf_end) { *p = mlogbuf[index]; index = (index + 1) % MLOGBUF_SIZE; if (!*p) break; p++; if (++printed_len >= MLOGBUF_MSGMAX - 1) break; } *p = '\0'; if (temp_buf[0]) printk("%s", temp_buf); mlogbuf_start = index; mlogbuf_timestamp = 0; spin_unlock_irqrestore(&mlogbuf_rlock, flags); } } EXPORT_SYMBOL(ia64_mlogbuf_dump); /* * Call this if system is going to down or if immediate flushing messages to * console is required. (ex. recovery was failed, crash dump is going to be * invoked, long-wait rendezvous etc.) * NOTE: this should be called from monarch. */ static void ia64_mlogbuf_finish(int wait) { BREAK_LOGLEVEL(console_loglevel); ia64_mlogbuf_dump(); printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, " "MCA/INIT might be dodgy or fail.\n"); if (!wait) return; /* wait for console */ printk("Delaying for 5 seconds...\n"); udelay(5*1000000); mlogbuf_finished = 1; } /* * Print buffered messages from INIT context. */ static void ia64_mlogbuf_dump_from_init(void) { if (mlogbuf_finished) return; if (mlogbuf_timestamp && time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) { printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT " " and the system seems to be messed up.\n"); ia64_mlogbuf_finish(0); return; } if (!spin_trylock(&mlogbuf_rlock)) { printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. " "Generated messages other than stack dump will be " "buffered to mlogbuf and will be printed later.\n"); printk(KERN_ERR "INIT: If messages would not printed after " "this INIT, wait 30sec and assert INIT again.\n"); if (!mlogbuf_timestamp) mlogbuf_timestamp = jiffies; return; } spin_unlock(&mlogbuf_rlock); ia64_mlogbuf_dump(); } static inline void ia64_mca_spin(const char *func) { if (monarch_cpu == smp_processor_id()) ia64_mlogbuf_finish(0); mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func); while (1) cpu_relax(); } /* * IA64_MCA log support */ #define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */ #define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */ typedef struct ia64_state_log_s { spinlock_t isl_lock; int isl_index; unsigned long isl_count; ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */ } ia64_state_log_t; static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) #define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index #define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index #define IA64_LOG_INDEX_INC(it) \ {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \ ia64_state_log[it].isl_count++;} #define IA64_LOG_INDEX_DEC(it) \ ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])) #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])) #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count 
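/*
 * Illustrative sketch, not part of the original mca.c: the IA64_LOG_* macros
 * above implement a two-slot double buffer per SAL record type.  isl_index
 * always names the slot that will receive the *next* record, the "current"
 * (most recently filled) slot is 1 - isl_index, and IA64_LOG_INDEX_INC()
 * flips the index while counting records.  The helper below restates that
 * arithmetic with hypothetical names so the flip is easier to follow; it is
 * not used anywhere in the kernel.
 */
struct mca_twolog_demo {
	int next;			/* index of the slot to fill next */
	unsigned long count;		/* how many records have been stored */
	void *slot[2];			/* the two log buffers */
};

static inline void *mca_twolog_demo_claim(struct mca_twolog_demo *l)
{
	void *buf = l->slot[l->next];	/* buffer for the incoming record */

	l->next = 1 - l->next;		/* what was "next" becomes "curr" */
	l->count++;
	return buf;
}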
static inline void ia64_log_allocate(int it, u64 size) { ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]) panic("%s: Failed to allocate %llu bytes\n", __func__, size); ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]) panic("%s: Failed to allocate %llu bytes\n", __func__, size); } /* * ia64_log_init * Reset the OS ia64 log buffer * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) * Outputs : None */ static void __init ia64_log_init(int sal_info_type) { u64 max_size = 0; IA64_LOG_NEXT_INDEX(sal_info_type) = 0; IA64_LOG_LOCK_INIT(sal_info_type); // SAL will tell us the maximum size of any error record of this type max_size = ia64_sal_get_state_info_size(sal_info_type); if (!max_size) /* alloc_bootmem() doesn't like zero-sized allocations! */ return; // set up OS data structures to hold error info ia64_log_allocate(sal_info_type, max_size); } /* * ia64_log_get * * Get the current MCA log from SAL and copy it into the OS log buffer. * * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) * irq_safe whether you can use printk at this point * Outputs : size (total record length) * *buffer (ptr to error record) * */ static u64 ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) { sal_log_record_header_t *log_buffer; u64 total_len = 0; unsigned long s; IA64_LOG_LOCK(sal_info_type); /* Get the process state information */ log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type); total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer); if (total_len) { IA64_LOG_INDEX_INC(sal_info_type); IA64_LOG_UNLOCK(sal_info_type); if (irq_safe) { IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n", __func__, sal_info_type, total_len); } *buffer = (u8 *) log_buffer; return total_len; } else { IA64_LOG_UNLOCK(sal_info_type); return 0; } } /* * ia64_mca_log_sal_error_record * * This function retrieves a specified error record type from SAL * and wakes up any processes waiting for error records. * * Inputs : sal_info_type (Type of error record MCA/CMC/CPE) * FIXME: remove MCA and irq_safe. */ static void ia64_mca_log_sal_error_record(int sal_info_type) { u8 *buffer; sal_log_record_header_t *rh; u64 size; int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA; #ifdef IA64_MCA_DEBUG_INFO static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; #endif size = ia64_log_get(sal_info_type, &buffer, irq_safe); if (!size) return; salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe); if (irq_safe) IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n", smp_processor_id(), sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN"); /* Clear logs from corrected errors in case there's no user-level logger */ rh = (sal_log_record_header_t *)buffer; if (rh->severity == sal_log_severity_corrected) ia64_sal_clear_state_info(sal_info_type); } /* * search_mca_table * See if the MCA surfaced in an instruction range * that has been tagged as recoverable. 
* * Inputs * first First address range to check * last Last address range to check * ip Instruction pointer, address we are looking for * * Return value: * 1 on Success (in the table)/ 0 on Failure (not in the table) */ int search_mca_table (const struct mca_table_entry *first, const struct mca_table_entry *last, unsigned long ip) { const struct mca_table_entry *curr; u64 curr_start, curr_end; curr = first; while (curr <= last) { curr_start = (u64) &curr->start_addr + curr->start_addr; curr_end = (u64) &curr->end_addr + curr->end_addr; if ((ip >= curr_start) && (ip <= curr_end)) { return 1; } curr++; } return 0; } /* Given an address, look for it in the mca tables. */ int mca_recover_range(unsigned long addr) { extern struct mca_table_entry __start___mca_table[]; extern struct mca_table_entry __stop___mca_table[]; return search_mca_table(__start___mca_table, __stop___mca_table-1, addr); } EXPORT_SYMBOL_GPL(mca_recover_range); int cpe_vector = -1; int ia64_cpe_irq = -1; static irqreturn_t ia64_mca_cpe_int_handler (int cpe_irq, void *arg) { static unsigned long cpe_history[CPE_HISTORY_LENGTH]; static int index; static DEFINE_SPINLOCK(cpe_history_lock); IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", __func__, cpe_irq, smp_processor_id()); /* SAL spec states this should run w/ interrupts enabled */ local_irq_enable(); spin_lock(&cpe_history_lock); if (!cpe_poll_enabled && cpe_vector >= 0) { int i, count = 1; /* we know 1 happened now */ unsigned long now = jiffies; for (i = 0; i < CPE_HISTORY_LENGTH; i++) { if (now - cpe_history[i] <= HZ) count++; } IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH); if (count >= CPE_HISTORY_LENGTH) { cpe_poll_enabled = 1; spin_unlock(&cpe_history_lock); disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR)); /* * Corrected errors will still be corrected, but * make sure there's a log somewhere that indicates * something is generating more than we can handle. */ printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n"); mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL); /* lock already released, get out now */ goto out; } else { cpe_history[index++] = now; if (index == CPE_HISTORY_LENGTH) index = 0; } } spin_unlock(&cpe_history_lock); out: /* Get the CPE error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); local_irq_disable(); return IRQ_HANDLED; } /* * ia64_mca_register_cpev * * Register the corrected platform error vector with SAL. * * Inputs * cpev Corrected Platform Error Vector number * * Outputs * None */ void ia64_mca_register_cpev (int cpev) { /* Register the CPE interrupt vector with SAL */ struct ia64_sal_retval isrv; isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0); if (isrv.status) { printk(KERN_ERR "Failed to register Corrected Platform " "Error interrupt vector with SAL (status %ld)\n", isrv.status); return; } IA64_MCA_DEBUG("%s: corrected platform error " "vector %#x registered\n", __func__, cpev); } /* * ia64_mca_cmc_vector_setup * * Setup the corrected machine check vector register in the processor. * (The interrupt is masked on boot. ia64_mca_late_init unmask this.) * This function is invoked on a per-processor basis. 
* * Inputs * None * * Outputs * None */ void ia64_mca_cmc_vector_setup (void) { cmcv_reg_t cmcv; cmcv.cmcv_regval = 0; cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */ cmcv.cmcv_vector = IA64_CMC_VECTOR; ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n", __func__, smp_processor_id(), IA64_CMC_VECTOR); IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); } /* * ia64_mca_cmc_vector_disable * * Mask the corrected machine check vector register in the processor. * This function is invoked on a per-processor basis. * * Inputs * dummy(unused) * * Outputs * None */ static void ia64_mca_cmc_vector_disable (void *dummy) { cmcv_reg_t cmcv; cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n", __func__, smp_processor_id(), cmcv.cmcv_vector); } /* * ia64_mca_cmc_vector_enable * * Unmask the corrected machine check vector register in the processor. * This function is invoked on a per-processor basis. * * Inputs * dummy(unused) * * Outputs * None */ static void ia64_mca_cmc_vector_enable (void *dummy) { cmcv_reg_t cmcv; cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n", __func__, smp_processor_id(), cmcv.cmcv_vector); } /* * ia64_mca_cmc_vector_disable_keventd * * Called via keventd (smp_call_function() is not safe in interrupt context) to * disable the cmc interrupt vector. */ static void ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) { on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0); } /* * ia64_mca_cmc_vector_enable_keventd * * Called via keventd (smp_call_function() is not safe in interrupt context) to * enable the cmc interrupt vector. */ static void ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) { on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0); } /* * ia64_mca_wakeup * * Send an inter-cpu interrupt to wake-up a particular cpu. * * Inputs : cpuid * Outputs : None */ static void ia64_mca_wakeup(int cpu) { ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0); } /* * ia64_mca_wakeup_all * * Wakeup all the slave cpus which have rendez'ed previously. * * Inputs : None * Outputs : None */ static void ia64_mca_wakeup_all(void) { int cpu; /* Clear the Rendez checkin flag for all cpus */ for_each_online_cpu(cpu) { if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE) ia64_mca_wakeup(cpu); } } /* * ia64_mca_rendez_interrupt_handler * * This is handler used to put slave processors into spinloop * while the monarch processor does the mca handling and later * wake each slave up once the monarch is done. The state * IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed * in SAL. The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates * the cpu has come out of OS rendezvous. 
* * Inputs : None * Outputs : None */ static irqreturn_t ia64_mca_rendez_int_handler(int rendez_irq, void *arg) { unsigned long flags; int cpu = smp_processor_id(); struct ia64_mca_notify_die nd = { .sos = NULL, .monarch_cpu = &monarch_cpu }; /* Mask all interrupts */ local_irq_save(flags); NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; /* Register with the SAL monarch that the slave has * reached SAL */ ia64_sal_mc_rendez(); NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1); /* Wait for the monarch cpu to exit. */ while (monarch_cpu != -1) cpu_relax(); /* spin until monarch leaves */ NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; /* Enable all interrupts */ local_irq_restore(flags); return IRQ_HANDLED; } /* * ia64_mca_wakeup_int_handler * * The interrupt handler for processing the inter-cpu interrupt to the * slave cpu which was spinning in the rendez loop. * Since this spinning is done by turning off the interrupts and * polling on the wakeup-interrupt bit in the IRR, there is * nothing useful to be done in the handler. * * Inputs : wakeup_irq (Wakeup-interrupt bit) * arg (Interrupt handler specific argument) * Outputs : None * */ static irqreturn_t ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg) { return IRQ_HANDLED; } /* Function pointer for extra MCA recovery */ int (*ia64_mca_ucmc_extension) (void*,struct ia64_sal_os_state*) = NULL; int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)) { if (ia64_mca_ucmc_extension) return 1; ia64_mca_ucmc_extension = fn; return 0; } void ia64_unreg_MCA_extension(void) { if (ia64_mca_ucmc_extension) ia64_mca_ucmc_extension = NULL; } EXPORT_SYMBOL(ia64_reg_MCA_extension); EXPORT_SYMBOL(ia64_unreg_MCA_extension); static inline void copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat) { u64 fslot, tslot, nat; *tr = *fr; fslot = ((unsigned long)fr >> 3) & 63; tslot = ((unsigned long)tr >> 3) & 63; *tnat &= ~(1UL << tslot); nat = (fnat >> fslot) & 1; *tnat |= (nat << tslot); } /* Change the comm field on the MCA/INT task to include the pid that * was interrupted, it makes for easier debugging. If that pid was 0 * (swapper or nested MCA/INIT) then use the start of the previous comm * field suffixed with its cpu. 
*/ static void ia64_mca_modify_comm(const struct task_struct *previous_current) { char *p, comm[sizeof(current->comm)]; if (previous_current->pid) snprintf(comm, sizeof(comm), "%s %d", current->comm, previous_current->pid); else { int l; if ((p = strchr(previous_current->comm, ' '))) l = p - previous_current->comm; else l = strlen(previous_current->comm); snprintf(comm, sizeof(comm), "%s %*s %d", current->comm, l, previous_current->comm, task_thread_info(previous_current)->cpu); } memcpy(current->comm, comm, sizeof(current->comm)); } static void finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, unsigned long *nat) { const struct pal_min_state_area *ms = sos->pal_min_state; const u64 *bank; /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use * pmsa_{xip,xpsr,xfs} */ if (ia64_psr(regs)->ic) { regs->cr_iip = ms->pmsa_iip; regs->cr_ipsr = ms->pmsa_ipsr; regs->cr_ifs = ms->pmsa_ifs; } else { regs->cr_iip = ms->pmsa_xip; regs->cr_ipsr = ms->pmsa_xpsr; regs->cr_ifs = ms->pmsa_xfs; sos->iip = ms->pmsa_iip; sos->ipsr = ms->pmsa_ipsr; sos->ifs = ms->pmsa_ifs; } regs->pr = ms->pmsa_pr; regs->b0 = ms->pmsa_br0; regs->ar_rsc = ms->pmsa_rsc; copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat); copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat); copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat); copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat); copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat); copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat); copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat); copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat); copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat); copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat); copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat); if (ia64_psr(regs)->bn) bank = ms->pmsa_bank1_gr; else bank = ms->pmsa_bank0_gr; copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat); copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat); copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat); copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat); copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat); copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat); copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat); copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat); copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat); copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat); copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat); copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat); copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat); copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat); copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat); copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat); } /* On entry to this routine, we are running on the per cpu stack, see * mca_asm.h. The original stack has not been touched by this event. Some of * the original stack's registers will be in the RBS on this stack. This stack * also contains a partial pt_regs and switch_stack, the rest of the data is in * PAL minstate. * * The first thing to do is modify the original stack to look like a blocked * task so we can run backtrace on the original task. Also mark the per cpu * stack as current to ensure that we use the correct task state, it also means * that we can do backtrace on the MCA/INIT handler code itself. 
*/ static struct task_struct * ia64_mca_modify_original_stack(struct pt_regs *regs, const struct switch_stack *sw, struct ia64_sal_os_state *sos, const char *type) { char *p; ia64_va va; extern char ia64_leave_kernel[]; /* Need asm address, not function descriptor */ const struct pal_min_state_area *ms = sos->pal_min_state; struct task_struct *previous_current; struct pt_regs *old_regs; struct switch_stack *old_sw; unsigned size = sizeof(struct pt_regs) + sizeof(struct switch_stack) + 16; unsigned long *old_bspstore, *old_bsp; unsigned long *new_bspstore, *new_bsp; unsigned long old_unat, old_rnat, new_rnat, nat; u64 slots, loadrs = regs->loadrs; u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1]; u64 ar_bspstore = regs->ar_bspstore; u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16); const char *msg; int cpu = smp_processor_id(); previous_current = curr_task(cpu); ia64_set_curr_task(cpu, current); if ((p = strchr(current->comm, ' '))) *p = '\0'; /* Best effort attempt to cope with MCA/INIT delivered while in * physical mode. */ regs->cr_ipsr = ms->pmsa_ipsr; if (ia64_psr(regs)->dt == 0) { va.l = r12; if (va.f.reg == 0) { va.f.reg = 7; r12 = va.l; } va.l = r13; if (va.f.reg == 0) { va.f.reg = 7; r13 = va.l; } } if (ia64_psr(regs)->rt == 0) { va.l = ar_bspstore; if (va.f.reg == 0) { va.f.reg = 7; ar_bspstore = va.l; } va.l = ar_bsp; if (va.f.reg == 0) { va.f.reg = 7; ar_bsp = va.l; } } /* mca_asm.S ia64_old_stack() cannot assume that the dirty registers * have been copied to the old stack, the old stack may fail the * validation tests below. So ia64_old_stack() must restore the dirty * registers from the new stack. The old and new bspstore probably * have different alignments, so loadrs calculated on the old bsp * cannot be used to restore from the new bsp. Calculate a suitable * loadrs for the new stack and save it in the new pt_regs, where * ia64_old_stack() can get it. */ old_bspstore = (unsigned long *)ar_bspstore; old_bsp = (unsigned long *)ar_bsp; slots = ia64_rse_num_regs(old_bspstore, old_bsp); new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET); new_bsp = ia64_rse_skip_regs(new_bspstore, slots); regs->loadrs = (new_bsp - new_bspstore) * 8 << 16; /* Verify the previous stack state before we change it */ if (user_mode(regs)) { msg = "occurred in user space"; /* previous_current is guaranteed to be valid when the task was * in user space, so ... */ ia64_mca_modify_comm(previous_current); goto no_mod; } if (r13 != sos->prev_IA64_KR_CURRENT) { msg = "inconsistent previous current and r13"; goto no_mod; } if (!mca_recover_range(ms->pmsa_iip)) { if ((r12 - r13) >= KERNEL_STACK_SIZE) { msg = "inconsistent r12 and r13"; goto no_mod; } if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { msg = "inconsistent ar.bspstore and r13"; goto no_mod; } va.p = old_bspstore; if (va.f.reg < 5) { msg = "old_bspstore is in the wrong region"; goto no_mod; } if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { msg = "inconsistent ar.bsp and r13"; goto no_mod; } size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; if (ar_bspstore + size > r12) { msg = "no room for blocked state"; goto no_mod; } } ia64_mca_modify_comm(previous_current); /* Make the original task look blocked. First stack a struct pt_regs, * describing the state at the time of interrupt. mca_asm.S built a * partial pt_regs, copy it and fill in the blanks using minstate. 
*/ p = (char *)r12 - sizeof(*regs); old_regs = (struct pt_regs *)p; memcpy(old_regs, regs, sizeof(*regs)); old_regs->loadrs = loadrs; old_unat = old_regs->ar_unat; finish_pt_regs(old_regs, sos, &old_unat); /* Next stack a struct switch_stack. mca_asm.S built a partial * switch_stack, copy it and fill in the blanks using pt_regs and * minstate. * * In the synthesized switch_stack, b0 points to ia64_leave_kernel, * ar.pfs is set to 0. * * unwind.c::unw_unwind() does special processing for interrupt frames. * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate * is clear then unw_unwind() does _not_ adjust bsp over pt_regs. Not * that this is documented, of course. Set PRED_NON_SYSCALL in the * switch_stack on the original stack so it will unwind correctly when * unwind.c reads pt_regs. * * thread.ksp is updated to point to the synthesized switch_stack. */ p -= sizeof(struct switch_stack); old_sw = (struct switch_stack *)p; memcpy(old_sw, sw, sizeof(*sw)); old_sw->caller_unat = old_unat; old_sw->ar_fpsr = old_regs->ar_fpsr; copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat); copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat); copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat); copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat); old_sw->b0 = (u64)ia64_leave_kernel; old_sw->b1 = ms->pmsa_br1; old_sw->ar_pfs = 0; old_sw->ar_unat = old_unat; old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL); previous_current->thread.ksp = (u64)p - 16; /* Finally copy the original stack's registers back to its RBS. * Registers from ar.bspstore through ar.bsp at the time of the event * are in the current RBS, copy them back to the original stack. The * copy must be done register by register because the original bspstore * and the current one have different alignments, so the saved RNAT * data occurs at different places. * * mca_asm does cover, so the old_bsp already includes all registers at * the time of MCA/INIT. It also does flushrs, so all registers before * this function have been written to backing store on the MCA/INIT * stack. */ new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore)); old_rnat = regs->ar_rnat; while (slots--) { if (ia64_rse_is_rnat_slot(new_bspstore)) { new_rnat = ia64_get_rnat(new_bspstore++); } if (ia64_rse_is_rnat_slot(old_bspstore)) { *old_bspstore++ = old_rnat; old_rnat = 0; } nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL; old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore)); old_rnat |= (nat << ia64_rse_slot_num(old_bspstore)); *old_bspstore++ = *new_bspstore++; } old_sw->ar_bspstore = (unsigned long)old_bspstore; old_sw->ar_rnat = old_rnat; sos->prev_task = previous_current; return previous_current; no_mod: mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", smp_processor_id(), type, msg); old_unat = regs->ar_unat; finish_pt_regs(regs, sos, &old_unat); return previous_current; } /* The monarch/slave interaction is based on monarch_cpu and requires that all * slaves have entered rendezvous before the monarch leaves. If any cpu has * not entered rendezvous yet then wait a bit. The assumption is that any * slave that has not rendezvoused after a reasonable time is never going to do * so. In this context, slave includes cpus that respond to the MCA rendezvous * interrupt, as well as cpus that receive the INIT slave event. 
*/ static void ia64_wait_for_slaves(int monarch, const char *type) { int c, i , wait; /* * wait 5 seconds total for slaves (arbitrary) */ for (i = 0; i < 5000; i++) { wait = 0; for_each_online_cpu(c) { if (c == monarch) continue; if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) { udelay(1000); /* short wait */ wait = 1; break; } } if (!wait) goto all_in; } /* * Maybe slave(s) dead. Print buffered messages immediately. */ ia64_mlogbuf_finish(0); mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type); for_each_online_cpu(c) { if (c == monarch) continue; if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) mprintk(" %d", c); } mprintk("\n"); return; all_in: mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type); return; } /* mca_insert_tr * * Switch rid when TR reload and needed! * iord: 1: itr, 2: itr; * */ static void mca_insert_tr(u64 iord) { int i; u64 old_rr; struct ia64_tr_entry *p; unsigned long psr; int cpu = smp_processor_id(); if (!ia64_idtrs[cpu]) return; psr = ia64_clear_ic(); for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX; if (p->pte & 0x1) { old_rr = ia64_get_rr(p->ifa); if (old_rr != p->rr) { ia64_set_rr(p->ifa, p->rr); ia64_srlz_d(); } ia64_ptr(iord, p->ifa, p->itir >> 2); ia64_srlz_i(); if (iord & 0x1) { ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2); ia64_srlz_i(); } if (iord & 0x2) { ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2); ia64_srlz_i(); } if (old_rr != p->rr) { ia64_set_rr(p->ifa, old_rr); ia64_srlz_d(); } } } ia64_set_psr(psr); } /* * ia64_mca_handler * * This is uncorrectable machine check handler called from OS_MCA * dispatch code which is in turn called from SAL_CHECK(). * This is the place where the core of OS MCA handling is done. * Right now the logs are extracted and displayed in a well-defined * format. This handler code is supposed to be run only on the * monarch processor. Once the monarch is done with MCA handling * further MCA logging is enabled by clearing logs. * Monarch also has the duty of sending wakeup-IPIs to pull the * slave processors out of rendezvous spinloop. * * If multiple processors call into OS_MCA, the first will become * the monarch. Subsequent cpus will be recorded in the mca_cpu * bitmask. After the first monarch has processed its MCA, it * will wake up the next cpu in the mca_cpu bitmask and then go * into the rendezvous loop. When all processors have serviced * their MCA, the last monarch frees up the rest of the processors. */ void ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, struct ia64_sal_os_state *sos) { int recover, cpu = smp_processor_id(); struct task_struct *previous_current; struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover }; static atomic_t mca_count; static cpumask_t mca_cpu; if (atomic_add_return(1, &mca_count) == 1) { monarch_cpu = cpu; sos->monarch = 1; } else { cpumask_set_cpu(cpu, &mca_cpu); sos->monarch = 0; } mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d " "monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch); previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA"); NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; if (sos->monarch) { ia64_wait_for_slaves(cpu, "MCA"); /* Wakeup all the processors which are spinning in the * rendezvous loop. 
They will leave SAL, then spin in the OS * with interrupts disabled until this monarch cpu leaves the * MCA handler. That gets control back to the OS so we can * backtrace the other cpus, backtrace when spinning in SAL * does not work. */ ia64_mca_wakeup_all(); } else { while (cpumask_test_cpu(cpu, &mca_cpu)) cpu_relax(); /* spin until monarch wakes us */ } NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1); /* Get the MCA error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); /* MCA error recovery */ recover = (ia64_mca_ucmc_extension && ia64_mca_ucmc_extension( IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), sos)); if (recover) { sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); rh->severity = sal_log_severity_corrected; ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); sos->os_status = IA64_MCA_CORRECTED; } else { /* Dump buffered message to console */ ia64_mlogbuf_finish(1); } if (__this_cpu_read(ia64_mca_tr_reload)) { mca_insert_tr(0x1); /*Reload dynamic itrs*/ mca_insert_tr(0x2); /*Reload dynamic itrs*/ } NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1); if (atomic_dec_return(&mca_count) > 0) { int i; /* wake up the next monarch cpu, * and put this cpu in the rendez loop. */ for_each_online_cpu(i) { if (cpumask_test_cpu(i, &mca_cpu)) { monarch_cpu = i; cpumask_clear_cpu(i, &mca_cpu); /* wake next cpu */ while (monarch_cpu != -1) cpu_relax(); /* spin until last cpu leaves */ ia64_set_curr_task(cpu, previous_current); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; return; } } } ia64_set_curr_task(cpu, previous_current); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; monarch_cpu = -1; /* This frees the slaves and previous monarchs */ } static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd); static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd); /* * ia64_mca_cmc_int_handler * * This is corrected machine check interrupt handler. * Right now the logs are extracted and displayed in a well-defined * format. * * Inputs * interrupt number * client data arg ptr * * Outputs * None */ static irqreturn_t ia64_mca_cmc_int_handler(int cmc_irq, void *arg) { static unsigned long cmc_history[CMC_HISTORY_LENGTH]; static int index; static DEFINE_SPINLOCK(cmc_history_lock); IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", __func__, cmc_irq, smp_processor_id()); /* SAL spec states this should run w/ interrupts enabled */ local_irq_enable(); spin_lock(&cmc_history_lock); if (!cmc_polling_enabled) { int i, count = 1; /* we know 1 happened now */ unsigned long now = jiffies; for (i = 0; i < CMC_HISTORY_LENGTH; i++) { if (now - cmc_history[i] <= HZ) count++; } IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH); if (count >= CMC_HISTORY_LENGTH) { cmc_polling_enabled = 1; spin_unlock(&cmc_history_lock); /* If we're being hit with CMC interrupts, we won't * ever execute the schedule_work() below. Need to * disable CMC interrupts on this processor now. */ ia64_mca_cmc_vector_disable(NULL); schedule_work(&cmc_disable_work); /* * Corrected errors will still be corrected, but * make sure there's a log somewhere that indicates * something is generating more than we can handle. 
*/ printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n"); mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); /* lock already released, get out now */ goto out; } else { cmc_history[index++] = now; if (index == CMC_HISTORY_LENGTH) index = 0; } } spin_unlock(&cmc_history_lock); out: /* Get the CMC error record and log it */ ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC); local_irq_disable(); return IRQ_HANDLED; } /* * ia64_mca_cmc_int_caller * * Triggered by sw interrupt from CMC polling routine. Calls * real interrupt handler and either triggers a sw interrupt * on the next cpu or does cleanup at the end. * * Inputs * interrupt number * client data arg ptr * Outputs * handled */ static irqreturn_t ia64_mca_cmc_int_caller(int cmc_irq, void *arg) { static int start_count = -1; unsigned int cpuid; cpuid = smp_processor_id(); /* If first cpu, update count */ if (start_count == -1) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC); ia64_mca_cmc_int_handler(cmc_irq, arg); cpuid = cpumask_next(cpuid+1, cpu_online_mask); if (cpuid < nr_cpu_ids) { ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } else { /* If no log record, switch out of polling mode */ if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) { printk(KERN_WARNING "Returning to interrupt driven CMC handler\n"); schedule_work(&cmc_enable_work); cmc_polling_enabled = 0; } else { mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); } start_count = -1; } return IRQ_HANDLED; } /* * ia64_mca_cmc_poll * * Poll for Corrected Machine Checks (CMCs) * * Inputs : dummy(unused) * Outputs : None * */ static void ia64_mca_cmc_poll (struct timer_list *unused) { /* Trigger a CMC interrupt cascade */ ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); } /* * ia64_mca_cpe_int_caller * * Triggered by sw interrupt from CPE polling routine. Calls * real interrupt handler and either triggers a sw interrupt * on the next cpu or does cleanup at the end. * * Inputs * interrupt number * client data arg ptr * Outputs * handled */ static irqreturn_t ia64_mca_cpe_int_caller(int cpe_irq, void *arg) { static int start_count = -1; static int poll_time = MIN_CPE_POLL_INTERVAL; unsigned int cpuid; cpuid = smp_processor_id(); /* If first cpu, update count */ if (start_count == -1) start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE); ia64_mca_cpe_int_handler(cpe_irq, arg); cpuid = cpumask_next(cpuid+1, cpu_online_mask); if (cpuid < NR_CPUS) { ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); } else { /* * If a log was recorded, increase our polling frequency, * otherwise, backoff or return to interrupt mode. */ if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) { poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2); } else if (cpe_vector < 0) { poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2); } else { poll_time = MIN_CPE_POLL_INTERVAL; printk(KERN_WARNING "Returning to interrupt driven CPE handler\n"); enable_irq(local_vector_to_irq(IA64_CPE_VECTOR)); cpe_poll_enabled = 0; } if (cpe_poll_enabled) mod_timer(&cpe_poll_timer, jiffies + poll_time); start_count = -1; } return IRQ_HANDLED; } /* * ia64_mca_cpe_poll * * Poll for Corrected Platform Errors (CPEs), trigger interrupt * on first cpu, from there it will trickle through all the cpus. 
* * Inputs : dummy(unused) * Outputs : None * */ static void ia64_mca_cpe_poll (struct timer_list *unused) { /* Trigger a CPE interrupt cascade */ ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); } static int default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data) { int c; struct task_struct *g, *t; if (val != DIE_INIT_MONARCH_PROCESS) return NOTIFY_DONE; #ifdef CONFIG_KEXEC if (atomic_read(&kdump_in_progress)) return NOTIFY_DONE; #endif /* * FIXME: mlogbuf will brim over with INIT stack dumps. * To enable show_stack from INIT, we use oops_in_progress which should * be used in real oops. This would cause something wrong after INIT. */ BREAK_LOGLEVEL(console_loglevel); ia64_mlogbuf_dump_from_init(); printk(KERN_ERR "Processes interrupted by INIT -"); for_each_online_cpu(c) { struct ia64_sal_os_state *s; t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET); s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET); g = s->prev_task; if (g) { if (g->pid) printk(" %d", g->pid); else printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g); } } printk("\n\n"); if (read_trylock(&tasklist_lock)) { for_each_process_thread(g, t) { printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); show_stack(t, NULL, KERN_DEFAULT); } read_unlock(&tasklist_lock); } /* FIXME: This will not restore zapped printk locks. */ RESTORE_LOGLEVEL(console_loglevel); return NOTIFY_DONE; } /* * C portion of the OS INIT handler * * Called from ia64_os_init_dispatch * * Inputs: pointer to pt_regs where processor info was saved. SAL/OS state for * this event. This code is used for both monarch and slave INIT events, see * sos->monarch. * * All INIT events switch to the INIT stack and change the previous process to * blocked status. If one of the INIT events is the monarch then we are * probably processing the nmi button/command. Use the monarch cpu to dump all * the processes. The slave INIT events all spin until the monarch cpu * returns. We can also get INIT slave events for MCA, in which case the MCA * process is the monarch. */ void ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, struct ia64_sal_os_state *sos) { static atomic_t slaves; static atomic_t monarchs; struct task_struct *previous_current; int cpu = smp_processor_id(); struct ia64_mca_notify_die nd = { .sos = sos, .monarch_cpu = &monarch_cpu }; NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0); mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch); salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0); previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT"); sos->os_status = IA64_INIT_RESUME; /* FIXME: Workaround for broken proms that drive all INIT events as * slaves. The last slave that enters is promoted to be a monarch. * Remove this code in September 2006, that gives platforms a year to * fix their proms and get their customers updated. */ if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", __func__, cpu); atomic_dec(&slaves); sos->monarch = 1; } /* FIXME: Workaround for broken proms that drive all INIT events as * monarchs. Second and subsequent monarchs are demoted to slaves. * Remove this code in September 2006, that gives platforms a year to * fix their proms and get their customers updated. 
*/ if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", __func__, cpu); atomic_dec(&monarchs); sos->monarch = 0; } if (!sos->monarch) { ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; #ifdef CONFIG_KEXEC while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress)) udelay(1000); #else while (monarch_cpu == -1) cpu_relax(); /* spin until monarch enters */ #endif NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1); NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1); #ifdef CONFIG_KEXEC while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress)) udelay(1000); #else while (monarch_cpu != -1) cpu_relax(); /* spin until monarch leaves */ #endif NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1); mprintk("Slave on cpu %d returning to normal service.\n", cpu); ia64_set_curr_task(cpu, previous_current); ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; atomic_dec(&slaves); return; } monarch_cpu = cpu; NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1); /* * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be * generated via the BMC's command-line interface, but since the console is on the * same serial line, the user will need some time to switch out of the BMC before * the dump begins. */ mprintk("Delaying for 5 seconds...\n"); udelay(5*1000000); ia64_wait_for_slaves(cpu, "INIT"); /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through * to default_monarch_init_process() above and just print all the * tasks. */ NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1); NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1); mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); atomic_dec(&monarchs); ia64_set_curr_task(cpu, previous_current); monarch_cpu = -1; return; } static int __init ia64_mca_disable_cpe_polling(char *str) { cpe_poll_enabled = 0; return 1; } __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling); /* Minimal format of the MCA/INIT stacks. The pseudo processes that run on * these stacks can never sleep, they cannot return from the kernel to user * space, they do not appear in a normal ps listing. So there is no need to * format most of the fields. */ static void format_mca_init_stack(void *mca_data, unsigned long offset, const char *type, int cpu) { struct task_struct *p = (struct task_struct *)((char *)mca_data + offset); struct thread_info *ti; memset(p, 0, KERNEL_STACK_SIZE); ti = task_thread_info(p); ti->flags = _TIF_MCA_INIT; ti->preempt_count = 1; ti->task = p; ti->cpu = cpu; p->stack = ti; p->__state = TASK_UNINTERRUPTIBLE; cpumask_set_cpu(cpu, &p->cpus_mask); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); strscpy(p->comm, type, sizeof(p->comm)-1); } /* Caller prevents this from being called after init */ static void * __ref mca_bootmem(void) { return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE); } /* Do per-CPU MCA-related initialization. */ void ia64_mca_cpu_init(void *cpu_data) { void *pal_vaddr; void *data; long sz = sizeof(struct ia64_mca_cpu); int cpu = smp_processor_id(); static int first_time = 1; /* * Structure will already be allocated if cpu has been online, * then offlined. 
*/ if (__per_cpu_mca[cpu]) { data = __va(__per_cpu_mca[cpu]); } else { if (first_time) { data = mca_bootmem(); first_time = 0; } else data = (void *)__get_free_pages(GFP_ATOMIC, get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); } format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack), "MCA", cpu); format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack), "INIT", cpu); __this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data))); /* * Stash away a copy of the PTE needed to map the per-CPU page. * We may need it during MCA recovery. */ __this_cpu_write(ia64_mca_per_cpu_pte, pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL))); /* * Also, stash away a copy of the PAL address and the PTE * needed to map it. */ pal_vaddr = efi_get_pal_addr(); if (!pal_vaddr) return; __this_cpu_write(ia64_mca_pal_base, GRANULEROUNDDOWN((unsigned long) pal_vaddr)); __this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr), PAGE_KERNEL))); } static int ia64_mca_cpu_online(unsigned int cpu) { unsigned long flags; local_irq_save(flags); if (!cmc_polling_enabled) ia64_mca_cmc_vector_enable(NULL); local_irq_restore(flags); return 0; } /* * ia64_mca_init * * Do all the system level mca specific initialization. * * 1. Register spinloop and wakeup request interrupt vectors * * 2. Register OS_MCA handler entry point * * 3. Register OS_INIT handler entry point * * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS. * * Note that this initialization is done very early before some kernel * services are available. * * Inputs : None * * Outputs : None */ void __init ia64_mca_init(void) { ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch; ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave; ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; int i; long rc; struct ia64_sal_retval isrv; unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ static struct notifier_block default_init_monarch_nb = { .notifier_call = default_monarch_init_process, .priority = 0/* we need to notified last */ }; IA64_MCA_DEBUG("%s: begin\n", __func__); /* Clear the Rendez checkin flag for all cpus */ for(i = 0 ; i < NR_CPUS; i++) ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; /* * Register the rendezvous spinloop and wakeup mechanism with SAL */ /* Register the rendezvous interrupt vector with SAL */ while (1) { isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT, SAL_MC_PARAM_MECHANISM_INT, IA64_MCA_RENDEZ_VECTOR, timeout, SAL_MC_PARAM_RZ_ALWAYS); rc = isrv.status; if (rc == 0) break; if (rc == -2) { printk(KERN_INFO "Increasing MCA rendezvous timeout from " "%ld to %ld milliseconds\n", timeout, isrv.v0); timeout = isrv.v0; NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0); continue; } printk(KERN_ERR "Failed to register rendezvous interrupt " "with SAL (status %ld)\n", rc); return; } /* Register the wakeup interrupt vector with SAL */ isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP, SAL_MC_PARAM_MECHANISM_INT, IA64_MCA_WAKEUP_VECTOR, 0, 0); rc = isrv.status; if (rc) { printk(KERN_ERR "Failed to register wakeup interrupt with SAL " "(status %ld)\n", rc); return; } IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__); ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); /* * XXX - disable SAL checksum by setting size to 0; should be * ia64_tpa(ia64_os_mca_dispatch_end) - 
ia64_tpa(ia64_os_mca_dispatch); */ ia64_mc_info.imi_mca_handler_size = 0; /* Register the os mca handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp), ia64_mc_info.imi_mca_handler_size, 0, 0, 0))) { printk(KERN_ERR "Failed to register OS MCA handler with SAL " "(status %ld)\n", rc); return; } IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__, ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); /* * XXX - disable SAL checksum by setting size to 0, should be * size of the actual init handler in mca_asm.S. */ ia64_mc_info.imi_monarch_init_handler = ia64_tpa(init_hldlr_ptr_monarch->fp); ia64_mc_info.imi_monarch_init_handler_size = 0; ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); ia64_mc_info.imi_slave_init_handler_size = 0; IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__, ia64_mc_info.imi_monarch_init_handler); /* Register the os init handler with SAL */ if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, ia64_mc_info.imi_monarch_init_handler, ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_monarch_init_handler_size, ia64_mc_info.imi_slave_init_handler, ia64_tpa(ia64_getreg(_IA64_REG_GP)), ia64_mc_info.imi_slave_init_handler_size))) { printk(KERN_ERR "Failed to register m/s INIT handlers with SAL " "(status %ld)\n", rc); return; } if (register_die_notifier(&default_init_monarch_nb)) { printk(KERN_ERR "Failed to register default monarch INIT process\n"); return; } IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); /* Initialize the areas set aside by the OS to buffer the * platform/processor error states for MCA/INIT/CMC * handling. */ ia64_log_init(SAL_INFO_TYPE_MCA); ia64_log_init(SAL_INFO_TYPE_INIT); ia64_log_init(SAL_INFO_TYPE_CMC); ia64_log_init(SAL_INFO_TYPE_CPE); mca_init = 1; printk(KERN_INFO "MCA related initialization done\n"); } /* * These pieces cannot be done in ia64_mca_init() because it is called before * early_irq_init() which would wipe out our percpu irq registrations. But we * cannot leave them until ia64_mca_late_init() because by then all the other * processors have been brought online and have set their own CMC vectors to * point at a non-existant action. Called from arch_early_irq_init(). */ void __init ia64_mca_irq_init(void) { /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). */ register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0, "cmc_hndlr"); register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0, "cmc_poll"); ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ /* Setup the MCA rendezvous interrupt vector */ register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler, 0, "mca_rdzv"); /* Setup the MCA wakeup interrupt vector */ register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler, 0, "mca_wkup"); /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0, "cpe_poll"); } /* * ia64_mca_late_init * * Opportunity to setup things that require initialization later * than ia64_mca_init. Setup a timer to poll for CPEs if the * platform doesn't support an interrupt driven mechanism. 
* * Inputs : None * Outputs : Status */ static int __init ia64_mca_late_init(void) { if (!mca_init) return 0; /* Setup the CMCI/P vector and handler */ timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0); /* Unmask/enable the vector */ cmc_polling_enabled = 0; cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online", ia64_mca_cpu_online, NULL); IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__); /* Setup the CPEI/P vector and handler */ cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0); { unsigned int irq; if (cpe_vector >= 0) { /* If platform supports CPEI, enable the irq. */ irq = local_vector_to_irq(cpe_vector); if (irq > 0) { cpe_poll_enabled = 0; irq_set_status_flags(irq, IRQ_PER_CPU); if (request_irq(irq, ia64_mca_cpe_int_handler, 0, "cpe_hndlr", NULL)) pr_err("Failed to register cpe_hndlr interrupt\n"); ia64_cpe_irq = irq; ia64_mca_register_cpev(cpe_vector); IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __func__); return 0; } printk(KERN_ERR "%s: Failed to find irq for CPE " "interrupt handler, vector %d\n", __func__, cpe_vector); } /* If platform doesn't support CPEI, get the timer going. */ if (cpe_poll_enabled) { ia64_mca_cpe_poll(0UL); IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__); } } return 0; } device_initcall(ia64_mca_late_init);
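/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * ia64_init_handler() above works around firmware that delivers all INIT
 * events as slaves (or all as monarchs) by promoting the last slave to
 * check in and demoting every monarch after the first.  The user-space
 * sketch below reproduces only that election logic with C11 atomics so
 * the invariant -- exactly one monarch per event -- is easy to see.  The
 * thread count and the "firmware says monarch" flags are invented for
 * the demonstration.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define NCPUS 4

static atomic_int slaves;
static atomic_int monarchs;
static int online_cpus = NCPUS;

/* Returns 1 if this cpu ends up as the monarch, 0 if it stays a slave. */
static int init_event(int cpu, int firmware_says_monarch)
{
	int monarch = firmware_says_monarch;

	/* Broken PROM case 1: everybody arrives as a slave. */
	if (!monarch &&
	    atomic_fetch_add(&slaves, 1) + 1 == online_cpus) {
		atomic_fetch_sub(&slaves, 1);
		monarch = 1;		/* promote the last slave */
	}

	/* Broken PROM case 2: more than one monarch shows up. */
	if (monarch && atomic_fetch_add(&monarchs, 1) + 1 > 1) {
		atomic_fetch_sub(&monarchs, 1);
		monarch = 0;		/* demote the extras */
	}
	return monarch;
}

static void *cpu_thread(void *arg)
{
	int cpu = (int)(long)arg;

	/* Pretend the firmware flagged every cpu as a slave. */
	printf("cpu %d: %s\n", cpu,
	       init_event(cpu, 0) ? "monarch" : "slave");
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	int i;

	for (i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)(long)i);
	for (i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}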
linux-master
arch/ia64/kernel/mca.c
// SPDX-License-Identifier: GPL-2.0 /* * Dynamic DMA mapping support. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/dmar.h> #include <asm/iommu.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <asm/page.h> int no_iommu __read_mostly; #ifdef CONFIG_IOMMU_DEBUG int force_iommu __read_mostly = 1; #else int force_iommu __read_mostly; #endif static int __init pci_iommu_init(void) { if (iommu_detected) intel_iommu_init(); return 0; } /* Must execute after PCI subsystem */ fs_initcall(pci_iommu_init);
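/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * pci_iommu_init() above is registered with fs_initcall() purely for
 * ordering: it must run after the PCI subsystem has been brought up,
 * and initcalls at a later level run after all initcalls at earlier
 * levels.  The toy model below shows that ordering with plain arrays of
 * function pointers; the level names and callbacks are invented for the
 * demonstration.
 */
#include <stdio.h>

typedef int (*initcall_t)(void);

static int pci_subsys_up;

static int toy_pci_subsys_init(void)
{
	pci_subsys_up = 1;
	puts("pci subsystem up");
	return 0;
}

static int toy_pci_iommu_init(void)
{
	printf("iommu init (pci up: %d)\n", pci_subsys_up);
	return 0;
}

/* One array per level; lower levels run first, like subsys before fs. */
static initcall_t subsys_initcalls[] = { toy_pci_subsys_init, NULL };
static initcall_t fs_initcalls[]     = { toy_pci_iommu_init, NULL };

static void run_level(initcall_t *level)
{
	for (; *level; level++)
		(*level)();
}

int main(void)
{
	run_level(subsys_initcalls);	/* everything PCI needs */
	run_level(fs_initcalls);	/* now it is safe to init the IOMMU */
	return 0;
}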
linux-master
arch/ia64/kernel/pci-dma.c
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific setup. * * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * 04/11/17 Ashok Raj <[email protected]> Added CPU Hotplug Support * * 2005-10-07 Keith Owens <[email protected]> * Add notify_die() hooks. */ #include <linux/cpu.h> #include <linux/pm.h> #include <linux/elf.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/personality.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/hotplug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/unistd.h> #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kdebug.h> #include <linux/utsname.h> #include <linux/resume_user_mode.h> #include <linux/rcupdate.h> #include <asm/cpu.h> #include <asm/delay.h> #include <asm/elf.h> #include <asm/irq.h> #include <asm/kexec.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/switch_to.h> #include <asm/tlbflush.h> #include <linux/uaccess.h> #include <asm/unwind.h> #include <asm/user.h> #include <asm/xtp.h> #include "entry.h" #include "sigframe.h" void (*ia64_mark_idle)(int); unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; EXPORT_SYMBOL(boot_option_idle_override); void (*pm_power_off) (void); EXPORT_SYMBOL(pm_power_off); static void ia64_do_show_stack (struct unw_frame_info *info, void *arg) { unsigned long ip, sp, bsp; const char *loglvl = arg; printk("%s\nCall Trace:\n", loglvl); do { unw_get_ip(info, &ip); if (ip == 0) break; unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); printk("%s [<%016lx>] %pS\n" " sp=%016lx bsp=%016lx\n", loglvl, ip, (void *)ip, sp, bsp); } while (unw_unwind(info) >= 0); } void show_stack (struct task_struct *task, unsigned long *sp, const char *loglvl) { if (!task) unw_init_running(ia64_do_show_stack, (void *)loglvl); else { struct unw_frame_info info; unw_init_from_blocked_task(&info, task); ia64_do_show_stack(&info, (void *)loglvl); } } void show_regs (struct pt_regs *regs) { unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; print_modules(); printk("\n"); show_regs_print_info(KERN_DEFAULT); printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n", regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(), init_utsname()->release); printk("ip is at %pS\n", (void *)ip); printk("unat: %016lx pfs : %016lx rsc : %016lx\n", regs->ar_unat, regs->ar_pfs, regs->ar_rsc); printk("rnat: %016lx bsps: %016lx pr : %016lx\n", regs->ar_rnat, regs->ar_bspstore, regs->pr); printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", regs->loadrs, regs->ar_ccv, regs->ar_fpsr); printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7); printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", regs->f6.u.bits[1], regs->f6.u.bits[0], regs->f7.u.bits[1], regs->f7.u.bits[0]); printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", regs->f8.u.bits[1], regs->f8.u.bits[0], regs->f9.u.bits[1], regs->f9.u.bits[0]); printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", regs->f10.u.bits[1], regs->f10.u.bits[0], regs->f11.u.bits[1], regs->f11.u.bits[0]); printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3); printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10); printk("r11 : %016lx r12 : %016lx r13 
: %016lx\n", regs->r11, regs->r12, regs->r13); printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16); printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19); printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22); printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25); printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28); printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31); if (user_mode(regs)) { /* print the stacked registers */ unsigned long val, *bsp, ndirty; int i, sof, is_nat = 0; sof = regs->cr_ifs & 0x7f; /* size of frame */ ndirty = (regs->loadrs >> 19); bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty); for (i = 0; i < sof; ++i) { get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i)); printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val, ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); } } else show_stack(NULL, NULL, KERN_DEFAULT); } /* local support for deprecated console_print */ void console_print(const char *s) { printk(KERN_EMERG "%s", s); } void do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall) { if (fsys_mode(current, &scr->pt)) { /* * defer signal-handling etc. until we return to * privilege-level 0. */ if (!ia64_psr(&scr->pt)->lp) ia64_psr(&scr->pt)->lp = 1; return; } /* deal with pending signal delivery */ if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_NOTIFY_SIGNAL)) { local_irq_enable(); /* force interrupt enable */ ia64_do_signal(scr, in_syscall); } if (test_thread_flag(TIF_NOTIFY_RESUME)) { local_irq_enable(); /* force interrupt enable */ resume_user_mode_work(&scr->pt); } /* copy user rbs to kernel rbs */ if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) { local_irq_enable(); /* force interrupt enable */ ia64_sync_krbs(); } local_irq_disable(); /* force interrupt disable */ } static int __init nohalt_setup(char * str) { cpu_idle_poll_ctrl(true); return 1; } __setup("nohalt", nohalt_setup); #ifdef CONFIG_HOTPLUG_CPU /* We don't actually take CPU down, just spin without interrupts. */ static inline void __noreturn play_dead(void) { unsigned int this_cpu = smp_processor_id(); /* Ack it */ __this_cpu_write(cpu_state, CPU_DEAD); max_xtp(); local_irq_disable(); idle_task_exit(); ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); /* * The above is a point of no-return, the processor is * expected to be in SAL loop now. */ BUG(); } #else static inline void __noreturn play_dead(void) { BUG(); } #endif /* CONFIG_HOTPLUG_CPU */ void __noreturn arch_cpu_idle_dead(void) { play_dead(); } void arch_cpu_idle(void) { void (*mark_idle)(int) = ia64_mark_idle; #ifdef CONFIG_SMP min_xtp(); #endif rmb(); if (mark_idle) (*mark_idle)(1); raw_safe_halt(); raw_local_irq_disable(); if (mark_idle) (*mark_idle)(0); #ifdef CONFIG_SMP normal_xtp(); #endif } void ia64_save_extra (struct task_struct *task) { if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_save_debug_regs(&task->thread.dbr[0]); } void ia64_load_extra (struct task_struct *task) { if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) ia64_load_debug_regs(&task->thread.dbr[0]); } /* * Copy the state of an ia-64 thread. 
* * We get here through the following call chain: * * from user-level: from kernel: * * <clone syscall> <some kernel call frames> * sys_clone : * kernel_clone kernel_clone * copy_thread copy_thread * * This means that the stack layout is as follows: * * +---------------------+ (highest addr) * | struct pt_regs | * +---------------------+ * | struct switch_stack | * +---------------------+ * | | * | memory stack | * | | <-- sp (lowest addr) * +---------------------+ * * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register, * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since * the stack is page aligned and the page size is at least 4KB, this is always the case, * so there is nothing to worry about. */ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long user_stack_base = args->stack; unsigned long user_stack_size = args->stack_size; unsigned long tls = args->tls; extern char ia64_ret_from_clone; struct switch_stack *child_stack, *stack; unsigned long rbs, child_rbs, rbs_size; struct pt_regs *child_ptregs; struct pt_regs *regs = current_pt_regs(); int retval = 0; child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1; child_stack = (struct switch_stack *) child_ptregs - 1; rbs = (unsigned long) current + IA64_RBS_OFFSET; child_rbs = (unsigned long) p + IA64_RBS_OFFSET; /* copy parts of thread_struct: */ p->thread.ksp = (unsigned long) child_stack - 16; /* * NOTE: The calling convention considers all floating point * registers in the high partition (fph) to be scratch. Since * the only way to get to this point is through a system call, * we know that the values in fph are all dead. Hence, there * is no need to inherit the fph state from the parent to the * child and all we have to do is to make sure that * IA64_THREAD_FPH_VALID is cleared in the child. * * XXX We could push this optimization a bit further by * clearing IA64_THREAD_FPH_VALID on ANY system call. * However, it's not clear this is worth doing. Also, it * would be a slight deviation from the normal Linux system * call behavior where scratch registers are preserved across * system calls (unless used by the system call itself). */ # define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \ | IA64_THREAD_PM_VALID) # define THREAD_FLAGS_TO_SET 0 p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | THREAD_FLAGS_TO_SET); ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ if (unlikely(args->fn)) { if (unlikely(args->idle)) { /* fork_idle() called us */ return 0; } memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack)); child_stack->r4 = (unsigned long) args->fn; child_stack->r5 = (unsigned long) args->fn_arg; /* * Preserve PSR bits, except for bits 32-34 and 37-45, * which we can't read. */ child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN; /* mark as valid, empty frame */ child_ptregs->cr_ifs = 1UL << 63; child_stack->ar_fpsr = child_ptregs->ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR); child_stack->pr = (1 << PRED_KERNEL_STACK); child_stack->ar_bspstore = child_rbs; child_stack->b0 = (unsigned long) &ia64_ret_from_clone; /* stop some PSR bits from being inherited. 
* the psr.up/psr.pp bits must be cleared on fork but inherited on execve() * therefore we must specify them explicitly here and not include them in * IA64_PSR_BITS_TO_CLEAR. */ child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); return 0; } stack = ((struct switch_stack *) regs) - 1; /* copy parent's switch_stack & pt_regs to child: */ memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack)); /* copy the parent's register backing store to the child: */ rbs_size = stack->ar_bspstore - rbs; memcpy((void *) child_rbs, (void *) rbs, rbs_size); if (clone_flags & CLONE_SETTLS) child_ptregs->r13 = tls; if (user_stack_base) { child_ptregs->r12 = user_stack_base + user_stack_size - 16; child_ptregs->ar_bspstore = user_stack_base; child_ptregs->ar_rnat = 0; child_ptregs->loadrs = 0; } child_stack->ar_bspstore = child_rbs + rbs_size; child_stack->b0 = (unsigned long) &ia64_ret_from_clone; /* stop some PSR bits from being inherited. * the psr.up/psr.pp bits must be cleared on fork but inherited on execve() * therefore we must specify them explicitly here and not include them in * IA64_PSR_BITS_TO_CLEAR. */ child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); return retval; } asmlinkage long ia64_clone(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, unsigned long parent_tidptr, unsigned long child_tidptr, unsigned long tls) { struct kernel_clone_args args = { .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), .pidfd = (int __user *)parent_tidptr, .child_tid = (int __user *)child_tidptr, .parent_tid = (int __user *)parent_tidptr, .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), .stack = stack_start, .stack_size = stack_size, .tls = tls, }; return kernel_clone(&args); } static void do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) { unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; unsigned long ip; elf_greg_t *dst = arg; struct pt_regs *pt; char nat; int i; memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */ if (unw_unwind_to_user(info) < 0) return; unw_get_sp(info, &sp); pt = (struct pt_regs *) (sp + 16); urbs_end = ia64_get_user_rbs_end(task, pt, &cfm); if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0) return; ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end), &ar_rnat); /* * coredump format: * r0-r31 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) * predicate registers (p0-p63) * b0-b7 * ip cfm user-mask * ar.rsc ar.bsp ar.bspstore ar.rnat * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec */ /* r0 is zero */ for (i = 1, mask = (1UL << i); i < 32; ++i) { unw_get_gr(info, i, &dst[i], &nat); if (nat) nat_bits |= mask; mask <<= 1; } dst[32] = nat_bits; unw_get_pr(info, &dst[33]); for (i = 0; i < 8; ++i) unw_get_br(info, i, &dst[34 + i]); unw_get_rp(info, &ip); dst[42] = ip + ia64_psr(pt)->ri; dst[43] = cfm; dst[44] = pt->cr_ipsr & IA64_PSR_UM; unw_get_ar(info, UNW_AR_RSC, &dst[45]); /* * For bsp and bspstore, unw_get_ar() would return the kernel * addresses, but we need the user-level addresses instead: */ dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! 
*/ dst[47] = pt->ar_bspstore; dst[48] = ar_rnat; unw_get_ar(info, UNW_AR_CCV, &dst[49]); unw_get_ar(info, UNW_AR_UNAT, &dst[50]); unw_get_ar(info, UNW_AR_FPSR, &dst[51]); dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */ unw_get_ar(info, UNW_AR_LC, &dst[53]); unw_get_ar(info, UNW_AR_EC, &dst[54]); unw_get_ar(info, UNW_AR_CSD, &dst[55]); unw_get_ar(info, UNW_AR_SSD, &dst[56]); } static void do_copy_regs (struct unw_frame_info *info, void *arg) { do_copy_task_regs(current, info, arg); } void ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) { unw_init_running(do_copy_regs, dst); } /* * Flush thread state. This is called when a thread does an execve(). */ void flush_thread (void) { /* drop floating-point and debug-register state if it exists: */ current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); ia64_drop_fpu(current); } /* * Clean up state associated with a thread. This is called when * the thread calls exit(). */ void exit_thread (struct task_struct *tsk) { ia64_drop_fpu(tsk); } unsigned long __get_wchan (struct task_struct *p) { struct unw_frame_info info; unsigned long ip; int count = 0; /* * Note: p may not be a blocked task (it could be current or * another process running on some other CPU. Rather than * trying to determine if p is really blocked, we just assume * it's blocked and rely on the unwind routines to fail * gracefully if the process wasn't really blocked after all. * --davidm 99/12/15 */ unw_init_from_blocked_task(&info, p); do { if (task_is_running(p)) return 0; if (unw_unwind(&info) < 0) return 0; unw_get_ip(&info, &ip); if (!in_sched_functions(ip)) return ip; } while (count++ < 16); return 0; } void cpu_halt (void) { pal_power_mgmt_info_u_t power_info[8]; unsigned long min_power; int i, min_power_state; if (ia64_pal_halt_info(power_info) != 0) return; min_power_state = 0; min_power = power_info[0].pal_power_mgmt_info_s.power_consumption; for (i = 1; i < 8; ++i) if (power_info[i].pal_power_mgmt_info_s.im && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) { min_power = power_info[i].pal_power_mgmt_info_s.power_consumption; min_power_state = i; } while (1) ia64_pal_halt(min_power_state); } void machine_shutdown(void) { smp_shutdown_nonboot_cpus(reboot_cpu); #ifdef CONFIG_KEXEC kexec_disable_iosapic(); #endif } void machine_restart (char *restart_cmd) { (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); efi_reboot(REBOOT_WARM, NULL); } void machine_halt (void) { (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0); cpu_halt(); } void machine_power_off (void) { do_kernel_power_off(); machine_halt(); } EXPORT_SYMBOL(ia64_delay_loop);
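/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * The copy_thread() comment above relies on one arithmetic fact:
 * spilling a register to address X records its NaT bit at position
 * (X & 0x1ff) / 8 of ar.unat, so copying the raw unat value between
 * parent and child is only valid when the two pt_regs areas are
 * congruent modulo 512.  The small check below just evaluates that
 * formula for a pair of addresses; the addresses themselves are
 * arbitrary.
 */
#include <stdio.h>

/* Bit of ar.unat that describes an 8-byte spill at address x. */
static unsigned int unat_bit(unsigned long x)
{
	return (unsigned int)((x & 0x1ff) / 8);
}

int main(void)
{
	unsigned long parent_pt = 0x1000;		/* page aligned       */
	unsigned long child_pt  = parent_pt + 512 * 7;	/* congruent mod 512  */
	unsigned long off;

	for (off = 0; off < 64; off += 8)
		printf("offset %3lu: parent bit %2u, child bit %2u\n",
		       off,
		       unat_bit(parent_pt + off),
		       unat_bit(child_pt + off));
	return 0;
}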
linux-master
arch/ia64/kernel/process.c
// SPDX-License-Identifier: GPL-2.0-only /* * SMP boot-related support * * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Copyright (C) 2001, 2004-2005 Intel Corp * Rohit Seth <[email protected]> * Suresh Siddha <[email protected]> * Gordon Jin <[email protected]> * Ashok Raj <[email protected]> * * 01/05/16 Rohit Seth <[email protected]> Moved SMP booting functions from smp.c to here. * 01/04/27 David Mosberger <[email protected]> Added ITC synching code. * 02/07/31 David Mosberger <[email protected]> Switch over to hotplug-CPU boot-sequence. * smp_boot_cpus()/smp_commence() is replaced by * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). * 04/06/21 Ashok Raj <[email protected]> Added CPU Hotplug Support * 04/12/26 Jin Gordon <[email protected]> * 04/12/26 Rohit Seth <[email protected]> * Add multi-threading and multi-core detection * 05/01/30 Suresh Siddha <[email protected]> * Setup cpu_sibling_map and cpu_core_map */ #include <linux/module.h> #include <linux/acpi.h> #include <linux/memblock.h> #include <linux/cpu.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/spinlock.h> #include <linux/efi.h> #include <linux/percpu.h> #include <linux/bitops.h> #include <linux/atomic.h> #include <asm/cache.h> #include <asm/current.h> #include <asm/delay.h> #include <asm/efi.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/mca.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #define SMP_DEBUG 0 #if SMP_DEBUG #define Dprintk(x...) printk(x) #else #define Dprintk(x...) #endif #ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_PERMIT_BSP_REMOVE #define bsp_remove_ok 1 #else #define bsp_remove_ok 0 #endif /* * Global array allocated for NR_CPUS at boot time */ struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; /* * start_ap in head.S uses this to store current booting cpu * info. */ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); #else #define set_brendez_area(x) #endif /* * ITC synchronization related stuff: */ #define MASTER (0) #define SLAVE (SMP_CACHE_BYTES/8) #define NUM_ROUNDS 64 /* magic value */ #define NUM_ITERS 5 /* likewise */ static DEFINE_SPINLOCK(itc_sync_lock); static volatile unsigned long go[SLAVE + 1]; #define DEBUG_ITC_SYNC 0 extern void start_ap (void); extern unsigned long ia64_iobase; struct task_struct *task_for_booting_cpu; /* * State for each CPU */ DEFINE_PER_CPU(int, cpu_state); cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; EXPORT_SYMBOL(cpu_core_map); DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); int smp_num_siblings = 1; /* which logical CPU number maps to which CPU (physical APIC ID) */ volatile int ia64_cpu_to_sapicid[NR_CPUS]; EXPORT_SYMBOL(ia64_cpu_to_sapicid); static cpumask_t cpu_callin_map; struct smp_boot_data smp_boot_data __initdata; unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */ char __initdata no_int_routing; unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? 
*/ #ifdef CONFIG_FORCE_CPEI_RETARGET #define CPEI_OVERRIDE_DEFAULT (1) #else #define CPEI_OVERRIDE_DEFAULT (0) #endif unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT; static int __init cmdl_force_cpei(char *str) { int value=0; get_option (&str, &value); force_cpei_retarget = value; return 1; } __setup("force_cpei=", cmdl_force_cpei); static int __init nointroute (char *str) { no_int_routing = 1; printk ("no_int_routing on\n"); return 1; } __setup("nointroute", nointroute); static void fix_b0_for_bsp(void) { #ifdef CONFIG_HOTPLUG_CPU int cpuid; static int fix_bsp_b0 = 1; cpuid = smp_processor_id(); /* * Cache the b0 value on the first AP that comes up */ if (!(fix_bsp_b0 && cpuid)) return; sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0]; printk ("Fixed BSP b0 value from CPU %d\n", cpuid); fix_bsp_b0 = 0; #endif } void sync_master (void *arg) { unsigned long flags, i; go[MASTER] = 0; local_irq_save(flags); { for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { while (!go[MASTER]) cpu_relax(); go[MASTER] = 0; go[SLAVE] = ia64_get_itc(); } } local_irq_restore(flags); } /* * Return the number of cycles by which our itc differs from the itc on the master * (time-keeper) CPU. A positive number indicates our itc is ahead of the master, * negative that it is behind. */ static inline long get_delta (long *rt, long *master) { unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; unsigned long tcenter, t0, t1, tm; long i; for (i = 0; i < NUM_ITERS; ++i) { t0 = ia64_get_itc(); go[MASTER] = 1; while (!(tm = go[SLAVE])) cpu_relax(); go[SLAVE] = 0; t1 = ia64_get_itc(); if (t1 - t0 < best_t1 - best_t0) best_t0 = t0, best_t1 = t1, best_tm = tm; } *rt = best_t1 - best_t0; *master = best_tm - best_t0; /* average best_t0 and best_t1 without overflow: */ tcenter = (best_t0/2 + best_t1/2); if (best_t0 % 2 + best_t1 % 2 == 2) ++tcenter; return tcenter - best_tm; } /* * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of * unaccounted-for errors (such as getting a machine check in the middle of a calibration * step). The basic idea is for the slave to ask the master what itc value it has and to * read its own itc before and after the master responds. Each iteration gives us three * timestamps: * * slave master * * t0 ---\ * ---\ * ---> * tm * /--- * /--- * t1 <--- * * * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0 * and t1. If we achieve this, the clocks are synchronized provided the interconnect * between the slave and the master is symmetric. Even if the interconnect were * asymmetric, we would still know that the synchronization error is smaller than the * roundtrip latency (t0 - t1). * * When the interconnect is quiet and symmetric, this lets us synchronize the itc to * within one or two cycles. However, we can only *guarantee* that the synchronization is * accurate to within a round-trip time, which is typically in the range of several * hundred cycles (e.g., ~500 cycles). In practice, this means that the itc's are usually * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better * than half a micro second or so. 
*/ void ia64_sync_itc (unsigned int master) { long i, delta, adj, adjust_latency = 0, done = 0; unsigned long flags, rt, master_time_stamp, bound; #if DEBUG_ITC_SYNC struct { long rt; /* roundtrip time */ long master; /* master's timestamp */ long diff; /* difference between midpoint and master's timestamp */ long lat; /* estimate of itc adjustment latency */ } t[NUM_ROUNDS]; #endif /* * Make sure local timer ticks are disabled while we sync. If * they were enabled, we'd have to worry about nasty issues * like setting the ITC ahead of (or a long time before) the * next scheduled tick. */ BUG_ON((ia64_get_itv() & (1 << 16)) == 0); go[MASTER] = 1; if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); return; } while (go[MASTER]) cpu_relax(); /* wait for master to be ready */ spin_lock_irqsave(&itc_sync_lock, flags); { for (i = 0; i < NUM_ROUNDS; ++i) { delta = get_delta(&rt, &master_time_stamp); if (delta == 0) { done = 1; /* let's lock on to this... */ bound = rt; } if (!done) { if (i > 0) { adjust_latency += -delta; adj = -delta + adjust_latency/4; } else adj = -delta; ia64_set_itc(ia64_get_itc() + adj); } #if DEBUG_ITC_SYNC t[i].rt = rt; t[i].master = master_time_stamp; t[i].diff = delta; t[i].lat = adjust_latency/4; #endif } } spin_unlock_irqrestore(&itc_sync_lock, flags); #if DEBUG_ITC_SYNC for (i = 0; i < NUM_ROUNDS; ++i) printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", t[i].rt, t[i].master, t[i].diff, t[i].lat); #endif printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, " "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); } /* * Ideally sets up per-cpu profiling hooks. Doesn't do much now... */ static inline void smp_setup_percpu_timer(void) { } static void smp_callin (void) { int cpuid, phys_id, itc_master; struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo; extern void ia64_init_itm(void); extern volatile int time_keeper_id; cpuid = smp_processor_id(); phys_id = hard_smp_processor_id(); itc_master = time_keeper_id; if (cpu_online(cpuid)) { printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n", phys_id, cpuid); BUG(); } fix_b0_for_bsp(); /* * numa_node_id() works after this. */ set_numa_node(cpu_to_node_map[cpuid]); set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); spin_lock(&vector_lock); /* Setup the per cpu irq handling data structures */ __setup_vector_irq(cpuid); notify_cpu_starting(cpuid); set_cpu_online(cpuid, true); per_cpu(cpu_state, cpuid) = CPU_ONLINE; spin_unlock(&vector_lock); smp_setup_percpu_timer(); ia64_mca_cmc_vector_setup(); /* Setup vector on AP */ local_irq_enable(); if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { /* * Synchronize the ITC with the BP. Need to do this after irqs are * enabled because ia64_sync_itc() calls smp_call_function_single(), which * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls * local_bh_enable(), which bugs out if irqs are not enabled... */ Dprintk("Going to syncup ITC with ITC Master.\n"); ia64_sync_itc(itc_master); } /* * Get our bogomips. */ ia64_init_itm(); /* * Delay calibration can be skipped if new processor is identical to the * previous processor. 
*/ last_cpuinfo = cpu_data(cpuid - 1); this_cpuinfo = local_cpu_data; if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq || last_cpuinfo->proc_freq != this_cpuinfo->proc_freq || last_cpuinfo->features != this_cpuinfo->features || last_cpuinfo->revision != this_cpuinfo->revision || last_cpuinfo->family != this_cpuinfo->family || last_cpuinfo->archrev != this_cpuinfo->archrev || last_cpuinfo->model != this_cpuinfo->model) calibrate_delay(); local_cpu_data->loops_per_jiffy = loops_per_jiffy; /* * Allow the master to continue. */ cpumask_set_cpu(cpuid, &cpu_callin_map); Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); } /* * Activate a secondary processor. head.S calls this. */ int start_secondary (void *unused) { /* Early console may use I/O ports */ ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); #ifndef CONFIG_PRINTK_TIME Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); #endif efi_map_pal_code(); cpu_init(); smp_callin(); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); return 0; } static int do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) { int timeout; task_for_booting_cpu = idle; Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); set_brendez_area(cpu); ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); /* * Wait 10s total for the AP to start */ Dprintk("Waiting on callin_map ..."); for (timeout = 0; timeout < 100000; timeout++) { if (cpumask_test_cpu(cpu, &cpu_callin_map)) break; /* It has booted */ barrier(); /* Make sure we re-read cpu_callin_map */ udelay(100); } Dprintk("\n"); if (!cpumask_test_cpu(cpu, &cpu_callin_map)) { printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); ia64_cpu_to_sapicid[cpu] = -1; set_cpu_online(cpu, false); /* was set in smp_callin() */ return -EINVAL; } return 0; } static int __init decay (char *str) { int ticks; get_option (&str, &ticks); return 1; } __setup("decay=", decay); /* * Initialize the logical CPU number to SAPICID mapping */ void __init smp_build_cpu_map (void) { int sapicid, cpu, i; int boot_cpu_id = hard_smp_processor_id(); for (cpu = 0; cpu < NR_CPUS; cpu++) { ia64_cpu_to_sapicid[cpu] = -1; } ia64_cpu_to_sapicid[0] = boot_cpu_id; init_cpu_present(cpumask_of(0)); set_cpu_possible(0, true); for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { sapicid = smp_boot_data.cpu_phys_id[i]; if (sapicid == boot_cpu_id) continue; set_cpu_present(cpu, true); set_cpu_possible(cpu, true); ia64_cpu_to_sapicid[cpu] = sapicid; cpu++; } } /* * Cycle through the APs sending Wakeup IPIs to boot each. */ void __init smp_prepare_cpus (unsigned int max_cpus) { int boot_cpu_id = hard_smp_processor_id(); /* * Initialize the per-CPU profiling counter/multiplier */ smp_setup_percpu_timer(); cpumask_set_cpu(0, &cpu_callin_map); local_cpu_data->loops_per_jiffy = loops_per_jiffy; ia64_cpu_to_sapicid[0] = boot_cpu_id; printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id); current_thread_info()->cpu = 0; /* * If SMP should be disabled, then really disable it! 
*/ if (!max_cpus) { printk(KERN_INFO "SMP mode deactivated.\n"); init_cpu_online(cpumask_of(0)); init_cpu_present(cpumask_of(0)); init_cpu_possible(cpumask_of(0)); return; } } void smp_prepare_boot_cpu(void) { set_cpu_online(smp_processor_id(), true); cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); set_numa_node(cpu_to_node_map[smp_processor_id()]); per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; } #ifdef CONFIG_HOTPLUG_CPU static inline void clear_cpu_sibling_map(int cpu) { int i; for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); for_each_cpu(i, &cpu_core_map[cpu]) cpumask_clear_cpu(cpu, &cpu_core_map[i]); per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; } static void remove_siblinginfo(int cpu) { if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { cpumask_clear_cpu(cpu, &cpu_core_map[cpu]); cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); return; } /* remove it from all sibling map's */ clear_cpu_sibling_map(cpu); } extern void fixup_irqs(void); int migrate_platform_irqs(unsigned int cpu) { int new_cpei_cpu; struct irq_data *data = NULL; const struct cpumask *mask; int retval = 0; /* * dont permit CPEI target to removed. */ if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) { printk ("CPU (%d) is CPEI Target\n", cpu); if (can_cpei_retarget()) { /* * Now re-target the CPEI to a different processor */ new_cpei_cpu = cpumask_any(cpu_online_mask); mask = cpumask_of(new_cpei_cpu); set_cpei_target_cpu(new_cpei_cpu); data = irq_get_irq_data(ia64_cpe_irq); /* * Switch for now, immediately, we need to do fake intr * as other interrupts, but need to study CPEI behaviour with * polling before making changes. */ if (data && data->chip) { data->chip->irq_disable(data); data->chip->irq_set_affinity(data, mask, false); data->chip->irq_enable(data); printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu); } } if (!data) { printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); retval = -EBUSY; } } return retval; } /* must be called with cpucontrol mutex held */ int __cpu_disable(void) { int cpu = smp_processor_id(); /* * dont permit boot processor for now */ if (cpu == 0 && !bsp_remove_ok) { printk ("Your platform does not support removal of BSP\n"); return (-EBUSY); } set_cpu_online(cpu, false); if (migrate_platform_irqs(cpu)) { set_cpu_online(cpu, true); return -EBUSY; } remove_siblinginfo(cpu); fixup_irqs(); local_flush_tlb_all(); cpumask_clear_cpu(cpu, &cpu_callin_map); return 0; } void __cpu_die(unsigned int cpu) { unsigned int i; for (i = 0; i < 100; i++) { /* They ack this in play_dead by setting CPU_DEAD */ if (per_cpu(cpu_state, cpu) == CPU_DEAD) { printk ("CPU %d is now offline\n", cpu); return; } msleep(100); } printk(KERN_ERR "CPU %u didn't die...\n", cpu); } #endif /* CONFIG_HOTPLUG_CPU */ void smp_cpus_done (unsigned int dummy) { int cpu; unsigned long bogosum = 0; /* * Allow the user to impress friends. 
*/ for_each_online_cpu(cpu) { bogosum += cpu_data(cpu)->loops_per_jiffy; } printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); } static inline void set_cpu_sibling_map(int cpu) { int i; for_each_online_cpu(i) { if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { cpumask_set_cpu(i, &cpu_core_map[cpu]); cpumask_set_cpu(cpu, &cpu_core_map[i]); if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, cpu)); cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, i)); } } } } int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int ret; int sapicid; sapicid = ia64_cpu_to_sapicid[cpu]; if (sapicid == -1) return -EINVAL; /* * Already booted cpu? not valid anymore since we dont * do idle loop tightspin anymore. */ if (cpumask_test_cpu(cpu, &cpu_callin_map)) return -EINVAL; per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; /* Processor goes to start_secondary(), sets online flag */ ret = do_boot_cpu(sapicid, cpu, tidle); if (ret < 0) return ret; if (cpu_data(cpu)->threads_per_core == 1 && cpu_data(cpu)->cores_per_socket == 1) { cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); cpumask_set_cpu(cpu, &cpu_core_map[cpu]); return 0; } set_cpu_sibling_map(cpu); return 0; } /* * Assume that CPUs have been discovered by some platform-dependent interface. For * SoftSDV/Lion, that would be ACPI. * * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP(). */ void __init init_smp_config(void) { struct fptr { unsigned long fp; unsigned long gp; } *ap_startup; long sal_ret; /* Tell SAL where to drop the APs. */ ap_startup = (struct fptr *) start_ap; sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0); if (sal_ret < 0) printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret)); } /* * identify_siblings(cpu) gets called from identify_cpu. This populates the * information related to logical execution units in per_cpu_data structure. */ void identify_siblings(struct cpuinfo_ia64 *c) { long status; u16 pltid; pal_logical_to_physical_t info; status = ia64_pal_logical_to_phys(-1, &info); if (status != PAL_STATUS_SUCCESS) { if (status != PAL_STATUS_UNIMPLEMENTED) { printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", status); return; } info.overview_ppid = 0; info.overview_cpp = 1; info.overview_tpc = 1; } status = ia64_sal_physical_id_info(&pltid); if (status != PAL_STATUS_SUCCESS) { if (status != PAL_STATUS_UNIMPLEMENTED) printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status); return; } c->socket_id = (pltid << 8) | info.overview_ppid; if (info.overview_cpp == 1 && info.overview_tpc == 1) return; c->cores_per_socket = info.overview_cpp; c->threads_per_core = info.overview_tpc; c->num_log = info.overview_num_log; c->core_id = info.log1_cid; c->thread_id = info.log1_tid; } /* * returns non zero, if multi-threading is enabled * on at least one physical package. Due to hotplug cpu * and (maxcpus=), all threads may not necessarily be enabled * even though the processor supports multi-threading. */ int is_multithreading_enabled(void) { int i, j; for_each_present_cpu(i) { for_each_present_cpu(j) { if (j == i) continue; if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) { if (cpu_data(j)->core_id == cpu_data(i)->core_id) return 1; } } } return 0; } EXPORT_SYMBOL_GPL(is_multithreading_enabled);
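/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * ia64_sync_itc()/get_delta() above estimate the slave's clock offset by
 * timestamping a round trip (t0, the master's tm, t1), keeping the
 * iteration with the shortest round trip, and treating the midpoint of
 * t0..t1 as the moment tm was taken.  The simulation below applies the
 * same midpoint estimate to a fake pair of counters; the skew and
 * latencies are invented numbers.
 */
#include <stdio.h>

int main(void)
{
	long skew = 1234;	/* slave itc = master itc + skew     */
	long out_lat = 40;	/* slave -> master latency, cycles   */
	long back_lat = 60;	/* master -> slave latency, cycles   */
	long master_now = 100000;

	/* One measurement round, all in "cycles". */
	long t0 = master_now + skew;			/* slave reads its itc */
	long tm = master_now + out_lat;			/* master replies      */
	long t1 = master_now + out_lat + back_lat + skew; /* slave reads again */

	/* average t0 and t1 without overflow, as get_delta() does */
	long tcenter = t0 / 2 + t1 / 2 + ((t0 % 2 + t1 % 2) == 2);
	long delta = tcenter - tm;	/* estimated slave - master offset */

	printf("true skew %ld, estimated %ld, error %ld (bounded by rtt %ld)\n",
	       skew, delta, delta - skew, t1 - t0);
	return 0;
}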
linux-master
arch/ia64/kernel/smpboot.c
// SPDX-License-Identifier: GPL-2.0 /* * Instruction-patching support. * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> */ #include <linux/init.h> #include <linux/string.h> #include <asm/patch.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/unistd.h> /* * This was adapted from code written by Tony Luck: * * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle * like this: * * 6 6 5 4 3 2 1 * 3210987654321098765432109876543210987654321098765432109876543210 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG * * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB */ static u64 get_imm64 (u64 insn_addr) { u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */ return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/ ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/ ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/ ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/ ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/ ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/ ((p[1] & 0x000007f000000000UL) >> 36); /*G*/ } /* Patch instruction with "val" where "mask" has 1 bits. */ void ia64_patch (u64 insn_addr, u64 mask, u64 val) { u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16); # define insn_mask ((1UL << 41) - 1) unsigned long shift; b0 = b[0]; b1 = b[1]; shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */ if (shift >= 64) { m1 = mask << (shift - 64); v1 = val << (shift - 64); } else { m0 = mask << shift; m1 = mask >> (64 - shift); v0 = val << shift; v1 = val >> (64 - shift); b[0] = (b0 & ~m0) | (v0 & m0); } b[1] = (b1 & ~m1) | (v1 & m1); } void ia64_patch_imm64 (u64 insn_addr, u64 val) { /* The assembler may generate offset pointing to either slot 1 or slot 2 for a long (2-slot) instruction, occupying slots 1 and 2. */ insn_addr &= -16UL; ia64_patch(insn_addr + 2, 0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */ | ((val & 0x0000000000200000UL) << 0) /* bit 21 -> 21 */ | ((val & 0x00000000001f0000UL) << 6) /* bit 16 -> 22 */ | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */ | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */)); ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22); } void ia64_patch_imm60 (u64 insn_addr, u64 val) { /* The assembler may generate offset pointing to either slot 1 or slot 2 for a long (2-slot) instruction, occupying slots 1 and 2. */ insn_addr &= -16UL; ia64_patch(insn_addr + 2, 0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */ | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */)); ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18); } /* * We need sometimes to load the physical address of a kernel * object. Often we can convert the virtual address to physical * at execution time, but sometimes (either for performance reasons * or during error recovery) we cannot to this. Patch the marked * bundles to load the physical address. 
*/ void __init ia64_patch_vtop (unsigned long start, unsigned long end) { s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) offp + *offp; /* replace virtual address with corresponding physical address: */ ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip))); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } /* * Disable the RSE workaround by turning the conditional branch * that we tagged in each place the workaround was used into an * unconditional branch. */ void __init ia64_patch_rse (unsigned long start, unsigned long end) { s32 *offp = (s32 *) start; u64 ip, *b; while (offp < (s32 *) end) { ip = (u64) offp + *offp; b = (u64 *)(ip & -16); b[1] &= ~0xf800000L; ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } void __init ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) { static int first_time = 1; int need_workaround; s32 *offp = (s32 *) start; u64 *wp; need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0); if (first_time) { first_time = 0; if (need_workaround) printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n"); } if (need_workaround) return; while (offp < (s32 *) end) { wp = (u64 *) ia64_imva((char *) offp + *offp); wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */ wp[1] = 0x0084006880000200UL; wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */ wp[3] = 0x0004000000000200UL; ia64_fc(wp); ia64_fc(wp + 2); ++offp; } ia64_sync_i(); ia64_srlz_i(); } static void __init patch_fsyscall_table (unsigned long start, unsigned long end) { extern unsigned long fsyscall_table[NR_syscalls]; s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) ia64_imva((char *) offp + *offp); ia64_patch_imm64(ip, (u64) fsyscall_table); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } static void __init patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) { extern char fsys_bubble_down[]; s32 *offp = (s32 *) start; u64 ip; while (offp < (s32 *) end) { ip = (u64) offp + *offp; ia64_patch_imm60((u64) ia64_imva((void *) ip), (u64) (fsys_bubble_down - (ip & -16)) / 16); ia64_fc((void *) ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); } void __init ia64_patch_gate (void) { # define START(name) ((unsigned long) __start_gate_##name##_patchlist) # define END(name) ((unsigned long)__end_gate_##name##_patchlist) patch_fsyscall_table(START(fsyscall), END(fsyscall)); patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); ia64_patch_vtop(START(vtop), END(vtop)); ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); } void ia64_patch_phys_stack_reg(unsigned long val) { s32 * offp = (s32 *) __start___phys_stack_reg_patchlist; s32 * end = (s32 *) __end___phys_stack_reg_patchlist; u64 ip, mask, imm; /* see instruction format A4: adds r1 = imm13, r3 */ mask = (0x3fUL << 27) | (0x7f << 13); imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13; while (offp < end) { ip = (u64) offp + *offp; ia64_patch(ip, mask, imm); ia64_fc((void *)ip); ++offp; } ia64_sync_i(); ia64_srlz_i(); }
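/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * get_imm64() above gathers the 64-bit immediate of a "movl" from the
 * fields A..G scattered across the two bundle words.  The sketch below
 * pairs that gather with a hand-written scatter built from the same
 * masks and round-trips a few values to show the field layout is
 * self-consistent.  The scatter helper is the editor's construction,
 * not a function from the kernel.
 */
#include <stdio.h>
#include <stdint.h>

/* Same field extraction as get_imm64(), operating on two plain words. */
static uint64_t gather_imm64(uint64_t p0, uint64_t p1)
{
	return ((p1 & 0x0800000000000000ULL) <<  4) |	/* A */
	       ((p1 & 0x00000000007fffffULL) << 40) |	/* B */
	       ((p0 & 0xffffc00000000000ULL) >> 24) |	/* C */
	       ((p1 & 0x0000100000000000ULL) >> 23) |	/* D */
	       ((p1 & 0x0003e00000000000ULL) >> 29) |	/* E */
	       ((p1 & 0x07fc000000000000ULL) >> 43) |	/* F */
	       ((p1 & 0x000007f000000000ULL) >> 36);	/* G */
}

/* Inverse: place the bits of val where gather_imm64() expects them. */
static void scatter_imm64(uint64_t val, uint64_t *p0, uint64_t *p1)
{
	*p0 = ((val >> 22) & 0x3ffffULL) << 46;		/* C */
	*p1 = (((val >> 63) & 1ULL)       << 59) |	/* A */
	      ((val >> 40) & 0x7fffffULL)        |	/* B */
	      (((val >> 21) & 1ULL)       << 44) |	/* D */
	      (((val >> 16) & 0x1fULL)    << 45) |	/* E */
	      (((val >>  7) & 0x1ffULL)   << 50) |	/* F */
	      ((val         & 0x7fULL)    << 36);	/* G */
}

int main(void)
{
	uint64_t tests[] = { 0, 1, 0xdeadbeefcafef00dULL, ~0ULL };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		uint64_t p0, p1;

		scatter_imm64(tests[i], &p0, &p1);
		printf("%016llx -> %016llx\n",
		       (unsigned long long)tests[i],
		       (unsigned long long)gather_imm64(p0, p1));
	}
	return 0;
}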
linux-master
arch/ia64/kernel/patch.c
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/ia64/kernel/irq.c * * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * This file contains the code used by various IRQ handling routines: * asking for different IRQs should be done through these routines * instead of just grabbing them. Thus setups with different IRQ numbers * shouldn't result in any weird surprises, and installing new handlers * should be easier. * * Copyright (C) Ashok Raj<[email protected]>, Intel Corporation 2004 * * 4/14/2004: Added code to handle cpu migration and do safe irq * migration without losing interrupts for iosapic * architecture. */ #include <asm/delay.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <asm/mca.h> #include <asm/xtp.h> /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. */ void ack_bad_irq(unsigned int irq) { printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id()); } /* * Interrupt statistics: */ atomic_t irq_err_count; /* * /proc/interrupts printing: */ int arch_show_interrupts(struct seq_file *p, int prec) { seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; } #ifdef CONFIG_SMP static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; void set_irq_affinity_info (unsigned int irq, int hwid, int redir) { if (irq < NR_IRQS) { irq_data_update_affinity(irq_get_irq_data(irq), cpumask_of(cpu_logical_id(hwid))); irq_redir[irq] = (char) (redir & 0xff); } } #endif /* CONFIG_SMP */ int __init arch_early_irq_init(void) { ia64_mca_irq_init(); return 0; } #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; /* * Since cpu_online_mask is already updated, we just need to check for * affinity that has zeros */ static void migrate_irqs(void) { int irq, new_cpu; for (irq=0; irq < NR_IRQS; irq++) { struct irq_desc *desc = irq_to_desc(irq); struct irq_data *data = irq_desc_get_irq_data(desc); struct irq_chip *chip = irq_data_get_irq_chip(data); if (irqd_irq_disabled(data)) continue; /* * No handling for now. * TBD: Implement a disable function so we can now * tell CPU not to respond to these local intr sources. * such as ITV,CPEI,MCA etc. */ if (irqd_is_per_cpu(data)) continue; if (cpumask_any_and(irq_data_get_affinity_mask(data), cpu_online_mask) >= nr_cpu_ids) { /* * Save it for phase 2 processing */ vectors_in_migration[irq] = irq; new_cpu = cpumask_any(cpu_online_mask); /* * Al three are essential, currently WARN_ON.. maybe panic? */ if (chip && chip->irq_disable && chip->irq_enable && chip->irq_set_affinity) { chip->irq_disable(data); chip->irq_set_affinity(data, cpumask_of(new_cpu), false); chip->irq_enable(data); } else { WARN_ON((!chip || !chip->irq_disable || !chip->irq_enable || !chip->irq_set_affinity)); } } } } void fixup_irqs(void) { unsigned int irq; extern void ia64_process_pending_intr(void); extern volatile int time_keeper_id; /* Mask ITV to disable timer */ ia64_set_itv(1 << 16); /* * Find a new timesync master */ if (smp_processor_id() == time_keeper_id) { time_keeper_id = cpumask_first(cpu_online_mask); printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id); } /* * Phase 1: Locate IRQs bound to this cpu and * relocate them for cpu removal. */ migrate_irqs(); /* * Phase 2: Perform interrupt processing for all entries reported in * local APIC. 
*/ ia64_process_pending_intr(); /* * Phase 3: Now handle any interrupts not captured in local APIC. * This is to account for cases that device interrupted during the time the * rte was being disabled and re-programmed. */ for (irq=0; irq < NR_IRQS; irq++) { if (vectors_in_migration[irq]) { struct pt_regs *old_regs = set_irq_regs(NULL); vectors_in_migration[irq]=0; generic_handle_irq(irq); set_irq_regs(old_regs); } } /* * Now let processor die. We do irq disable and max_xtp() to * ensure there is no more interrupts routed to this processor. * But the local timer interrupt can have 1 pending which we * take care in timer_interrupt(). */ max_xtp(); local_irq_disable(); } #endif
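/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * migrate_irqs() above walks every irq and, when its affinity mask no
 * longer intersects the set of online cpus, re-targets it to a cpu that
 * is still online (disable, change affinity, re-enable).  The toy below
 * applies the same rule to a handful of made-up irqs, using plain
 * bitmasks instead of cpumasks and omitting the per-cpu and disabled
 * cases.
 */
#include <stdio.h>

#define NR_TOY_IRQS 4

/* one affinity bitmask per irq; bit n means "may target cpu n" */
static unsigned int affinity[NR_TOY_IRQS] = { 0x1, 0x2, 0x4, 0x6 };

static void toy_migrate_irqs(unsigned int online_mask)
{
	for (int irq = 0; irq < NR_TOY_IRQS; irq++) {
		if (affinity[irq] & online_mask)
			continue;	/* still has an online target */

		/* pick the lowest online cpu, in the spirit of cpumask_any() */
		unsigned int new_cpu = 0;

		while (!(online_mask & (1u << new_cpu)))
			new_cpu++;

		printf("irq %d: affinity %#x lost, moving to cpu %u\n",
		       irq, affinity[irq], new_cpu);
		affinity[irq] = 1u << new_cpu;
	}
}

int main(void)
{
	/* cpus 0..2 were online, then cpu 2 goes away */
	toy_migrate_irqs(0x7 & ~0x4);
	return 0;
}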
linux-master
arch/ia64/kernel/irq.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/elf.h> #include <linux/coredump.h> #include <linux/fs.h> #include <linux/mm.h> #include <asm/elf.h> Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm) { return GATE_EHDR->e_phnum; } int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; Elf64_Off ofs = 0; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { struct elf_phdr phdr = gate_phdrs[i]; if (phdr.p_type == PT_LOAD) { phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); phdr.p_filesz = phdr.p_memsz; if (ofs == 0) { ofs = phdr.p_offset = offset; offset += phdr.p_filesz; } else { phdr.p_offset = ofs; } } else { phdr.p_offset += ofs; } phdr.p_paddr = 0; /* match other core phdrs */ if (!dump_emit(cprm, &phdr, sizeof(phdr))) return 0; } return 1; } int elf_core_write_extra_data(struct coredump_params *cprm) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { if (gate_phdrs[i].p_type == PT_LOAD) { void *addr = (void *)gate_phdrs[i].p_vaddr; size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz); if (!dump_emit(cprm, addr, memsz)) return 0; break; } } return 1; } size_t elf_core_extra_data_size(struct coredump_params *cprm) { const struct elf_phdr *const gate_phdrs = (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; size_t size = 0; for (i = 0; i < GATE_EHDR->e_phnum; ++i) { if (gate_phdrs[i].p_type == PT_LOAD) { size += PAGE_ALIGN(gate_phdrs[i].p_memsz); break; } } return size; }
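/*
 * Editor's note -- illustrative sketch, not part of the kernel source.
 *
 * elf_core_write_extra_phdrs() above rewrites the gate DSO's program
 * headers for the core file: the PT_LOAD segment gets a page-aligned
 * size and a fresh file offset, and every other header is shifted by
 * that same offset so it still points into the dumped segment.  The toy
 * below performs the same fix-up on an invented header table, using a
 * struct with just the fields that matter here and a made-up page size.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE 16384UL
#define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1))

#define PT_LOAD    1
#define PT_DYNAMIC 2

struct toy_phdr {
	unsigned int  p_type;
	unsigned long p_offset;
	unsigned long p_filesz;
	unsigned long p_memsz;
};

int main(void)
{
	struct toy_phdr gate[] = {
		{ PT_LOAD,    0x0,    0x2345, 0x2345 },
		{ PT_DYNAMIC, 0x1200, 0x100,  0x100  },
	};
	unsigned long offset = 0x80000;	/* where the core writer is now */
	unsigned long ofs = 0;

	for (unsigned i = 0; i < sizeof(gate) / sizeof(gate[0]); i++) {
		if (gate[i].p_type == PT_LOAD) {
			gate[i].p_memsz  = TOY_PAGE_ALIGN(gate[i].p_memsz);
			gate[i].p_filesz = gate[i].p_memsz;
			if (ofs == 0) {
				ofs = gate[i].p_offset = offset;
				offset += gate[i].p_filesz;
			} else {
				gate[i].p_offset = ofs;
			}
		} else {
			gate[i].p_offset += ofs;	/* follow the segment */
		}
		printf("phdr %u: type %u offset %#lx filesz %#lx memsz %#lx\n",
		       i, gate[i].p_type, gate[i].p_offset,
		       gate[i].p_filesz, gate[i].p_memsz);
	}
	return 0;
}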
linux-master
arch/ia64/kernel/elfcore.c
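elf_core_write_extra_phdrs() above rewrites the gate DSO's program headers so that the PT_LOAD segment gets a known file offset and page-aligned sizes, while non-LOAD headers are shifted by the same offset. A stand-alone sketch of that fix-up, using a simplified header struct instead of the real Elf64_Phdr and assuming 4 KiB pages (both are illustrative assumptions):

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096u
#define FAKE_PT_LOAD   1u

/* simplified stand-in for an ELF program header */
struct fake_phdr {
	uint32_t p_type;
	uint64_t p_offset;
	uint64_t p_filesz;
	uint64_t p_memsz;
};

static uint64_t page_align(uint64_t v)
{
	return (v + FAKE_PAGE_SIZE - 1) & ~(uint64_t)(FAKE_PAGE_SIZE - 1);
}

/* mirror of the fix-up: the first PT_LOAD claims 'offset', later headers inherit it */
static void fixup_phdrs(struct fake_phdr *p, int n, uint64_t offset)
{
	uint64_t ofs = 0;

	for (int i = 0; i < n; i++) {
		if (p[i].p_type == FAKE_PT_LOAD) {
			p[i].p_memsz  = page_align(p[i].p_memsz);
			p[i].p_filesz = p[i].p_memsz;
			if (ofs == 0) {
				ofs = p[i].p_offset = offset;
				offset += p[i].p_filesz;
			} else {
				p[i].p_offset = ofs;
			}
		} else {
			p[i].p_offset += ofs;	/* shift non-LOAD headers too */
		}
	}
}

int main(void)
{
	struct fake_phdr hdrs[2] = {
		{ .p_type = FAKE_PT_LOAD, .p_memsz = 5000 },
		{ .p_type = 2, .p_offset = 64 },
	};

	fixup_phdrs(hdrs, 2, 8192);
	printf("PT_LOAD at offset %llu, filesz %llu\n",
	       (unsigned long long)hdrs[0].p_offset,
	       (unsigned long long)hdrs[0].p_filesz);	/* 8192, 8192 */
	return 0;
}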
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/ia64/kernel/crash.c
 *
 * Architecture specific (ia64) functions for kexec based crash dumps.
 *
 * Created by: Khalid Aziz <[email protected]>
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Intel Corp	Zou Nan hai <[email protected]>
 *
 */
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <linux/kexec.h>
#include <linux/elfcore.h>
#include <linux/reboot.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/kdebug.h>

#include <asm/mca.h>

int kdump_status[NR_CPUS];
static atomic_t kdump_cpu_frozen;
atomic_t kdump_in_progress;
static int kdump_freeze_monarch;
static int kdump_on_init = 1;
static int kdump_on_fatal_mca = 1;

extern void ia64_dump_cpu_regs(void *);

static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);

void
crash_save_this_cpu(void)
{
	void *buf;
	unsigned long cfm, sof, sol;

	int cpu = smp_processor_id();
	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);

	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
	memset(prstatus, 0, sizeof(*prstatus));
	prstatus->common.pr_pid = current->pid;

	ia64_dump_cpu_regs(dst);
	cfm = dst[43];
	sol = (cfm >> 7) & 0x7f;
	sof = cfm & 0x7f;
	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
			sof - sol);

	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
			sizeof(*prstatus));
	final_note(buf);
}

#ifdef CONFIG_SMP
static int
kdump_wait_cpu_freeze(void)
{
	int cpu_num = num_online_cpus() - 1;
	int timeout = 1000;

	while(timeout-- > 0) {
		if (atomic_read(&kdump_cpu_frozen) == cpu_num)
			return 0;
		udelay(1000);
	}
	return 1;
}
#endif

void
machine_crash_shutdown(struct pt_regs *pt)
{
	/* This function is only called after the system
	 * has paniced or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	kexec_disable_iosapic();
#ifdef CONFIG_SMP
	/*
	 * If kdump_on_init is set and an INIT is asserted here, kdump will
	 * be started again via INIT monarch.
	 */
	local_irq_disable();
	ia64_set_psr_mc();	/* mask MCA/INIT */
	if (atomic_inc_return(&kdump_in_progress) != 1)
		unw_init_running(kdump_cpu_freeze, NULL);

	/*
	 * Now this cpu is ready for kdump.
	 * Stop all others by IPI or INIT.  They could receive INIT from
	 * outside and might be INIT monarch, but only thing they have to
	 * do is falling into kdump_cpu_freeze().
	 *
	 * If an INIT is asserted here:
	 * - All receivers might be slaves, since some of cpus could already
	 *   be frozen and INIT might be masked on monarch.  In this case,
	 *   all slaves will be frozen soon since kdump_in_progress will let
	 *   them into DIE_INIT_SLAVE_LEAVE.
	 * - One might be a monarch, but INIT rendezvous will fail since
	 *   at least this cpu already have INIT masked so it never join
	 *   to the rendezvous.  In this case, all slaves and monarch will
	 *   be frozen soon with no wait since the INIT rendezvous is skipped
	 *   by kdump_in_progress.
	 */
	kdump_smp_send_stop();
	/* not all cpu response to IPI, send INIT to freeze them */
	if (kdump_wait_cpu_freeze()) {
		kdump_smp_send_init();
		/* wait again, don't go ahead if possible */
		kdump_wait_cpu_freeze();
	}
#endif
}

static void
machine_kdump_on_init(void)
{
	crash_save_vmcoreinfo();
	local_irq_disable();
	kexec_disable_iosapic();
	machine_kexec(ia64_kimage);
}

void
kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
{
	int cpuid;

	local_irq_disable();
	cpuid = smp_processor_id();
	crash_save_this_cpu();
	current->thread.ksp = (__u64)info->sw - 16;

	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */

	atomic_inc(&kdump_cpu_frozen);
	kdump_status[cpuid] = 1;
	mb();
	for (;;)
		cpu_relax();
}

static int
kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
{
	struct ia64_mca_notify_die *nd;
	struct die_args *args = data;

	if (atomic_read(&kdump_in_progress)) {
		switch (val) {
		case DIE_INIT_MONARCH_LEAVE:
			if (!kdump_freeze_monarch)
				break;
			fallthrough;
		case DIE_INIT_SLAVE_LEAVE:
		case DIE_INIT_MONARCH_ENTER:
		case DIE_MCA_RENDZVOUS_LEAVE:
			unw_init_running(kdump_cpu_freeze, NULL);
			break;
		}
	}

	if (!kdump_on_init && !kdump_on_fatal_mca)
		return NOTIFY_DONE;

	if (!ia64_kimage) {
		if (val == DIE_INIT_MONARCH_LEAVE)
			ia64_mca_printk(KERN_NOTICE
					"%s: kdump not configured\n",
					__func__);
		return NOTIFY_DONE;
	}

	if (val != DIE_INIT_MONARCH_LEAVE &&
	    val != DIE_INIT_MONARCH_PROCESS &&
	    val != DIE_MCA_MONARCH_LEAVE)
		return NOTIFY_DONE;

	nd = (struct ia64_mca_notify_die *)args->err;

	switch (val) {
	case DIE_INIT_MONARCH_PROCESS:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
			if (atomic_inc_return(&kdump_in_progress) != 1)
				kdump_freeze_monarch = 1;
		}
		break;
	case DIE_INIT_MONARCH_LEAVE:
		/* Reason code 1 means machine check rendezvous*/
		if (kdump_on_init && (nd->sos->rv_rc != 1))
			machine_kdump_on_init();
		break;
	case DIE_MCA_MONARCH_LEAVE:
		/* *(nd->data) indicate if MCA is recoverable */
		if (kdump_on_fatal_mca && !(*(nd->data))) {
			if (atomic_inc_return(&kdump_in_progress) == 1)
				machine_kdump_on_init();
			/* We got fatal MCA while kdump!? No way!! */
		}
		break;
	}
	return NOTIFY_DONE;
}

#ifdef CONFIG_SYSCTL
static struct ctl_table kdump_ctl_table[] = {
	{
		.procname = "kdump_on_init",
		.data = &kdump_on_init,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "kdump_on_fatal_mca",
		.data = &kdump_on_fatal_mca,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};
#endif

static int
machine_crash_setup(void)
{
	/* be notified before default_monarch_init_process */
	static struct notifier_block kdump_init_notifier_nb = {
		.notifier_call = kdump_init_notifier,
		.priority = 1,
	};
	int ret;

	if((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
		return ret;
#ifdef CONFIG_SYSCTL
	register_sysctl("kernel", kdump_ctl_table);
#endif
	return 0;
}

__initcall(machine_crash_setup);
linux-master
arch/ia64/kernel/crash.c
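crash_save_this_cpu() above packs the per-CPU register snapshot into the crash_notes buffer as an ELF note. The note layout is three 32-bit words (namesz, descsz, type) followed by the name and then the descriptor, each padded to a 4-byte boundary. A stand-alone sketch of that packing; the helper names here are illustrative, not the kernel's append_elf_note()/final_note():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* round up to the 4-byte alignment ELF notes use */
static size_t roundup4(size_t v)
{
	return (v + 3) & ~(size_t)3;
}

/*
 * Append one note (header + padded name + padded desc) and return the next
 * free byte.  The buffer is assumed zero-initialized, so padding is zero.
 */
static uint8_t *append_note(uint8_t *buf, const char *name, uint32_t type,
			    const void *desc, uint32_t descsz)
{
	uint32_t namesz = (uint32_t)strlen(name) + 1;	/* include the NUL */

	memcpy(buf, &namesz, 4); buf += 4;
	memcpy(buf, &descsz, 4); buf += 4;
	memcpy(buf, &type,   4); buf += 4;

	memcpy(buf, name, namesz);
	buf += roundup4(namesz);

	memcpy(buf, desc, descsz);
	buf += roundup4(descsz);

	return buf;
}

int main(void)
{
	uint8_t notes[256] = { 0 };
	uint64_t regs[4] = { 1, 2, 3, 4 };	/* pretend register dump */
	uint8_t *end;

	end = append_note(notes, "CORE", 1 /* NT_PRSTATUS */, regs, sizeof(regs));
	printf("note occupies %zu bytes\n", (size_t)(end - notes));	/* 52 */
	return 0;
}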
// SPDX-License-Identifier: GPL-2.0 /* * Kernel support for the ptrace() and syscall tracing interfaces. * * Copyright (C) 1999-2005 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Copyright (C) 2006 Intel Co * 2006-08-12 - IA64 Native Utrace implementation support added by * Anil S Keshavamurthy <[email protected]> * * Derived from the x86 and Alpha versions. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/audit.h> #include <linux/signal.h> #include <linux/regset.h> #include <linux/elf.h> #include <linux/resume_user_mode.h> #include <asm/processor.h> #include <asm/ptrace_offsets.h> #include <asm/rse.h> #include <linux/uaccess.h> #include <asm/unwind.h> #include "entry.h" /* * Bits in the PSR that we allow ptrace() to change: * be, up, ac, mfl, mfh (the user mask; five bits total) * db (debug breakpoint fault; one bit) * id (instruction debug fault disable; one bit) * dd (data debug fault disable; one bit) * ri (restart instruction; two bits) * is (instruction set; one bit) */ #define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \ | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI) #define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */ #define PFM_MASK MASK(38) #define PTRACE_DEBUG 0 #if PTRACE_DEBUG # define dprintk(format...) printk(format) # define inline #else # define dprintk(format...) #endif /* Return TRUE if PT was created due to kernel-entry via a system-call. */ static inline int in_syscall (struct pt_regs *pt) { return (long) pt->cr_ifs >= 0; } /* * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT * bitset where bit i is set iff the NaT bit of register i is set. */ unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat) { # define GET_BITS(first, last, unat) \ ({ \ unsigned long bit = ia64_unat_pos(&pt->r##first); \ unsigned long nbits = (last - first + 1); \ unsigned long mask = MASK(nbits) << first; \ unsigned long dist; \ if (bit < first) \ dist = 64 + bit - first; \ else \ dist = bit - first; \ ia64_rotr(unat, dist) & mask; \ }) unsigned long val; /* * Registers that are stored consecutively in struct pt_regs * can be handled in parallel. If the register order in * struct_pt_regs changes, this code MUST be updated. */ val = GET_BITS( 1, 1, scratch_unat); val |= GET_BITS( 2, 3, scratch_unat); val |= GET_BITS(12, 13, scratch_unat); val |= GET_BITS(14, 14, scratch_unat); val |= GET_BITS(15, 15, scratch_unat); val |= GET_BITS( 8, 11, scratch_unat); val |= GET_BITS(16, 31, scratch_unat); return val; # undef GET_BITS } /* * Set the NaT bits for the scratch registers according to NAT and * return the resulting unat (assuming the scratch registers are * stored in PT). */ unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat) { # define PUT_BITS(first, last, nat) \ ({ \ unsigned long bit = ia64_unat_pos(&pt->r##first); \ unsigned long nbits = (last - first + 1); \ unsigned long mask = MASK(nbits) << first; \ long dist; \ if (bit < first) \ dist = 64 + bit - first; \ else \ dist = bit - first; \ ia64_rotl(nat & mask, dist); \ }) unsigned long scratch_unat; /* * Registers that are stored consecutively in struct pt_regs * can be handled in parallel. If the register order in * struct_pt_regs changes, this code MUST be updated. 
*/ scratch_unat = PUT_BITS( 1, 1, nat); scratch_unat |= PUT_BITS( 2, 3, nat); scratch_unat |= PUT_BITS(12, 13, nat); scratch_unat |= PUT_BITS(14, 14, nat); scratch_unat |= PUT_BITS(15, 15, nat); scratch_unat |= PUT_BITS( 8, 11, nat); scratch_unat |= PUT_BITS(16, 31, nat); return scratch_unat; # undef PUT_BITS } #define IA64_MLX_TEMPLATE 0x2 #define IA64_MOVL_OPCODE 6 void ia64_increment_ip (struct pt_regs *regs) { unsigned long w0, ri = ia64_psr(regs)->ri + 1; if (ri > 2) { ri = 0; regs->cr_iip += 16; } else if (ri == 2) { get_user(w0, (char __user *) regs->cr_iip + 0); if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { /* * rfi'ing to slot 2 of an MLX bundle causes * an illegal operation fault. We don't want * that to happen... */ ri = 0; regs->cr_iip += 16; } } ia64_psr(regs)->ri = ri; } void ia64_decrement_ip (struct pt_regs *regs) { unsigned long w0, ri = ia64_psr(regs)->ri - 1; if (ia64_psr(regs)->ri == 0) { regs->cr_iip -= 16; ri = 2; get_user(w0, (char __user *) regs->cr_iip + 0); if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) { /* * rfi'ing to slot 2 of an MLX bundle causes * an illegal operation fault. We don't want * that to happen... */ ri = 1; } } ia64_psr(regs)->ri = ri; } /* * This routine is used to read an rnat bits that are stored on the * kernel backing store. Since, in general, the alignment of the user * and kernel are different, this is not completely trivial. In * essence, we need to construct the user RNAT based on up to two * kernel RNAT values and/or the RNAT value saved in the child's * pt_regs. * * user rbs * * +--------+ <-- lowest address * | slot62 | * +--------+ * | rnat | 0x....1f8 * +--------+ * | slot00 | \ * +--------+ | * | slot01 | > child_regs->ar_rnat * +--------+ | * | slot02 | / kernel rbs * +--------+ +--------+ * <- child_regs->ar_bspstore | slot61 | <-- krbs * +- - - - + +--------+ * | slot62 | * +- - - - + +--------+ * | rnat | * +- - - - + +--------+ * vrnat | slot00 | * +- - - - + +--------+ * = = * +--------+ * | slot00 | \ * +--------+ | * | slot01 | > child_stack->ar_rnat * +--------+ | * | slot02 | / * +--------+ * <--- child_stack->ar_bspstore * * The way to think of this code is as follows: bit 0 in the user rnat * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat * value. The kernel rnat value holding this bit is stored in * variable rnat0. rnat1 is loaded with the kernel rnat value that * form the upper bits of the user rnat value. * * Boundary cases: * * o when reading the rnat "below" the first rnat slot on the kernel * backing store, rnat0/rnat1 are set to 0 and the low order bits are * merged in from pt->ar_rnat. * * o when reading the rnat "above" the last rnat slot on the kernel * backing store, rnat0/rnat1 gets its value from sw->ar_rnat. */ static unsigned long get_rnat (struct task_struct *task, struct switch_stack *sw, unsigned long *krbs, unsigned long *urnat_addr, unsigned long *urbs_end) { unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr; unsigned long umask = 0, mask, m; unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; long num_regs, nbits; struct pt_regs *pt; pt = task_pt_regs(task); kbsp = (unsigned long *) sw->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore; if (urbs_end < urnat_addr) nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end); else nbits = 63; mask = MASK(nbits); /* * First, figure out which bit number slot 0 in user-land maps * to in the kernel rnat. 
Do this by figuring out how many * register slots we're beyond the user's backingstore and * then computing the equivalent address in kernel space. */ num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); shift = ia64_rse_slot_num(slot0_kaddr); rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); rnat0_kaddr = rnat1_kaddr - 64; if (ubspstore + 63 > urnat_addr) { /* some bits need to be merged in from pt->ar_rnat */ umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; urnat = (pt->ar_rnat & umask); mask &= ~umask; if (!mask) return urnat; } m = mask << shift; if (rnat0_kaddr >= kbsp) rnat0 = sw->ar_rnat; else if (rnat0_kaddr > krbs) rnat0 = *rnat0_kaddr; urnat |= (rnat0 & m) >> shift; m = mask >> (63 - shift); if (rnat1_kaddr >= kbsp) rnat1 = sw->ar_rnat; else if (rnat1_kaddr > krbs) rnat1 = *rnat1_kaddr; urnat |= (rnat1 & m) << (63 - shift); return urnat; } /* * The reverse of get_rnat. */ static void put_rnat (struct task_struct *task, struct switch_stack *sw, unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat, unsigned long *urbs_end) { unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m; unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift; long num_regs, nbits; struct pt_regs *pt; unsigned long cfm, *urbs_kargs; pt = task_pt_regs(task); kbsp = (unsigned long *) sw->ar_bspstore; ubspstore = (unsigned long *) pt->ar_bspstore; urbs_kargs = urbs_end; if (in_syscall(pt)) { /* * If entered via syscall, don't allow user to set rnat bits * for syscall args. */ cfm = pt->cr_ifs; urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f)); } if (urbs_kargs >= urnat_addr) nbits = 63; else { if ((urnat_addr - 63) >= urbs_kargs) return; nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs); } mask = MASK(nbits); /* * First, figure out which bit number slot 0 in user-land maps * to in the kernel rnat. Do this by figuring out how many * register slots we're beyond the user's backingstore and * then computing the equivalent address in kernel space. */ num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1); slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs); shift = ia64_rse_slot_num(slot0_kaddr); rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr); rnat0_kaddr = rnat1_kaddr - 64; if (ubspstore + 63 > urnat_addr) { /* some bits need to be place in pt->ar_rnat: */ umask = MASK(ia64_rse_slot_num(ubspstore)) & mask; pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask); mask &= ~umask; if (!mask) return; } /* * Note: Section 11.1 of the EAS guarantees that bit 63 of an * rnat slot is ignored. so we don't have to clear it here. */ rnat0 = (urnat << shift); m = mask << shift; if (rnat0_kaddr >= kbsp) sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m); else if (rnat0_kaddr > krbs) *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m)); rnat1 = (urnat >> (63 - shift)); m = mask >> (63 - shift); if (rnat1_kaddr >= kbsp) sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m); else if (rnat1_kaddr > krbs) *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m)); } static inline int on_kernel_rbs (unsigned long addr, unsigned long bspstore, unsigned long urbs_end) { unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *) urbs_end); return (addr >= bspstore && addr <= (unsigned long) rnat_addr); } /* * Read a word from the user-level backing store of task CHILD. 
ADDR * is the user-level address to read the word from, VAL a pointer to * the return value, and USER_BSP gives the end of the user-level * backing store (i.e., it's the address that would be in ar.bsp after * the user executed a "cover" instruction). * * This routine takes care of accessing the kernel register backing * store for those registers that got spilled there. It also takes * care of calculating the appropriate RNaT collection words. */ long ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end, unsigned long addr, long *val) { unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr; struct pt_regs *child_regs; size_t copied; long ret; urbs_end = (long *) user_rbs_end; laddr = (unsigned long *) addr; child_regs = task_pt_regs(child); bspstore = (unsigned long *) child_regs->ar_bspstore; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) { /* * Attempt to read the RBS in an area that's actually * on the kernel RBS => read the corresponding bits in * the kernel RBS. */ rnat_addr = ia64_rse_rnat_addr(laddr); ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end); if (laddr == rnat_addr) { /* return NaT collection word itself */ *val = ret; return 0; } if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) { /* * It is implementation dependent whether the * data portion of a NaT value gets saved on a * st8.spill or RSE spill (e.g., see EAS 2.6, * 4.4.4.6 Register Spill and Fill). To get * consistent behavior across all possible * IA-64 implementations, we return zero in * this case. */ *val = 0; return 0; } if (laddr < urbs_end) { /* * The desired word is on the kernel RBS and * is not a NaT. */ regnum = ia64_rse_num_regs(bspstore, laddr); *val = *ia64_rse_skip_regs(krbs, regnum); return 0; } } copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE); if (copied != sizeof(ret)) return -EIO; *val = ret; return 0; } long ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end, unsigned long addr, long val) { unsigned long *bspstore, *krbs, regnum, *laddr; unsigned long *urbs_end = (long *) user_rbs_end; struct pt_regs *child_regs; laddr = (unsigned long *) addr; child_regs = task_pt_regs(child); bspstore = (unsigned long *) child_regs->ar_bspstore; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) { /* * Attempt to write the RBS in an area that's actually * on the kernel RBS => write the corresponding bits * in the kernel RBS. */ if (ia64_rse_is_rnat_slot(laddr)) put_rnat(child, child_stack, krbs, laddr, val, urbs_end); else { if (laddr < urbs_end) { regnum = ia64_rse_num_regs(bspstore, laddr); *ia64_rse_skip_regs(krbs, regnum) = val; } } } else if (access_process_vm(child, addr, &val, sizeof(val), FOLL_FORCE | FOLL_WRITE) != sizeof(val)) return -EIO; return 0; } /* * Calculate the address of the end of the user-level register backing * store. This is the address that would have been stored in ar.bsp * if the user had executed a "cover" instruction right before * entering the kernel. If CFMP is not NULL, it is used to return the * "current frame mask" that was active at the time the kernel was * entered. 
*/ unsigned long ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned long *cfmp) { unsigned long *krbs, *bspstore, cfm = pt->cr_ifs; long ndirty; krbs = (unsigned long *) child + IA64_RBS_OFFSET/8; bspstore = (unsigned long *) pt->ar_bspstore; ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); if (in_syscall(pt)) ndirty += (cfm & 0x7f); else cfm &= ~(1UL << 63); /* clear valid bit */ if (cfmp) *cfmp = cfm; return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty); } /* * Synchronize (i.e, write) the RSE backing store living in kernel * space to the VM of the CHILD task. SW and PT are the pointers to * the switch_stack and pt_regs structures, respectively. * USER_RBS_END is the user-level address at which the backing store * ends. */ long ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw, unsigned long user_rbs_start, unsigned long user_rbs_end) { unsigned long addr, val; long ret; /* now copy word for word from kernel rbs to user rbs: */ for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { ret = ia64_peek(child, sw, user_rbs_end, addr, &val); if (ret < 0) return ret; if (access_process_vm(child, addr, &val, sizeof(val), FOLL_FORCE | FOLL_WRITE) != sizeof(val)) return -EIO; } return 0; } static long ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw, unsigned long user_rbs_start, unsigned long user_rbs_end) { unsigned long addr, val; long ret; /* now copy word for word from user rbs to kernel rbs: */ for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { if (access_process_vm(child, addr, &val, sizeof(val), FOLL_FORCE) != sizeof(val)) return -EIO; ret = ia64_poke(child, sw, user_rbs_end, addr, val); if (ret < 0) return ret; } return 0; } typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *, unsigned long, unsigned long); static void do_sync_rbs(struct unw_frame_info *info, void *arg) { struct pt_regs *pt; unsigned long urbs_end; syncfunc_t fn = arg; if (unw_unwind_to_user(info) < 0) return; pt = task_pt_regs(info->task); urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL); fn(info->task, info->sw, pt->ar_bspstore, urbs_end); } /* * when a thread is stopped (ptraced), debugger might change thread's user * stack (change memory directly), and we must avoid the RSE stored in kernel * to override user stack (user space's RSE is newer than kernel's in the * case). To workaround the issue, we copy kernel RSE to user RSE before the * task is stopped, so user RSE has updated data. we then copy user RSE to * kernel after the task is resummed from traced stop and kernel will use the * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need * synchronize user RSE to kernel. */ void ia64_ptrace_stop(void) { if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) return; set_notify_resume(current); unw_init_running(do_sync_rbs, ia64_sync_user_rbs); } /* * This is called to read back the register backing store. */ void ia64_sync_krbs(void) { clear_tsk_thread_flag(current, TIF_RESTORE_RSE); unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); } /* * Write f32-f127 back to task->thread.fph if it has been modified. 
*/ inline void ia64_flush_fph (struct task_struct *task) { struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); /* * Prevent migrating this task while * we're fiddling with the FPU state */ preempt_disable(); if (ia64_is_local_fpu_owner(task) && psr->mfh) { psr->mfh = 0; task->thread.flags |= IA64_THREAD_FPH_VALID; ia64_save_fpu(&task->thread.fph[0]); } preempt_enable(); } /* * Sync the fph state of the task so that it can be manipulated * through thread.fph. If necessary, f32-f127 are written back to * thread.fph or, if the fph state hasn't been used before, thread.fph * is cleared to zeroes. Also, access to f32-f127 is disabled to * ensure that the task picks up the state from thread.fph when it * executes again. */ void ia64_sync_fph (struct task_struct *task) { struct ia64_psr *psr = ia64_psr(task_pt_regs(task)); ia64_flush_fph(task); if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) { task->thread.flags |= IA64_THREAD_FPH_VALID; memset(&task->thread.fph, 0, sizeof(task->thread.fph)); } ia64_drop_fpu(task); psr->dfh = 1; } /* * Change the machine-state of CHILD such that it will return via the normal * kernel exit-path, rather than the syscall-exit path. */ static void convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, unsigned long cfm) { struct unw_frame_info info, prev_info; unsigned long ip, sp, pr; unw_init_from_blocked_task(&info, child); while (1) { prev_info = info; if (unw_unwind(&info) < 0) return; unw_get_sp(&info, &sp); if ((long)((unsigned long)child + IA64_STK_OFFSET - sp) < IA64_PT_REGS_SIZE) { dprintk("ptrace.%s: ran off the top of the kernel " "stack\n", __func__); return; } if (unw_get_pr (&prev_info, &pr) < 0) { unw_get_rp(&prev_info, &ip); dprintk("ptrace.%s: failed to read " "predicate register (ip=0x%lx)\n", __func__, ip); return; } if (unw_is_intr_frame(&info) && (pr & (1UL << PRED_USER_STACK))) break; } /* * Note: at the time of this call, the target task is blocked * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL * (aka, "pLvSys") we redirect execution from * .work_pending_syscall_end to .work_processed_kernel. */ unw_get_pr(&prev_info, &pr); pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL)); pr |= (1UL << PRED_NON_SYSCALL); unw_set_pr(&prev_info, pr); pt->cr_ifs = (1UL << 63) | cfm; /* * Clear the memory that is NOT written on syscall-entry to * ensure we do not leak kernel-state to user when execution * resumes. 
*/ pt->r2 = 0; pt->r3 = 0; pt->r14 = 0; memset(&pt->r16, 0, 16*8); /* clear r16-r31 */ memset(&pt->f6, 0, 6*16); /* clear f6-f11 */ pt->b7 = 0; pt->ar_ccv = 0; pt->ar_csd = 0; pt->ar_ssd = 0; } static int access_nat_bits (struct task_struct *child, struct pt_regs *pt, struct unw_frame_info *info, unsigned long *data, int write_access) { unsigned long regnum, nat_bits, scratch_unat, dummy = 0; char nat = 0; if (write_access) { nat_bits = *data; scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits); if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) { dprintk("ptrace: failed to set ar.unat\n"); return -1; } for (regnum = 4; regnum <= 7; ++regnum) { unw_get_gr(info, regnum, &dummy, &nat); unw_set_gr(info, regnum, dummy, (nat_bits >> regnum) & 1); } } else { if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) { dprintk("ptrace: failed to read ar.unat\n"); return -1; } nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat); for (regnum = 4; regnum <= 7; ++regnum) { unw_get_gr(info, regnum, &dummy, &nat); nat_bits |= (nat != 0) << regnum; } *data = nat_bits; } return 0; } static int access_elf_reg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access); static long ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) { unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val; struct unw_frame_info info; struct ia64_fpreg fpval; struct switch_stack *sw; struct pt_regs *pt; long ret, retval = 0; char nat = 0; int i; if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) return -EIO; pt = task_pt_regs(child); sw = (struct switch_stack *) (child->thread.ksp + 16); unw_init_from_blocked_task(&info, child); if (unw_unwind_to_user(&info) < 0) { return -EIO; } if (((unsigned long) ppr & 0x7) != 0) { dprintk("ptrace:unaligned register address %p\n", ppr); return -EIO; } if (access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 0) < 0 || access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 0) < 0 || access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 0) < 0 || access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 0) < 0 || access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 0) < 0 || access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 0) < 0 || access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 0) < 0) return -EIO; /* control regs */ retval |= __put_user(pt->cr_iip, &ppr->cr_iip); retval |= __put_user(psr, &ppr->cr_ipsr); /* app regs */ retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]); retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]); retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]); retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]); retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]); retval |= __put_user(cfm, &ppr->cfm); /* gr1-gr3 */ retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long)); retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2); /* gr4-gr7 */ for (i = 4; i < 8; i++) { if (unw_access_gr(&info, i, &val, &nat, 0) < 0) return -EIO; retval |= __put_user(val, &ppr->gr[i]); } /* gr8-gr11 */ retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4); /* gr12-gr15 */ retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2); retval |= __copy_to_user(&ppr->gr[14], &pt->r14, 
sizeof(long)); retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long)); /* gr16-gr31 */ retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16); /* b0 */ retval |= __put_user(pt->b0, &ppr->br[0]); /* b1-b5 */ for (i = 1; i < 6; i++) { if (unw_access_br(&info, i, &val, 0) < 0) return -EIO; __put_user(val, &ppr->br[i]); } /* b6-b7 */ retval |= __put_user(pt->b6, &ppr->br[6]); retval |= __put_user(pt->b7, &ppr->br[7]); /* fr2-fr5 */ for (i = 2; i < 6; i++) { if (unw_get_fr(&info, i, &fpval) < 0) return -EIO; retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); } /* fr6-fr11 */ retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 6); /* fp scratch regs(12-15) */ retval |= __copy_to_user(&ppr->fr[12], &sw->f12, sizeof(struct ia64_fpreg) * 4); /* fr16-fr31 */ for (i = 16; i < 32; i++) { if (unw_get_fr(&info, i, &fpval) < 0) return -EIO; retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval)); } /* fph */ ia64_flush_fph(child); retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, sizeof(ppr->fr[32]) * 96); /* preds */ retval |= __put_user(pt->pr, &ppr->pr); /* nat bits */ retval |= __put_user(nat_bits, &ppr->nat); ret = retval ? -EIO : 0; return ret; } static long ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) { unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0; struct unw_frame_info info; struct switch_stack *sw; struct ia64_fpreg fpval; struct pt_regs *pt; long retval = 0; int i; memset(&fpval, 0, sizeof(fpval)); if (!access_ok(ppr, sizeof(struct pt_all_user_regs))) return -EIO; pt = task_pt_regs(child); sw = (struct switch_stack *) (child->thread.ksp + 16); unw_init_from_blocked_task(&info, child); if (unw_unwind_to_user(&info) < 0) { return -EIO; } if (((unsigned long) ppr & 0x7) != 0) { dprintk("ptrace:unaligned register address %p\n", ppr); return -EIO; } /* control regs */ retval |= __get_user(pt->cr_iip, &ppr->cr_iip); retval |= __get_user(psr, &ppr->cr_ipsr); /* app regs */ retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]); retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]); retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]); retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]); retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]); retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]); retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]); retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]); retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]); retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]); retval |= __get_user(cfm, &ppr->cfm); /* gr1-gr3 */ retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long)); retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2); /* gr4-gr7 */ for (i = 4; i < 8; i++) { retval |= __get_user(val, &ppr->gr[i]); /* NaT bit will be set via PT_NAT_BITS: */ if (unw_set_gr(&info, i, val, 0) < 0) return -EIO; } /* gr8-gr11 */ retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4); /* gr12-gr15 */ retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2); retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long)); retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long)); /* gr16-gr31 */ retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16); /* b0 */ retval |= __get_user(pt->b0, &ppr->br[0]); /* b1-b5 */ for (i = 1; i < 6; i++) { retval |= __get_user(val, &ppr->br[i]); unw_set_br(&info, i, val); } /* b6-b7 */ retval |= __get_user(pt->b6, &ppr->br[6]); retval |= __get_user(pt->b7, 
&ppr->br[7]); /* fr2-fr5 */ for (i = 2; i < 6; i++) { retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval)); if (unw_set_fr(&info, i, fpval) < 0) return -EIO; } /* fr6-fr11 */ retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 6); /* fp scratch regs(12-15) */ retval |= __copy_from_user(&sw->f12, &ppr->fr[12], sizeof(ppr->fr[12]) * 4); /* fr16-fr31 */ for (i = 16; i < 32; i++) { retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval)); if (unw_set_fr(&info, i, fpval) < 0) return -EIO; } /* fph */ ia64_sync_fph(child); retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], sizeof(ppr->fr[32]) * 96); /* preds */ retval |= __get_user(pt->pr, &ppr->pr); /* nat bits */ retval |= __get_user(nat_bits, &ppr->nat); retval |= access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 1); retval |= access_elf_reg(child, &info, ELF_AR_RSC_OFFSET, &rsc, 1); retval |= access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 1); retval |= access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 1); retval |= access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 1); retval |= access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 1); retval |= access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 1); retval |= access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 1); return retval ? -EIO : 0; } void user_enable_single_step (struct task_struct *child) { struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); set_tsk_thread_flag(child, TIF_SINGLESTEP); child_psr->ss = 1; } void user_enable_block_step (struct task_struct *child) { struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); set_tsk_thread_flag(child, TIF_SINGLESTEP); child_psr->tb = 1; } void user_disable_single_step (struct task_struct *child) { struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); /* make sure the single step/taken-branch trap bits are not set: */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); child_psr->ss = 0; child_psr->tb = 0; } /* * Called by kernel/ptrace.c when detaching.. * * Make sure the single step bit is not set. */ void ptrace_disable (struct task_struct *child) { user_disable_single_step(child); } static int access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access); long arch_ptrace (struct task_struct *child, long request, unsigned long addr, unsigned long data) { switch (request) { case PTRACE_PEEKTEXT: case PTRACE_PEEKDATA: /* read word at location addr */ if (ptrace_access_vm(child, addr, &data, sizeof(data), FOLL_FORCE) != sizeof(data)) return -EIO; /* ensure return value is not mistaken for error code */ force_successful_syscall_return(); return data; /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled * by the generic ptrace_request(). 
*/ case PTRACE_PEEKUSR: /* read the word at addr in the USER area */ if (access_uarea(child, addr, &data, 0) < 0) return -EIO; /* ensure return value is not mistaken for error code */ force_successful_syscall_return(); return data; case PTRACE_POKEUSR: /* write the word at addr in the USER area */ if (access_uarea(child, addr, &data, 1) < 0) return -EIO; return 0; case PTRACE_OLD_GETSIGINFO: /* for backwards-compatibility */ return ptrace_request(child, PTRACE_GETSIGINFO, addr, data); case PTRACE_OLD_SETSIGINFO: /* for backwards-compatibility */ return ptrace_request(child, PTRACE_SETSIGINFO, addr, data); case PTRACE_GETREGS: return ptrace_getregs(child, (struct pt_all_user_regs __user *) data); case PTRACE_SETREGS: return ptrace_setregs(child, (struct pt_all_user_regs __user *) data); default: return ptrace_request(child, request, addr, data); } } /* "asmlinkage" so the input arguments are preserved... */ asmlinkage long syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { if (test_thread_flag(TIF_SYSCALL_TRACE)) if (ptrace_report_syscall_entry(&regs)) return -ENOSYS; /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) ia64_sync_krbs(); audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3); return 0; } /* "asmlinkage" so the input arguments are preserved... */ asmlinkage void syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { int step; audit_syscall_exit(&regs); step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) ptrace_report_syscall_exit(&regs, step); /* copy user rbs to kernel rbs */ if (test_thread_flag(TIF_RESTORE_RSE)) ia64_sync_krbs(); } /* Utrace implementation starts here */ struct regset_get { void *kbuf; void __user *ubuf; }; struct regset_set { const void *kbuf; const void __user *ubuf; }; struct regset_getset { struct task_struct *target; const struct user_regset *regset; union { struct regset_get get; struct regset_set set; } u; unsigned int pos; unsigned int count; int ret; }; static const ptrdiff_t pt_offsets[32] = { #define R(n) offsetof(struct pt_regs, r##n) [0] = -1, R(1), R(2), R(3), [4] = -1, [5] = -1, [6] = -1, [7] = -1, R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15), R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23), R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31), #undef R }; static int access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { struct pt_regs *pt = task_pt_regs(target); unsigned reg = addr / sizeof(unsigned long); ptrdiff_t d = pt_offsets[reg]; if (d >= 0) { unsigned long *ptr = (void *)pt + d; if (write_access) *ptr = *data; else *data = *ptr; return 0; } else { char nat = 0; if (write_access) { /* read NaT bit first: */ unsigned long dummy; int ret = unw_get_gr(info, reg, &dummy, &nat); if (ret < 0) return ret; } return unw_access_gr(info, reg, data, &nat, write_access); } } static int access_elf_breg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { struct pt_regs *pt; unsigned long *ptr = NULL; pt = task_pt_regs(target); switch (addr) { case ELF_BR_OFFSET(0): ptr = &pt->b0; break; case ELF_BR_OFFSET(1) ... 
ELF_BR_OFFSET(5): return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8, data, write_access); case ELF_BR_OFFSET(6): ptr = &pt->b6; break; case ELF_BR_OFFSET(7): ptr = &pt->b7; } if (write_access) *ptr = *data; else *data = *ptr; return 0; } static int access_elf_areg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { struct pt_regs *pt; unsigned long cfm, urbs_end; unsigned long *ptr = NULL; pt = task_pt_regs(target); if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) { switch (addr) { case ELF_AR_RSC_OFFSET: /* force PL3 */ if (write_access) pt->ar_rsc = *data | (3 << 2); else *data = pt->ar_rsc; return 0; case ELF_AR_BSP_OFFSET: /* * By convention, we use PT_AR_BSP to refer to * the end of the user-level backing store. * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) * to get the real value of ar.bsp at the time * the kernel was entered. * * Furthermore, when changing the contents of * PT_AR_BSP (or PT_CFM) while the task is * blocked in a system call, convert the state * so that the non-system-call exit * path is used. This ensures that the proper * state will be picked up when resuming * execution. However, it *also* means that * once we write PT_AR_BSP/PT_CFM, it won't be * possible to modify the syscall arguments of * the pending system call any longer. This * shouldn't be an issue because modifying * PT_AR_BSP/PT_CFM generally implies that * we're either abandoning the pending system * call or that we defer it's re-execution * (e.g., due to GDB doing an inferior * function call). */ urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); if (write_access) { if (*data != urbs_end) { if (in_syscall(pt)) convert_to_non_syscall(target, pt, cfm); /* * Simulate user-level write * of ar.bsp: */ pt->loadrs = 0; pt->ar_bspstore = *data; } } else *data = urbs_end; return 0; case ELF_AR_BSPSTORE_OFFSET: ptr = &pt->ar_bspstore; break; case ELF_AR_RNAT_OFFSET: ptr = &pt->ar_rnat; break; case ELF_AR_CCV_OFFSET: ptr = &pt->ar_ccv; break; case ELF_AR_UNAT_OFFSET: ptr = &pt->ar_unat; break; case ELF_AR_FPSR_OFFSET: ptr = &pt->ar_fpsr; break; case ELF_AR_PFS_OFFSET: ptr = &pt->ar_pfs; break; case ELF_AR_LC_OFFSET: return unw_access_ar(info, UNW_AR_LC, data, write_access); case ELF_AR_EC_OFFSET: return unw_access_ar(info, UNW_AR_EC, data, write_access); case ELF_AR_CSD_OFFSET: ptr = &pt->ar_csd; break; case ELF_AR_SSD_OFFSET: ptr = &pt->ar_ssd; } } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) { switch (addr) { case ELF_CR_IIP_OFFSET: ptr = &pt->cr_iip; break; case ELF_CFM_OFFSET: urbs_end = ia64_get_user_rbs_end(target, pt, &cfm); if (write_access) { if (((cfm ^ *data) & PFM_MASK) != 0) { if (in_syscall(pt)) convert_to_non_syscall(target, pt, cfm); pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK) | (*data & PFM_MASK)); } } else *data = cfm; return 0; case ELF_CR_IPSR_OFFSET: if (write_access) { unsigned long tmp = *data; /* psr.ri==3 is a reserved value: SDM 2:25 */ if ((tmp & IA64_PSR_RI) == IA64_PSR_RI) tmp &= ~IA64_PSR_RI; pt->cr_ipsr = ((tmp & IPSR_MASK) | (pt->cr_ipsr & ~IPSR_MASK)); } else *data = (pt->cr_ipsr & IPSR_MASK); return 0; } } else if (addr == ELF_NAT_OFFSET) return access_nat_bits(target, pt, info, data, write_access); else if (addr == ELF_PR_OFFSET) ptr = &pt->pr; else return -1; if (write_access) *ptr = *data; else *data = *ptr; return 0; } static int access_elf_reg(struct task_struct *target, struct unw_frame_info *info, unsigned long addr, unsigned long *data, int write_access) { if (addr >= 
ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31)) return access_elf_gpreg(target, info, addr, data, write_access); else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7)) return access_elf_breg(target, info, addr, data, write_access); else return access_elf_areg(target, info, addr, data, write_access); } struct regset_membuf { struct membuf to; int ret; }; static void do_gpregs_get(struct unw_frame_info *info, void *arg) { struct regset_membuf *dst = arg; struct membuf to = dst->to; unsigned int n; elf_greg_t reg; if (unw_unwind_to_user(info) < 0) return; /* * coredump format: * r0-r31 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) * predicate registers (p0-p63) * b0-b7 * ip cfm user-mask * ar.rsc ar.bsp ar.bspstore ar.rnat * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec */ /* Skip r0 */ membuf_zero(&to, 8); for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) { if (access_elf_reg(info->task, info, n, &reg, 0) < 0) { dst->ret = -EIO; return; } membuf_store(&to, reg); } } static void do_gpregs_set(struct unw_frame_info *info, void *arg) { struct regset_getset *dst = arg; if (unw_unwind_to_user(info) < 0) return; if (!dst->count) return; /* Skip r0 */ if (dst->pos < ELF_GR_OFFSET(1)) { user_regset_copyin_ignore(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, 0, ELF_GR_OFFSET(1)); dst->ret = 0; } while (dst->count && dst->pos < ELF_AR_END_OFFSET) { unsigned int n, from, to; elf_greg_t tmp[16]; from = dst->pos; to = from + sizeof(tmp); if (to > ELF_AR_END_OFFSET) to = ELF_AR_END_OFFSET; /* get up to 16 values */ dst->ret = user_regset_copyin(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, from, to); if (dst->ret) return; /* now copy them into registers */ for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++) if (access_elf_reg(dst->target, info, from, &tmp[n], 1) < 0) { dst->ret = -EIO; return; } } } #define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t)) static void do_fpregs_get(struct unw_frame_info *info, void *arg) { struct task_struct *task = info->task; struct regset_membuf *dst = arg; struct membuf to = dst->to; elf_fpreg_t reg; unsigned int n; if (unw_unwind_to_user(info) < 0) return; /* Skip pos 0 and 1 */ membuf_zero(&to, 2 * sizeof(elf_fpreg_t)); /* fr2-fr31 */ for (n = 2; to.left && n < 32; n++) { if (unw_get_fr(info, n, &reg)) { dst->ret = -EIO; return; } membuf_write(&to, &reg, sizeof(reg)); } /* fph */ if (!to.left) return; ia64_flush_fph(task); if (task->thread.flags & IA64_THREAD_FPH_VALID) membuf_write(&to, &task->thread.fph, 96 * sizeof(reg)); else membuf_zero(&to, 96 * sizeof(reg)); } static void do_fpregs_set(struct unw_frame_info *info, void *arg) { struct regset_getset *dst = arg; elf_fpreg_t fpreg, tmp[30]; int index, start, end; if (unw_unwind_to_user(info) < 0) return; /* Skip pos 0 and 1 */ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) { user_regset_copyin_ignore(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, 0, ELF_FP_OFFSET(2)); dst->ret = 0; if (dst->count == 0) return; } /* fr2-fr31 */ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) { start = dst->pos; end = min(((unsigned int)ELF_FP_OFFSET(32)), dst->pos + dst->count); dst->ret = user_regset_copyin(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, tmp, ELF_FP_OFFSET(2), ELF_FP_OFFSET(32)); if (dst->ret) return; if (start & 0xF) { /* only write high part */ if (unw_get_fr(info, start / sizeof(elf_fpreg_t), &fpreg)) { dst->ret = -EIO; return; } tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0] = fpreg.u.bits[0]; start &= ~0xFUL; } if (end & 0xF) { 
/* only write low part */ if (unw_get_fr(info, end / sizeof(elf_fpreg_t), &fpreg)) { dst->ret = -EIO; return; } tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1] = fpreg.u.bits[1]; end = (end + 0xF) & ~0xFUL; } for ( ; start < end ; start += sizeof(elf_fpreg_t)) { index = start / sizeof(elf_fpreg_t); if (unw_set_fr(info, index, tmp[index - 2])) { dst->ret = -EIO; return; } } if (dst->ret || dst->count == 0) return; } /* fph */ if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) { ia64_sync_fph(dst->target); dst->ret = user_regset_copyin(&dst->pos, &dst->count, &dst->u.set.kbuf, &dst->u.set.ubuf, &dst->target->thread.fph, ELF_FP_OFFSET(32), -1); } } static void unwind_and_call(void (*call)(struct unw_frame_info *, void *), struct task_struct *target, void *data) { if (target == current) unw_init_running(call, data); else { struct unw_frame_info info; memset(&info, 0, sizeof(info)); unw_init_from_blocked_task(&info, target); (*call)(&info, data); } } static int do_regset_call(void (*call)(struct unw_frame_info *, void *), struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { struct regset_getset info = { .target = target, .regset = regset, .pos = pos, .count = count, .u.set = { .kbuf = kbuf, .ubuf = ubuf }, .ret = 0 }; unwind_and_call(call, target, &info); return info.ret; } static int gpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct regset_membuf info = {.to = to}; unwind_and_call(do_gpregs_get, target, &info); return info.ret; } static int gpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { return do_regset_call(do_gpregs_set, target, regset, pos, count, kbuf, ubuf); } static void do_gpregs_writeback(struct unw_frame_info *info, void *arg) { do_sync_rbs(info, ia64_sync_user_rbs); } /* * This is called to write back the register backing store. * ptrace does this before it stops, so that a tracer reading the user * memory after the thread stops will get the current register data. */ static int gpregs_writeback(struct task_struct *target, const struct user_regset *regset, int now) { if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE)) return 0; set_notify_resume(target); return do_regset_call(do_gpregs_writeback, target, regset, 0, 0, NULL, NULL); } static int fpregs_active(struct task_struct *target, const struct user_regset *regset) { return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 
128 : 32; } static int fpregs_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct regset_membuf info = {.to = to}; unwind_and_call(do_fpregs_get, target, &info); return info.ret; } static int fpregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { return do_regset_call(do_fpregs_set, target, regset, pos, count, kbuf, ubuf); } static int access_uarea(struct task_struct *child, unsigned long addr, unsigned long *data, int write_access) { unsigned int pos = -1; /* an invalid value */ unsigned long *ptr, regnum; if ((addr & 0x7) != 0) { dprintk("ptrace: unaligned register address 0x%lx\n", addr); return -1; } if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) || (addr >= PT_R7 + 8 && addr < PT_B1) || (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) || (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) { dprintk("ptrace: rejecting access to register " "address 0x%lx\n", addr); return -1; } switch (addr) { case PT_F32 ... (PT_F127 + 15): pos = addr - PT_F32 + ELF_FP_OFFSET(32); break; case PT_F2 ... (PT_F5 + 15): pos = addr - PT_F2 + ELF_FP_OFFSET(2); break; case PT_F10 ... (PT_F31 + 15): pos = addr - PT_F10 + ELF_FP_OFFSET(10); break; case PT_F6 ... (PT_F9 + 15): pos = addr - PT_F6 + ELF_FP_OFFSET(6); break; } if (pos != -1) { unsigned reg = pos / sizeof(elf_fpreg_t); int which_half = (pos / sizeof(unsigned long)) & 1; if (reg < 32) { /* fr2-fr31 */ struct unw_frame_info info; elf_fpreg_t fpreg; memset(&info, 0, sizeof(info)); unw_init_from_blocked_task(&info, child); if (unw_unwind_to_user(&info) < 0) return 0; if (unw_get_fr(&info, reg, &fpreg)) return -1; if (write_access) { fpreg.u.bits[which_half] = *data; if (unw_set_fr(&info, reg, fpreg)) return -1; } else { *data = fpreg.u.bits[which_half]; } } else { /* fph */ elf_fpreg_t *p = &child->thread.fph[reg - 32]; unsigned long *bits = &p->u.bits[which_half]; ia64_sync_fph(child); if (write_access) *bits = *data; else if (child->thread.flags & IA64_THREAD_FPH_VALID) *data = *bits; else *data = 0; } return 0; } switch (addr) { case PT_NAT_BITS: pos = ELF_NAT_OFFSET; break; case PT_R4 ... PT_R7: pos = addr - PT_R4 + ELF_GR_OFFSET(4); break; case PT_B1 ... PT_B5: pos = addr - PT_B1 + ELF_BR_OFFSET(1); break; case PT_AR_EC: pos = ELF_AR_EC_OFFSET; break; case PT_AR_LC: pos = ELF_AR_LC_OFFSET; break; case PT_CR_IPSR: pos = ELF_CR_IPSR_OFFSET; break; case PT_CR_IIP: pos = ELF_CR_IIP_OFFSET; break; case PT_CFM: pos = ELF_CFM_OFFSET; break; case PT_AR_UNAT: pos = ELF_AR_UNAT_OFFSET; break; case PT_AR_PFS: pos = ELF_AR_PFS_OFFSET; break; case PT_AR_RSC: pos = ELF_AR_RSC_OFFSET; break; case PT_AR_RNAT: pos = ELF_AR_RNAT_OFFSET; break; case PT_AR_BSPSTORE: pos = ELF_AR_BSPSTORE_OFFSET; break; case PT_PR: pos = ELF_PR_OFFSET; break; case PT_B6: pos = ELF_BR_OFFSET(6); break; case PT_AR_BSP: pos = ELF_AR_BSP_OFFSET; break; case PT_R1 ... PT_R3: pos = addr - PT_R1 + ELF_GR_OFFSET(1); break; case PT_R12 ... PT_R15: pos = addr - PT_R12 + ELF_GR_OFFSET(12); break; case PT_R8 ... PT_R11: pos = addr - PT_R8 + ELF_GR_OFFSET(8); break; case PT_R16 ... 
PT_R31: pos = addr - PT_R16 + ELF_GR_OFFSET(16); break; case PT_AR_CCV: pos = ELF_AR_CCV_OFFSET; break; case PT_AR_FPSR: pos = ELF_AR_FPSR_OFFSET; break; case PT_B0: pos = ELF_BR_OFFSET(0); break; case PT_B7: pos = ELF_BR_OFFSET(7); break; case PT_AR_CSD: pos = ELF_AR_CSD_OFFSET; break; case PT_AR_SSD: pos = ELF_AR_SSD_OFFSET; break; } if (pos != -1) { struct unw_frame_info info; memset(&info, 0, sizeof(info)); unw_init_from_blocked_task(&info, child); if (unw_unwind_to_user(&info) < 0) return 0; return access_elf_reg(child, &info, pos, data, write_access); } /* access debug registers */ if (addr >= PT_IBR) { regnum = (addr - PT_IBR) >> 3; ptr = &child->thread.ibr[0]; } else { regnum = (addr - PT_DBR) >> 3; ptr = &child->thread.dbr[0]; } if (regnum >= 8) { dprintk("ptrace: rejecting access to register " "address 0x%lx\n", addr); return -1; } if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) { child->thread.flags |= IA64_THREAD_DBG_VALID; memset(child->thread.dbr, 0, sizeof(child->thread.dbr)); memset(child->thread.ibr, 0, sizeof(child->thread.ibr)); } ptr += regnum; if ((regnum & 1) && write_access) { /* don't let the user set kernel-level breakpoints: */ *ptr = *data & ~(7UL << 56); return 0; } if (write_access) *ptr = *data; else *data = *ptr; return 0; } static const struct user_regset native_regsets[] = { { .core_note_type = NT_PRSTATUS, .n = ELF_NGREG, .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t), .regset_get = gpregs_get, .set = gpregs_set, .writeback = gpregs_writeback }, { .core_note_type = NT_PRFPREG, .n = ELF_NFPREG, .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t), .regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active }, }; static const struct user_regset_view user_ia64_view = { .name = "ia64", .e_machine = EM_IA_64, .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) { return &user_ia64_view; } struct syscall_get_args { unsigned int i; unsigned int n; unsigned long *args; struct pt_regs *regs; }; static void syscall_get_args_cb(struct unw_frame_info *info, void *data) { struct syscall_get_args *args = data; struct pt_regs *pt = args->regs; unsigned long *krbs, cfm, ndirty, nlocals, nouts; int i, count; if (unw_unwind_to_user(info) < 0) return; /* * We get here via a few paths: * - break instruction: cfm is shared with caller. * syscall args are in out= regs, locals are non-empty. * - epsinstruction: cfm is set by br.call * locals don't exist. * * For both cases arguments are reachable in cfm.sof - cfm.sol. * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ] */ cfm = pt->cr_ifs; nlocals = (cfm >> 7) & 0x7f; /* aka sol */ nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */ krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8; ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19)); count = 0; if (in_syscall(pt)) count = min_t(int, args->n, nouts); /* Iterate over outs. */ for (i = 0; i < count; i++) { int j = ndirty + nlocals + i + args->i; args->args[i] = *ia64_rse_skip_regs(krbs, j); } while (i < args->n) { args->args[i] = 0; i++; } } void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, unsigned long *args) { struct syscall_get_args data = { .i = 0, .n = 6, .args = args, .regs = regs, }; if (task == current) unw_init_running(syscall_get_args_cb, &data); else { struct unw_frame_info ufi; memset(&ufi, 0, sizeof(ufi)); unw_init_from_blocked_task(&ufi, task); syscall_get_args_cb(&ufi, &data); } }
linux-master
arch/ia64/kernel/ptrace.c
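ia64_peek()/ia64_poke() above constantly translate between user backing-store addresses and kernel RBS slots. The key fact is that every 64th 8-byte slot of the backing store (the one whose slot number is 0x3f) holds an RNaT collection word rather than a register, so register counting has to skip those slots. A self-contained sketch of that arithmetic, written as a hedged reconstruction of the asm/rse.h helpers rather than the kernel header itself:

#include <stdio.h>

/* slot index (0..63) of an 8-byte backing-store slot at address addr */
static unsigned long rse_slot_num(unsigned long addr)
{
	return (addr >> 3) & 0x3f;
}

/* slot 0x3f of every 64-slot group is an RNaT collection word */
static int rse_is_rnat_slot(unsigned long addr)
{
	return rse_slot_num(addr) == 0x3f;
}

/* number of registers (excluding RNaT slots) between bspstore and bsp */
static unsigned long rse_num_regs(unsigned long bspstore, unsigned long bsp)
{
	unsigned long slots = (bsp - bspstore) / 8;

	return slots - (rse_slot_num(bspstore) + slots) / 0x40;
}

int main(void)
{
	unsigned long base = 0x10000;	/* slot 0 of a 64-slot group */

	/* 130 slots forward cross two RNaT slots -> 128 actual registers */
	printf("regs = %lu\n", rse_num_regs(base, base + 130 * 8));
	printf("slot 63 is rnat? %d\n", rse_is_rnat_slot(base + 63 * 8));
	return 0;
}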
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
 *	Alex Williamson <[email protected]>
 *	Bjorn Helgaas <[email protected]>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/acpi-ext.h>

/*
 * Device CSRs that do not appear in PCI config space should be described
 * via ACPI.  This would normally be done with Address Space Descriptors
 * marked as "consumer-only," but old versions of Windows and Linux ignore
 * the producer/consumer flag, so HP invented a vendor-defined resource to
 * describe the location and size of CSR space.
 */

struct acpi_vendor_uuid hp_ccsr_uuid = {
	.subtype = 2,
	.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
	    0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};

static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_resource *resource;
	struct acpi_resource_vendor_typed *vendor;

	status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
		&buffer);

	resource = buffer.pointer;
	vendor = &resource->data.vendor_typed;

	if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
		status = AE_NOT_FOUND;
		goto exit;
	}

	memcpy(base, vendor->byte_data, sizeof(*base));
	memcpy(length, vendor->byte_data + 8, sizeof(*length));

  exit:
	kfree(buffer.pointer);
	return status;
}

struct csr_space {
	u64	base;
	u64	length;
};

static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
{
	struct csr_space *space = data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &addr);
	if (ACPI_SUCCESS(status) &&
	    addr.resource_type == ACPI_MEMORY_RANGE &&
	    addr.address.address_length &&
	    addr.producer_consumer == ACPI_CONSUMER) {
		space->base = addr.address.minimum;
		space->length = addr.address.address_length;
		return AE_CTRL_TERMINATE;
	}
	return AE_OK;		/* keep looking */
}

static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
{
	struct csr_space space = { 0, 0 };

	acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
	if (!space.length)
		return AE_NOT_FOUND;

	*base = space.base;
	*length = space.length;
	return AE_OK;
}

acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
	acpi_status status;

	status = hp_ccsr_locate(obj, csr_base, csr_length);
	if (ACPI_SUCCESS(status))
		return status;

	return hp_crs_locate(obj, csr_base, csr_length);
}
EXPORT_SYMBOL(hp_acpi_csr_space);
linux-master
arch/ia64/kernel/acpi-ext.c
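hp_ccsr_locate() above pulls the CSR base and length out of a 16-byte vendor-defined resource. The sketch below mirrors just that extraction in user space; the sample buffer is fabricated, and the plain memcpy assumes the host is little-endian, as IA-64 is.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void parse_ccsr(const uint8_t byte_data[16], uint64_t *base,
		       uint64_t *length)
{
	/* same extraction as hp_ccsr_locate(): two raw 64-bit copies */
	memcpy(base, byte_data, sizeof(*base));
	memcpy(length, byte_data + 8, sizeof(*length));
}

int main(void)
{
	/* base 0xf8000000, length 0x2000, encoded little-endian */
	const uint8_t sample[16] = {
		0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	};
	uint64_t base, length;

	parse_ccsr(sample, &base, &length);
	printf("base=0x%llx length=0x%llx\n",
	       (unsigned long long)base, (unsigned long long)length);
	return 0;
}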
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-map-ops.h>
#include <linux/export.h>

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly;

const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
linux-master
arch/ia64/kernel/dma-mapping.c
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific trap handling. * * Copyright (C) 1998-2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * * 05/12/00 grao <[email protected]> : added isr in siginfo for SIGFPE */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/tty.h> #include <linux/vt_kern.h> /* For unblank_screen() */ #include <linux/export.h> #include <linux/extable.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/delay.h> /* for ssleep() */ #include <linux/kdebug.h> #include <linux/uaccess.h> #include <asm/fpswa.h> #include <asm/intrinsics.h> #include <asm/processor.h> #include <asm/exception.h> #include <asm/setup.h> fpswa_interface_t *fpswa_interface; EXPORT_SYMBOL(fpswa_interface); void __init trap_init (void) { if (ia64_boot_param->fpswa) /* FPSWA fixup: make the interface pointer a kernel virtual address: */ fpswa_interface = __va(ia64_boot_param->fpswa); } int die (const char *str, struct pt_regs *regs, long err) { static struct { spinlock_t lock; u32 lock_owner; int lock_owner_depth; } die = { .lock = __SPIN_LOCK_UNLOCKED(die.lock), .lock_owner = -1, .lock_owner_depth = 0 }; static int die_counter; int cpu = get_cpu(); if (die.lock_owner != cpu) { console_verbose(); spin_lock_irq(&die.lock); die.lock_owner = cpu; die.lock_owner_depth = 0; bust_spinlocks(1); } put_cpu(); if (++die.lock_owner_depth < 3) { printk("%s[%d]: %s %ld [%d]\n", current->comm, task_pid_nr(current), str, err, ++die_counter); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) != NOTIFY_STOP) show_regs(regs); else regs = NULL; } else printk(KERN_ERR "Recursive die() failure, output suppressed\n"); bust_spinlocks(0); die.lock_owner = -1; add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die.lock); if (!regs) return 1; if (panic_on_oops) panic("Fatal exception"); make_task_dead(SIGSEGV); return 0; } int die_if_kernel (char *str, struct pt_regs *regs, long err) { if (!user_mode(regs)) return die(str, regs, err); return 0; } void __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs) { int sig, code; switch (break_num) { case 0: /* unknown error (used by GCC for __builtin_abort()) */ if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP) == NOTIFY_STOP) return; if (die_if_kernel("bugcheck!", regs, break_num)) return; sig = SIGILL; code = ILL_ILLOPC; break; case 1: /* integer divide by zero */ sig = SIGFPE; code = FPE_INTDIV; break; case 2: /* integer overflow */ sig = SIGFPE; code = FPE_INTOVF; break; case 3: /* range check/bounds check */ sig = SIGFPE; code = FPE_FLTSUB; break; case 4: /* null pointer dereference */ sig = SIGSEGV; code = SEGV_MAPERR; break; case 5: /* misaligned data */ sig = SIGSEGV; code = BUS_ADRALN; break; case 6: /* decimal overflow */ sig = SIGFPE; code = __FPE_DECOVF; break; case 7: /* decimal divide by zero */ sig = SIGFPE; code = __FPE_DECDIV; break; case 8: /* packed decimal error */ sig = SIGFPE; code = __FPE_DECERR; break; case 9: /* invalid ASCII digit */ sig = SIGFPE; code = __FPE_INVASC; break; case 10: /* invalid decimal digit */ sig = SIGFPE; code = __FPE_INVDEC; break; case 11: /* paragraph stack overflow */ sig = SIGSEGV; code = __SEGV_PSTKOVF; break; case 0x3f000 ... 
0x3ffff: /* bundle-update in progress */ sig = SIGILL; code = __ILL_BNDMOD; break; default: if ((break_num < 0x40000 || break_num > 0x100000) && die_if_kernel("Bad break", regs, break_num)) return; if (break_num < 0x80000) { sig = SIGILL; code = __ILL_BREAK; } else { if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP) == NOTIFY_STOP) return; sig = SIGTRAP; code = TRAP_BRKPT; } } force_sig_fault(sig, code, (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), break_num, 0 /* clear __ISR_VALID */, 0); } /* * disabled_fph_fault() is called when a user-level process attempts to access f32..f127 * and it doesn't own the fp-high register partition. When this happens, we save the * current fph partition in the task_struct of the fpu-owner (if necessary) and then load * the fp-high partition of the current task (if necessary). Note that the kernel has * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes * care of clearing psr.dfh. */ static inline void disabled_fph_fault (struct pt_regs *regs) { struct ia64_psr *psr = ia64_psr(regs); /* first, grant user-level access to fph partition: */ psr->dfh = 0; /* * Make sure that no other task gets in on this processor * while we're claiming the FPU */ preempt_disable(); #ifndef CONFIG_SMP { struct task_struct *fpu_owner = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER); if (ia64_is_local_fpu_owner(current)) { preempt_enable_no_resched(); return; } if (fpu_owner) ia64_flush_fph(fpu_owner); } #endif /* !CONFIG_SMP */ ia64_set_local_fpu_owner(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) { __ia64_load_fpu(current->thread.fph); psr->mfh = 0; } else { __ia64_init_fpu(); /* * Set mfh because the state in thread.fph does not match the state in * the fph partition. */ psr->mfh = 1; } preempt_enable_no_resched(); } static inline int fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs, struct pt_regs *regs) { fp_state_t fp_state; fpswa_ret_t ret; if (!fpswa_interface) return -1; memset(&fp_state, 0, sizeof(fp_state_t)); /* * compute fp_state. only FP registers f6 - f11 are used by the * kernel, so set those bits in the mask and set the low volatile * pointer to point to these registers. */ fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */ fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6; /* * unsigned long (*EFI_FPSWA) ( * unsigned long trap_type, * void *Bundle, * unsigned long *pipsr, * unsigned long *pfsr, * unsigned long *pisr, * unsigned long *ppreds, * unsigned long *pifs, * void *fp_state); */ ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle, (unsigned long *) ipsr, (unsigned long *) fpsr, (unsigned long *) isr, (unsigned long *) pr, (unsigned long *) ifs, &fp_state); return ret.status; } struct fpu_swa_msg { unsigned long count; unsigned long time; }; static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast); DECLARE_PER_CPU(struct fpu_swa_msg, cpulast); static struct fpu_swa_msg last __cacheline_aligned; /* * Handle floating-point assist faults and traps. 
*/ static int handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr) { long exception, bundle[2]; unsigned long fault_ip; fault_ip = regs->cr_iip; if (!fp_fault && (ia64_psr(regs)->ri == 0)) fault_ip -= 16; if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle))) return -1; if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { unsigned long count, current_jiffies = jiffies; struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast); if (unlikely(current_jiffies > cp->time)) cp->count = 0; if (unlikely(cp->count < 5)) { cp->count++; cp->time = current_jiffies + 5 * HZ; /* minimize races by grabbing a copy of count BEFORE checking last.time. */ count = last.count; barrier(); /* * Lower 4 bits are used as a count. Upper bits are a sequence * number that is updated when count is reset. The cmpxchg will * fail is seqno has changed. This minimizes multiple cpus * resetting the count. */ if (current_jiffies > last.time) (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15)); /* used fetchadd to atomically update the count */ if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) { last.time = current_jiffies + 5 * HZ; printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); } } } exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr, &regs->cr_ifs, regs); if (fp_fault) { if (exception == 0) { /* emulation was successful */ ia64_increment_ip(regs); } else if (exception == -1) { printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n"); return -1; } else { /* is next instruction a trap? */ int si_code; if (exception & 2) { ia64_increment_ip(regs); } si_code = FPE_FLTUNK; /* default code */ if (isr & 0x11) { si_code = FPE_FLTINV; } else if (isr & 0x22) { /* denormal operand gets the same si_code as underflow * see arch/i386/kernel/traps.c:math_error() */ si_code = FPE_FLTUND; } else if (isr & 0x44) { si_code = FPE_FLTDIV; } force_sig_fault(SIGFPE, si_code, (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), 0, __ISR_VALID, isr); } } else { if (exception == -1) { printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n"); return -1; } else if (exception != 0) { /* raise exception */ int si_code; si_code = FPE_FLTUNK; /* default code */ if (isr & 0x880) { si_code = FPE_FLTOVF; } else if (isr & 0x1100) { si_code = FPE_FLTUND; } else if (isr & 0x2200) { si_code = FPE_FLTRES; } force_sig_fault(SIGFPE, si_code, (void __user *) (regs->cr_iip + ia64_psr(regs)->ri), 0, __ISR_VALID, isr); } } return 0; } struct illegal_op_return { unsigned long fkt, arg1, arg2, arg3; }; struct illegal_op_return ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { struct illegal_op_return rv; char buf[128]; #ifdef CONFIG_IA64_BRL_EMU { extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long); rv = ia64_emulate_brl(&regs, ec); if (rv.fkt != (unsigned long) -1) return rv; } #endif sprintf(buf, "IA-64 Illegal operation fault"); rv.fkt = 0; if (die_if_kernel(buf, &regs, 0)) return rv; force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri), 0, 0, 0); return rv; } void __kprobes ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, unsigned long iim, unsigned long itir, long arg5, long arg6, long arg7, struct pt_regs regs) { unsigned long code, error = isr, iip; char 
buf[128]; int result, sig, si_code; static const char *reason[] = { "IA-64 Illegal Operation fault", "IA-64 Privileged Operation fault", "IA-64 Privileged Register fault", "IA-64 Reserved Register/Field fault", "Disabled Instruction Set Transition fault", "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault", "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12", "Unknown fault 13", "Unknown fault 14", "Unknown fault 15" }; if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { /* * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel * the lfetch. */ ia64_psr(&regs)->ed = 1; return; } iip = regs.cr_iip + ia64_psr(&regs)->ri; switch (vector) { case 24: /* General Exception */ code = (isr >> 4) & 0xf; sprintf(buf, "General Exception: %s%s", reason[code], (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" : " (data access)") : ""); if (code == 8) { # ifdef CONFIG_IA64_PRINT_HAZARDS printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n", current->comm, task_pid_nr(current), regs.cr_iip + ia64_psr(&regs)->ri, regs.pr); # endif return; } break; case 25: /* Disabled FP-Register */ if (isr & 2) { disabled_fph_fault(&regs); return; } sprintf(buf, "Disabled FPL fault---not supposed to happen!"); break; case 26: /* NaT Consumption */ if (user_mode(&regs)) { void __user *addr; if (((isr >> 4) & 0xf) == 2) { /* NaT page consumption */ sig = SIGSEGV; code = SEGV_ACCERR; addr = (void __user *) ifa; } else { /* register NaT consumption */ sig = SIGILL; code = ILL_ILLOPN; addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri); } force_sig_fault(sig, code, addr, vector, __ISR_VALID, isr); return; } else if (ia64_done_with_exception(&regs)) return; sprintf(buf, "NaT consumption"); break; case 31: /* Unsupported Data Reference */ if (user_mode(&regs)) { force_sig_fault(SIGILL, ILL_ILLOPN, (void __user *) iip, vector, __ISR_VALID, isr); return; } sprintf(buf, "Unsupported data reference"); break; case 29: /* Debug */ case 35: /* Taken Branch Trap */ case 36: /* Single Step Trap */ if (fsys_mode(current, &regs)) { extern char __kernel_syscall_via_break[]; /* * Got a trap in fsys-mode: Taken Branch Trap * and Single Step trap need special handling; * Debug trap is ignored (we disable it here * and re-enable it in the lower-privilege trap). */ if (unlikely(vector == 29)) { set_thread_flag(TIF_DB_DISABLED); ia64_psr(&regs)->db = 0; ia64_psr(&regs)->lp = 1; return; } /* re-do the system call via break 0x100000: */ regs.cr_iip = (unsigned long) __kernel_syscall_via_break; ia64_psr(&regs)->ri = 0; ia64_psr(&regs)->cpl = 3; return; } switch (vector) { default: case 29: si_code = TRAP_HWBKPT; #ifdef CONFIG_ITANIUM /* * Erratum 10 (IFA may contain incorrect address) now has * "NoFix" status. There are no plans for fixing this. */ if (ia64_psr(&regs)->is == 0) ifa = regs.cr_iip; #endif break; case 35: si_code = TRAP_BRANCH; ifa = 0; break; case 36: si_code = TRAP_TRACE; ifa = 0; break; } if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, si_code, SIGTRAP) == NOTIFY_STOP) return; force_sig_fault(SIGTRAP, si_code, (void __user *) ifa, 0, __ISR_VALID, isr); return; case 32: /* fp fault */ case 33: /* fp trap */ result = handle_fpu_swa((vector == 32) ? 
1 : 0, &regs, isr); if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) { force_sig_fault(SIGFPE, FPE_FLTINV, (void __user *) iip, 0, __ISR_VALID, isr); } return; case 34: if (isr & 0x2) { /* Lower-Privilege Transfer Trap */ /* If we disabled debug traps during an fsyscall, * re-enable them here. */ if (test_thread_flag(TIF_DB_DISABLED)) { clear_thread_flag(TIF_DB_DISABLED); ia64_psr(&regs)->db = 1; } /* * Just clear PSR.lp and then return immediately: * all the interesting work (e.g., signal delivery) * is done in the kernel exit path. */ ia64_psr(&regs)->lp = 0; return; } else { /* Unimplemented Instr. Address Trap */ if (user_mode(&regs)) { force_sig_fault(SIGILL, ILL_BADIADDR, (void __user *) iip, 0, 0, 0); return; } sprintf(buf, "Unimplemented Instruction Address fault"); } break; case 45: printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", iip, ifa, isr); force_sig(SIGSEGV); return; case 46: printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", iip, ifa, isr, iim); force_sig(SIGSEGV); return; case 47: sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16); break; default: sprintf(buf, "Fault %lu", vector); break; } if (!die_if_kernel(buf, &regs, error)) force_sig(SIGILL); }
linux-master
arch/ia64/kernel/traps.c
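After the floating-point software assist runs, handle_fpu_swa() above picks a SIGFPE si_code from bits of the ISR. The stand-alone sketch below reproduces only that selection logic; string names stand in for the kernel's FPE_* constants, and the sample ISR values are invented.

#include <stdio.h>

/* Fault path (vector 32): same isr tests as handle_fpu_swa() */
static const char *fp_fault_code(unsigned long isr)
{
	if (isr & 0x11)
		return "FPE_FLTINV";	/* invalid operation */
	if (isr & 0x22)
		return "FPE_FLTUND";	/* denormal operand, reported as underflow */
	if (isr & 0x44)
		return "FPE_FLTDIV";	/* divide by zero */
	return "FPE_FLTUNK";
}

/* Trap path (vector 33) */
static const char *fp_trap_code(unsigned long isr)
{
	if (isr & 0x880)
		return "FPE_FLTOVF";	/* overflow */
	if (isr & 0x1100)
		return "FPE_FLTUND";	/* underflow */
	if (isr & 0x2200)
		return "FPE_FLTRES";	/* inexact result */
	return "FPE_FLTUNK";
}

int main(void)
{
	printf("fault isr=0x44   -> %s\n", fp_fault_code(0x44));
	printf("trap  isr=0x1100 -> %s\n", fp_trap_code(0x1100));
	return 0;
}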
// SPDX-License-Identifier: GPL-2.0 /* * Generate definitions needed by assembly language modules. * This code generates raw asm output which is post-processed * to extract and format the required data. */ #define ASM_OFFSETS_C 1 #include <linux/sched/signal.h> #include <linux/pid.h> #include <linux/clocksource.h> #include <linux/kbuild.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/siginfo.h> #include <asm/sigcontext.h> #include <asm/mca.h> #include "../kernel/sigframe.h" #include "../kernel/fsyscall_gtod_data.h" void foo(void) { DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct)); DEFINE(IA64_THREAD_INFO_SIZE, sizeof (struct thread_info)); DEFINE(IA64_PT_REGS_SIZE, sizeof (struct pt_regs)); DEFINE(IA64_SWITCH_STACK_SIZE, sizeof (struct switch_stack)); DEFINE(IA64_SIGINFO_SIZE, sizeof (struct siginfo)); DEFINE(IA64_CPU_SIZE, sizeof (struct cpuinfo_ia64)); DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe)); DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info)); BUILD_BUG_ON(sizeof(struct upid) != 16); DEFINE(IA64_UPID_SHIFT, 4); BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp)); DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave)); DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime)); DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime)); #endif BLANK(); DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked)); DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid)); DEFINE(IA64_TASK_THREAD_PID_OFFSET,offsetof (struct task_struct, thread_pid)); DEFINE(IA64_PID_LEVEL_OFFSET, offsetof (struct pid, level)); DEFINE(IA64_PID_UPID_OFFSET, offsetof (struct pid, numbers[0])); DEFINE(IA64_TASK_PENDING_OFFSET,offsetof (struct task_struct, pending)); DEFINE(IA64_TASK_PID_OFFSET, offsetof (struct task_struct, pid)); DEFINE(IA64_TASK_REAL_PARENT_OFFSET, offsetof (struct task_struct, real_parent)); DEFINE(IA64_TASK_SIGNAL_OFFSET,offsetof (struct task_struct, signal)); DEFINE(IA64_TASK_TGID_OFFSET, offsetof (struct task_struct, tgid)); DEFINE(IA64_TASK_THREAD_KSP_OFFSET, offsetof (struct task_struct, thread.ksp)); DEFINE(IA64_TASK_THREAD_ON_USTACK_OFFSET, offsetof (struct task_struct, thread.on_ustack)); BLANK(); DEFINE(IA64_SIGNAL_GROUP_STOP_COUNT_OFFSET,offsetof (struct signal_struct, group_stop_count)); DEFINE(IA64_SIGNAL_SHARED_PENDING_OFFSET,offsetof (struct signal_struct, shared_pending)); DEFINE(IA64_SIGNAL_PIDS_TGID_OFFSET, offsetof (struct signal_struct, pids[PIDTYPE_TGID])); BLANK(); DEFINE(IA64_PT_REGS_B6_OFFSET, offsetof (struct pt_regs, b6)); DEFINE(IA64_PT_REGS_B7_OFFSET, offsetof (struct pt_regs, b7)); DEFINE(IA64_PT_REGS_AR_CSD_OFFSET, offsetof (struct pt_regs, ar_csd)); DEFINE(IA64_PT_REGS_AR_SSD_OFFSET, offsetof (struct pt_regs, ar_ssd)); DEFINE(IA64_PT_REGS_R8_OFFSET, offsetof (struct pt_regs, r8)); DEFINE(IA64_PT_REGS_R9_OFFSET, offsetof (struct pt_regs, r9)); DEFINE(IA64_PT_REGS_R10_OFFSET, offsetof (struct pt_regs, r10)); DEFINE(IA64_PT_REGS_R11_OFFSET, offsetof (struct pt_regs, r11)); DEFINE(IA64_PT_REGS_CR_IPSR_OFFSET, offsetof (struct pt_regs, cr_ipsr)); DEFINE(IA64_PT_REGS_CR_IIP_OFFSET, offsetof (struct pt_regs, cr_iip)); DEFINE(IA64_PT_REGS_CR_IFS_OFFSET, offsetof (struct pt_regs, cr_ifs)); DEFINE(IA64_PT_REGS_AR_UNAT_OFFSET, offsetof (struct pt_regs, 
ar_unat)); DEFINE(IA64_PT_REGS_AR_PFS_OFFSET, offsetof (struct pt_regs, ar_pfs)); DEFINE(IA64_PT_REGS_AR_RSC_OFFSET, offsetof (struct pt_regs, ar_rsc)); DEFINE(IA64_PT_REGS_AR_RNAT_OFFSET, offsetof (struct pt_regs, ar_rnat)); DEFINE(IA64_PT_REGS_AR_BSPSTORE_OFFSET, offsetof (struct pt_regs, ar_bspstore)); DEFINE(IA64_PT_REGS_PR_OFFSET, offsetof (struct pt_regs, pr)); DEFINE(IA64_PT_REGS_B0_OFFSET, offsetof (struct pt_regs, b0)); DEFINE(IA64_PT_REGS_LOADRS_OFFSET, offsetof (struct pt_regs, loadrs)); DEFINE(IA64_PT_REGS_R1_OFFSET, offsetof (struct pt_regs, r1)); DEFINE(IA64_PT_REGS_R12_OFFSET, offsetof (struct pt_regs, r12)); DEFINE(IA64_PT_REGS_R13_OFFSET, offsetof (struct pt_regs, r13)); DEFINE(IA64_PT_REGS_AR_FPSR_OFFSET, offsetof (struct pt_regs, ar_fpsr)); DEFINE(IA64_PT_REGS_R15_OFFSET, offsetof (struct pt_regs, r15)); DEFINE(IA64_PT_REGS_R14_OFFSET, offsetof (struct pt_regs, r14)); DEFINE(IA64_PT_REGS_R2_OFFSET, offsetof (struct pt_regs, r2)); DEFINE(IA64_PT_REGS_R3_OFFSET, offsetof (struct pt_regs, r3)); DEFINE(IA64_PT_REGS_R16_OFFSET, offsetof (struct pt_regs, r16)); DEFINE(IA64_PT_REGS_R17_OFFSET, offsetof (struct pt_regs, r17)); DEFINE(IA64_PT_REGS_R18_OFFSET, offsetof (struct pt_regs, r18)); DEFINE(IA64_PT_REGS_R19_OFFSET, offsetof (struct pt_regs, r19)); DEFINE(IA64_PT_REGS_R20_OFFSET, offsetof (struct pt_regs, r20)); DEFINE(IA64_PT_REGS_R21_OFFSET, offsetof (struct pt_regs, r21)); DEFINE(IA64_PT_REGS_R22_OFFSET, offsetof (struct pt_regs, r22)); DEFINE(IA64_PT_REGS_R23_OFFSET, offsetof (struct pt_regs, r23)); DEFINE(IA64_PT_REGS_R24_OFFSET, offsetof (struct pt_regs, r24)); DEFINE(IA64_PT_REGS_R25_OFFSET, offsetof (struct pt_regs, r25)); DEFINE(IA64_PT_REGS_R26_OFFSET, offsetof (struct pt_regs, r26)); DEFINE(IA64_PT_REGS_R27_OFFSET, offsetof (struct pt_regs, r27)); DEFINE(IA64_PT_REGS_R28_OFFSET, offsetof (struct pt_regs, r28)); DEFINE(IA64_PT_REGS_R29_OFFSET, offsetof (struct pt_regs, r29)); DEFINE(IA64_PT_REGS_R30_OFFSET, offsetof (struct pt_regs, r30)); DEFINE(IA64_PT_REGS_R31_OFFSET, offsetof (struct pt_regs, r31)); DEFINE(IA64_PT_REGS_AR_CCV_OFFSET, offsetof (struct pt_regs, ar_ccv)); DEFINE(IA64_PT_REGS_F6_OFFSET, offsetof (struct pt_regs, f6)); DEFINE(IA64_PT_REGS_F7_OFFSET, offsetof (struct pt_regs, f7)); DEFINE(IA64_PT_REGS_F8_OFFSET, offsetof (struct pt_regs, f8)); DEFINE(IA64_PT_REGS_F9_OFFSET, offsetof (struct pt_regs, f9)); DEFINE(IA64_PT_REGS_F10_OFFSET, offsetof (struct pt_regs, f10)); DEFINE(IA64_PT_REGS_F11_OFFSET, offsetof (struct pt_regs, f11)); BLANK(); DEFINE(IA64_SWITCH_STACK_CALLER_UNAT_OFFSET, offsetof (struct switch_stack, caller_unat)); DEFINE(IA64_SWITCH_STACK_AR_FPSR_OFFSET, offsetof (struct switch_stack, ar_fpsr)); DEFINE(IA64_SWITCH_STACK_F2_OFFSET, offsetof (struct switch_stack, f2)); DEFINE(IA64_SWITCH_STACK_F3_OFFSET, offsetof (struct switch_stack, f3)); DEFINE(IA64_SWITCH_STACK_F4_OFFSET, offsetof (struct switch_stack, f4)); DEFINE(IA64_SWITCH_STACK_F5_OFFSET, offsetof (struct switch_stack, f5)); DEFINE(IA64_SWITCH_STACK_F12_OFFSET, offsetof (struct switch_stack, f12)); DEFINE(IA64_SWITCH_STACK_F13_OFFSET, offsetof (struct switch_stack, f13)); DEFINE(IA64_SWITCH_STACK_F14_OFFSET, offsetof (struct switch_stack, f14)); DEFINE(IA64_SWITCH_STACK_F15_OFFSET, offsetof (struct switch_stack, f15)); DEFINE(IA64_SWITCH_STACK_F16_OFFSET, offsetof (struct switch_stack, f16)); DEFINE(IA64_SWITCH_STACK_F17_OFFSET, offsetof (struct switch_stack, f17)); DEFINE(IA64_SWITCH_STACK_F18_OFFSET, offsetof (struct switch_stack, f18)); 
DEFINE(IA64_SWITCH_STACK_F19_OFFSET, offsetof (struct switch_stack, f19)); DEFINE(IA64_SWITCH_STACK_F20_OFFSET, offsetof (struct switch_stack, f20)); DEFINE(IA64_SWITCH_STACK_F21_OFFSET, offsetof (struct switch_stack, f21)); DEFINE(IA64_SWITCH_STACK_F22_OFFSET, offsetof (struct switch_stack, f22)); DEFINE(IA64_SWITCH_STACK_F23_OFFSET, offsetof (struct switch_stack, f23)); DEFINE(IA64_SWITCH_STACK_F24_OFFSET, offsetof (struct switch_stack, f24)); DEFINE(IA64_SWITCH_STACK_F25_OFFSET, offsetof (struct switch_stack, f25)); DEFINE(IA64_SWITCH_STACK_F26_OFFSET, offsetof (struct switch_stack, f26)); DEFINE(IA64_SWITCH_STACK_F27_OFFSET, offsetof (struct switch_stack, f27)); DEFINE(IA64_SWITCH_STACK_F28_OFFSET, offsetof (struct switch_stack, f28)); DEFINE(IA64_SWITCH_STACK_F29_OFFSET, offsetof (struct switch_stack, f29)); DEFINE(IA64_SWITCH_STACK_F30_OFFSET, offsetof (struct switch_stack, f30)); DEFINE(IA64_SWITCH_STACK_F31_OFFSET, offsetof (struct switch_stack, f31)); DEFINE(IA64_SWITCH_STACK_R4_OFFSET, offsetof (struct switch_stack, r4)); DEFINE(IA64_SWITCH_STACK_R5_OFFSET, offsetof (struct switch_stack, r5)); DEFINE(IA64_SWITCH_STACK_R6_OFFSET, offsetof (struct switch_stack, r6)); DEFINE(IA64_SWITCH_STACK_R7_OFFSET, offsetof (struct switch_stack, r7)); DEFINE(IA64_SWITCH_STACK_B0_OFFSET, offsetof (struct switch_stack, b0)); DEFINE(IA64_SWITCH_STACK_B1_OFFSET, offsetof (struct switch_stack, b1)); DEFINE(IA64_SWITCH_STACK_B2_OFFSET, offsetof (struct switch_stack, b2)); DEFINE(IA64_SWITCH_STACK_B3_OFFSET, offsetof (struct switch_stack, b3)); DEFINE(IA64_SWITCH_STACK_B4_OFFSET, offsetof (struct switch_stack, b4)); DEFINE(IA64_SWITCH_STACK_B5_OFFSET, offsetof (struct switch_stack, b5)); DEFINE(IA64_SWITCH_STACK_AR_PFS_OFFSET, offsetof (struct switch_stack, ar_pfs)); DEFINE(IA64_SWITCH_STACK_AR_LC_OFFSET, offsetof (struct switch_stack, ar_lc)); DEFINE(IA64_SWITCH_STACK_AR_UNAT_OFFSET, offsetof (struct switch_stack, ar_unat)); DEFINE(IA64_SWITCH_STACK_AR_RNAT_OFFSET, offsetof (struct switch_stack, ar_rnat)); DEFINE(IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET, offsetof (struct switch_stack, ar_bspstore)); DEFINE(IA64_SWITCH_STACK_PR_OFFSET, offsetof (struct switch_stack, pr)); BLANK(); DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip)); DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp)); DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr)); DEFINE(IA64_SIGCONTEXT_AR_RNAT_OFFSET, offsetof (struct sigcontext, sc_ar_rnat)); DEFINE(IA64_SIGCONTEXT_AR_UNAT_OFFSET, offsetof (struct sigcontext, sc_ar_unat)); DEFINE(IA64_SIGCONTEXT_B0_OFFSET, offsetof (struct sigcontext, sc_br[0])); DEFINE(IA64_SIGCONTEXT_CFM_OFFSET, offsetof (struct sigcontext, sc_cfm)); DEFINE(IA64_SIGCONTEXT_FLAGS_OFFSET, offsetof (struct sigcontext, sc_flags)); DEFINE(IA64_SIGCONTEXT_FR6_OFFSET, offsetof (struct sigcontext, sc_fr[6])); DEFINE(IA64_SIGCONTEXT_PR_OFFSET, offsetof (struct sigcontext, sc_pr)); DEFINE(IA64_SIGCONTEXT_R12_OFFSET, offsetof (struct sigcontext, sc_gr[12])); DEFINE(IA64_SIGCONTEXT_RBS_BASE_OFFSET,offsetof (struct sigcontext, sc_rbs_base)); DEFINE(IA64_SIGCONTEXT_LOADRS_OFFSET, offsetof (struct sigcontext, sc_loadrs)); BLANK(); DEFINE(IA64_SIGPENDING_SIGNAL_OFFSET, offsetof (struct sigpending, signal)); BLANK(); DEFINE(IA64_SIGFRAME_ARG0_OFFSET, offsetof (struct sigframe, arg0)); DEFINE(IA64_SIGFRAME_ARG1_OFFSET, offsetof (struct sigframe, arg1)); DEFINE(IA64_SIGFRAME_ARG2_OFFSET, offsetof (struct sigframe, arg2)); 
DEFINE(IA64_SIGFRAME_HANDLER_OFFSET, offsetof (struct sigframe, handler)); DEFINE(IA64_SIGFRAME_SIGCONTEXT_OFFSET, offsetof (struct sigframe, sc)); BLANK(); /* for assembly files which can't include sched.h: */ DEFINE(IA64_CLONE_VFORK, CLONE_VFORK); DEFINE(IA64_CLONE_VM, CLONE_VM); BLANK(); DEFINE(IA64_CPUINFO_NSEC_PER_CYC_OFFSET, offsetof (struct cpuinfo_ia64, nsec_per_cyc)); DEFINE(IA64_CPUINFO_PTCE_BASE_OFFSET, offsetof (struct cpuinfo_ia64, ptce_base)); DEFINE(IA64_CPUINFO_PTCE_COUNT_OFFSET, offsetof (struct cpuinfo_ia64, ptce_count)); DEFINE(IA64_CPUINFO_PTCE_STRIDE_OFFSET, offsetof (struct cpuinfo_ia64, ptce_stride)); BLANK(); DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct __kernel_old_timespec, tv_nsec)); DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET, offsetof (struct time_sn_spec, snsec)); DEFINE(CLONE_SETTLS_BIT, 19); #if CLONE_SETTLS != (1<<19) # error "CLONE_SETTLS_BIT incorrect, please fix" #endif BLANK(); DEFINE(IA64_MCA_CPU_MCA_STACK_OFFSET, offsetof (struct ia64_mca_cpu, mca_stack)); DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET, offsetof (struct ia64_mca_cpu, init_stack)); BLANK(); DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET, offsetof (struct ia64_sal_os_state, os_gp)); DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, offsetof (struct ia64_sal_os_state, proc_state_param)); DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET, offsetof (struct ia64_sal_os_state, sal_ra)); DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET, offsetof (struct ia64_sal_os_state, sal_gp)); DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, offsetof (struct ia64_sal_os_state, pal_min_state)); DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET, offsetof (struct ia64_sal_os_state, os_status)); DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET, offsetof (struct ia64_sal_os_state, context)); DEFINE(IA64_SAL_OS_STATE_SIZE, sizeof (struct ia64_sal_os_state)); BLANK(); DEFINE(IA64_PMSA_GR_OFFSET, offsetof(struct pal_min_state_area, pmsa_gr)); DEFINE(IA64_PMSA_BANK1_GR_OFFSET, offsetof(struct pal_min_state_area, pmsa_bank1_gr)); DEFINE(IA64_PMSA_PR_OFFSET, offsetof(struct pal_min_state_area, pmsa_pr)); DEFINE(IA64_PMSA_BR0_OFFSET, offsetof(struct pal_min_state_area, pmsa_br0)); DEFINE(IA64_PMSA_RSC_OFFSET, offsetof(struct pal_min_state_area, pmsa_rsc)); DEFINE(IA64_PMSA_IIP_OFFSET, offsetof(struct pal_min_state_area, pmsa_iip)); DEFINE(IA64_PMSA_IPSR_OFFSET, offsetof(struct pal_min_state_area, pmsa_ipsr)); DEFINE(IA64_PMSA_IFS_OFFSET, offsetof(struct pal_min_state_area, pmsa_ifs)); DEFINE(IA64_PMSA_XIP_OFFSET, offsetof(struct pal_min_state_area, pmsa_xip)); BLANK(); /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ DEFINE(IA64_GTOD_SEQ_OFFSET, offsetof (struct fsyscall_gtod_data_t, seq)); DEFINE(IA64_GTOD_WALL_TIME_OFFSET, offsetof (struct fsyscall_gtod_data_t, wall_time)); DEFINE(IA64_GTOD_MONO_TIME_OFFSET, offsetof (struct fsyscall_gtod_data_t, monotonic_time)); DEFINE(IA64_CLKSRC_MASK_OFFSET, offsetof (struct fsyscall_gtod_data_t, clk_mask)); DEFINE(IA64_CLKSRC_MULT_OFFSET, offsetof (struct fsyscall_gtod_data_t, clk_mult)); DEFINE(IA64_CLKSRC_SHIFT_OFFSET, offsetof (struct fsyscall_gtod_data_t, clk_shift)); DEFINE(IA64_CLKSRC_MMIO_OFFSET, offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio)); DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET, offsetof (struct fsyscall_gtod_data_t, clk_cycle_last)); DEFINE(IA64_ITC_JITTER_OFFSET, offsetof (struct itc_jitter_data_t, itc_jitter)); DEFINE(IA64_ITC_LASTCYCLE_OFFSET, offsetof (struct itc_jitter_data_t, itc_lastcycle)); }
linux-master
arch/ia64/kernel/asm-offsets.c
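asm-offsets.c above exists so that assembly code can consume structure offsets computed by the C compiler. The user-space sketch below illustrates the underlying offsetof() idea with an invented structure; the kernel's real DEFINE() macro from <linux/kbuild.h> emits these values as markers in generated assembly rather than printing them.

#include <stdio.h>
#include <stddef.h>

struct toy_regs {
	unsigned long r1;
	unsigned long r12;
	unsigned long cr_iip;
};

/* print one symbol/value pair, the way asm-offsets output looks */
#define DEFINE(sym, val) printf("#define %-24s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(TOY_REGS_SIZE, sizeof(struct toy_regs));
	DEFINE(TOY_REGS_R1_OFFSET, offsetof(struct toy_regs, r1));
	DEFINE(TOY_REGS_R12_OFFSET, offsetof(struct toy_regs, r12));
	DEFINE(TOY_REGS_CR_IIP_OFFSET, offsetof(struct toy_regs, cr_iip));
	return 0;
}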
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * This file contains NUMA specific variables and functions which are used on * NUMA machines with contiguous memory. * 2002/08/07 Erich Focht <[email protected]> * Populate cpu entries in sysfs for non-numa systems as well * Intel Corporation - Ashok Raj * 02/27/2006 Zhang, Yanmin * Populate cpu cache entries in sysfs for cpu cache info */ #include <linux/cpu.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/node.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/nodemask.h> #include <linux/notifier.h> #include <linux/export.h> #include <asm/mmzone.h> #include <asm/numa.h> #include <asm/cpu.h> static struct ia64_cpu *sysfs_cpus; void arch_fix_phys_package_id(int num, u32 slot) { #ifdef CONFIG_SMP if (cpu_data(num)->socket_id == -1) cpu_data(num)->socket_id = slot; #endif } EXPORT_SYMBOL_GPL(arch_fix_phys_package_id); #ifdef CONFIG_HOTPLUG_CPU int __ref arch_register_cpu(int num) { /* * If CPEI can be re-targeted or if this is not * CPEI target, then it is hotpluggable */ if (can_cpei_retarget() || !is_cpu_cpei_target(num)) sysfs_cpus[num].cpu.hotpluggable = 1; map_cpu_to_node(num, node_cpuid[num].nid); return register_cpu(&sysfs_cpus[num].cpu, num); } EXPORT_SYMBOL(arch_register_cpu); void __ref arch_unregister_cpu(int num) { unregister_cpu(&sysfs_cpus[num].cpu); unmap_cpu_from_node(num, cpu_to_node(num)); } EXPORT_SYMBOL(arch_unregister_cpu); #else static int __init arch_register_cpu(int num) { return register_cpu(&sysfs_cpus[num].cpu, num); } #endif /*CONFIG_HOTPLUG_CPU*/ static int __init topology_init(void) { int i, err = 0; sysfs_cpus = kcalloc(NR_CPUS, sizeof(struct ia64_cpu), GFP_KERNEL); if (!sysfs_cpus) panic("kzalloc in topology_init failed - NR_CPUS too big?"); for_each_present_cpu(i) { if((err = arch_register_cpu(i))) goto out; } out: return err; } subsys_initcall(topology_init); /* * Export cpu cache information through sysfs */ /* * A bunch of string array to get pretty printing */ static const char *cache_types[] = { "", /* not used */ "Instruction", "Data", "Unified" /* unified */ }; static const char *cache_mattrib[]={ "WriteThrough", "WriteBack", "", /* reserved */ "" /* reserved */ }; struct cache_info { pal_cache_config_info_t cci; cpumask_t shared_cpu_map; int level; int type; struct kobject kobj; }; struct cpu_cache_info { struct cache_info *cache_leaves; int num_cache_leaves; struct kobject kobj; }; static struct cpu_cache_info all_cpu_cache_info[NR_CPUS]; #define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y]) #ifdef CONFIG_SMP static void cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { pal_cache_shared_info_t csi; int num_shared, i = 0; unsigned int j; if (cpu_data(cpu)->threads_per_core <= 1 && cpu_data(cpu)->cores_per_socket <= 1) { cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); return; } if (ia64_pal_cache_shared_info(this_leaf->level, this_leaf->type, 0, &csi) != PAL_STATUS_SUCCESS) return; num_shared = (int) csi.num_shared; do { for_each_possible_cpu(j) if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id && cpu_data(j)->core_id == csi.log1_cid && cpu_data(j)->thread_id == csi.log1_tid) cpumask_set_cpu(j, &this_leaf->shared_cpu_map); i++; } while (i < num_shared && ia64_pal_cache_shared_info(this_leaf->level, this_leaf->type, i, &csi) == PAL_STATUS_SUCCESS); } #else static void 
cache_shared_cpu_map_setup(unsigned int cpu, struct cache_info * this_leaf) { cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); return; } #endif static ssize_t show_coherency_line_size(struct cache_info *this_leaf, char *buf) { return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size); } static ssize_t show_ways_of_associativity(struct cache_info *this_leaf, char *buf) { return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc); } static ssize_t show_attributes(struct cache_info *this_leaf, char *buf) { return sprintf(buf, "%s\n", cache_mattrib[this_leaf->cci.pcci_cache_attr]); } static ssize_t show_size(struct cache_info *this_leaf, char *buf) { return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024); } static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf) { unsigned number_of_sets = this_leaf->cci.pcci_cache_size; number_of_sets /= this_leaf->cci.pcci_assoc; number_of_sets /= 1 << this_leaf->cci.pcci_line_size; return sprintf(buf, "%u\n", number_of_sets); } static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf) { cpumask_t shared_cpu_map; cpumask_and(&shared_cpu_map, &this_leaf->shared_cpu_map, cpu_online_mask); return scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(&shared_cpu_map)); } static ssize_t show_type(struct cache_info *this_leaf, char *buf) { int type = this_leaf->type + this_leaf->cci.pcci_unified; return sprintf(buf, "%s\n", cache_types[type]); } static ssize_t show_level(struct cache_info *this_leaf, char *buf) { return sprintf(buf, "%u\n", this_leaf->level); } struct cache_attr { struct attribute attr; ssize_t (*show)(struct cache_info *, char *); ssize_t (*store)(struct cache_info *, const char *, size_t count); }; #ifdef define_one_ro #undef define_one_ro #endif #define define_one_ro(_name) \ static struct cache_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) define_one_ro(level); define_one_ro(type); define_one_ro(coherency_line_size); define_one_ro(ways_of_associativity); define_one_ro(size); define_one_ro(number_of_sets); define_one_ro(shared_cpu_map); define_one_ro(attributes); static struct attribute * cache_default_attrs[] = { &type.attr, &level.attr, &coherency_line_size.attr, &ways_of_associativity.attr, &attributes.attr, &size.attr, &number_of_sets.attr, &shared_cpu_map.attr, NULL }; ATTRIBUTE_GROUPS(cache_default); #define to_object(k) container_of(k, struct cache_info, kobj) #define to_attr(a) container_of(a, struct cache_attr, attr) static ssize_t ia64_cache_show(struct kobject * kobj, struct attribute * attr, char * buf) { struct cache_attr *fattr = to_attr(attr); struct cache_info *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->show ? 
fattr->show(this_leaf, buf) : 0; return ret; } static const struct sysfs_ops cache_sysfs_ops = { .show = ia64_cache_show }; static struct kobj_type cache_ktype = { .sysfs_ops = &cache_sysfs_ops, .default_groups = cache_default_groups, }; static struct kobj_type cache_ktype_percpu_entry = { .sysfs_ops = &cache_sysfs_ops, }; static void cpu_cache_sysfs_exit(unsigned int cpu) { kfree(all_cpu_cache_info[cpu].cache_leaves); all_cpu_cache_info[cpu].cache_leaves = NULL; all_cpu_cache_info[cpu].num_cache_leaves = 0; memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); return; } static int cpu_cache_sysfs_init(unsigned int cpu) { unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; int j; long status; struct cache_info *this_cache; int num_cache_leaves = 0; if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) { printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status); return -1; } this_cache=kcalloc(unique_caches, sizeof(struct cache_info), GFP_KERNEL); if (this_cache == NULL) return -ENOMEM; for (i=0; i < levels; i++) { for (j=2; j >0 ; j--) { if ((status=ia64_pal_cache_config_info(i,j, &cci)) != PAL_STATUS_SUCCESS) continue; this_cache[num_cache_leaves].cci = cci; this_cache[num_cache_leaves].level = i + 1; this_cache[num_cache_leaves].type = j; cache_shared_cpu_map_setup(cpu, &this_cache[num_cache_leaves]); num_cache_leaves ++; } } all_cpu_cache_info[cpu].cache_leaves = this_cache; all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves; memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); return 0; } /* Add cache interface for CPU device */ static int cache_add_dev(unsigned int cpu) { struct device *sys_dev = get_cpu_device(cpu); unsigned long i, j; struct cache_info *this_object; int retval = 0; if (all_cpu_cache_info[cpu].kobj.parent) return 0; retval = cpu_cache_sysfs_init(cpu); if (unlikely(retval < 0)) return retval; retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj, &cache_ktype_percpu_entry, &sys_dev->kobj, "%s", "cache"); if (unlikely(retval < 0)) { cpu_cache_sysfs_exit(cpu); return retval; } for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) { this_object = LEAF_KOBJECT_PTR(cpu,i); retval = kobject_init_and_add(&(this_object->kobj), &cache_ktype, &all_cpu_cache_info[cpu].kobj, "index%1lu", i); if (unlikely(retval)) { for (j = 0; j < i; j++) { kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj)); } kobject_put(&all_cpu_cache_info[cpu].kobj); cpu_cache_sysfs_exit(cpu); return retval; } kobject_uevent(&(this_object->kobj), KOBJ_ADD); } kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD); return retval; } /* Remove cache interface for CPU device */ static int cache_remove_dev(unsigned int cpu) { unsigned long i; for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj)); if (all_cpu_cache_info[cpu].kobj.parent) { kobject_put(&all_cpu_cache_info[cpu].kobj); memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); } cpu_cache_sysfs_exit(cpu); return 0; } static int __init cache_sysfs_init(void) { int ret; ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online", cache_add_dev, cache_remove_dev); WARN_ON(ret < 0); return 0; } device_initcall(cache_sysfs_init);
linux-master
arch/ia64/kernel/topology.c
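show_coherency_line_size() and show_number_of_sets() above derive cache geometry from PAL data in which the line size is reported as a log2 value. A small stand-alone sketch of that arithmetic follows, using invented example figures (256 KiB, 8-way, 128-byte lines).

#include <stdio.h>

int main(void)
{
	unsigned int cache_size = 256 * 1024;	/* pcci_cache_size, in bytes */
	unsigned int assoc = 8;			/* pcci_assoc */
	unsigned int line_size_log2 = 7;	/* pcci_line_size: 2^7 = 128 */

	unsigned int line_size = 1u << line_size_log2;
	unsigned int sets = cache_size / assoc / line_size;

	printf("line size: %u bytes, sets: %u\n", line_size, sets);
	return 0;
}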
// SPDX-License-Identifier: GPL-2.0 /* * MSI hooks for standard x86 apic */ #include <linux/pci.h> #include <linux/irq.h> #include <linux/msi.h> #include <linux/dmar.h> #include <asm/smp.h> #include <asm/msidef.h> static struct irq_chip ia64_msi_chip; #ifdef CONFIG_SMP static int ia64_set_msi_irq_affinity(struct irq_data *idata, const cpumask_t *cpu_mask, bool force) { struct msi_msg msg; u32 addr, data; int cpu = cpumask_first_and(cpu_mask, cpu_online_mask); unsigned int irq = idata->irq; if (irq_prepare_move(irq, cpu)) return -1; __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg); addr = msg.address_lo; addr &= MSI_ADDR_DEST_ID_MASK; addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); msg.address_lo = addr; data = msg.data; data &= MSI_DATA_VECTOR_MASK; data |= MSI_DATA_VECTOR(irq_to_vector(irq)); msg.data = data; pci_write_msi_msg(irq, &msg); irq_data_update_affinity(idata, cpumask_of(cpu)); return 0; } #endif /* CONFIG_SMP */ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { struct msi_msg msg; unsigned long dest_phys_id; int irq, vector; irq = create_irq(); if (irq < 0) return irq; irq_set_msi_desc(irq, desc); dest_phys_id = cpu_physical_id(cpumask_any_and(&(irq_to_domain(irq)), cpu_online_mask)); vector = irq_to_vector(irq); msg.address_hi = 0; msg.address_lo = MSI_ADDR_HEADER | MSI_ADDR_DEST_MODE_PHYS | MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID_CPU(dest_phys_id); msg.data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(vector); pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); return 0; } void arch_teardown_msi_irq(unsigned int irq) { destroy_irq(irq); } static void ia64_ack_msi_irq(struct irq_data *data) { irq_complete_move(data->irq); irq_move_irq(data); ia64_eoi(); } static int ia64_msi_retrigger_irq(struct irq_data *data) { unsigned int vector = irq_to_vector(data->irq); ia64_resend_irq(vector); return 1; } /* * Generic ops used on most IA64 platforms. 
*/ static struct irq_chip ia64_msi_chip = { .name = "PCI-MSI", .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, .irq_ack = ia64_ack_msi_irq, #ifdef CONFIG_SMP .irq_set_affinity = ia64_set_msi_irq_affinity, #endif .irq_retrigger = ia64_msi_retrigger_irq, }; #ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_SMP static int dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { unsigned int irq = data->irq; struct irq_cfg *cfg = irq_cfg + irq; struct msi_msg msg; int cpu = cpumask_first_and(mask, cpu_online_mask); if (irq_prepare_move(irq, cpu)) return -1; dmar_msi_read(irq, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); dmar_msi_write(irq, &msg); irq_data_update_affinity(data, mask); return 0; } #endif /* CONFIG_SMP */ static struct irq_chip dmar_msi_type = { .name = "DMAR_MSI", .irq_unmask = dmar_msi_unmask, .irq_mask = dmar_msi_mask, .irq_ack = ia64_ack_msi_irq, #ifdef CONFIG_SMP .irq_set_affinity = dmar_msi_set_affinity, #endif .irq_retrigger = ia64_msi_retrigger_irq, }; static void msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) { struct irq_cfg *cfg = irq_cfg + irq; unsigned dest; dest = cpu_physical_id(cpumask_first_and(&(irq_to_domain(irq)), cpu_online_mask)); msg->address_hi = 0; msg->address_lo = MSI_ADDR_HEADER | MSI_ADDR_DEST_MODE_PHYS | MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID_CPU(dest); msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(cfg->vector); } int dmar_alloc_hwirq(int id, int node, void *arg) { int irq; struct msi_msg msg; irq = create_irq(); if (irq > 0) { irq_set_handler_data(irq, arg); irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, "edge"); msi_compose_msg(NULL, irq, &msg); dmar_msi_write(irq, &msg); } return irq; } void dmar_free_hwirq(int irq) { irq_set_handler_data(irq, NULL); destroy_irq(irq); } #endif /* CONFIG_INTEL_IOMMU */
linux-master
arch/ia64/kernel/msi_ia64.c
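ia64_set_msi_irq_affinity() and dmar_msi_set_affinity() above retarget an interrupt by rewriting the vector and destination-ID fields of the cached MSI message. The sketch below shows only the read-modify-write pattern on the data word; the mask and field layout are hypothetical stand-ins, since the real definitions live in asm/msidef.h.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, for illustration only. */
#define DEMO_VECTOR_MASK	0xffu
#define DEMO_VECTOR(v)		((uint32_t)(v) & DEMO_VECTOR_MASK)

static uint32_t retarget_vector(uint32_t data, unsigned int vector)
{
	/* clear the old vector field, then insert the new one */
	data &= ~DEMO_VECTOR_MASK;
	data |= DEMO_VECTOR(vector);
	return data;
}

int main(void)
{
	uint32_t data = 0x0000c041;	/* made-up MSI data word, vector 0x41 */

	data = retarget_vector(data, 0x52);
	printf("msi data -> 0x%08x\n", data);
	return 0;
}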
// SPDX-License-Identifier: GPL-2.0 /* * IA-64-specific support for kernel module loader. * * Copyright (C) 2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * * Loosely based on patch by Rusty Russell. */ /* relocs tested so far: DIR64LSB FPTR64LSB GPREL22 LDXMOV LDXMOV LTOFF22 LTOFF22X LTOFF22X LTOFF_FPTR22 PCREL21B (for br.call only; br.cond is not supported out of modules!) PCREL60B (for brl.cond only; brl.call is not supported for modules!) PCREL64LSB SECREL32LSB SEGREL64LSB */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/elf.h> #include <linux/moduleloader.h> #include <linux/string.h> #include <linux/vmalloc.h> #include <asm/patch.h> #include <asm/unaligned.h> #include <asm/sections.h> #define ARCH_MODULE_DEBUG 0 #if ARCH_MODULE_DEBUG # define DEBUGP printk # define inline #else # define DEBUGP(fmt , a...) #endif #ifdef CONFIG_ITANIUM # define USE_BRL 0 #else # define USE_BRL 1 #endif #define MAX_LTOFF ((uint64_t) (1 << 22)) /* max. allowable linkage-table offset */ /* Define some relocation helper macros/types: */ #define FORMAT_SHIFT 0 #define FORMAT_BITS 3 #define FORMAT_MASK ((1 << FORMAT_BITS) - 1) #define VALUE_SHIFT 3 #define VALUE_BITS 5 #define VALUE_MASK ((1 << VALUE_BITS) - 1) enum reloc_target_format { /* direct encoded formats: */ RF_NONE = 0, RF_INSN14 = 1, RF_INSN22 = 2, RF_INSN64 = 3, RF_32MSB = 4, RF_32LSB = 5, RF_64MSB = 6, RF_64LSB = 7, /* formats that cannot be directly decoded: */ RF_INSN60, RF_INSN21B, /* imm21 form 1 */ RF_INSN21M, /* imm21 form 2 */ RF_INSN21F /* imm21 form 3 */ }; enum reloc_value_formula { RV_DIRECT = 4, /* S + A */ RV_GPREL = 5, /* @gprel(S + A) */ RV_LTREL = 6, /* @ltoff(S + A) */ RV_PLTREL = 7, /* @pltoff(S + A) */ RV_FPTR = 8, /* @fptr(S + A) */ RV_PCREL = 9, /* S + A - P */ RV_LTREL_FPTR = 10, /* @ltoff(@fptr(S + A)) */ RV_SEGREL = 11, /* @segrel(S + A) */ RV_SECREL = 12, /* @secrel(S + A) */ RV_BDREL = 13, /* BD + A */ RV_LTV = 14, /* S + A (like RV_DIRECT, except frozen at static link-time) */ RV_PCREL2 = 15, /* S + A - P */ RV_SPECIAL = 16, /* various (see below) */ RV_RSVD17 = 17, RV_TPREL = 18, /* @tprel(S + A) */ RV_LTREL_TPREL = 19, /* @ltoff(@tprel(S + A)) */ RV_DTPMOD = 20, /* @dtpmod(S + A) */ RV_LTREL_DTPMOD = 21, /* @ltoff(@dtpmod(S + A)) */ RV_DTPREL = 22, /* @dtprel(S + A) */ RV_LTREL_DTPREL = 23, /* @ltoff(@dtprel(S + A)) */ RV_RSVD24 = 24, RV_RSVD25 = 25, RV_RSVD26 = 26, RV_RSVD27 = 27 /* 28-31 reserved for implementation-specific purposes. 
*/ }; #define N(reloc) [R_IA64_##reloc] = #reloc static const char *reloc_name[256] = { N(NONE), N(IMM14), N(IMM22), N(IMM64), N(DIR32MSB), N(DIR32LSB), N(DIR64MSB), N(DIR64LSB), N(GPREL22), N(GPREL64I), N(GPREL32MSB), N(GPREL32LSB), N(GPREL64MSB), N(GPREL64LSB), N(LTOFF22), N(LTOFF64I), N(PLTOFF22), N(PLTOFF64I), N(PLTOFF64MSB), N(PLTOFF64LSB), N(FPTR64I), N(FPTR32MSB), N(FPTR32LSB), N(FPTR64MSB), N(FPTR64LSB), N(PCREL60B), N(PCREL21B), N(PCREL21M), N(PCREL21F), N(PCREL32MSB), N(PCREL32LSB), N(PCREL64MSB), N(PCREL64LSB), N(LTOFF_FPTR22), N(LTOFF_FPTR64I), N(LTOFF_FPTR32MSB), N(LTOFF_FPTR32LSB), N(LTOFF_FPTR64MSB), N(LTOFF_FPTR64LSB), N(SEGREL32MSB), N(SEGREL32LSB), N(SEGREL64MSB), N(SEGREL64LSB), N(SECREL32MSB), N(SECREL32LSB), N(SECREL64MSB), N(SECREL64LSB), N(REL32MSB), N(REL32LSB), N(REL64MSB), N(REL64LSB), N(LTV32MSB), N(LTV32LSB), N(LTV64MSB), N(LTV64LSB), N(PCREL21BI), N(PCREL22), N(PCREL64I), N(IPLTMSB), N(IPLTLSB), N(COPY), N(LTOFF22X), N(LDXMOV), N(TPREL14), N(TPREL22), N(TPREL64I), N(TPREL64MSB), N(TPREL64LSB), N(LTOFF_TPREL22), N(DTPMOD64MSB), N(DTPMOD64LSB), N(LTOFF_DTPMOD22), N(DTPREL14), N(DTPREL22), N(DTPREL64I), N(DTPREL32MSB), N(DTPREL32LSB), N(DTPREL64MSB), N(DTPREL64LSB), N(LTOFF_DTPREL22) }; #undef N /* Opaque struct for insns, to protect against derefs. */ struct insn; static inline uint64_t bundle (const struct insn *insn) { return (uint64_t) insn & ~0xfUL; } static inline int slot (const struct insn *insn) { return (uint64_t) insn & 0x3; } static int apply_imm64 (struct module *mod, struct insn *insn, uint64_t val) { if (slot(insn) != 1 && slot(insn) != 2) { printk(KERN_ERR "%s: invalid slot number %d for IMM64\n", mod->name, slot(insn)); return 0; } ia64_patch_imm64((u64) insn, val); return 1; } static int apply_imm60 (struct module *mod, struct insn *insn, uint64_t val) { if (slot(insn) != 1 && slot(insn) != 2) { printk(KERN_ERR "%s: invalid slot number %d for IMM60\n", mod->name, slot(insn)); return 0; } if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) { printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (long) val); return 0; } ia64_patch_imm60((u64) insn, val); return 1; } static int apply_imm22 (struct module *mod, struct insn *insn, uint64_t val) { if (val + (1 << 21) >= (1 << 22)) { printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (long)val); return 0; } ia64_patch((u64) insn, 0x01fffcfe000UL, ( ((val & 0x200000UL) << 15) /* bit 21 -> 36 */ | ((val & 0x1f0000UL) << 6) /* bit 16 -> 22 */ | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */ | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */)); return 1; } static int apply_imm21b (struct module *mod, struct insn *insn, uint64_t val) { if (val + (1 << 20) >= (1 << 21)) { printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (long)val); return 0; } ia64_patch((u64) insn, 0x11ffffe000UL, ( ((val & 0x100000UL) << 16) /* bit 20 -> 36 */ | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */)); return 1; } #if USE_BRL struct plt_entry { /* Three instruction bundles in PLT. 
*/ unsigned char bundle[2][16]; }; static const struct plt_entry ia64_plt_template = { { { 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */ 0x00, 0x00, 0x00, 0x60 }, { 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* brl.many gp=TARGET_GP */ 0x08, 0x00, 0x00, 0xc0 } } }; static int patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp) { if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp) && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2), (target_ip - (int64_t) plt->bundle[1]) / 16)) return 1; return 0; } unsigned long plt_target (struct plt_entry *plt) { uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1]; long off; b0 = b[0]; b1 = b[1]; off = ( ((b1 & 0x00fffff000000000UL) >> 36) /* imm20b -> bit 0 */ | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */ | ((b1 & 0x0800000000000000UL) << 0)); /* i -> bit 59 */ return (long) plt->bundle[1] + 16*off; } #else /* !USE_BRL */ struct plt_entry { /* Three instruction bundles in PLT. */ unsigned char bundle[3][16]; }; static const struct plt_entry ia64_plt_template = { { { 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* movl r16=TARGET_IP */ 0x02, 0x00, 0x00, 0x60 }, { 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /* movl gp=TARGET_GP */ 0x00, 0x00, 0x00, 0x60 }, { 0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */ 0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /* mov b6=r16 */ 0x60, 0x00, 0x80, 0x00 /* br.few b6 */ } } }; static int patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp) { if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip) && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp)) return 1; return 0; } unsigned long plt_target (struct plt_entry *plt) { uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0]; b0 = b[0]; b1 = b[1]; return ( ((b1 & 0x000007f000000000) >> 36) /* imm7b -> bit 0 */ | ((b1 & 0x07fc000000000000) >> 43) /* imm9d -> bit 7 */ | ((b1 & 0x0003e00000000000) >> 29) /* imm5c -> bit 16 */ | ((b1 & 0x0000100000000000) >> 23) /* ic -> bit 21 */ | ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40) /* imm41 -> bit 22 */ | ((b1 & 0x0800000000000000) << 4)); /* i -> bit 63 */ } #endif /* !USE_BRL */ void module_arch_freeing_init (struct module *mod) { if (mod->arch.init_unw_table) { unw_remove_unwind_table(mod->arch.init_unw_table); mod->arch.init_unw_table = NULL; } } /* Have we already seen one of these relocations? 
*/ /* FIXME: we could look in other sections, too --RR */ static int duplicate_reloc (const Elf64_Rela *rela, unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend) return 1; } return 0; } /* Count how many GOT entries we may need */ static unsigned int count_gots (const Elf64_Rela *rela, unsigned int num) { unsigned int i, ret = 0; /* Sure, this is order(n^2), but it's usually short, and not time critical */ for (i = 0; i < num; i++) { switch (ELF64_R_TYPE(rela[i].r_info)) { case R_IA64_LTOFF22: case R_IA64_LTOFF22X: case R_IA64_LTOFF64I: case R_IA64_LTOFF_FPTR22: case R_IA64_LTOFF_FPTR64I: case R_IA64_LTOFF_FPTR32MSB: case R_IA64_LTOFF_FPTR32LSB: case R_IA64_LTOFF_FPTR64MSB: case R_IA64_LTOFF_FPTR64LSB: if (!duplicate_reloc(rela, i)) ret++; break; } } return ret; } /* Count how many PLT entries we may need */ static unsigned int count_plts (const Elf64_Rela *rela, unsigned int num) { unsigned int i, ret = 0; /* Sure, this is order(n^2), but it's usually short, and not time critical */ for (i = 0; i < num; i++) { switch (ELF64_R_TYPE(rela[i].r_info)) { case R_IA64_PCREL21B: case R_IA64_PLTOFF22: case R_IA64_PLTOFF64I: case R_IA64_PLTOFF64MSB: case R_IA64_PLTOFF64LSB: case R_IA64_IPLTMSB: case R_IA64_IPLTLSB: if (!duplicate_reloc(rela, i)) ret++; break; } } return ret; } /* We need to create an function-descriptors for any internal function which is referenced. */ static unsigned int count_fdescs (const Elf64_Rela *rela, unsigned int num) { unsigned int i, ret = 0; /* Sure, this is order(n^2), but it's usually short, and not time critical. */ for (i = 0; i < num; i++) { switch (ELF64_R_TYPE(rela[i].r_info)) { case R_IA64_FPTR64I: case R_IA64_FPTR32LSB: case R_IA64_FPTR32MSB: case R_IA64_FPTR64LSB: case R_IA64_FPTR64MSB: case R_IA64_LTOFF_FPTR22: case R_IA64_LTOFF_FPTR32LSB: case R_IA64_LTOFF_FPTR32MSB: case R_IA64_LTOFF_FPTR64I: case R_IA64_LTOFF_FPTR64LSB: case R_IA64_LTOFF_FPTR64MSB: case R_IA64_IPLTMSB: case R_IA64_IPLTLSB: /* * Jumps to static functions sometimes go straight to their * offset. Of course, that may not be possible if the jump is * from init -> core or vice. versa, so we need to generate an * FDESC (and PLT etc) for that. */ case R_IA64_PCREL21B: if (!duplicate_reloc(rela, i)) ret++; break; } } return ret; } int module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, struct module *mod) { unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0; Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; /* * To store the PLTs and function-descriptors, we expand the .text section for * core module-code and the .init.text section for initialization code. */ for (s = sechdrs; s < sechdrs_end; ++s) if (strcmp(".core.plt", secstrings + s->sh_name) == 0) mod->arch.core_plt = s; else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) mod->arch.init_plt = s; else if (strcmp(".got", secstrings + s->sh_name) == 0) mod->arch.got = s; else if (strcmp(".opd", secstrings + s->sh_name) == 0) mod->arch.opd = s; else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) mod->arch.unwind = s; if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { printk(KERN_ERR "%s: sections missing\n", mod->name); return -ENOEXEC; } /* GOT and PLTs can occur in any relocated section... 
*/ for (s = sechdrs + 1; s < sechdrs_end; ++s) { const Elf64_Rela *rels = (void *)ehdr + s->sh_offset; unsigned long numrels = s->sh_size/sizeof(Elf64_Rela); if (s->sh_type != SHT_RELA) continue; gots += count_gots(rels, numrels); fdescs += count_fdescs(rels, numrels); if (strstr(secstrings + s->sh_name, ".init")) init_plts += count_plts(rels, numrels); else core_plts += count_plts(rels, numrels); } mod->arch.core_plt->sh_type = SHT_NOBITS; mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.core_plt->sh_addralign = 16; mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry); mod->arch.init_plt->sh_type = SHT_NOBITS; mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; mod->arch.init_plt->sh_addralign = 16; mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry); mod->arch.got->sh_type = SHT_NOBITS; mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC; mod->arch.got->sh_addralign = 8; mod->arch.got->sh_size = gots * sizeof(struct got_entry); mod->arch.opd->sh_type = SHT_NOBITS; mod->arch.opd->sh_flags = SHF_ALLOC; mod->arch.opd->sh_addralign = 8; mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc); DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n", __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size, mod->arch.got->sh_size, mod->arch.opd->sh_size); return 0; } static inline bool in_init (const struct module *mod, uint64_t addr) { return within_module_init(addr, mod); } static inline bool in_core (const struct module *mod, uint64_t addr) { return within_module_core(addr, mod); } static inline bool is_internal (const struct module *mod, uint64_t value) { return in_init(mod, value) || in_core(mod, value); } /* * Get gp-relative offset for the linkage-table entry of VALUE. */ static uint64_t get_ltoff (struct module *mod, uint64_t value, int *okp) { struct got_entry *got, *e; if (!*okp) return 0; got = (void *) mod->arch.got->sh_addr; for (e = got; e < got + mod->arch.next_got_entry; ++e) if (e->val == value) goto found; /* Not enough GOT entries? */ BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)); e->val = value; ++mod->arch.next_got_entry; found: return (uint64_t) e - mod->arch.gp; } static inline int gp_addressable (struct module *mod, uint64_t value) { return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF; } /* Get PC-relative PLT entry for this value. Returns 0 on failure. */ static uint64_t get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp) { struct plt_entry *plt, *plt_end; uint64_t target_ip, target_gp; if (!*okp) return 0; if (in_init(mod, (uint64_t) insn)) { plt = (void *) mod->arch.init_plt->sh_addr; plt_end = (void *) plt + mod->arch.init_plt->sh_size; } else { plt = (void *) mod->arch.core_plt->sh_addr; plt_end = (void *) plt + mod->arch.core_plt->sh_size; } /* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */ target_ip = ((uint64_t *) value)[0]; target_gp = ((uint64_t *) value)[1]; /* Look for existing PLT entry. */ while (plt->bundle[0][0]) { if (plt_target(plt) == target_ip) goto found; if (++plt >= plt_end) BUG(); } *plt = ia64_plt_template; if (!patch_plt(mod, plt, target_ip, target_gp)) { *okp = 0; return 0; } #if ARCH_MODULE_DEBUG if (plt_target(plt) != target_ip) { printk("%s: mistargeted PLT: wanted %lx, got %lx\n", __func__, target_ip, plt_target(plt)); *okp = 0; return 0; } #endif found: return (uint64_t) plt; } /* Get function descriptor for VALUE. 
*/ static uint64_t get_fdesc (struct module *mod, uint64_t value, int *okp) { struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr; if (!*okp) return 0; if (!value) { printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name); return 0; } if (!is_internal(mod, value)) /* * If it's not a module-local entry-point, "value" already points to a * function-descriptor. */ return value; /* Look for existing function descriptor. */ while (fdesc->addr) { if (fdesc->addr == value) return (uint64_t)fdesc; if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size) BUG(); } /* Create new one */ fdesc->addr = value; fdesc->gp = mod->arch.gp; return (uint64_t) fdesc; } static inline int do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, Elf64_Shdr *sec, void *location) { enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK; enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK; uint64_t val; int ok = 1; val = sym->st_value + addend; switch (formula) { case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */ case RV_DIRECT: break; case RV_GPREL: val -= mod->arch.gp; break; case RV_LTREL: val = get_ltoff(mod, val, &ok); break; case RV_PLTREL: val = get_plt(mod, location, val, &ok); break; case RV_FPTR: val = get_fdesc(mod, val, &ok); break; case RV_SECREL: val -= sec->sh_addr; break; case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break; case RV_PCREL: switch (r_type) { case R_IA64_PCREL21B: if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) || (in_core(mod, val) && in_init(mod, (uint64_t)location))) { /* * Init section may have been allocated far away from core, * if the branch won't reach, then allocate a plt for it. */ uint64_t delta = ((int64_t)val - (int64_t)location) / 16; if (delta + (1 << 20) >= (1 << 21)) { val = get_fdesc(mod, val, &ok); val = get_plt(mod, location, val, &ok); } } else if (!is_internal(mod, val)) val = get_plt(mod, location, val, &ok); fallthrough; default: val -= bundle(location); break; case R_IA64_PCREL32MSB: case R_IA64_PCREL32LSB: case R_IA64_PCREL64MSB: case R_IA64_PCREL64LSB: val -= (uint64_t) location; break; } switch (r_type) { case R_IA64_PCREL60B: format = RF_INSN60; break; case R_IA64_PCREL21B: format = RF_INSN21B; break; case R_IA64_PCREL21M: format = RF_INSN21M; break; case R_IA64_PCREL21F: format = RF_INSN21F; break; default: break; } break; case RV_BDREL: val -= (uint64_t) (in_init(mod, val) ? mod->mem[MOD_INIT_TEXT].base : mod->mem[MOD_TEXT].base); break; case RV_LTV: /* can link-time value relocs happen here? 
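 * (The code below treats one as a bug, since such relocations are expected to have been
 * resolved at link time.)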
*/ BUG(); break; case RV_PCREL2: if (r_type == R_IA64_PCREL21BI) { if (!is_internal(mod, val)) { printk(KERN_ERR "%s: %s reloc against " "non-local symbol (%lx)\n", __func__, reloc_name[r_type], (unsigned long)val); return -ENOEXEC; } format = RF_INSN21B; } val -= bundle(location); break; case RV_SPECIAL: switch (r_type) { case R_IA64_IPLTMSB: case R_IA64_IPLTLSB: val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok); format = RF_64LSB; if (r_type == R_IA64_IPLTMSB) format = RF_64MSB; break; case R_IA64_SUB: val = addend - sym->st_value; format = RF_INSN64; break; case R_IA64_LTOFF22X: if (gp_addressable(mod, val)) val -= mod->arch.gp; else val = get_ltoff(mod, val, &ok); format = RF_INSN22; break; case R_IA64_LDXMOV: if (gp_addressable(mod, val)) { /* turn "ld8" into "mov": */ DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location); ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL); } return 0; default: if (reloc_name[r_type]) printk(KERN_ERR "%s: special reloc %s not supported", mod->name, reloc_name[r_type]); else printk(KERN_ERR "%s: unknown special reloc %x\n", mod->name, r_type); return -ENOEXEC; } break; case RV_TPREL: case RV_LTREL_TPREL: case RV_DTPMOD: case RV_LTREL_DTPMOD: case RV_DTPREL: case RV_LTREL_DTPREL: printk(KERN_ERR "%s: %s reloc not supported\n", mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?"); return -ENOEXEC; default: printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type); return -ENOEXEC; } if (!ok) return -ENOEXEC; DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val, reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend); switch (format) { case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break; case RF_INSN22: ok = apply_imm22(mod, location, val); break; case RF_INSN64: ok = apply_imm64(mod, location, val); break; case RF_INSN60: ok = apply_imm60(mod, location, (int64_t) val / 16); break; case RF_32LSB: put_unaligned(val, (uint32_t *) location); break; case RF_64LSB: put_unaligned(val, (uint64_t *) location); break; case RF_32MSB: /* ia64 Linux is little-endian... */ case RF_64MSB: /* ia64 Linux is little-endian... */ case RF_INSN14: /* must be within-module, i.e., resolved by "ld -r" */ case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */ case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */ printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n", mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?"); return -ENOEXEC; default: printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n", mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format); return -ENOEXEC; } return ok ? 0 : -ENOEXEC; } int apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *mod) { unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela); Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr; Elf64_Shdr *target_sec; int ret; DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__, relsec, n, sechdrs[relsec].sh_info); target_sec = sechdrs + sechdrs[relsec].sh_info; if (target_sec->sh_entsize == ~0UL) /* * If target section wasn't allocated, we don't need to relocate it. * Happens, e.g., for debug sections. */ return 0; if (!mod->arch.gp) { /* * XXX Should have an arch-hook for running this after final section * addresses have been selected... 
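 * Until then, gp is chosen here on the first relocation pass: it is placed near the middle
 * of the module's data region (or MAX_LTOFF/2 from its end for large modules) so that as
 * much of that region as possible stays within gp-addressable range.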
*/ uint64_t gp; struct module_memory *mod_mem; mod_mem = &mod->mem[MOD_DATA]; if (mod_mem->size > MAX_LTOFF) /* * This takes advantage of fact that SHF_ARCH_SMALL gets allocated * at the end of the module. */ gp = mod_mem->size - MAX_LTOFF / 2; else gp = mod_mem->size / 2; gp = (uint64_t) mod_mem->base + ((gp + 7) & -8); mod->arch.gp = gp; DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); } for (i = 0; i < n; i++) { ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info), ((Elf64_Sym *) sechdrs[symindex].sh_addr + ELF64_R_SYM(rela[i].r_info)), rela[i].r_addend, target_sec, (void *) target_sec->sh_addr + rela[i].r_offset); if (ret < 0) return ret; } return 0; } /* * Modules contain a single unwind table which covers both the core and the init text * sections but since the two are not contiguous, we need to split this table up such that * we can register (and unregister) each "segment" separately. Fortunately, this sounds * more complicated than it really is. */ static void register_unwind_table (struct module *mod) { struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr; struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start); struct unw_table_entry *e1, *e2, *core, *init; unsigned long num_init = 0, num_core = 0; /* First, count how many init and core unwind-table entries there are. */ for (e1 = start; e1 < end; ++e1) if (in_init(mod, e1->start_offset)) ++num_init; else ++num_core; /* * Second, sort the table such that all unwind-table entries for the init and core * text sections are nicely separated. We do this with a stupid bubble sort * (unwind tables don't get ridiculously huge). */ for (e1 = start; e1 < end; ++e1) { for (e2 = e1 + 1; e2 < end; ++e2) { if (e2->start_offset < e1->start_offset) { swap(*e1, *e2); } } } /* * Third, locate the init and core segments in the unwind table: */ if (in_init(mod, start->start_offset)) { init = start; core = start + num_init; } else { core = start; init = start + num_core; } DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__, mod->name, mod->arch.gp, num_init, num_core); /* * Fourth, register both tables (if not empty). */ if (num_core > 0) { mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, core, core + num_core); DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__, mod->arch.core_unw_table, core, core + num_core); } if (num_init > 0) { mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, init, init + num_init); DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__, mod->arch.init_unw_table, init, init + num_init); } } int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { struct mod_arch_specific *mas = &mod->arch; DEBUGP("%s: init: entry=%p\n", __func__, mod->init); if (mas->unwind) register_unwind_table(mod); /* * ".opd" was already relocated to the final destination. Store * it's address for use in symbolizer. */ mas->opd_addr = (void *)mas->opd->sh_addr; mas->opd_size = mas->opd->sh_size; /* * Module relocation was already done at this point. Section * headers are about to be deleted. Wipe out load-time context. 
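 * (Otherwise the section pointers cached in mod->arch would dangle once the section
 * headers are freed.)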
*/ mas->core_plt = NULL; mas->init_plt = NULL; mas->got = NULL; mas->opd = NULL; mas->unwind = NULL; mas->gp = 0; mas->next_got_entry = 0; return 0; } void module_arch_cleanup (struct module *mod) { if (mod->arch.init_unw_table) { unw_remove_unwind_table(mod->arch.init_unw_table); mod->arch.init_unw_table = NULL; } if (mod->arch.core_unw_table) { unw_remove_unwind_table(mod->arch.core_unw_table); mod->arch.core_unw_table = NULL; } } void *dereference_module_function_descriptor(struct module *mod, void *ptr) { struct mod_arch_specific *mas = &mod->arch; if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size) return ptr; return dereference_function_descriptor(ptr); }
linux-master
arch/ia64/kernel/module.c
// SPDX-License-Identifier: GPL-2.0-only /* * System Abstraction Layer (SAL) interface routines. * * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/string.h> #include <asm/delay.h> #include <asm/page.h> #include <asm/sal.h> #include <asm/pal.h> #include <asm/xtp.h> __cacheline_aligned DEFINE_SPINLOCK(sal_lock); unsigned long sal_platform_features; unsigned short sal_revision; unsigned short sal_version; #define SAL_MAJOR(x) ((x) >> 8) #define SAL_MINOR(x) ((x) & 0xff) static struct { void *addr; /* function entry point */ void *gpval; /* gp value to use */ } pdesc; static long default_handler (void) { return -1; } ia64_sal_handler ia64_sal = (ia64_sal_handler) default_handler; ia64_sal_desc_ptc_t *ia64_ptc_domain_info; const char * ia64_sal_strerror (long status) { const char *str; switch (status) { case 0: str = "Call completed without error"; break; case 1: str = "Effect a warm boot of the system to complete " "the update"; break; case -1: str = "Not implemented"; break; case -2: str = "Invalid argument"; break; case -3: str = "Call completed with error"; break; case -4: str = "Virtual address not registered"; break; case -5: str = "No information available"; break; case -6: str = "Insufficient space to add the entry"; break; case -7: str = "Invalid entry_addr value"; break; case -8: str = "Invalid interrupt vector"; break; case -9: str = "Requested memory not available"; break; case -10: str = "Unable to write to the NVM device"; break; case -11: str = "Invalid partition type specified"; break; case -12: str = "Invalid NVM_Object id specified"; break; case -13: str = "NVM_Object already has the maximum number " "of partitions"; break; case -14: str = "Insufficient space in partition for the " "requested write sub-function"; break; case -15: str = "Insufficient data buffer space for the " "requested read record sub-function"; break; case -16: str = "Scratch buffer required for the write/delete " "sub-function"; break; case -17: str = "Insufficient space in the NVM_Object for the " "requested create sub-function"; break; case -18: str = "Invalid value specified in the partition_rec " "argument"; break; case -19: str = "Record oriented I/O not supported for this " "partition"; break; case -20: str = "Bad format of record to be written or " "required keyword variable not " "specified"; break; default: str = "Unknown SAL status code"; break; } return str; } void __init ia64_sal_handler_init (void *entry_point, void *gpval) { /* fill in the SAL procedure descriptor and point ia64_sal to it: */ pdesc.addr = entry_point; pdesc.gpval = gpval; ia64_sal = (ia64_sal_handler) &pdesc; } static void __init check_versions (struct ia64_sal_systab *systab) { sal_revision = (systab->sal_rev_major << 8) | systab->sal_rev_minor; sal_version = (systab->sal_b_rev_major << 8) | systab->sal_b_rev_minor; /* Check for broken firmware */ if ((sal_revision == SAL_VERSION_CODE(49, 29)) && (sal_version == SAL_VERSION_CODE(49, 29))) { /* * Old firmware for zx2000 prototypes have this weird version number, * reset it to something sane. 
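 * The assignments below simply report SAL revision 2.8 and a zero SAL_B revision instead.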
*/ sal_revision = SAL_VERSION_CODE(2, 8); sal_version = SAL_VERSION_CODE(0, 0); } } static void __init sal_desc_entry_point (void *p) { struct ia64_sal_desc_entry_point *ep = p; ia64_pal_handler_init(__va(ep->pal_proc)); ia64_sal_handler_init(__va(ep->sal_proc), __va(ep->gp)); } #ifdef CONFIG_SMP static void __init set_smp_redirect (int flag) { #ifndef CONFIG_HOTPLUG_CPU if (no_int_routing) smp_int_redirect &= ~flag; else smp_int_redirect |= flag; #else /* * For CPU Hotplug we dont want to do any chipset supported * interrupt redirection. The reason is this would require that * All interrupts be stopped and hard bind the irq to a cpu. * Later when the interrupt is fired we need to set the redir hint * on again in the vector. This is cumbersome for something that the * user mode irq balancer will solve anyways. */ no_int_routing=1; smp_int_redirect &= ~flag; #endif } #else #define set_smp_redirect(flag) do { } while (0) #endif static void __init sal_desc_platform_feature (void *p) { struct ia64_sal_desc_platform_feature *pf = p; sal_platform_features = pf->feature_mask; printk(KERN_INFO "SAL Platform features:"); if (!sal_platform_features) { printk(" None\n"); return; } if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_BUS_LOCK) printk(" BusLock"); if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT) { printk(" IRQ_Redirection"); set_smp_redirect(SMP_IRQ_REDIRECTION); } if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT) { printk(" IPI_Redirection"); set_smp_redirect(SMP_IPI_REDIRECTION); } if (sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT) printk(" ITC_Drift"); printk("\n"); } #ifdef CONFIG_SMP static void __init sal_desc_ap_wakeup (void *p) { struct ia64_sal_desc_ap_wakeup *ap = p; switch (ap->mechanism) { case IA64_SAL_AP_EXTERNAL_INT: ap_wakeup_vector = ap->vector; printk(KERN_INFO "SAL: AP wakeup using external interrupt " "vector 0x%lx\n", ap_wakeup_vector); break; default: printk(KERN_ERR "SAL: AP wakeup mechanism unsupported!\n"); break; } } static void __init chk_nointroute_opt(void) { char *cp; for (cp = boot_command_line; *cp; ) { if (memcmp(cp, "nointroute", 10) == 0) { no_int_routing = 1; printk ("no_int_routing on\n"); break; } else { while (*cp != ' ' && *cp) ++cp; while (*cp == ' ') ++cp; } } } #else static void __init sal_desc_ap_wakeup(void *p) { } #endif /* * HP rx5670 firmware polls for interrupts during SAL_CACHE_FLUSH by reading * cr.ivr, but it never writes cr.eoi. This leaves any interrupt marked as * "in-service" and masks other interrupts of equal or lower priority. * * HP internal defect reports: F1859, F2775, F3031. */ static int sal_cache_flush_drops_interrupts; static int __init force_pal_cache_flush(char *str) { sal_cache_flush_drops_interrupts = 1; return 0; } early_param("force_pal_cache_flush", force_pal_cache_flush); void __init check_sal_cache_flush (void) { unsigned long flags; int cpu; u64 vector, cache_type = 3; struct ia64_sal_retval isrv; if (sal_cache_flush_drops_interrupts) return; cpu = get_cpu(); local_irq_save(flags); /* * Send ourselves a timer interrupt, wait until it's reported, and see * if SAL_CACHE_FLUSH drops it. 
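 * If the pending bit is gone afterwards without the interrupt ever being delivered, the
 * firmware ate it, and PAL_CACHE_FLUSH is used from then on.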
*/ ia64_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0); while (!ia64_get_irr(IA64_TIMER_VECTOR)) cpu_relax(); SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); if (isrv.status) printk(KERN_ERR "SAL_CAL_FLUSH failed with %ld\n", isrv.status); if (ia64_get_irr(IA64_TIMER_VECTOR)) { vector = ia64_get_ivr(); ia64_eoi(); WARN_ON(vector != IA64_TIMER_VECTOR); } else { sal_cache_flush_drops_interrupts = 1; printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; " "PAL_CACHE_FLUSH will be used instead\n"); ia64_eoi(); } local_irq_restore(flags); put_cpu(); } s64 ia64_sal_cache_flush (u64 cache_type) { struct ia64_sal_retval isrv; if (sal_cache_flush_drops_interrupts) { unsigned long flags; u64 progress; s64 rc; progress = 0; local_irq_save(flags); rc = ia64_pal_cache_flush(cache_type, PAL_CACHE_FLUSH_INVALIDATE, &progress, NULL); local_irq_restore(flags); return rc; } SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); return isrv.status; } EXPORT_SYMBOL_GPL(ia64_sal_cache_flush); void __init ia64_sal_init (struct ia64_sal_systab *systab) { char *p; int i; if (!systab) { printk(KERN_WARNING "Hmm, no SAL System Table.\n"); return; } if (strncmp(systab->signature, "SST_", 4) != 0) printk(KERN_ERR "bad signature in system table!"); check_versions(systab); #ifdef CONFIG_SMP chk_nointroute_opt(); #endif /* revisions are coded in BCD, so %x does the job for us */ printk(KERN_INFO "SAL %x.%x: %.32s %.32s%sversion %x.%x\n", SAL_MAJOR(sal_revision), SAL_MINOR(sal_revision), systab->oem_id, systab->product_id, systab->product_id[0] ? " " : "", SAL_MAJOR(sal_version), SAL_MINOR(sal_version)); p = (char *) (systab + 1); for (i = 0; i < systab->entry_count; i++) { /* * The first byte of each entry type contains the type * descriptor. */ switch (*p) { case SAL_DESC_ENTRY_POINT: sal_desc_entry_point(p); break; case SAL_DESC_PLATFORM_FEATURE: sal_desc_platform_feature(p); break; case SAL_DESC_PTC: ia64_ptc_domain_info = (ia64_sal_desc_ptc_t *)p; break; case SAL_DESC_AP_WAKEUP: sal_desc_ap_wakeup(p); break; } p += SAL_DESC_SIZE(*p); } } int ia64_sal_oemcall(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) { if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX) return -1; SAL_CALL(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7); return 0; } EXPORT_SYMBOL(ia64_sal_oemcall); int ia64_sal_oemcall_nolock(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) { if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX) return -1; SAL_CALL_NOLOCK(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7); return 0; } EXPORT_SYMBOL(ia64_sal_oemcall_nolock); int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) { if (oemfunc < IA64_SAL_OEMFUNC_MIN || oemfunc > IA64_SAL_OEMFUNC_MAX) return -1; SAL_CALL_REENTRANT(*isrvp, oemfunc, arg1, arg2, arg3, arg4, arg5, arg6, arg7); return 0; } EXPORT_SYMBOL(ia64_sal_oemcall_reentrant); long ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second, unsigned long *drift_info) { struct ia64_sal_retval isrv; SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); *ticks_per_second = isrv.v0; *drift_info = isrv.v1; return isrv.status; } EXPORT_SYMBOL_GPL(ia64_sal_freq_base);
linux-master
arch/ia64/kernel/sal.c
// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/crash_dump.c - Memory preserving reboot related code.
 *
 * Created by: Simon Horman <[email protected]>
 * Original code moved from kernel/crash.c
 * Original code comment copied from the i386 version of this file
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/crash_dump.h>
#include <linux/uio.h>

#include <asm/page.h>

ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
		size_t csize, unsigned long offset)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = __va(pfn<<PAGE_SHIFT);
	csize = copy_to_iter(vaddr + offset, csize, iter);
	return csize;
}
linux-master
arch/ia64/kernel/crash_dump.c
// SPDX-License-Identifier: GPL-2.0 /* * Extensible Firmware Interface * * Based on Extensible Firmware Interface Specification version 0.9 * April 30, 1999 * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <[email protected]> * Copyright (C) 1999-2003 Hewlett-Packard Co. * David Mosberger-Tang <[email protected]> * Stephane Eranian <[email protected]> * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <[email protected]> * * All EFI Runtime Services are not implemented yet as EFI only * supports physical mode addressing on SoftSDV. This is to be fixed * in a future version. --drummond 1999-07-20 * * Implemented EFI runtime services and virtual mode calls. --davidm * * Goutham Rao: <[email protected]> * Skip non-WB memory and ignore empty memory ranges. */ #include <linux/module.h> #include <linux/memblock.h> #include <linux/crash_dump.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/efi.h> #include <linux/kexec.h> #include <linux/mm.h> #include <asm/efi.h> #include <asm/io.h> #include <asm/kregs.h> #include <asm/meminit.h> #include <asm/processor.h> #include <asm/mca.h> #include <asm/sal.h> #include <asm/setup.h> #include <asm/tlbflush.h> #define EFI_DEBUG 0 #define ESI_TABLE_GUID \ EFI_GUID(0x43EA58DC, 0xCF28, 0x4b06, 0xB3, \ 0x91, 0xB7, 0x50, 0x59, 0x34, 0x2B, 0xD4) static unsigned long mps_phys = EFI_INVALID_TABLE_ADDR; static __initdata unsigned long palo_phys; unsigned long __initdata esi_phys = EFI_INVALID_TABLE_ADDR; unsigned long hcdp_phys = EFI_INVALID_TABLE_ADDR; unsigned long sal_systab_phys = EFI_INVALID_TABLE_ADDR; static const efi_config_table_type_t arch_tables[] __initconst = { {ESI_TABLE_GUID, &esi_phys, "ESI" }, {HCDP_TABLE_GUID, &hcdp_phys, "HCDP" }, {MPS_TABLE_GUID, &mps_phys, "MPS" }, {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, &palo_phys, "PALO" }, {SAL_SYSTEM_TABLE_GUID, &sal_systab_phys, "SALsystab" }, {}, }; extern efi_status_t efi_call_phys (void *, ...); static efi_runtime_services_t *runtime; static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL; #define efi_call_virt(f, args...) 
(*(f))(args) #define STUB_GET_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \ { \ struct ia64_fpreg fr[6]; \ efi_time_cap_t *atc = NULL; \ efi_status_t ret; \ \ if (tc) \ atc = adjust_arg(tc); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \ adjust_arg(tm), atc); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_time (efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \ adjust_arg(tm)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \ efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \ adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \ { \ struct ia64_fpreg fr[6]; \ efi_time_t *atm = NULL; \ efi_status_t ret; \ \ if (tm) \ atm = adjust_arg(tm); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \ enabled, atm); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \ unsigned long *data_size, void *data) \ { \ struct ia64_fpreg fr[6]; \ u32 *aattr = NULL; \ efi_status_t ret; \ \ if (attr) \ aattr = adjust_arg(attr); \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_get_variable_t *) __va(runtime->get_variable), \ adjust_arg(name), adjust_arg(vendor), aattr, \ adjust_arg(data_size), adjust_arg(data)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \ efi_guid_t *vendor) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_get_next_variable_t *) __va(runtime->get_next_variable), \ adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_SET_VARIABLE(prefix, adjust_arg) \ static efi_status_t \ prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \ u32 attr, unsigned long data_size, \ void *data) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix( \ (efi_set_variable_t *) __va(runtime->set_variable), \ adjust_arg(name), adjust_arg(vendor), attr, data_size, \ adjust_arg(data)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \ static efi_status_t \ prefix##_get_next_high_mono_count (u32 *count) \ { \ struct ia64_fpreg fr[6]; \ efi_status_t ret; \ \ ia64_save_scratch_fpregs(fr); \ ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \ __va(runtime->get_next_high_mono_count), \ adjust_arg(count)); \ ia64_load_scratch_fpregs(fr); \ return ret; \ } #define STUB_RESET_SYSTEM(prefix, adjust_arg) 
\ static void \ prefix##_reset_system (int reset_type, efi_status_t status, \ unsigned long data_size, efi_char16_t *data) \ { \ struct ia64_fpreg fr[6]; \ efi_char16_t *adata = NULL; \ \ if (data) \ adata = adjust_arg(data); \ \ ia64_save_scratch_fpregs(fr); \ efi_call_##prefix( \ (efi_reset_system_t *) __va(runtime->reset_system), \ reset_type, status, data_size, adata); \ /* should not return, but just in case... */ \ ia64_load_scratch_fpregs(fr); \ } #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg)) STUB_GET_TIME(phys, phys_ptr) STUB_SET_TIME(phys, phys_ptr) STUB_GET_WAKEUP_TIME(phys, phys_ptr) STUB_SET_WAKEUP_TIME(phys, phys_ptr) STUB_GET_VARIABLE(phys, phys_ptr) STUB_GET_NEXT_VARIABLE(phys, phys_ptr) STUB_SET_VARIABLE(phys, phys_ptr) STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr) STUB_RESET_SYSTEM(phys, phys_ptr) #define id(arg) arg STUB_GET_TIME(virt, id) STUB_SET_TIME(virt, id) STUB_GET_WAKEUP_TIME(virt, id) STUB_SET_WAKEUP_TIME(virt, id) STUB_GET_VARIABLE(virt, id) STUB_GET_NEXT_VARIABLE(virt, id) STUB_SET_VARIABLE(virt, id) STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id) STUB_RESET_SYSTEM(virt, id) void efi_gettimeofday (struct timespec64 *ts) { efi_time_t tm; if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) { memset(ts, 0, sizeof(*ts)); return; } ts->tv_sec = mktime64(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); ts->tv_nsec = tm.nanosecond; } static int is_memory_available (efi_memory_desc_t *md) { if (!(md->attribute & EFI_MEMORY_WB)) return 0; switch (md->type) { case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: return 1; } return 0; } typedef struct kern_memdesc { u64 attribute; u64 start; u64 num_pages; } kern_memdesc_t; static kern_memdesc_t *kern_memmap; #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT) static inline u64 kmd_end(kern_memdesc_t *kmd) { return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT)); } static inline u64 efi_md_end(efi_memory_desc_t *md) { return (md->phys_addr + efi_md_size(md)); } static inline int efi_wb(efi_memory_desc_t *md) { return (md->attribute & EFI_MEMORY_WB); } static inline int efi_uc(efi_memory_desc_t *md) { return (md->attribute & EFI_MEMORY_UC); } static void walk (efi_freemem_callback_t callback, void *arg, u64 attr) { kern_memdesc_t *k; u64 start, end, voff; voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET; for (k = kern_memmap; k->start != ~0UL; k++) { if (k->attribute != attr) continue; start = PAGE_ALIGN(k->start); end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK; if (start < end) if ((*callback)(start + voff, end + voff, arg) < 0) return; } } /* * Walk the EFI memory map and call CALLBACK once for each EFI memory * descriptor that has memory that is available for OS use. */ void efi_memmap_walk (efi_freemem_callback_t callback, void *arg) { walk(callback, arg, EFI_MEMORY_WB); } /* * Walk the EFI memory map and call CALLBACK once for each EFI memory * descriptor that has memory that is available for uncached allocator. */ void efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg) { walk(callback, arg, EFI_MEMORY_UC); } /* * Look for the PAL_CODE region reported by EFI and map it using an * ITR to enable safe PAL calls in virtual mode. 
See IA-64 Processor * Abstraction Layer chapter 11 in ADAG */ void * efi_get_pal_addr (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; int pal_code_count = 0; u64 vaddr, mask; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->type != EFI_PAL_CODE) continue; if (++pal_code_count > 1) { printk(KERN_ERR "Too many EFI Pal Code memory ranges, " "dropped @ %llx\n", md->phys_addr); continue; } /* * The only ITLB entry in region 7 that is used is the one * installed by __start(). That entry covers a 64MB range. */ mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1); vaddr = PAGE_OFFSET + md->phys_addr; /* * We must check that the PAL mapping won't overlap with the * kernel mapping. * * PAL code is guaranteed to be aligned on a power of 2 between * 4k and 256KB and that only one ITR is needed to map it. This * implies that the PAL code is always aligned on its size, * i.e., the closest matching page size supported by the TLB. * Therefore PAL code is guaranteed never to cross a 64MB unless * it is bigger than 64MB (very unlikely!). So for now the * following test is enough to determine whether or not we need * a dedicated ITR for the PAL code. */ if ((vaddr & mask) == (KERNEL_START & mask)) { printk(KERN_INFO "%s: no need to install ITR for PAL code\n", __func__); continue; } if (efi_md_size(md) > IA64_GRANULE_SIZE) panic("Whoa! PAL code size bigger than a granule!"); #if EFI_DEBUG mask = ~((1 << IA64_GRANULE_SHIFT) - 1); printk(KERN_INFO "CPU %d: mapping PAL code " "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n", smp_processor_id(), md->phys_addr, md->phys_addr + efi_md_size(md), vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); #endif return __va(md->phys_addr); } printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", __func__); return NULL; } static u8 __init palo_checksum(u8 *buffer, u32 length) { u8 sum = 0; u8 *end = buffer + length; while (buffer < end) sum = (u8) (sum + *(buffer++)); return sum; } /* * Parse and handle PALO table which is published at: * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf */ static void __init handle_palo(unsigned long phys_addr) { struct palo_table *palo = __va(phys_addr); u8 checksum; if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) { printk(KERN_INFO "PALO signature incorrect.\n"); return; } checksum = palo_checksum((u8 *)palo, palo->length); if (checksum) { printk(KERN_INFO "PALO checksum incorrect.\n"); return; } setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO); } void efi_map_pal_code (void) { void *pal_vaddr = efi_get_pal_addr (); u64 psr; if (!pal_vaddr) return; /* * Cannot write to CRx with PSR.ic=1 */ psr = ia64_clear_ic(); ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr), pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), IA64_GRANULE_SHIFT); ia64_set_psr(psr); /* restore psr */ } void __init efi_init (void) { const efi_system_table_t *efi_systab; void *efi_map_start, *efi_map_end; u64 efi_desc_size; char *cp; set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); /* * It's too early to be able to use the standard kernel command line * support... 
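 * so mem=, max_addr= and min_addr= are parsed by hand below.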
*/ for (cp = boot_command_line; *cp; ) { if (memcmp(cp, "mem=", 4) == 0) { mem_limit = memparse(cp + 4, &cp); } else if (memcmp(cp, "max_addr=", 9) == 0) { max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); } else if (memcmp(cp, "min_addr=", 9) == 0) { min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp)); } else { while (*cp != ' ' && *cp) ++cp; while (*cp == ' ') ++cp; } } if (min_addr != 0UL) printk(KERN_INFO "Ignoring memory below %lluMB\n", min_addr >> 20); if (max_addr != ~0UL) printk(KERN_INFO "Ignoring memory above %lluMB\n", max_addr >> 20); efi_systab = __va(ia64_boot_param->efi_systab); /* * Verify the EFI Table */ if (efi_systab == NULL) panic("Whoa! Can't find EFI system table.\n"); if (efi_systab_check_header(&efi_systab->hdr)) panic("Whoa! EFI system table signature incorrect\n"); efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor); palo_phys = EFI_INVALID_TABLE_ADDR; if (efi_config_parse_tables(__va(efi_systab->tables), efi_systab->nr_tables, arch_tables) != 0) return; if (palo_phys != EFI_INVALID_TABLE_ADDR) handle_palo(palo_phys); runtime = __va(efi_systab->runtime); efi.get_time = phys_get_time; efi.set_time = phys_set_time; efi.get_wakeup_time = phys_get_wakeup_time; efi.set_wakeup_time = phys_set_wakeup_time; efi.get_variable = phys_get_variable; efi.get_next_variable = phys_get_next_variable; efi.set_variable = phys_set_variable; efi.get_next_high_mono_count = phys_get_next_high_mono_count; efi.reset_system = phys_reset_system; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; #if EFI_DEBUG /* print EFI memory map: */ { efi_memory_desc_t *md; void *p; unsigned int i; for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) { const char *unit; unsigned long size; char buf[64]; md = p; size = md->num_pages << EFI_PAGE_SHIFT; if ((size >> 40) > 0) { size >>= 40; unit = "TB"; } else if ((size >> 30) > 0) { size >>= 30; unit = "GB"; } else if ((size >> 20) > 0) { size >>= 20; unit = "MB"; } else { size >>= 10; unit = "KB"; } printk("mem%02d: %s " "range=[0x%016llx-0x%016llx) (%4lu%s)\n", i, efi_md_typeattr_format(buf, sizeof(buf), md), md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); } } #endif efi_map_pal_code(); efi_enter_virtual_mode(); } void efi_enter_virtual_mode (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; efi_status_t status; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->attribute & EFI_MEMORY_RUNTIME) { /* * Some descriptors have multiple bits set, so the * order of the tests is relevant. 
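 * WB is preferred, then UC; WC and WT regions currently just fall back to ioremap().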
*/ if (md->attribute & EFI_MEMORY_WB) { md->virt_addr = (u64) __va(md->phys_addr); } else if (md->attribute & EFI_MEMORY_UC) { md->virt_addr = (u64) ioremap(md->phys_addr, 0); } else if (md->attribute & EFI_MEMORY_WC) { #if 0 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WC | _PAGE_PL_0 | _PAGE_AR_RW)); #else printk(KERN_INFO "EFI_MEMORY_WC mapping\n"); md->virt_addr = (u64) ioremap(md->phys_addr, 0); #endif } else if (md->attribute & EFI_MEMORY_WT) { #if 0 md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WT | _PAGE_PL_0 | _PAGE_AR_RW)); #else printk(KERN_INFO "EFI_MEMORY_WT mapping\n"); md->virt_addr = (u64) ioremap(md->phys_addr, 0); #endif } } } status = efi_call_phys(__va(runtime->set_virtual_address_map), ia64_boot_param->efi_memmap_size, efi_desc_size, ia64_boot_param->efi_memdesc_version, ia64_boot_param->efi_memmap); if (status != EFI_SUCCESS) { printk(KERN_WARNING "warning: unable to switch EFI into " "virtual mode (status=%lu)\n", status); return; } set_bit(EFI_RUNTIME_SERVICES, &efi.flags); /* * Now that EFI is in virtual mode, we call the EFI functions more * efficiently: */ efi.get_time = virt_get_time; efi.set_time = virt_set_time; efi.get_wakeup_time = virt_get_wakeup_time; efi.set_wakeup_time = virt_set_wakeup_time; efi.get_variable = virt_get_variable; efi.get_next_variable = virt_get_next_variable; efi.set_variable = virt_set_variable; efi.get_next_high_mono_count = virt_get_next_high_mono_count; efi.reset_system = virt_reset_system; } /* * Walk the EFI memory map looking for the I/O port range. There can only be * one entry of this type, other I/O port ranges should be described via ACPI. */ u64 efi_get_iobase (void) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) { if (md->attribute & EFI_MEMORY_UC) return md->phys_addr; } } return 0; } static struct kern_memdesc * kern_memory_descriptor (unsigned long phys_addr) { struct kern_memdesc *md; for (md = kern_memmap; md->start != ~0UL; md++) { if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT)) return md; } return NULL; } static efi_memory_desc_t * efi_memory_descriptor (unsigned long phys_addr) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (phys_addr - md->phys_addr < efi_md_size(md)) return md; } return NULL; } static int efi_memmap_intersects (unsigned long phys_addr, unsigned long size) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; unsigned long end; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; end = phys_addr + size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->phys_addr < end && efi_md_end(md) > phys_addr) return 1; } return 0; } int efi_mem_type (unsigned long phys_addr) { efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); if (md) return md->type; return 
-EINVAL; } u64 efi_mem_attributes (unsigned long phys_addr) { efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); if (md) return md->attribute; return 0; } EXPORT_SYMBOL(efi_mem_attributes); u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size) { unsigned long end = phys_addr + size; efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); u64 attr; if (!md) return 0; /* * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells * the kernel that firmware needs this region mapped. */ attr = md->attribute & ~EFI_MEMORY_RUNTIME; do { unsigned long md_end = efi_md_end(md); if (end <= md_end) return attr; md = efi_memory_descriptor(md_end); if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr) return 0; } while (md); return 0; /* never reached */ } u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size) { unsigned long end = phys_addr + size; struct kern_memdesc *md; u64 attr; /* * This is a hack for ioremap calls before we set up kern_memmap. * Maybe we should do efi_memmap_init() earlier instead. */ if (!kern_memmap) { attr = efi_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return EFI_MEMORY_WB; return 0; } md = kern_memory_descriptor(phys_addr); if (!md) return 0; attr = md->attribute; do { unsigned long md_end = kmd_end(md); if (end <= md_end) return attr; md = kern_memory_descriptor(md_end); if (!md || md->attribute != attr) return 0; } while (md); return 0; /* never reached */ } int valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size) { u64 attr; /* * /dev/mem reads and writes use copy_to_user(), which implicitly * uses a granule-sized kernel identity mapping. It's really * only safe to do this for regions in kern_memmap. For more * details, see Documentation/arch/ia64/aliasing.rst. */ attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) return 1; return 0; } int valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size) { unsigned long phys_addr = pfn << PAGE_SHIFT; u64 attr; attr = efi_mem_attribute(phys_addr, size); /* * /dev/mem mmap uses normal user pages, so we don't need the entire * granule, but the entire region we're mapping must support the same * attribute. */ if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC) return 1; /* * Intel firmware doesn't tell us about all the MMIO regions, so * in general we have to allow mmap requests. But if EFI *does* * tell us about anything inside this region, we should deny it. * The user can always map a smaller region to avoid the overlap. */ if (efi_memmap_intersects(phys_addr, size)) return 0; return 1; } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { unsigned long phys_addr = pfn << PAGE_SHIFT; u64 attr; /* * For /dev/mem mmap, we use user mappings, but if the region is * in kern_memmap (and hence may be covered by a kernel mapping), * we must use the same attribute as the kernel mapping. */ attr = kern_mem_attribute(phys_addr, size); if (attr & EFI_MEMORY_WB) return pgprot_cacheable(vma_prot); else if (attr & EFI_MEMORY_UC) return pgprot_noncached(vma_prot); /* * Some chipsets don't support UC access to memory. If * WB is supported, we prefer that. 
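 * Reaching this point means kern_mem_attribute() reported neither WB nor UC, so fall
 * back to the attributes recorded in the raw EFI memory map.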
*/ if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB) return pgprot_cacheable(vma_prot); return pgprot_noncached(vma_prot); } int __init efi_uart_console_only(void) { efi_status_t status; char *s, name[] = "ConOut"; efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID; efi_char16_t *utf16, name_utf16[32]; unsigned char data[1024]; unsigned long size = sizeof(data); struct efi_generic_dev_path *hdr, *end_addr; int uart = 0; /* Convert to UTF-16 */ utf16 = name_utf16; s = name; while (*s) *utf16++ = *s++ & 0x7f; *utf16 = 0; status = efi.get_variable(name_utf16, &guid, NULL, &size, data); if (status != EFI_SUCCESS) { printk(KERN_ERR "No EFI %s variable?\n", name); return 0; } hdr = (struct efi_generic_dev_path *) data; end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size); while (hdr < end_addr) { if (hdr->type == EFI_DEV_MSG && hdr->sub_type == EFI_DEV_MSG_UART) uart = 1; else if (hdr->type == EFI_DEV_END_PATH || hdr->type == EFI_DEV_END_PATH2) { if (!uart) return 0; if (hdr->sub_type == EFI_DEV_END_ENTIRE) return 1; uart = 0; } hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length); } printk(KERN_ERR "Malformed %s value\n", name); return 0; } /* * Look for the first granule aligned memory descriptor memory * that is big enough to hold EFI memory map. Make sure this * descriptor is at least granule sized so it does not get trimmed */ struct kern_memdesc * find_memmap_space (void) { u64 contig_low=0, contig_high=0; u64 as = 0, ae; void *efi_map_start, *efi_map_end, *p, *q; efi_memory_desc_t *md, *pmd = NULL, *check_md; u64 space_needed, efi_desc_size; unsigned long total_mem = 0; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; /* * Worst case: we need 3 kernel descriptors for each efi descriptor * (if every entry has a WB part in the middle, and UC head and tail), * plus one for the end marker. */ space_needed = sizeof(kern_memdesc_t) * (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1); for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { md = p; if (!efi_wb(md)) { continue; } if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { contig_low = GRANULEROUNDUP(md->phys_addr); contig_high = efi_md_end(md); for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { check_md = q; if (!efi_wb(check_md)) break; if (contig_high != check_md->phys_addr) break; contig_high = efi_md_end(check_md); } contig_high = GRANULEROUNDDOWN(contig_high); } if (!is_memory_available(md) || md->type == EFI_LOADER_DATA) continue; /* Round ends inward to granule boundaries */ as = max(contig_low, md->phys_addr); ae = min(contig_high, efi_md_end(md)); /* keep within max_addr= and min_addr= command line arg */ as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; /* avoid going over mem= command line arg */ if (total_mem + (ae - as) > mem_limit) ae -= total_mem + (ae - as) - mem_limit; if (ae <= as) continue; if (ae - as > space_needed) break; } if (p >= efi_map_end) panic("Can't allocate space for kernel memory descriptors"); return __va(as); } /* * Walk the EFI memory map and gather all memory available for kernel * to use. We can allocate partial granules only if the unavailable * parts exist, and are WB. 
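 * Granule-unaligned head/tail trimmings of WB ranges are handed to the uncached
 * allocator when the descriptor also allows UC access.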
*/ unsigned long efi_memmap_init(u64 *s, u64 *e) { struct kern_memdesc *k, *prev = NULL; u64 contig_low=0, contig_high=0; u64 as, ae, lim; void *efi_map_start, *efi_map_end, *p, *q; efi_memory_desc_t *md, *pmd = NULL, *check_md; u64 efi_desc_size; unsigned long total_mem = 0; k = kern_memmap = find_memmap_space(); efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) { md = p; if (!efi_wb(md)) { if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY || md->type == EFI_BOOT_SERVICES_DATA)) { k->attribute = EFI_MEMORY_UC; k->start = md->phys_addr; k->num_pages = md->num_pages; k++; } continue; } if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) { contig_low = GRANULEROUNDUP(md->phys_addr); contig_high = efi_md_end(md); for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) { check_md = q; if (!efi_wb(check_md)) break; if (contig_high != check_md->phys_addr) break; contig_high = efi_md_end(check_md); } contig_high = GRANULEROUNDDOWN(contig_high); } if (!is_memory_available(md)) continue; /* * Round ends inward to granule boundaries * Give trimmings to uncached allocator */ if (md->phys_addr < contig_low) { lim = min(efi_md_end(md), contig_low); if (efi_uc(md)) { if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && kmd_end(k-1) == md->phys_addr) { (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT; } else { k->attribute = EFI_MEMORY_UC; k->start = md->phys_addr; k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT; k++; } } as = contig_low; } else as = md->phys_addr; if (efi_md_end(md) > contig_high) { lim = max(md->phys_addr, contig_high); if (efi_uc(md)) { if (lim == md->phys_addr && k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC && kmd_end(k-1) == md->phys_addr) { (k-1)->num_pages += md->num_pages; } else { k->attribute = EFI_MEMORY_UC; k->start = lim; k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT; k++; } } ae = contig_high; } else ae = efi_md_end(md); /* keep within max_addr= and min_addr= command line arg */ as = max(as, min_addr); ae = min(ae, max_addr); if (ae <= as) continue; /* avoid going over mem= command line arg */ if (total_mem + (ae - as) > mem_limit) ae -= total_mem + (ae - as) - mem_limit; if (ae <= as) continue; if (prev && kmd_end(prev) == md->phys_addr) { prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT; total_mem += ae - as; continue; } k->attribute = EFI_MEMORY_WB; k->start = as; k->num_pages = (ae - as) >> EFI_PAGE_SHIFT; total_mem += ae - as; prev = k++; } k->start = ~0L; /* end-marker */ /* reserve the memory we are using for kern_memmap */ *s = (u64)kern_memmap; *e = (u64)++k; return total_mem; } void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource) { struct resource *res; void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; char *name; unsigned long flags, desc; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; res = NULL; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (md->num_pages == 0) /* should not happen */ continue; flags = IORESOURCE_MEM | IORESOURCE_BUSY; desc = IORES_DESC_NONE; switch (md->type) { case EFI_MEMORY_MAPPED_IO: case EFI_MEMORY_MAPPED_IO_PORT_SPACE: continue; 
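		/* (MMIO and I/O-port space descriptors were skipped above and never become iomem resources.) */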
case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_CONVENTIONAL_MEMORY: if (md->attribute & EFI_MEMORY_WP) { name = "System ROM"; flags |= IORESOURCE_READONLY; } else if (md->attribute == EFI_MEMORY_UC) { name = "Uncached RAM"; } else { name = "System RAM"; flags |= IORESOURCE_SYSRAM; } break; case EFI_ACPI_MEMORY_NVS: name = "ACPI Non-volatile Storage"; desc = IORES_DESC_ACPI_NV_STORAGE; break; case EFI_UNUSABLE_MEMORY: name = "reserved"; flags |= IORESOURCE_DISABLED; break; case EFI_PERSISTENT_MEMORY: name = "Persistent Memory"; desc = IORES_DESC_PERSISTENT_MEMORY; break; case EFI_RESERVED_TYPE: case EFI_RUNTIME_SERVICES_CODE: case EFI_RUNTIME_SERVICES_DATA: case EFI_ACPI_RECLAIM_MEMORY: default: name = "reserved"; break; } if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { printk(KERN_ERR "failed to allocate resource for iomem\n"); return; } res->name = name; res->start = md->phys_addr; res->end = md->phys_addr + efi_md_size(md) - 1; res->flags = flags; res->desc = desc; if (insert_resource(&iomem_resource, res) < 0) kfree(res); else { /* * We don't know which region contains * kernel data so we try it repeatedly and * let the resource manager test it. */ insert_resource(res, code_resource); insert_resource(res, data_resource); insert_resource(res, bss_resource); #ifdef CONFIG_KEXEC insert_resource(res, &efi_memmap_res); insert_resource(res, &boot_param_res); if (crashk_res.end > crashk_res.start) insert_resource(res, &crashk_res); #endif } } } #ifdef CONFIG_KEXEC /* find a block of memory aligned to 64M exclude reserved regions rsvd_regions are sorted */ unsigned long __init kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n) { int i; u64 start, end; u64 alignment = 1UL << _PAGE_SIZE_64M; void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (!efi_wb(md)) continue; start = ALIGN(md->phys_addr, alignment); end = efi_md_end(md); for (i = 0; i < n; i++) { if (__pa(r[i].start) >= start && __pa(r[i].end) < end) { if (__pa(r[i].start) > start + size) return start; start = ALIGN(__pa(r[i].end), alignment); if (i < n-1 && __pa(r[i+1].start) < start + size) continue; else break; } } if (end > start + size) return start; } printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n", size); return ~0UL; } #endif #ifdef CONFIG_CRASH_DUMP /* locate the size find a the descriptor at a certain address */ unsigned long __init vmcore_find_descriptor_size (unsigned long address) { void *efi_map_start, *efi_map_end, *p; efi_memory_desc_t *md; u64 efi_desc_size; unsigned long ret = 0; efi_map_start = __va(ia64_boot_param->efi_memmap); efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size; efi_desc_size = ia64_boot_param->efi_memdesc_size; for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) { md = p; if (efi_wb(md) && md->type == EFI_LOADER_DATA && md->phys_addr == address) { ret = efi_md_size(md); break; } } if (ret == 0) printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n"); return ret; } #endif char *efi_systab_show_arch(char *str) { if (mps_phys != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "MPS=0x%lx\n", mps_phys); if (hcdp_phys != EFI_INVALID_TABLE_ADDR) str += sprintf(str, "HCDP=0x%lx\n", hcdp_phys); 
	return str;
}
linux-master
arch/ia64/kernel/efi.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Copyright (C) 2003 Fenghua Yu <[email protected]> * - Change pt_regs_off() to make it less dependent on pt_regs structure. */ /* * This file implements call frame unwind support for the Linux * kernel. Parsing and processing the unwind information is * time-consuming, so this implementation translates the unwind * descriptors into unwind scripts. These scripts are very simple * (basically a sequence of assignments) and efficient to execute. * They are cached for later re-use. Each script is specific for a * given instruction pointer address and the set of predicate values * that the script depends on (most unwind descriptors are * unconditional and scripts often do not depend on predicates at * all). This code is based on the unwind conventions described in * the "IA-64 Software Conventions and Runtime Architecture" manual. * * SMP conventions: * o updates to the global unwind data (in structure "unw") are serialized * by the unw.lock spinlock * o each unwind script has its own read-write lock; a thread must acquire * a read lock before executing a script and must acquire a write lock * before modifying a script * o if both the unw.lock spinlock and a script's read-write lock must be * acquired, then the read-write lock must be acquired first. */ #include <linux/module.h> #include <linux/memblock.h> #include <linux/elf.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <asm/unwind.h> #include <asm/delay.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/ptrace_offsets.h> #include <asm/rse.h> #include <asm/sections.h> #include <linux/uaccess.h> #include "entry.h" #include "unwind_i.h" #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */ #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE) #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1) #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE) #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */ #ifdef UNW_DEBUG static unsigned int unw_debug_level = UNW_DEBUG; # define UNW_DEBUG_ON(n) unw_debug_level >= n /* Do not code a printk level, not all debug lines end in newline */ # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__) # undef inline # define inline #else /* !UNW_DEBUG */ # define UNW_DEBUG_ON(n) 0 # define UNW_DPRINT(n, ...) #endif /* UNW_DEBUG */ #if UNW_STATS # define STAT(x...) x #else # define STAT(x...) 
#endif #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC) #define free_reg_state(usr) kfree(usr) #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC) #define free_labeled_state(usr) kfree(usr) typedef unsigned long unw_word; typedef unsigned char unw_hash_index_t; static struct { spinlock_t lock; /* spinlock for unwind data */ /* list of unwind tables (one per load-module) */ struct unw_table *tables; unsigned long r0; /* constant 0 for r0 */ /* table of registers that prologues can save (and order in which they're saved): */ const unsigned char save_order[8]; /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */ unsigned short sw_off[sizeof(struct unw_frame_info) / 8]; unsigned short lru_head; /* index of lead-recently used script */ unsigned short lru_tail; /* index of most-recently used script */ /* index into unw_frame_info for preserved register i */ unsigned short preg_index[UNW_NUM_REGS]; short pt_regs_offsets[32]; /* unwind table for the kernel: */ struct unw_table kernel_table; /* unwind table describing the gate page (kernel code that is mapped into user space): */ size_t gate_table_size; unsigned long *gate_table; /* hash table that maps instruction pointer to script index: */ unsigned short hash[UNW_HASH_SIZE]; /* script cache: */ struct unw_script cache[UNW_CACHE_SIZE]; # ifdef UNW_DEBUG const char *preg_name[UNW_NUM_REGS]; # endif # if UNW_STATS struct { struct { int lookups; int hinted_hits; int normal_hits; int collision_chain_traversals; } cache; struct { unsigned long build_time; unsigned long run_time; unsigned long parse_time; int builds; int news; int collisions; int runs; } script; struct { unsigned long init_time; unsigned long unwind_time; int inits; int unwinds; } api; } stat; # endif } unw = { .tables = &unw.kernel_table, .lock = __SPIN_LOCK_UNLOCKED(unw.lock), .save_order = { UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR, UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR }, .preg_index = { offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */ offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */ offsetof(struct unw_frame_info, bsp_loc)/8, offsetof(struct unw_frame_info, bspstore_loc)/8, offsetof(struct unw_frame_info, pfs_loc)/8, offsetof(struct unw_frame_info, rnat_loc)/8, offsetof(struct unw_frame_info, psp)/8, offsetof(struct unw_frame_info, rp_loc)/8, offsetof(struct unw_frame_info, r4)/8, offsetof(struct unw_frame_info, r5)/8, offsetof(struct unw_frame_info, r6)/8, offsetof(struct unw_frame_info, r7)/8, offsetof(struct unw_frame_info, unat_loc)/8, offsetof(struct unw_frame_info, pr_loc)/8, offsetof(struct unw_frame_info, lc_loc)/8, offsetof(struct unw_frame_info, fpsr_loc)/8, offsetof(struct unw_frame_info, b1_loc)/8, offsetof(struct unw_frame_info, b2_loc)/8, offsetof(struct unw_frame_info, b3_loc)/8, offsetof(struct unw_frame_info, b4_loc)/8, offsetof(struct unw_frame_info, b5_loc)/8, offsetof(struct unw_frame_info, f2_loc)/8, offsetof(struct unw_frame_info, f3_loc)/8, offsetof(struct unw_frame_info, f4_loc)/8, offsetof(struct unw_frame_info, f5_loc)/8, offsetof(struct unw_frame_info, fr_loc[16 - 16])/8, offsetof(struct unw_frame_info, fr_loc[17 - 16])/8, offsetof(struct unw_frame_info, fr_loc[18 - 16])/8, offsetof(struct unw_frame_info, fr_loc[19 - 16])/8, offsetof(struct unw_frame_info, fr_loc[20 - 16])/8, offsetof(struct unw_frame_info, fr_loc[21 - 16])/8, offsetof(struct unw_frame_info, fr_loc[22 - 16])/8, offsetof(struct 
unw_frame_info, fr_loc[23 - 16])/8, offsetof(struct unw_frame_info, fr_loc[24 - 16])/8, offsetof(struct unw_frame_info, fr_loc[25 - 16])/8, offsetof(struct unw_frame_info, fr_loc[26 - 16])/8, offsetof(struct unw_frame_info, fr_loc[27 - 16])/8, offsetof(struct unw_frame_info, fr_loc[28 - 16])/8, offsetof(struct unw_frame_info, fr_loc[29 - 16])/8, offsetof(struct unw_frame_info, fr_loc[30 - 16])/8, offsetof(struct unw_frame_info, fr_loc[31 - 16])/8, }, .pt_regs_offsets = { [0] = -1, offsetof(struct pt_regs, r1), offsetof(struct pt_regs, r2), offsetof(struct pt_regs, r3), [4] = -1, [5] = -1, [6] = -1, [7] = -1, offsetof(struct pt_regs, r8), offsetof(struct pt_regs, r9), offsetof(struct pt_regs, r10), offsetof(struct pt_regs, r11), offsetof(struct pt_regs, r12), offsetof(struct pt_regs, r13), offsetof(struct pt_regs, r14), offsetof(struct pt_regs, r15), offsetof(struct pt_regs, r16), offsetof(struct pt_regs, r17), offsetof(struct pt_regs, r18), offsetof(struct pt_regs, r19), offsetof(struct pt_regs, r20), offsetof(struct pt_regs, r21), offsetof(struct pt_regs, r22), offsetof(struct pt_regs, r23), offsetof(struct pt_regs, r24), offsetof(struct pt_regs, r25), offsetof(struct pt_regs, r26), offsetof(struct pt_regs, r27), offsetof(struct pt_regs, r28), offsetof(struct pt_regs, r29), offsetof(struct pt_regs, r30), offsetof(struct pt_regs, r31), }, .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 }, #ifdef UNW_DEBUG .preg_name = { "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp", "r4", "r5", "r6", "r7", "ar.unat", "pr", "ar.lc", "ar.fpsr", "b1", "b2", "b3", "b4", "b5", "f2", "f3", "f4", "f5", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31" } #endif }; static inline int read_only (void *addr) { return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0); } /* * Returns offset of rREG in struct pt_regs. */ static inline unsigned long pt_regs_off (unsigned long reg) { short off = -1; if (reg < ARRAY_SIZE(unw.pt_regs_offsets)) off = unw.pt_regs_offsets[reg]; if (off < 0) { UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg); off = 0; } return (unsigned long) off; } static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. */ UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt); return (struct pt_regs *) info->pt; } /* Unwind accessors. 
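 *
 * The accessors below take a frame-info structure, the register number
 * (where applicable), a pointer to the value and a write flag; they return
 * 0 on success and -1 if the register does not exist or lies outside the
 * register backing store.  unw_access_gr() additionally reads or updates
 * the register's NaT bit.  A typical (illustrative) consumer walks a
 * blocked task's stack roughly like this:
 *
 *	struct unw_frame_info info;
 *	unsigned long ip;
 *
 *	unw_init_from_blocked_task(&info, task);
 *	do {
 *		unw_get_ip(&info, &ip);
 *		... record or print ip ...
 *	} while (unw_unwind(&info) >= 0);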
 */

int
unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
{
	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
	struct unw_ireg *ireg;
	struct pt_regs *pt;

	if ((unsigned) regnum - 1 >= 127) {
		if (regnum == 0 && !write) {
			*val = 0;	/* read r0 always returns 0 */
			*nat = 0;
			return 0;
		}
		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
			   __func__, regnum);
		return -1;
	}

	if (regnum < 32) {
		if (regnum >= 4 && regnum <= 7) {
			/* access a preserved register */
			ireg = &info->r4 + (regnum - 4);
			addr = ireg->loc;
			if (addr) {
				nat_addr = addr + ireg->nat.off;
				switch (ireg->nat.type) {
				case UNW_NAT_VAL:
					/* simulate getf.sig/setf.sig */
					if (write) {
						if (*nat) {
							/* write NaTVal and be done with it */
							addr[0] = 0;
							addr[1] = 0x1fffe;
							return 0;
						}
						addr[1] = 0x1003e;
					} else {
						if (addr[0] == 0 && addr[1] == 0x1fffe) {
							/* return NaT and be done with it */
							*val = 0;
							*nat = 1;
							return 0;
						}
					}
					fallthrough;
				case UNW_NAT_NONE:
					dummy_nat = 0;
					nat_addr = &dummy_nat;
					break;

				case UNW_NAT_MEMSTK:
					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
					break;

				case UNW_NAT_REGSTK:
					nat_addr = ia64_rse_rnat_addr(addr);
					if ((unsigned long) addr < info->regstk.limit
					    || (unsigned long) addr >= info->regstk.top) {
						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
							"[0x%lx-0x%lx)\n",
							__func__, (void *) addr,
							info->regstk.limit,
							info->regstk.top);
						return -1;
					}
					if ((unsigned long) nat_addr >= info->regstk.top)
						nat_addr = &info->sw->ar_rnat;
					nat_mask = (1UL << ia64_rse_slot_num(addr));
					break;
				}
			} else {
				addr = &info->sw->r4 + (regnum - 4);
				nat_addr = &info->sw->ar_unat;
				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
			}
		} else {
			/* access a scratch register */
			pt = get_scratch_regs(info);
			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
			if (info->pri_unat_loc)
				nat_addr = info->pri_unat_loc;
			else
				nat_addr = &info->sw->caller_unat;
			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
		}
	} else {
		/* access a stacked register */
		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
		nat_addr = ia64_rse_rnat_addr(addr);
		if ((unsigned long) addr < info->regstk.limit
		    || (unsigned long) addr >= info->regstk.top) {
			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
				   "of rbs\n", __func__);
			return -1;
		}
		if ((unsigned long) nat_addr >= info->regstk.top)
			nat_addr = &info->sw->ar_rnat;
		nat_mask = (1UL << ia64_rse_slot_num(addr));
	}

	if (write) {
		if (read_only(addr)) {
			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
				__func__);
		} else {
			*addr = *val;
			if (*nat)
				*nat_addr |= nat_mask;
			else
				*nat_addr &= ~nat_mask;
		}
	} else {
		if ((*nat_addr & nat_mask) == 0) {
			*val = *addr;
			*nat = 0;
		} else {
			*val = 0;	/* if register is a NaT, *addr may contain kernel data! 
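					   Returning zero avoids leaking that stale value to the caller.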
*/ *nat = 1; } } return 0; } EXPORT_SYMBOL(unw_access_gr); int unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write) { unsigned long *addr; struct pt_regs *pt; switch (regnum) { /* scratch: */ case 0: pt = get_scratch_regs(info); addr = &pt->b0; break; case 6: pt = get_scratch_regs(info); addr = &pt->b6; break; case 7: pt = get_scratch_regs(info); addr = &pt->b7; break; /* preserved: */ case 1: case 2: case 3: case 4: case 5: addr = *(&info->b1_loc + (regnum - 1)); if (!addr) addr = &info->sw->b1 + (regnum - 1); break; default: UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n", __func__, regnum); return -1; } if (write) if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_br); int unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = NULL; struct pt_regs *pt; if ((unsigned) (regnum - 2) >= 126) { UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n", __func__, regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); } if (write) if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_fr); int unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write) { unsigned long *addr; struct pt_regs *pt; switch (regnum) { case UNW_AR_BSP: addr = info->bsp_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_BSPSTORE: addr = info->bspstore_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_PFS: addr = info->pfs_loc; if (!addr) addr = &info->sw->ar_pfs; break; case UNW_AR_RNAT: addr = info->rnat_loc; if (!addr) addr = &info->sw->ar_rnat; break; case UNW_AR_UNAT: addr = info->unat_loc; if (!addr) addr = &info->sw->caller_unat; break; case UNW_AR_LC: addr = info->lc_loc; if (!addr) addr = &info->sw->ar_lc; break; case UNW_AR_EC: if (!info->cfm_loc) return -1; if (write) *info->cfm_loc = (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52); else *val = (*info->cfm_loc >> 52) & 0x3f; return 0; case UNW_AR_FPSR: addr = info->fpsr_loc; if (!addr) addr = &info->sw->ar_fpsr; break; case UNW_AR_RSC: pt = get_scratch_regs(info); addr = &pt->ar_rsc; break; case UNW_AR_CCV: pt = get_scratch_regs(info); addr = &pt->ar_ccv; break; case UNW_AR_CSD: pt = get_scratch_regs(info); addr = &pt->ar_csd; break; case UNW_AR_SSD: pt = get_scratch_regs(info); addr = &pt->ar_ssd; break; default: UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", __func__, regnum); return -1; } if (write) { if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; } else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_ar); int unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write) { unsigned long *addr; addr = info->pr_loc; if (!addr) addr = 
&info->sw->pr; if (write) { if (read_only(addr)) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", __func__); } else *addr = *val; } else *val = *addr; return 0; } EXPORT_SYMBOL(unw_access_pr); /* Routines to manipulate the state stack. */ static inline void push (struct unw_state_record *sr) { struct unw_reg_state *rs; rs = alloc_reg_state(); if (!rs) { printk(KERN_ERR "unwind: cannot stack reg state!\n"); return; } memcpy(rs, &sr->curr, sizeof(*rs)); sr->curr.next = rs; } static void pop (struct unw_state_record *sr) { struct unw_reg_state *rs = sr->curr.next; if (!rs) { printk(KERN_ERR "unwind: stack underflow!\n"); return; } memcpy(&sr->curr, rs, sizeof(*rs)); free_reg_state(rs); } /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */ static struct unw_reg_state * dup_state_stack (struct unw_reg_state *rs) { struct unw_reg_state *copy, *prev = NULL, *first = NULL; while (rs) { copy = alloc_reg_state(); if (!copy) { printk(KERN_ERR "unwind.dup_state_stack: out of memory\n"); return NULL; } memcpy(copy, rs, sizeof(*copy)); if (first) prev->next = copy; else first = copy; rs = rs->next; prev = copy; } return first; } /* Free all stacked register states (but not RS itself). */ static void free_state_stack (struct unw_reg_state *rs) { struct unw_reg_state *p, *next; for (p = rs->next; p != NULL; p = next) { next = p->next; free_reg_state(p); } rs->next = NULL; } /* Unwind decoder routines */ static enum unw_register_index __attribute_const__ decode_abreg (unsigned char abreg, int memory) { switch (abreg) { case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04); case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22); case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30); case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41); case 0x60: return UNW_REG_PR; case 0x61: return UNW_REG_PSP; case 0x62: return memory ? 
UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR; case 0x63: return UNW_REG_RP; case 0x64: return UNW_REG_BSP; case 0x65: return UNW_REG_BSPSTORE; case 0x66: return UNW_REG_RNAT; case 0x67: return UNW_REG_UNAT; case 0x68: return UNW_REG_FPSR; case 0x69: return UNW_REG_PFS; case 0x6a: return UNW_REG_LC; default: break; } UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg); return UNW_REG_LC; } static void set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val) { reg->val = val; reg->where = where; if (reg->when == UNW_WHEN_NEVER) reg->when = when; } static void alloc_spill_area (unsigned long *offp, unsigned long regsize, struct unw_reg_info *lo, struct unw_reg_info *hi) { struct unw_reg_info *reg; for (reg = hi; reg >= lo; --reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->where = UNW_WHERE_PSPREL; *offp -= regsize; reg->val = *offp; } } } static inline void spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t) { struct unw_reg_info *reg; for (reg = *regp; reg <= lim; ++reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->when = t; *regp = reg + 1; return; } } UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__); } static inline void finish_prologue (struct unw_state_record *sr) { struct unw_reg_info *reg; unsigned long off; int i; /* * First, resolve implicit register save locations (see Section "11.4.2.3 Rules * for Using Unwind Descriptors", rule 3): */ for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) { reg = sr->curr.reg + unw.save_order[i]; if (reg->where == UNW_WHERE_GR_SAVE) { reg->where = UNW_WHERE_GR; reg->val = sr->gr_save_loc++; } } /* * Next, compute when the fp, general, and branch registers get * saved. This must come before alloc_spill_area() because * we need to know which registers are spilled to their home * locations. */ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; int t; static const unsigned char limit[3] = { UNW_REG_F31, UNW_REG_R7, UNW_REG_B5 }; struct unw_reg_info *(regs[3]); regs[0] = sr->curr.reg + UNW_REG_F2; regs[1] = sr->curr.reg + UNW_REG_R4; regs[2] = sr->curr.reg + UNW_REG_B1; for (t = 0; t < sr->region_len; ++t) { if ((t & 3) == 0) mask = *cp++; kind = (mask >> 2*(3-(t & 3))) & 3; if (kind > 0) spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1], sr->region_start + t); } } /* * Next, lay out the memory stack spill area: */ if (sr->any_spills) { off = sr->spill_offset; alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7); } } /* * Region header descriptors. 
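 *
 * A region header starts a new prologue or body region covering 'rlen'
 * instruction slots.  desc_prologue() finishes the previous prologue (if
 * any), discards pending epilogue state and, for prologue regions, records
 * which of rp, ar.pfs, psp and pr (in that order) were saved to the general
 * registers starting at 'grsave'.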
*/ static void desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, struct unw_state_record *sr) { int i, region_start; if (!(sr->in_body || sr->first_region)) finish_prologue(sr); sr->first_region = 0; /* check if we're done: */ if (sr->when_target < sr->region_start + sr->region_len) { sr->done = 1; return; } region_start = sr->region_start + sr->region_len; for (i = 0; i < sr->epilogue_count; ++i) pop(sr); sr->epilogue_count = 0; sr->epilogue_start = UNW_WHEN_NEVER; sr->region_start = region_start; sr->region_len = rlen; sr->in_body = body; if (!body) { push(sr); for (i = 0; i < 4; ++i) { if (mask & 0x8) set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR, sr->region_start + sr->region_len - 1, grsave++); mask <<= 1; } sr->gr_save_loc = grsave; sr->any_spills = 0; sr->imask = NULL; sr->spill_offset = 0x10; /* default to psp+16 */ } } /* * Prologue descriptors. */ static inline void desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr) { if (abi == 3 && context == 'i') { sr->flags |= UNW_FLAG_INTERRUPT_FRAME; UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__); } else UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n", __func__, abi, context); } static inline void desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); brmask >>= 1; } } static inline void desc_br_mem (unsigned char brmask, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) { set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } brmask >>= 1; } } static inline void desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { int base = (i < 4) ? 
UNW_REG_F2 : UNW_REG_F16 - 4; set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_fr_mem (unsigned char frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((frmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); grmask >>= 1; } } static inline void desc_gr_mem (unsigned char grmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } } static inline void desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr) { set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE, sr->region_start + min_t(int, t, sr->region_len - 1), 16*size); } static inline void desc_mem_stack_v (unw_word t, struct unw_state_record *sr) { sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1); } static inline void desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst); } static inline void desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 0x10 - 4*pspoff); } static inline void desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1, 4*spoff); } static inline void desc_rp_br (unsigned char dst, struct unw_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr) { struct unw_reg_info *reg = sr->curr.reg + regnum; if (reg->where == UNW_WHERE_NONE) reg->where = UNW_WHERE_GR_SAVE; reg->when = sr->region_start + min_t(int, t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct unw_state_record *sr) { sr->spill_offset = 0x10 - 4*pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr) { sr->imask = imaskp; return imaskp + (2*sr->region_len + 7)/8; } /* * Body descriptors. 
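 *
 * The body descriptors mark where the epilogue starts (desc_epilogue),
 * snapshot the current state under a label (desc_label_state), or switch
 * back to a previously labeled state (desc_copy_state).  Labeled states are
 * deep-copied via dup_state_stack() so that later descriptors cannot modify
 * a saved copy in place.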
*/ static inline void desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr) { sr->epilogue_start = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack(&sr->curr); memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); sr->curr.next = dup_state_stack(ls->saved_state.next); return; } } printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label); } static inline void desc_label_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; ls = alloc_labeled_state(); if (!ls) { printk(KERN_ERR "unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); ls->saved_state.next = dup_state_stack(sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* * General descriptors. */ static inline int desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) { if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1)) return 0; if (qp > 0) { if ((sr->pr_val & (1UL << qp)) == 0) return 0; sr->pr_mask |= (1UL << qp); } return 1; } static inline void desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = UNW_WHERE_NONE; r->when = UNW_WHEN_NEVER; r->val = 0; } static inline void desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct unw_state_record *sr) { enum unw_where where = UNW_WHERE_GR; struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; if (x) where = UNW_WHERE_BR; else if (ytreg & 0x80) where = UNW_WHERE_FR; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = where; r->when = sr->region_start + min_t(int, t, sr->region_len - 1); r->val = (ytreg & 0x7f); } static inline void desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_PSPREL; r->when = sr->region_start + min_t(int, t, sr->region_len - 1); r->val = 0x10 - 4*pspoff; } static inline void desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_SPREL; r->when = sr->region_start + min_t(int, t, sr->region_len - 1); r->val = 4*spoff; } #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \ code); /* * region headers: */ #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) /* * prologue descriptors: */ #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) #define 
UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg) #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg) #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg) #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) /* * body descriptors: */ #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) /* * general unwind descriptors: */ #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg) #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) #include "unwind_decoder.c" /* Unwind scripts. */ static inline unw_hash_index_t hash (unsigned long ip) { /* magic number = ((sqrt(5)-1)/2)*2^64 */ static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL; return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE); } static inline long cache_match (struct unw_script *script, unsigned long ip, unsigned long pr) { read_lock(&script->lock); if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0) /* keep the read lock... */ return 1; read_unlock(&script->lock); return 0; } static inline struct unw_script * script_lookup (struct unw_frame_info *info) { struct unw_script *script = unw.cache + info->hint; unsigned short index; unsigned long ip, pr; if (UNW_DEBUG_ON(0)) return NULL; /* Always regenerate scripts in debug mode */ STAT(++unw.stat.cache.lookups); ip = info->ip; pr = info->pr; if (cache_match(script, ip, pr)) { STAT(++unw.stat.cache.hinted_hits); return script; } index = unw.hash[hash(ip)]; if (index >= UNW_CACHE_SIZE) return NULL; script = unw.cache + index; while (1) { if (cache_match(script, ip, pr)) { /* update hint; no locking required as single-word writes are atomic */ STAT(++unw.stat.cache.normal_hits); unw.cache[info->prev_script].hint = script - unw.cache; return script; } if (script->coll_chain >= UNW_HASH_SIZE) return NULL; script = unw.cache + script->coll_chain; STAT(++unw.stat.cache.collision_chain_traversals); } } /* * On returning, a write lock for the SCRIPT is still being held. 
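 *
 * script_new() relies on the caller (build_script()) already holding the
 * global unw.lock spinlock; that is what makes the unlocked manipulation of
 * the LRU chain and of the hash table below safe.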
*/ static inline struct unw_script * script_new (unsigned long ip) { struct unw_script *script, *prev, *tmp; unw_hash_index_t index; unsigned short head; STAT(++unw.stat.script.news); /* * Can't (easily) use cmpxchg() here because of ABA problem * that is intrinsic in cmpxchg()... */ head = unw.lru_head; script = unw.cache + head; unw.lru_head = script->lru_chain; /* * We'd deadlock here if we interrupted a thread that is holding a read lock on * script->lock. Thus, if the write_trylock() fails, we simply bail out. The * alternative would be to disable interrupts whenever we hold a read-lock, but * that seems silly. */ if (!write_trylock(&script->lock)) return NULL; /* re-insert script at the tail of the LRU chain: */ unw.cache[unw.lru_tail].lru_chain = head; unw.lru_tail = head; /* remove the old script from the hash table (if it's there): */ if (script->ip) { index = hash(script->ip); tmp = unw.cache + unw.hash[index]; prev = NULL; while (1) { if (tmp == script) { if (prev) prev->coll_chain = tmp->coll_chain; else unw.hash[index] = tmp->coll_chain; break; } else prev = tmp; if (tmp->coll_chain >= UNW_CACHE_SIZE) /* old script wasn't in the hash-table */ break; tmp = unw.cache + tmp->coll_chain; } } /* enter new script in the hash table */ index = hash(ip); script->coll_chain = unw.hash[index]; unw.hash[index] = script - unw.cache; script->ip = ip; /* set new IP while we're holding the locks */ STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions); script->flags = 0; script->hint = 0; script->count = 0; return script; } static void script_finalize (struct unw_script *script, struct unw_state_record *sr) { script->pr_mask = sr->pr_mask; script->pr_val = sr->pr_val; /* * We could down-grade our write-lock on script->lock here but * the rwlock API doesn't offer atomic lock downgrading, so * we'll just keep the write-lock and release it later when * we're done using the script. 
*/ } static inline void script_emit (struct unw_script *script, struct unw_insn insn) { if (script->count >= UNW_MAX_SCRIPT_LEN) { UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n", __func__, UNW_MAX_SCRIPT_LEN); return; } script->insn[script->count++] = insn; } static inline void emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; struct unw_insn insn; unsigned long val = 0; switch (r->where) { case UNW_WHERE_GR: if (r->val >= 32) { /* register got spilled to a stacked register */ opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_REGSTK; } else /* register got spilled to a scratch register */ opc = UNW_INSN_SETNAT_MEMSTK; break; case UNW_WHERE_FR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_VAL; break; case UNW_WHERE_BR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_NONE; break; case UNW_WHERE_PSPREL: case UNW_WHERE_SPREL: opc = UNW_INSN_SETNAT_MEMSTK; break; default: UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n", __func__, r->where); return; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); } static void compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; unsigned long val, rval; struct unw_insn insn; long need_nat_info; if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target) return; opc = UNW_INSN_MOVE; val = rval = r->val; need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7); switch (r->where) { case UNW_WHERE_GR: if (rval >= 32) { opc = UNW_INSN_MOVE_STACKED; val = rval - 32; } else if (rval >= 4 && rval <= 7) { if (need_nat_info) { opc = UNW_INSN_MOVE2; need_nat_info = 0; } val = unw.preg_index[UNW_REG_R4 + (rval - 4)]; } else if (rval == 0) { opc = UNW_INSN_MOVE_CONST; val = 0; } else { /* register got spilled to a scratch register */ opc = UNW_INSN_MOVE_SCRATCH; val = pt_regs_off(rval); } break; case UNW_WHERE_FR: if (rval <= 5) val = unw.preg_index[UNW_REG_F2 + (rval - 2)]; else if (rval >= 16 && rval <= 31) val = unw.preg_index[UNW_REG_F16 + (rval - 16)]; else { opc = UNW_INSN_MOVE_SCRATCH; if (rval <= 11) val = offsetof(struct pt_regs, f6) + 16*(rval - 6); else UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n", __func__, rval); } break; case UNW_WHERE_BR: if (rval >= 1 && rval <= 5) val = unw.preg_index[UNW_REG_B1 + (rval - 1)]; else { opc = UNW_INSN_MOVE_SCRATCH; if (rval == 0) val = offsetof(struct pt_regs, b0); else if (rval == 6) val = offsetof(struct pt_regs, b6); else val = offsetof(struct pt_regs, b7); } break; case UNW_WHERE_SPREL: opc = UNW_INSN_ADD_SP; break; case UNW_WHERE_PSPREL: opc = UNW_INSN_ADD_PSP; break; default: UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n", __func__, i, r->where); break; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); if (need_nat_info) emit_nat_info(sr, i, script); if (i == UNW_REG_PSP) { /* * info->psp must contain the _value_ of the previous * sp, not it's save location. 
We get this by * dereferencing the value we just stored in * info->psp: */ insn.opc = UNW_INSN_LOAD; insn.dst = insn.val = unw.preg_index[UNW_REG_PSP]; script_emit(script, insn); } } static inline const struct unw_table_entry * lookup (struct unw_table *table, unsigned long rel_ip) { const struct unw_table_entry *e = NULL; unsigned long lo, hi, mid; /* do a binary search for right entry: */ for (lo = 0, hi = table->length; lo < hi; ) { mid = (lo + hi) / 2; e = &table->array[mid]; if (rel_ip < e->start_offset) hi = mid; else if (rel_ip >= e->end_offset) lo = mid + 1; else break; } if (rel_ip < e->start_offset || rel_ip >= e->end_offset) return NULL; return e; } /* * Build an unwind script that unwinds from state OLD_STATE to the * entrypoint of the function that called OLD_STATE. */ static inline struct unw_script * build_script (struct unw_frame_info *info) { const struct unw_table_entry *e = NULL; struct unw_script *script = NULL; struct unw_labeled_state *ls, *next; unsigned long ip = info->ip; struct unw_state_record sr; struct unw_table *table, *prev; struct unw_reg_info *r; struct unw_insn insn; u8 *dp, *desc_end; u64 hdr; int i; STAT(unsigned long start, parse_start;) STAT(++unw.stat.script.builds; start = ia64_get_itc()); /* build state record */ memset(&sr, 0, sizeof(sr)); for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) r->when = UNW_WHEN_NEVER; sr.pr_val = info->pr; UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip); script = script_new(ip); if (!script) { UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__); STAT(unw.stat.script.build_time += ia64_get_itc() - start); return NULL; } unw.cache[info->prev_script].hint = script - unw.cache; /* search the kernels and the modules' unwind tables for IP: */ STAT(parse_start = ia64_get_itc()); prev = NULL; for (table = unw.tables; table; table = table->next) { if (ip >= table->start && ip < table->end) { /* * Leave the kernel unwind table at the very front, * lest moving it breaks some assumption elsewhere. * Otherwise, move the matching table to the second * position in the list so that traversals can benefit * from commonality in backtrace paths. */ if (prev && prev != unw.tables) { /* unw is safe - we're already spinlocked */ prev->next = table->next; table->next = unw.tables->next; unw.tables->next = table; } e = lookup(table, ip - table->segment_base); break; } prev = table; } if (!e) { /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", __func__, ip, unw.cache[info->prev_script].ip); sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; sr.curr.reg[UNW_REG_RP].when = -1; sr.curr.reg[UNW_REG_RP].val = 0; compile_reg(&sr, UNW_REG_RP, script); script_finalize(script, &sr); STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); STAT(unw.stat.script.build_time += ia64_get_itc() - start); return script; } sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16 + (ip & 0xfUL)); hdr = *(u64 *) (table->segment_base + e->info_offset); dp = (u8 *) (table->segment_base + e->info_offset + 8); desc_end = dp + 8*UNW_LENGTH(hdr); while (!sr.done && dp < desc_end) dp = unw_decode(dp, sr.in_body, &sr); if (sr.when_target > sr.epilogue_start) { /* * sp has been restored and all values on the memory stack below * psp also have been restored. 
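 * Spill locations at or below psp (psp-relative offsets of at most 0x10) and
 * all sp-relative locations would point at stack memory that may already
 * have been reused, so they are reset to their default state below.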
*/ sr.curr.reg[UNW_REG_PSP].val = 0; sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE; sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER; for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10) || r->where == UNW_WHERE_SPREL) { r->val = 0; r->where = UNW_WHERE_NONE; r->when = UNW_WHEN_NEVER; } } script->flags = sr.flags; /* * If RP did't get saved, generate entry for the return link * register. */ if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) { sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; sr.curr.reg[UNW_REG_RP].when = -1; sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n", __func__, ip, sr.curr.reg[UNW_REG_RP].where, sr.curr.reg[UNW_REG_RP].val); } #ifdef UNW_DEBUG UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n", __func__, table->segment_base + e->start_offset, sr.when_target); for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); switch (r->where) { case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break; case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break; case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break; case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break; case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break; case UNW_WHERE_NONE: UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val); break; default: UNW_DPRINT(1, "BADWHERE(%d)", r->where); break; } UNW_DPRINT(1, "\t\t%d\n", r->when); } } #endif STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); /* translate state record into unwinder instructions: */ /* * First, set psp if we're dealing with a fixed-size frame; * subsequent instructions may depend on this value. */ if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE) && sr.curr.reg[UNW_REG_PSP].val != 0) { /* new psp is sp plus frame size */ insn.opc = UNW_INSN_ADD; insn.dst = offsetof(struct unw_frame_info, psp)/8; insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */ script_emit(script, insn); } /* determine where the primary UNaT is: */ if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when) i = UNW_REG_PRI_UNAT_GR; else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else i = UNW_REG_PRI_UNAT_GR; compile_reg(&sr, i, script); for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i) compile_reg(&sr, i, script); /* free labeled register states & stack: */ STAT(parse_start = ia64_get_itc()); for (ls = sr.labeled_states; ls; ls = next) { next = ls->next; free_state_stack(&ls->saved_state); free_labeled_state(ls); } free_state_stack(&sr.curr); STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start); script_finalize(script, &sr); STAT(unw.stat.script.build_time += ia64_get_itc() - start); return script; } /* * Apply the unwinding actions represented by OPS and update SR to * reflect the state that existed upon entry to the function that this * unwinder represents. 
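 *
 * Each unw_insn is an (opcode, dst, val) triple in which dst and val index
 * 8-byte slots of struct unw_frame_info, so running a script amounts to a
 * short sequence of guarded assignments plus lazy, on-demand initialization
 * of locations that live in the switch_stack frame.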
*/ static inline void run_script (struct unw_script *script, struct unw_frame_info *state) { struct unw_insn *ip, *limit, next_insn; unsigned long opc, dst, val, off; unsigned long *s = (unsigned long *) state; STAT(unsigned long start;) STAT(++unw.stat.script.runs; start = ia64_get_itc()); state->flags = script->flags; ip = script->insn; limit = script->insn + script->count; next_insn = *ip; while (ip++ < limit) { opc = next_insn.opc; dst = next_insn.dst; val = next_insn.val; next_insn = *ip; redo: switch (opc) { case UNW_INSN_ADD: s[dst] += val; break; case UNW_INSN_MOVE2: if (!s[val]) goto lazy_init; s[dst+1] = s[val+1]; s[dst] = s[val]; break; case UNW_INSN_MOVE: if (!s[val]) goto lazy_init; s[dst] = s[val]; break; case UNW_INSN_MOVE_SCRATCH: if (state->pt) { s[dst] = (unsigned long) get_scratch_regs(state) + val; } else { s[dst] = 0; UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n", __func__, dst, val); } break; case UNW_INSN_MOVE_CONST: if (val == 0) s[dst] = (unsigned long) &unw.r0; else { s[dst] = 0; UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n", __func__, val); } break; case UNW_INSN_MOVE_STACKED: s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp, val); break; case UNW_INSN_ADD_PSP: s[dst] = state->psp + val; break; case UNW_INSN_ADD_SP: s[dst] = state->sp + val; break; case UNW_INSN_SETNAT_MEMSTK: if (!state->pri_unat_loc) state->pri_unat_loc = &state->sw->caller_unat; /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */ s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK; break; case UNW_INSN_SETNAT_TYPE: s[dst+1] = val; break; case UNW_INSN_LOAD: #ifdef UNW_DEBUG if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0 || s[val] < TASK_SIZE) { UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n", __func__, s[val]); break; } #endif s[dst] = *(unsigned long *) s[val]; break; } } STAT(unw.stat.script.run_time += ia64_get_itc() - start); return; lazy_init: off = unw.sw_off[val]; s[val] = (unsigned long) state->sw + off; if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7)) /* * We're initializing a general register: init NaT info, too. Note that * the offset is a multiple of 8 which gives us the 3 bits needed for * the type field. */ s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK; goto redo; } static int find_save_locs (struct unw_frame_info *info) { int have_write_lock = 0; struct unw_script *scr; unsigned long flags = 0; if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { /* don't let obviously bad addresses pollute the cache */ /* FIXME: should really be level 0 but it occurs too often. 
KAO */ UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip); info->rp_loc = NULL; return -1; } scr = script_lookup(info); if (!scr) { spin_lock_irqsave(&unw.lock, flags); scr = build_script(info); if (!scr) { spin_unlock_irqrestore(&unw.lock, flags); UNW_DPRINT(0, "unwind.%s: failed to locate/build unwind script for ip %lx\n", __func__, info->ip); return -1; } have_write_lock = 1; } info->hint = scr->hint; info->prev_script = scr - unw.cache; run_script(scr, info); if (have_write_lock) { write_unlock(&scr->lock); spin_unlock_irqrestore(&unw.lock, flags); } else read_unlock(&scr->lock); return 0; } static int unw_valid(const struct unw_frame_info *info, unsigned long* p) { unsigned long loc = (unsigned long)p; return (loc >= info->regstk.limit && loc < info->regstk.top) || (loc >= info->memstk.top && loc < info->memstk.limit); } int unw_unwind (struct unw_frame_info *info) { unsigned long prev_ip, prev_sp, prev_bsp; unsigned long ip, pr, num_regs; STAT(unsigned long start, flags;) int retval; STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc()); prev_ip = info->ip; prev_sp = info->sp; prev_bsp = info->bsp; /* validate the return IP pointer */ if (!unw_valid(info, info->rp_loc)) { /* FIXME: should really be level 0 but it occurs too often. KAO */ UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n", __func__, info->ip); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } /* restore the ip */ ip = info->ip = *info->rp_loc; if (ip < GATE_ADDR) { UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } /* validate the previous stack frame pointer */ if (!unw_valid(info, info->pfs_loc)) { UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } /* restore the cfm: */ info->cfm_loc = info->pfs_loc; /* restore the bsp: */ pr = info->pr; num_regs = 0; if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) { info->pt = info->sp + 16; if ((pr & (1UL << PRED_NON_SYSCALL)) != 0) num_regs = *info->cfm_loc & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt); } else num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", __func__, info->bsp, info->regstk.limit, info->regstk.top); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } /* restore the sp: */ info->sp = info->psp; if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n", __func__, info->sp, info->memstk.top, info->memstk.limit); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n", __func__, ip); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return -1; } /* as we unwind, the saved ar.unat becomes the primary unat: */ 
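/* (the NaT bits of general registers that were spilled to memory are found through it) */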
info->pri_unat_loc = info->unat_loc; /* finally, restore the predicates: */ unw_get_pr(info, &info->pr); retval = find_save_locs(info); STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); return retval; } EXPORT_SYMBOL(unw_unwind); int unw_unwind_to_user (struct unw_frame_info *info) { unsigned long ip, sp, pr = info->pr; do { unw_get_sp(info, &sp); if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) < IA64_PT_REGS_SIZE) { UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", __func__); break; } if (unw_is_intr_frame(info) && (pr & (1UL << PRED_USER_STACK))) return 0; if (unw_get_pr (info, &pr) < 0) { unw_get_rp(info, &ip); UNW_DPRINT(0, "unwind.%s: failed to read " "predicate register (ip=0x%lx)\n", __func__, ip); return -1; } } while (unw_unwind(info) >= 0); unw_get_ip(info, &ip); UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __func__, ip); return -1; } EXPORT_SYMBOL(unw_unwind_to_user); static void init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw, unsigned long stktop) { unsigned long rbslimit, rbstop, stklimit; STAT(unsigned long start, flags;) STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc()); /* * Subtle stuff here: we _could_ unwind through the switch_stack frame but we * don't want to do that because it would be slow as each preserved register would * have to be processed. Instead, what we do here is zero out the frame info and * start the unwind process at the function that created the switch_stack frame. * When a preserved value in switch_stack needs to be accessed, run_script() will * initialize the appropriate pointer on demand. */ memset(info, 0, sizeof(*info)); rbslimit = (unsigned long) t + IA64_RBS_OFFSET; stklimit = (unsigned long) t + IA64_STK_OFFSET; rbstop = sw->ar_bspstore; if (rbstop > stklimit || rbstop < rbslimit) rbstop = rbslimit; if (stktop <= rbstop) stktop = rbstop; if (stktop > stklimit) stktop = stklimit; info->regstk.limit = rbslimit; info->regstk.top = rbstop; info->memstk.limit = stklimit; info->memstk.top = stktop; info->task = t; info->sw = sw; info->sp = info->psp = stktop; info->pr = sw->pr; UNW_DPRINT(3, "unwind.%s:\n" " task 0x%lx\n" " rbs = [0x%lx-0x%lx)\n" " stk = [0x%lx-0x%lx)\n" " pr 0x%lx\n" " sw 0x%lx\n" " sp 0x%lx\n", __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, info->pr, (unsigned long) info->sw, info->sp); STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); } void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw) { unsigned long sol; init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16); info->cfm_loc = &sw->ar_pfs; sol = (*info->cfm_loc >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); info->ip = sw->b0; UNW_DPRINT(3, "unwind.%s:\n" " bsp 0x%lx\n" " sol 0x%lx\n" " ip 0x%lx\n", __func__, info->bsp, sol, info->ip); find_save_locs(info); } EXPORT_SYMBOL(unw_init_frame_info); void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t) { struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); UNW_DPRINT(1, "unwind.%s\n", __func__); unw_init_frame_info(info, t, sw); } EXPORT_SYMBOL(unw_init_from_blocked_task); static void init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base, unsigned long gp, const void *table_start, const void *table_end) { const struct unw_table_entry 
*start = table_start, *end = table_end; table->name = name; table->segment_base = segment_base; table->gp = gp; table->start = segment_base + start[0].start_offset; table->end = segment_base + end[-1].end_offset; table->array = start; table->length = end - start; } void * unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, const void *table_start, const void *table_end) { const struct unw_table_entry *start = table_start, *end = table_end; struct unw_table *table; unsigned long flags; if (end - start <= 0) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", __func__); return NULL; } table = kmalloc(sizeof(*table), GFP_USER); if (!table) return NULL; init_unwind_table(table, name, segment_base, gp, table_start, table_end); spin_lock_irqsave(&unw.lock, flags); { /* keep kernel unwind table at the front (it's searched most commonly): */ table->next = unw.tables->next; unw.tables->next = table; } spin_unlock_irqrestore(&unw.lock, flags); return table; } void unw_remove_unwind_table (void *handle) { struct unw_table *table, *prev; struct unw_script *tmp; unsigned long flags; long index; if (!handle) { UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", __func__); return; } table = handle; if (table == &unw.kernel_table) { UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " "no-can-do!\n", __func__); return; } spin_lock_irqsave(&unw.lock, flags); { /* first, delete the table: */ for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next) if (prev->next == table) break; if (!prev) { UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", __func__, (void *) table); spin_unlock_irqrestore(&unw.lock, flags); return; } prev->next = table->next; } spin_unlock_irqrestore(&unw.lock, flags); /* next, remove hash table entries for this table */ for (index = 0; index < UNW_HASH_SIZE; ++index) { tmp = unw.cache + unw.hash[index]; if (unw.hash[index] >= UNW_CACHE_SIZE || tmp->ip < table->start || tmp->ip >= table->end) continue; write_lock(&tmp->lock); { if (tmp->ip >= table->start && tmp->ip < table->end) { unw.hash[index] = tmp->coll_chain; tmp->ip = 0; } } write_unlock(&tmp->lock); } kfree(table); } static int __init create_gate_table (void) { const struct unw_table_entry *entry, *start, *end; unsigned long *lp, segbase = GATE_ADDR; size_t info_size, size; char *info; Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); int i; for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr) if (phdr->p_type == PT_IA_64_UNWIND) { punw = phdr; break; } if (!punw) { printk("%s: failed to find gate DSO's unwind table!\n", __func__); return 0; } start = (const struct unw_table_entry *) punw->p_vaddr; end = (struct unw_table_entry *) ((char *) start + punw->p_memsz); size = 0; unw_add_unwind_table("linux-gate.so", segbase, 0, start, end); for (entry = start; entry < end; ++entry) size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset)); size += 8; /* reserve space for "end of table" marker */ unw.gate_table = kmalloc(size, GFP_KERNEL); if (!unw.gate_table) { unw.gate_table_size = 0; printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__); return 0; } unw.gate_table_size = size; lp = unw.gate_table; info = (char *) unw.gate_table + size; for (entry = start; entry < end; ++entry, lp += 3) { info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset)); info -= info_size; memcpy(info, (char *) segbase + entry->info_offset, 
info_size); lp[0] = segbase + entry->start_offset; /* start */ lp[1] = segbase + entry->end_offset; /* end */ lp[2] = info - (char *) unw.gate_table; /* info */ } *lp = 0; /* end-of-table marker */ return 0; } __initcall(create_gate_table); void __init unw_init (void) { extern char __gp[]; extern void unw_hash_index_t_is_too_narrow (void); long i, off; if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE) unw_hash_index_t_is_too_narrow(); unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT); unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE); unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS); unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0); unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT); unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR); unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC); unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR); for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16) unw.sw_off[unw.preg_index[i]] = off; for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16) unw.sw_off[unw.preg_index[i]] = off; for (i = 0; i < UNW_CACHE_SIZE; ++i) { if (i > 0) unw.cache[i].lru_chain = (i - 1); unw.cache[i].coll_chain = -1; rwlock_init(&unw.cache[i].lock); } unw.lru_head = UNW_CACHE_SIZE - 1; unw.lru_tail = 0; init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp, __start_unwind, __end_unwind); } /* * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED * * This system call has been deprecated. The new and improved way to get * at the kernel's unwind info is via the gate DSO. The address of the * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR. * * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED * * This system call copies the unwind data into the buffer pointed to by BUF and returns * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data * or if BUF is NULL, nothing is copied, but the system call still returns the size of the * unwind data. * * The first portion of the unwind data contains an unwind table and rest contains the * associated unwind info (in no particular order). The unwind table consists of a table * of entries of the form: * * u64 start; (64-bit address of start of function) * u64 end; (64-bit address of start of function) * u64 info; (BUF-relative offset to unwind info) * * The end of the unwind table is indicated by an entry with a START address of zero. * * Please see the IA-64 Software Conventions and Runtime Architecture manual for details * on the format of the unwind info. * * ERRORS * EFAULT BUF points outside your accessible address space. */ asmlinkage long sys_getunwind (void __user *buf, size_t buf_size) { if (buf && buf_size >= unw.gate_table_size) if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0) return -EFAULT; return unw.gate_table_size; }
linux-master
arch/ia64/kernel/unwind.c
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific setup. * * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * Stephane Eranian <[email protected]> * Copyright (C) 2000, 2004 Intel Corp * Rohit Seth <[email protected]> * Suresh Siddha <[email protected]> * Gordon Jin <[email protected]> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <[email protected]> * * 12/26/04 S.Siddha, G.Jin, R.Seth * Add multi-threading and multi-core detection * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map * 03/31/00 R.Seth cpu_initialized and current->processor fixes * 02/04/00 D.Mosberger some more get_cpuinfo fixes... * 02/01/00 R.Seth fixed get_cpuinfo for SMP * 01/07/99 S.Eranian added the support for command line argument * 06/24/99 W.Drummond added boot_cpu_data. * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" */ #include <linux/module.h> #include <linux/init.h> #include <linux/pgtable.h> #include <linux/acpi.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/cpu.h> #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/reboot.h> #include <linux/sched/mm.h> #include <linux/sched/clock.h> #include <linux/sched/task_stack.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/threads.h> #include <linux/screen_info.h> #include <linux/dmi.h> #include <linux/root_dev.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/efi.h> #include <linux/initrd.h> #include <linux/pm.h> #include <linux/cpufreq.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <asm/mca.h> #include <asm/meminit.h> #include <asm/page.h> #include <asm/patch.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/uv/uv.h> #include <asm/xtp.h> #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE) # error "struct cpuinfo_ia64 too big!" #endif char ia64_platform_name[64]; #ifdef CONFIG_SMP unsigned long __per_cpu_offset[NR_CPUS]; EXPORT_SYMBOL(__per_cpu_offset); #endif DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); EXPORT_SYMBOL(ia64_cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); #ifdef CONFIG_SMP EXPORT_SYMBOL(local_per_cpu_offset); #endif unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; unsigned long vga_console_iobase; unsigned long vga_console_membase; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM }; unsigned long ia64_max_cacheline_size; unsigned long ia64_iobase; /* virtual address for I/O accesses */ EXPORT_SYMBOL(ia64_iobase); struct io_space io_space[MAX_IO_SPACES]; EXPORT_SYMBOL(io_space); unsigned int num_io_spaces; /* * "flush_icache_range()" needs to know what processor dependent stride size to use * when it makes i-cache(s) coherent with d-caches. 
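 * The stride variables below start out as ~0 ("not yet known"); they are
 * presumably narrowed to the real line size once the PAL cache information
 * is read during CPU setup, with the 32-byte defines serving as the
 * conservative fallback.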
*/ #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ unsigned long ia64_i_cache_stride_shift = ~0; /* * "clflush_cache_range()" needs to know what processor dependent stride size to * use when it flushes cache lines including both d-cache and i-cache. */ /* Safest way to go: 32 bytes by 32 bytes */ #define CACHE_STRIDE_SHIFT 5 unsigned long ia64_cache_stride_shift = ~0; /* * We use a special marker for the end of memory and it uses the extra (+1) slot */ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; static int num_rsvd_regions __initdata; /* * Filter incoming memory segments based on the primitive map created from the boot * parameters. Segments contained in the map are removed from the memory ranges. A * caller-specified function is called with the memory ranges that remain after filtering. * This routine does not assume the incoming segments are sorted. */ int __init filter_rsvd_memory (u64 start, u64 end, void *arg) { u64 range_start, range_end, prev_start; void (*func)(unsigned long, unsigned long, int); int i; #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif /* * lowest possible address(walker uses virtual) */ prev_start = PAGE_OFFSET; func = arg; for (i = 0; i < num_rsvd_regions; ++i) { range_start = max(start, prev_start); range_end = min(end, rsvd_region[i].start); if (range_start < range_end) call_pernode_memory(__pa(range_start), range_end - range_start, func); /* nothing more available in this segment */ if (range_end == end) return 0; prev_start = rsvd_region[i].end; } /* end of memory marker allows full processing inside loop body */ return 0; } /* * Similar to "filter_rsvd_memory()", but the reserved memory ranges * are not filtered out. */ int __init filter_memory(u64 start, u64 end, void *arg) { void (*func)(unsigned long, unsigned long, int); #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif func = arg; if (start < end) call_pernode_memory(__pa(start), end - start, func); return 0; } static void __init sort_regions (struct rsvd_region *rsvd_region, int max) { int j; /* simple bubble sorting */ while (max--) { for (j = 0; j < max; ++j) { if (rsvd_region[j].start > rsvd_region[j+1].start) { swap(rsvd_region[j], rsvd_region[j + 1]); } } } } /* merge overlaps */ static int __init merge_regions (struct rsvd_region *rsvd_region, int max) { int i; for (i = 1; i < max; ++i) { if (rsvd_region[i].start >= rsvd_region[i-1].end) continue; if (rsvd_region[i].end > rsvd_region[i-1].end) rsvd_region[i-1].end = rsvd_region[i].end; --max; memmove(&rsvd_region[i], &rsvd_region[i+1], (max - i) * sizeof(struct rsvd_region)); } return max; } /* * Request address space for all standard resources */ static int __init register_memory(void) { code_resource.start = ia64_tpa(_text); code_resource.end = ia64_tpa(_etext) - 1; data_resource.start = ia64_tpa(_etext); data_resource.end = ia64_tpa(_edata) - 1; bss_resource.start = ia64_tpa(__bss_start); bss_resource.end = ia64_tpa(_end) - 1; efi_initialize_iomem_resources(&code_resource, &data_resource, &bss_resource); return 0; } __initcall(register_memory); #ifdef CONFIG_KEXEC /* * This function checks if the reserved crashkernel is allowed on the specific * IA64 machine flavour. Machines without an IO TLB use swiotlb and require * some memory below 4 GB (i.e. 
in 32 bit area), see the implementation of * kernel/dma/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that * in kdump case. See the comment in sba_init() in sba_iommu.c. * * So, the only machvec that really supports loading the kdump kernel * over 4 GB is "uv". */ static int __init check_crashkernel_memory(unsigned long pbase, size_t size) { if (is_uv_system()) return 1; else return pbase < (1UL << 32); } static void __init setup_crashkernel(unsigned long total, int *n) { unsigned long long base = 0, size = 0; int ret; ret = parse_crashkernel(boot_command_line, total, &size, &base); if (ret == 0 && size > 0) { if (!base) { sort_regions(rsvd_region, *n); *n = merge_regions(rsvd_region, *n); base = kdump_find_rsvd_region(size, rsvd_region, *n); } if (!check_crashkernel_memory(base, size)) { pr_warn("crashkernel: There would be kdump memory " "at %ld GB but this is unusable because it " "must\nbe below 4 GB. Change the memory " "configuration of the machine.\n", (unsigned long)(base >> 30)); return; } if (base != ~0UL) { printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(size >> 20), (unsigned long)(base >> 20), (unsigned long)(total >> 20)); rsvd_region[*n].start = (unsigned long)__va(base); rsvd_region[*n].end = (unsigned long)__va(base + size); (*n)++; crashk_res.start = base; crashk_res.end = base + size - 1; } } efi_memmap_res.start = ia64_boot_param->efi_memmap; efi_memmap_res.end = efi_memmap_res.start + ia64_boot_param->efi_memmap_size; boot_param_res.start = __pa(ia64_boot_param); boot_param_res.end = boot_param_res.start + sizeof(*ia64_boot_param); } #else static inline void __init setup_crashkernel(unsigned long total, int *n) {} #endif #ifdef CONFIG_CRASH_DUMP static int __init reserve_elfcorehdr(u64 *start, u64 *end) { u64 length; /* We get the address using the kernel command line, * but the size is extracted from the EFI tables. * Both address and size are required for reservation * to work properly. */ if (!is_vmcore_usable()) return -EINVAL; if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) { vmcore_unusable(); return -EINVAL; } *start = (unsigned long)__va(elfcorehdr_addr); *end = *start + length; return 0; } #endif /* CONFIG_CRASH_DUMP */ /** * reserve_memory - setup reserved memory areas * * Setup the reserved memory areas set aside for the boot parameters, * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, * see arch/ia64/include/asm/meminit.h if you need to define more. 
*/ void __init reserve_memory (void) { int n = 0; unsigned long total_memory; /* * none of the entries in this table overlap */ rsvd_region[n].start = (unsigned long) ia64_boot_param; rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); rsvd_region[n].end = (rsvd_region[n].start + strlen(__va(ia64_boot_param->command_line)) + 1); n++; rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); rsvd_region[n].end = (unsigned long) ia64_imva(_end); n++; #ifdef CONFIG_BLK_DEV_INITRD if (ia64_boot_param->initrd_start) { rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; n++; } #endif #ifdef CONFIG_CRASH_DUMP if (reserve_elfcorehdr(&rsvd_region[n].start, &rsvd_region[n].end) == 0) n++; #endif total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n++; setup_crashkernel(total_memory, &n); /* end of memory marker */ rsvd_region[n].start = ~0UL; rsvd_region[n].end = ~0UL; n++; num_rsvd_regions = n; BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); sort_regions(rsvd_region, num_rsvd_regions); num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions); /* reserve all regions except the end of memory marker with memblock */ for (n = 0; n < num_rsvd_regions - 1; n++) { struct rsvd_region *region = &rsvd_region[n]; phys_addr_t addr = __pa(region->start); phys_addr_t size = region->end - region->start; memblock_reserve(addr, size); } } /** * find_initrd - get initrd parameters from the boot parameter structure * * Grab the initrd start and end from the boot parameter struct given us by * the boot loader. */ void __init find_initrd (void) { #ifdef CONFIG_BLK_DEV_INITRD if (ia64_boot_param->initrd_start) { initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start); initrd_end = initrd_start+ia64_boot_param->initrd_size; printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n", initrd_start, ia64_boot_param->initrd_size); } #endif } static void __init io_port_init (void) { unsigned long phys_iobase; /* * Set `iobase' based on the EFI memory map or, failing that, the * value firmware left in ar.k0. * * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute * the port's virtual address, so ia32_load_state() loads it with a * user virtual address. But in ia64 mode, glibc uses the * *physical* address in ar.k0 to mmap the appropriate area from * /dev/mem, and the inX()/outX() interfaces use MMIO. In both * cases, user-mode can only use the legacy 0-64K I/O port space. * * ar.k0 is not involved in kernel I/O port accesses, which can use * any of the I/O port spaces and are done via MMIO using the * virtual mmio_base from the appropriate io_space[]. 
*/ phys_iobase = efi_get_iobase(); if (!phys_iobase) { phys_iobase = ia64_get_kr(IA64_KR_IO_BASE); printk(KERN_INFO "No I/O port range found in EFI memory map, " "falling back to AR.KR0 (0x%lx)\n", phys_iobase); } ia64_iobase = (unsigned long) ioremap(phys_iobase, 0); ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); /* setup legacy IO port space */ io_space[0].mmio_base = ia64_iobase; io_space[0].sparse = 1; num_io_spaces = 1; } /** * early_console_setup - setup debugging console * * Consoles started here require little enough setup that we can start using * them very early in the boot process, either right after the machine * vector initialization, or even before if the drivers can detect their hw. * * Returns non-zero if a console couldn't be setup. */ static inline int __init early_console_setup (char *cmdline) { #ifdef CONFIG_EFI_PCDP if (!efi_setup_pcdp_console(cmdline)) return 0; #endif return -1; } static void __init screen_info_setup(void) { unsigned int orig_x, orig_y, num_cols, num_rows, font_height; memset(&screen_info, 0, sizeof(screen_info)); if (!ia64_boot_param->console_info.num_rows || !ia64_boot_param->console_info.num_cols) { printk(KERN_WARNING "invalid screen-info, guessing 80x25\n"); orig_x = 0; orig_y = 0; num_cols = 80; num_rows = 25; font_height = 16; } else { orig_x = ia64_boot_param->console_info.orig_x; orig_y = ia64_boot_param->console_info.orig_y; num_cols = ia64_boot_param->console_info.num_cols; num_rows = ia64_boot_param->console_info.num_rows; font_height = 400 / num_rows; } screen_info.orig_x = orig_x; screen_info.orig_y = orig_y; screen_info.orig_video_cols = num_cols; screen_info.orig_video_lines = num_rows; screen_info.orig_video_points = font_height; screen_info.orig_video_mode = 3; /* XXX fake */ screen_info.orig_video_isVGA = 1; /* XXX fake */ screen_info.orig_video_ega_bx = 3; /* XXX fake */ } static inline void mark_bsp_online (void) { #ifdef CONFIG_SMP /* If we register an early console, allow CPU 0 to printk */ set_cpu_online(smp_processor_id(), true); #endif } static __initdata int nomca; static __init int setup_nomca(char *s) { nomca = 1; return 0; } early_param("nomca", setup_nomca); void __init setup_arch (char **cmdline_p) { unw_init(); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); *cmdline_p = __va(ia64_boot_param->command_line); strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); efi_init(); io_port_init(); uv_probe_system_type(); parse_early_param(); if (early_console_setup(*cmdline_p) == 0) mark_bsp_online(); /* Initialize the ACPI boot-time table parser */ acpi_table_init(); early_acpi_boot_init(); #ifdef CONFIG_ACPI_NUMA acpi_numa_init(); acpi_numa_fixup(); #ifdef CONFIG_ACPI_HOTPLUG_CPU prefill_possible_map(); #endif per_cpu_scan_finalize((cpumask_empty(&early_cpu_possible_map) ? 32 : cpumask_weight(&early_cpu_possible_map)), additional_cpus > 0 ? 
additional_cpus : 0); #endif /* CONFIG_ACPI_NUMA */ #ifdef CONFIG_SMP smp_build_cpu_map(); #endif find_memory(); /* process SAL system table: */ ia64_sal_init(__va(sal_systab_phys)); #ifdef CONFIG_ITANIUM ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); #else { unsigned long num_phys_stacked; if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); } #endif #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif cpu_init(); /* initialize the bootstrap CPU */ mmu_context_init(); /* initialize context_id bitmap */ #ifdef CONFIG_VT if (!conswitchp) { # if defined(CONFIG_VGA_CONSOLE) /* * Non-legacy systems may route legacy VGA MMIO range to system * memory. vga_con probes the MMIO hole, so memory looks like * a VGA device to it. The EFI memory map can tell us if it's * memory so we can avoid this problem. */ if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) conswitchp = &vga_con; # endif } #endif /* enable IA-64 Machine Check Abort Handling unless disabled */ if (!nomca) ia64_mca_init(); /* * Default to /dev/sda2. This assumes that the EFI partition * is physical disk 1 partition 1 and the Linux root disk is * physical disk 1 partition 2. */ ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); if (is_uv_system()) uv_setup(cmdline_p); #ifdef CONFIG_SMP else init_smp_config(); #endif screen_info_setup(); paging_init(); clear_sched_clock_stable(); } /* * Display cpu info for all CPUs. */ static int show_cpuinfo (struct seq_file *m, void *v) { #ifdef CONFIG_SMP # define lpj c->loops_per_jiffy # define cpunum c->cpu #else # define lpj loops_per_jiffy # define cpunum 0 #endif static struct { unsigned long mask; const char *feature_name; } feature_bits[] = { { 1UL << 0, "branchlong" }, { 1UL << 1, "spontaneous deferral"}, { 1UL << 2, "16-byte atomic ops" } }; char features[128], *cp, *sep; struct cpuinfo_ia64 *c = v; unsigned long mask; unsigned long proc_freq; int i, size; mask = c->features; /* build the feature string: */ memcpy(features, "standard", 9); cp = features; size = sizeof(features); sep = ""; for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) { if (mask & feature_bits[i].mask) { cp += snprintf(cp, size, "%s%s", sep, feature_bits[i].feature_name), sep = ", "; mask &= ~feature_bits[i].mask; size = sizeof(features) - (cp - features); } } if (mask && size > 1) { /* print unknown features as a hex value */ snprintf(cp, size, "%s0x%lx", sep, mask); } proc_freq = cpufreq_quick_get(cpunum); if (!proc_freq) proc_freq = c->proc_freq / 1000; seq_printf(m, "processor : %d\n" "vendor : %s\n" "arch : IA-64\n" "family : %u\n" "model : %u\n" "model name : %s\n" "revision : %u\n" "archrev : %u\n" "features : %s\n" "cpu number : %lu\n" "cpu regs : %u\n" "cpu MHz : %lu.%03lu\n" "itc MHz : %lu.%06lu\n" "BogoMIPS : %lu.%02lu\n", cpunum, c->vendor, c->family, c->model, c->model_name, c->revision, c->archrev, features, c->ppn, c->number, proc_freq / 1000, proc_freq % 1000, c->itc_freq / 1000000, c->itc_freq % 1000000, lpj*HZ/500000, (lpj*HZ/5000) % 100); #ifdef CONFIG_SMP seq_printf(m, "siblings : %u\n", cpumask_weight(&cpu_core_map[cpunum])); if (c->socket_id != -1) seq_printf(m, "physical id: %u\n", c->socket_id); if (c->threads_per_core > 1 || c->cores_per_socket > 1) seq_printf(m, "core id : %u\n" "thread id : %u\n", c->core_id, c->thread_id); #endif seq_printf(m,"\n"); return 0; } static void * c_start (struct seq_file *m, loff_t *pos) { #ifdef CONFIG_SMP while (*pos < 
nr_cpu_ids && !cpu_online(*pos)) ++*pos; #endif return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; } static void * c_next (struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop (struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo }; #define MAX_BRANDS 8 static char brandname[MAX_BRANDS][128]; static char * get_model_name(__u8 family, __u8 model) { static int overflow; char brand[128]; int i; memcpy(brand, "Unknown", 8); if (ia64_pal_get_brand_info(brand)) { if (family == 0x7) memcpy(brand, "Merced", 7); else if (family == 0x1f) switch (model) { case 0: memcpy(brand, "McKinley", 9); break; case 1: memcpy(brand, "Madison", 8); break; case 2: memcpy(brand, "Madison up to 9M cache", 23); break; } } for (i = 0; i < MAX_BRANDS; i++) if (strcmp(brandname[i], brand) == 0) return brandname[i]; for (i = 0; i < MAX_BRANDS; i++) if (brandname[i][0] == '\0') return strcpy(brandname[i], brand); if (overflow++ == 0) printk(KERN_ERR "%s: Table overflow. Some processor model information will be missing\n", __func__); return "Unknown"; } static void identify_cpu (struct cpuinfo_ia64 *c) { union { unsigned long bits[5]; struct { /* id 0 & 1: */ char vendor[16]; /* id 2 */ u64 ppn; /* processor serial number */ /* id 3: */ unsigned number : 8; unsigned revision : 8; unsigned model : 8; unsigned family : 8; unsigned archrev : 8; unsigned reserved : 24; /* id 4: */ u64 features; } field; } cpuid; pal_vm_info_1_u_t vm1; pal_vm_info_2_u_t vm2; pal_status_t status; unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ int i; for (i = 0; i < 5; ++i) cpuid.bits[i] = ia64_get_cpuid(i); memcpy(c->vendor, cpuid.field.vendor, 16); #ifdef CONFIG_SMP c->cpu = smp_processor_id(); /* below default values will be overwritten by identify_siblings() * for Multi-Threading/Multi-Core capable CPUs */ c->threads_per_core = c->cores_per_socket = c->num_log = 1; c->socket_id = -1; identify_siblings(c); if (c->threads_per_core > smp_num_siblings) smp_num_siblings = c->threads_per_core; #endif c->ppn = cpuid.field.ppn; c->number = cpuid.field.number; c->revision = cpuid.field.revision; c->model = cpuid.field.model; c->family = cpuid.field.family; c->archrev = cpuid.field.archrev; c->features = cpuid.field.features; c->model_name = get_model_name(c->family, c->model); status = ia64_pal_vm_summary(&vm1, &vm2); if (status == PAL_STATUS_SUCCESS) { impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; } c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); } /* * Do the following calculations: * * 1. the max. cache line size. * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". * 3. the minimum of the cache stride sizes for "clflush_cache_range()". 
*/ static void get_cache_info(void) { unsigned long line_size, max = 1; unsigned long l, levels, unique_caches; pal_cache_config_info_t cci; long status; status = ia64_pal_cache_summary(&levels, &unique_caches); if (status != 0) { printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", __func__, status); max = SMP_CACHE_BYTES; /* Safest setup for "flush_icache_range()" */ ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; /* Safest setup for "clflush_cache_range()" */ ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; goto out; } for (l = 0; l < levels; ++l) { /* cache_type (data_or_unified)=2 */ status = ia64_pal_cache_config_info(l, 2, &cci); if (status != 0) { printk(KERN_ERR "%s: ia64_pal_cache_config_info" "(l=%lu, 2) failed (status=%ld)\n", __func__, l, status); max = SMP_CACHE_BYTES; /* The safest setup for "flush_icache_range()" */ cci.pcci_stride = I_CACHE_STRIDE_SHIFT; /* The safest setup for "clflush_cache_range()" */ ia64_cache_stride_shift = CACHE_STRIDE_SHIFT; cci.pcci_unified = 1; } else { if (cci.pcci_stride < ia64_cache_stride_shift) ia64_cache_stride_shift = cci.pcci_stride; line_size = 1 << cci.pcci_line_size; if (line_size > max) max = line_size; } if (!cci.pcci_unified) { /* cache_type (instruction)=1*/ status = ia64_pal_cache_config_info(l, 1, &cci); if (status != 0) { printk(KERN_ERR "%s: ia64_pal_cache_config_info" "(l=%lu, 1) failed (status=%ld)\n", __func__, l, status); /* The safest setup for flush_icache_range() */ cci.pcci_stride = I_CACHE_STRIDE_SHIFT; } } if (cci.pcci_stride < ia64_i_cache_stride_shift) ia64_i_cache_stride_shift = cci.pcci_stride; } out: if (max > ia64_max_cacheline_size) ia64_max_cacheline_size = max; } /* * cpu_init() initializes state that is per-CPU. This function acts * as a 'CPU state barrier', nothing should get across. */ void cpu_init (void) { extern void ia64_mmu_init(void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; struct cpuinfo_ia64 *cpu_info; void *cpu_data; cpu_data = per_cpu_init(); #ifdef CONFIG_SMP /* * insert boot cpu into sibling and core mapes * (must be done after per_cpu area is setup) */ if (smp_processor_id() == 0) { cpumask_set_cpu(0, &per_cpu(cpu_sibling_map, 0)); cpumask_set_cpu(0, &cpu_core_map[0]); } else { /* * Set ar.k3 so that assembly code in MCA handler can compute * physical addresses of per cpu variables with a simple: * phys = ar.k3 + &per_cpu_var * and the alt-dtlb-miss handler can set per-cpu mapping into * the TLB when needed. head.S already did this for cpu0. */ ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_tpa(cpu_data) - (long) __per_cpu_start); } #endif get_cache_info(); /* * We can't pass "local_cpu_data" to identify_cpu() because we haven't called * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it * depends on the data returned by identify_cpu(). We break the dependency by * accessing cpu_data() through the canonical per-CPU address. 
*/ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); identify_cpu(cpu_info); #ifdef CONFIG_MCKINLEY { # define FEATURE_SET 16 struct ia64_pal_retval iprv; if (cpu_info->family == 0x1f) { PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, (iprv.v1 | 0x80), FEATURE_SET, 0); } } #endif /* Clear the stack memory reserved for pt_regs: */ memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); ia64_set_kr(IA64_KR_FPU_OWNER, 0); /* * Initialize the page-table base register to a global * directory with all zeroes. This ensure that we can handle * TLB-misses to user address-space even before we created the * first user address-space. This may happen, e.g., due to * aggressive use of lfetch.fault. */ ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); /* * Initialize default control register to defer speculative faults except * for those arising from TLB misses, which are not deferred. The * kernel MUST NOT depend on a particular setting of these bits (in other words, * the kernel must have recovery code for all speculative accesses). Turn on * dcr.lc as per recommendation by the architecture team. Most IA-32 apps * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). */ ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); mmgrab(&init_mm); current->active_mm = &init_mm; BUG_ON(current->mm); ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); /* Clear ITC to eliminate sched_clock() overflows in human time. */ ia64_set_itc(0); /* disable all local interrupt sources: */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* clear TPR & XTP to enable all interrupt classes: */ ia64_setreg(_IA64_REG_CR_TPR, 0); /* Clear any pending interrupts left by SAL/EFI */ while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); #ifdef CONFIG_SMP normal_xtp(); #endif /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ if (ia64_pal_vm_summary(NULL, &vmi) == 0) { max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); } else { printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); max_ctx = (1U << 15) - 1; /* use architected minimum */ } while (max_ctx < ia64_ctx.max_ctx) { unsigned int old = ia64_ctx.max_ctx; if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) break; } if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " "stacked regs\n"); num_phys_stacked = 96; } /* size of physical stacked register partition plus 8 bytes: */ if (num_phys_stacked > max_num_phys_stacked) { ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); max_num_phys_stacked = num_phys_stacked; } } void __init arch_cpu_finalize_init(void) { ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); } static int __init run_dmi_scan(void) { dmi_setup(); return 0; } core_initcall(run_dmi_scan);
linux-master
arch/ia64/kernel/setup.c
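The reserved-region bookkeeping in reserve_memory() and setup_crashkernel() above leans on sort_regions() and merge_regions(). Below is a minimal standalone sketch of that sort-then-merge idea; the struct range type and function names are invented for illustration, and the merge loop deliberately re-checks the slot that received the moved element (a small deviation from the kernel loop) so the example fully collapses chains of overlapping ranges.

#include <string.h>

/* hypothetical stand-in for struct rsvd_region, illustration only */
struct range {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

/* bubble-sort by start address, mirroring sort_regions() above */
static void sort_ranges(struct range *r, int max)
{
	int j;

	while (max--)
		for (j = 0; j < max; ++j)
			if (r[j].start > r[j + 1].start) {
				struct range tmp = r[j];

				r[j] = r[j + 1];
				r[j + 1] = tmp;
			}
}

/* coalesce overlapping neighbours of a sorted array, return new count */
static int merge_ranges(struct range *r, int max)
{
	int i;

	for (i = 1; i < max; ++i) {
		if (r[i].start >= r[i - 1].end)
			continue;			/* disjoint */
		if (r[i].end > r[i - 1].end)
			r[i - 1].end = r[i].end;	/* grow predecessor */
		--max;
		memmove(&r[i], &r[i + 1], (max - i) * sizeof(*r));
		--i;					/* re-examine this slot */
	}
	return max;
}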
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific unaligned trap handling. * * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co * Stephane Eranian <[email protected]> * David Mosberger-Tang <[email protected]> * * 2002/12/09 Fix rotating register handling (off-by-1 error, missing fr-rotation). Fix * get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame * stacked register returns an undefined value; it does NOT trigger a * "rsvd register fault"). * 2001/10/11 Fix unaligned access to rotating registers in s/w pipelined loops. * 2001/08/13 Correct size of extended floats (float_fsz) from 16 to 10 bytes. * 2001/01/17 Add support emulation of unaligned kernel accesses. */ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/extable.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> #include <asm/intrinsics.h> #include <asm/processor.h> #include <asm/rse.h> #include <asm/exception.h> #include <asm/unaligned.h> extern int die_if_kernel(char *str, struct pt_regs *regs, long err); #undef DEBUG_UNALIGNED_TRAP #ifdef DEBUG_UNALIGNED_TRAP # define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0) # define DDUMP(str,vp,len) dump(str, vp, len) static void dump (const char *str, void *vp, size_t len) { unsigned char *cp = vp; int i; printk("%s", str); for (i = 0; i < len; ++i) printk (" %02x", *cp++); printk("\n"); } #else # define DPRINT(a...) # define DDUMP(str,vp,len) #endif #define IA64_FIRST_STACKED_GR 32 #define IA64_FIRST_ROTATING_FR 32 #define SIGN_EXT9 0xffffffffffffff00ul /* * sysctl settable hook which tells the kernel whether to honor the * IA64_THREAD_UAC_NOPRINT prctl. Because this is user settable, we want * to allow the super user to enable/disable this for security reasons * (i.e. don't allow attacker to fill up logs with unaligned accesses). */ int no_unaligned_warning; int unaligned_dump_stack; /* * For M-unit: * * opcode | m | x6 | * --------|------|---------| * [40-37] | [36] | [35:30] | * --------|------|---------| * 4 | 1 | 6 | = 11 bits * -------------------------- * However bits [31:30] are not directly useful to distinguish between * load/store so we can use [35:32] instead, which gives the following * mask ([40:32]) using 9 bits. The 'e' comes from the fact that we defer * checking the m-bit until later in the load/store emulation. */ #define IA64_OPCODE_MASK 0x1ef #define IA64_OPCODE_SHIFT 32 /* * Table C-28 Integer Load/Store * * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF * * ld8.fill, st8.fill MUST be aligned because the RNATs are based on * the address (bits [8:3]), so we must failed. */ #define LD_OP 0x080 #define LDS_OP 0x081 #define LDA_OP 0x082 #define LDSA_OP 0x083 #define LDBIAS_OP 0x084 #define LDACQ_OP 0x085 /* 0x086, 0x087 are not relevant */ #define LDCCLR_OP 0x088 #define LDCNC_OP 0x089 #define LDCCLRACQ_OP 0x08a #define ST_OP 0x08c #define STREL_OP 0x08d /* 0x08e,0x8f are not relevant */ /* * Table C-29 Integer Load +Reg * * we use the ld->m (bit [36:36]) field to determine whether or not we have * a load/store of this form. */ /* * Table C-30 Integer Load/Store +Imm * * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF * * ld8.fill, st8.fill must be aligned because the Nat register are based on * the address, so we must fail and the program must be fixed. 
*/ #define LD_IMM_OP 0x0a0 #define LDS_IMM_OP 0x0a1 #define LDA_IMM_OP 0x0a2 #define LDSA_IMM_OP 0x0a3 #define LDBIAS_IMM_OP 0x0a4 #define LDACQ_IMM_OP 0x0a5 /* 0x0a6, 0xa7 are not relevant */ #define LDCCLR_IMM_OP 0x0a8 #define LDCNC_IMM_OP 0x0a9 #define LDCCLRACQ_IMM_OP 0x0aa #define ST_IMM_OP 0x0ac #define STREL_IMM_OP 0x0ad /* 0x0ae,0xaf are not relevant */ /* * Table C-32 Floating-point Load/Store */ #define LDF_OP 0x0c0 #define LDFS_OP 0x0c1 #define LDFA_OP 0x0c2 #define LDFSA_OP 0x0c3 /* 0x0c6 is irrelevant */ #define LDFCCLR_OP 0x0c8 #define LDFCNC_OP 0x0c9 /* 0x0cb is irrelevant */ #define STF_OP 0x0cc /* * Table C-33 Floating-point Load +Reg * * we use the ld->m (bit [36:36]) field to determine whether or not we have * a load/store of this form. */ /* * Table C-34 Floating-point Load/Store +Imm */ #define LDF_IMM_OP 0x0e0 #define LDFS_IMM_OP 0x0e1 #define LDFA_IMM_OP 0x0e2 #define LDFSA_IMM_OP 0x0e3 /* 0x0e6 is irrelevant */ #define LDFCCLR_IMM_OP 0x0e8 #define LDFCNC_IMM_OP 0x0e9 #define STF_IMM_OP 0x0ec typedef struct { unsigned long qp:6; /* [0:5] */ unsigned long r1:7; /* [6:12] */ unsigned long imm:7; /* [13:19] */ unsigned long r3:7; /* [20:26] */ unsigned long x:1; /* [27:27] */ unsigned long hint:2; /* [28:29] */ unsigned long x6_sz:2; /* [30:31] */ unsigned long x6_op:4; /* [32:35], x6 = x6_sz|x6_op */ unsigned long m:1; /* [36:36] */ unsigned long op:4; /* [37:40] */ unsigned long pad:23; /* [41:63] */ } load_store_t; typedef enum { UPD_IMMEDIATE, /* ldXZ r1=[r3],imm(9) */ UPD_REG /* ldXZ r1=[r3],r2 */ } update_t; /* * We use tables to keep track of the offsets of registers in the saved state. * This way we save having big switch/case statements. * * We use bit 0 to indicate switch_stack or pt_regs. * The offset is simply shifted by 1 bit. * A 2-byte value should be enough to hold any kind of offset * * In case the calling convention changes (and thus pt_regs/switch_stack) * simply use RSW instead of RPT or vice-versa. */ #define RPO(x) ((size_t) &((struct pt_regs *)0)->x) #define RSO(x) ((size_t) &((struct switch_stack *)0)->x) #define RPT(x) (RPO(x) << 1) #define RSW(x) (1| RSO(x)<<1) #define GR_OFFS(x) (gr_info[x]>>1) #define GR_IN_SW(x) (gr_info[x] & 0x1) #define FR_OFFS(x) (fr_info[x]>>1) #define FR_IN_SW(x) (fr_info[x] & 0x1) static u16 gr_info[32]={ 0, /* r0 is read-only : WE SHOULD NEVER GET THIS */ RPT(r1), RPT(r2), RPT(r3), RSW(r4), RSW(r5), RSW(r6), RSW(r7), RPT(r8), RPT(r9), RPT(r10), RPT(r11), RPT(r12), RPT(r13), RPT(r14), RPT(r15), RPT(r16), RPT(r17), RPT(r18), RPT(r19), RPT(r20), RPT(r21), RPT(r22), RPT(r23), RPT(r24), RPT(r25), RPT(r26), RPT(r27), RPT(r28), RPT(r29), RPT(r30), RPT(r31) }; static u16 fr_info[32]={ 0, /* constant : WE SHOULD NEVER GET THIS */ 0, /* constant : WE SHOULD NEVER GET THIS */ RSW(f2), RSW(f3), RSW(f4), RSW(f5), RPT(f6), RPT(f7), RPT(f8), RPT(f9), RPT(f10), RPT(f11), RSW(f12), RSW(f13), RSW(f14), RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19), RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24), RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29), RSW(f30), RSW(f31) }; /* Invalidate ALAT entry for integer register REGNO. 
*/ static void invala_gr (int regno) { # define F(reg) case reg: ia64_invala_gr(reg); break switch (regno) { F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15); F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23); F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31); F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39); F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47); F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55); F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63); F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71); F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79); F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87); F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95); F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103); F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111); F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119); F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127); } # undef F } /* Invalidate ALAT entry for floating-point register REGNO. */ static void invala_fr (int regno) { # define F(reg) case reg: ia64_invala_fr(reg); break switch (regno) { F( 0); F( 1); F( 2); F( 3); F( 4); F( 5); F( 6); F( 7); F( 8); F( 9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15); F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23); F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31); F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39); F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47); F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55); F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63); F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71); F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79); F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87); F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95); F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103); F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111); F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119); F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127); } # undef F } static inline unsigned long rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg) { reg += rrb; if (reg >= sor) reg -= sor; return reg; } static void set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat) { struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end; unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; unsigned long rnats, nat_mask; unsigned long on_kbs; long sof = (regs->cr_ifs) & 0x7f; long sor = 8 * ((regs->cr_ifs >> 14) & 0xf); long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; long ridx = r1 - 32; if (ridx >= sof) { /* this should never happen, as the "rsvd register fault" has higher priority */ DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof); return; } if (ridx < sor) ridx = rotate_reg(sor, rrb_gr, ridx); DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n", r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx); on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore); addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx); if (addr >= kbs) { /* the register is on the kernel 
backing store: easy... */ rnat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) rnat_addr >= sw->ar_bspstore) rnat_addr = &sw->ar_rnat; nat_mask = 1UL << ia64_rse_slot_num(addr); *addr = val; if (nat) *rnat_addr |= nat_mask; else *rnat_addr &= ~nat_mask; return; } if (!user_stack(current, regs)) { DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1); return; } bspstore = (unsigned long *)regs->ar_bspstore; ubs_end = ia64_rse_skip_regs(bspstore, on_kbs); bsp = ia64_rse_skip_regs(ubs_end, -sof); addr = ia64_rse_skip_regs(bsp, ridx); DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr); ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val); rnat_addr = ia64_rse_rnat_addr(addr); ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats); DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n", (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1); nat_mask = 1UL << ia64_rse_slot_num(addr); if (nat) rnats |= nat_mask; else rnats &= ~nat_mask; ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats); DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats); } static void get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat) { struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore; unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; unsigned long rnats, nat_mask; unsigned long on_kbs; long sof = (regs->cr_ifs) & 0x7f; long sor = 8 * ((regs->cr_ifs >> 14) & 0xf); long rrb_gr = (regs->cr_ifs >> 18) & 0x7f; long ridx = r1 - 32; if (ridx >= sof) { /* read of out-of-frame register returns an undefined value; 0 in our case. */ DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof); goto fail; } if (ridx < sor) ridx = rotate_reg(sor, rrb_gr, ridx); DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n", r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx); on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore); addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx); if (addr >= kbs) { /* the register is on the kernel backing store: easy... 
*/ *val = *addr; if (nat) { rnat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) rnat_addr >= sw->ar_bspstore) rnat_addr = &sw->ar_rnat; nat_mask = 1UL << ia64_rse_slot_num(addr); *nat = (*rnat_addr & nat_mask) != 0; } return; } if (!user_stack(current, regs)) { DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1); goto fail; } bspstore = (unsigned long *)regs->ar_bspstore; ubs_end = ia64_rse_skip_regs(bspstore, on_kbs); bsp = ia64_rse_skip_regs(ubs_end, -sof); addr = ia64_rse_skip_regs(bsp, ridx); DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr); ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val); if (nat) { rnat_addr = ia64_rse_rnat_addr(addr); nat_mask = 1UL << ia64_rse_slot_num(addr); DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats); ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats); *nat = (rnats & nat_mask) != 0; } return; fail: *val = 0; if (nat) *nat = 0; return; } static void setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long addr; unsigned long bitmask; unsigned long *unat; /* * First takes care of stacked registers */ if (regnum >= IA64_FIRST_STACKED_GR) { set_rse_reg(regs, regnum, val, nat); return; } /* * Using r0 as a target raises a General Exception fault which has higher priority * than the Unaligned Reference fault. */ /* * Now look at registers in [0-31] range and init correct UNAT */ if (GR_IN_SW(regnum)) { addr = (unsigned long)sw; unat = &sw->ar_unat; } else { addr = (unsigned long)regs; unat = &sw->caller_unat; } DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n", addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum)); /* * add offset from base of struct * and do it ! */ addr += GR_OFFS(regnum); *(unsigned long *)addr = val; /* * We need to clear the corresponding UNAT bit to fully emulate the load * UNAT bit_pos = GR[r3]{8:3} form EAS-2.4 */ bitmask = 1UL << (addr >> 3 & 0x3f); DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat); if (nat) { *unat |= bitmask; } else { *unat &= ~bitmask; } DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat,*unat); } /* * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the * range from 32-127, result is in the range from 0-95. */ static inline unsigned long fph_index (struct pt_regs *regs, long regnum) { unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f; return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR)); } static void setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; unsigned long addr; /* * From EAS-2.5: FPDisableFault has higher priority than Unaligned * Fault. Thus, when we get here, we know the partition is enabled. * To update f32-f127, there are three choices: * * (1) save f32-f127 to thread.fph and update the values there * (2) use a gigantic switch statement to directly access the registers * (3) generate code on the fly to update the desired register * * For now, we are using approach (1). */ if (regnum >= IA64_FIRST_ROTATING_FR) { ia64_sync_fph(current); current->thread.fph[fph_index(regs, regnum)] = *fpval; } else { /* * pt_regs or switch_stack ? 
*/ if (FR_IN_SW(regnum)) { addr = (unsigned long)sw; } else { addr = (unsigned long)regs; } DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum)); addr += FR_OFFS(regnum); *(struct ia64_fpreg *)addr = *fpval; /* * mark the low partition as being used now * * It is highly unlikely that this bit is not already set, but * let's do it for safety. */ regs->cr_ipsr |= IA64_PSR_MFL; } } /* * Those 2 inline functions generate the spilled versions of the constant floating point * registers which can be used with stfX */ static inline void float_spill_f0 (struct ia64_fpreg *final) { ia64_stf_spill(final, 0); } static inline void float_spill_f1 (struct ia64_fpreg *final) { ia64_stf_spill(final, 1); } static void getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long addr; /* * From EAS-2.5: FPDisableFault has higher priority than * Unaligned Fault. Thus, when we get here, we know the partition is * enabled. * * When regnum > 31, the register is still live and we need to force a save * to current->thread.fph to get access to it. See discussion in setfpreg() * for reasons and other ways of doing this. */ if (regnum >= IA64_FIRST_ROTATING_FR) { ia64_flush_fph(current); *fpval = current->thread.fph[fph_index(regs, regnum)]; } else { /* * f0 = 0.0, f1= 1.0. Those registers are constant and are thus * not saved, we must generate their spilled form on the fly */ switch(regnum) { case 0: float_spill_f0(fpval); break; case 1: float_spill_f1(fpval); break; default: /* * pt_regs or switch_stack ? */ addr = FR_IN_SW(regnum) ? (unsigned long)sw : (unsigned long)regs; DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n", FR_IN_SW(regnum), addr, FR_OFFS(regnum)); addr += FR_OFFS(regnum); *fpval = *(struct ia64_fpreg *)addr; } } } static void getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *) regs - 1; unsigned long addr, *unat; if (regnum >= IA64_FIRST_STACKED_GR) { get_rse_reg(regs, regnum, val, nat); return; } /* * take care of r0 (read-only always evaluate to 0) */ if (regnum == 0) { *val = 0; if (nat) *nat = 0; return; } /* * Now look at registers in [0-31] range and init correct UNAT */ if (GR_IN_SW(regnum)) { addr = (unsigned long)sw; unat = &sw->ar_unat; } else { addr = (unsigned long)regs; unat = &sw->caller_unat; } DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum)); addr += GR_OFFS(regnum); *val = *(unsigned long *)addr; /* * do it only when requested */ if (nat) *nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL; } static void emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa) { /* * IMPORTANT: * Given the way we handle unaligned speculative loads, we should * not get to this point in the code but we keep this sanity check, * just in case. */ if (ld.x6_op == 1 || ld.x6_op == 3) { printk(KERN_ERR "%s: register update on speculative load, error\n", __func__); if (die_if_kernel("unaligned reference on speculative load with register update\n", regs, 30)) return; } /* * at this point, we know that the base register to update is valid i.e., * it's not r0 */ if (type == UPD_IMMEDIATE) { unsigned long imm; /* * Load +Imm: ldXZ r1=[r3],imm(9) * * * form imm9: [13:19] contain the first 7 bits */ imm = ld.x << 7 | ld.imm; /* * sign extend (1+8bits) if m set */ if (ld.m) imm |= SIGN_EXT9; /* * ifa == r3 and we know that the NaT bit on r3 was clear so * we can directly use ifa. 
*/ ifa += imm; setreg(ld.r3, ifa, 0, regs); DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa); } else if (ld.m) { unsigned long r2; int nat_r2; /* * Load +Reg Opcode: ldXZ r1=[r3],r2 * * Note: that we update r3 even in the case of ldfX.a * (where the load does not happen) * * The way the load algorithm works, we know that r3 does not * have its NaT bit set (would have gotten NaT consumption * before getting the unaligned fault). So we can use ifa * which equals r3 at this point. * * IMPORTANT: * The above statement holds ONLY because we know that we * never reach this code when trying to do a ldX.s. * If we ever make it to here on an ldfX.s then */ getreg(ld.imm, &r2, &nat_r2, regs); ifa += r2; /* * propagate Nat r2 -> r3 */ setreg(ld.r3, ifa, nat_r2, regs); DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n",ld.imm, r2, ifa, nat_r2); } } static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode) { if (kernel_mode) return copy_to_kernel_nofault((void *)ifa, val, len); return copy_to_user((void __user *)ifa, val, len); } static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode) { if (kernel_mode) return copy_from_kernel_nofault(val, (void *)ifa, len); return copy_from_user(val, (void __user *)ifa, len); } static int emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { unsigned int len = 1 << ld.x6_sz; unsigned long val = 0; /* * r0, as target, doesn't need to be checked because Illegal Instruction * faults have higher priority than unaligned faults. * * r0 cannot be found as the base as it would never generate an * unaligned reference. */ /* * ldX.a we will emulate load and also invalidate the ALAT entry. * See comment below for explanation on how we handle ldX.a */ if (len != 2 && len != 4 && len != 8) { DPRINT("unknown size: x6=%d\n", ld.x6_sz); return -1; } /* this assumes little-endian byte-order: */ if (emulate_load(&val, ifa, len, kernel_mode)) return -1; setreg(ld.r1, val, 0, regs); /* * check for updates on any kind of loads */ if (ld.op == 0x5 || ld.m) emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa); /* * handling of various loads (based on EAS2.4): * * ldX.acq (ordered load): * - acquire semantics would have been used, so force fence instead. * * ldX.c.clr (check load and clear): * - if we get to this handler, it's because the entry was not in the ALAT. * Therefore the operation reverts to a normal load * * ldX.c.nc (check load no clear): * - same as previous one * * ldX.c.clr.acq (ordered check load and clear): * - same as above for c.clr part. The load needs to have acquire semantics. So * we use the fence semantics which is stronger and thus ensures correctness. * * ldX.a (advanced load): * - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the * address doesn't match requested size alignment. This means that we would * possibly need more than one load to get the result. * * The load part can be handled just like a normal load, however the difficult * part is to get the right thing into the ALAT. The critical piece of information * in the base address of the load & size. To do that, a ld.a must be executed, * clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now * if we use the same target register, we will be okay for the check.a instruction. * If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry * which would overlap within [r3,r3+X] (the size of the load was store in the * ALAT). 
If such an entry is found the entry is invalidated. But this is not good * enough, take the following example: * r3=3 * ld4.a r1=[r3] * * Could be emulated by doing: * ld1.a r1=[r3],1 * store to temporary; * ld1.a r1=[r3],1 * store & shift to temporary; * ld1.a r1=[r3],1 * store & shift to temporary; * ld1.a r1=[r3] * store & shift to temporary; * r1=temporary * * So in this case, you would get the right value is r1 but the wrong info in * the ALAT. Notice that you could do it in reverse to finish with address 3 * but you would still get the size wrong. To get the size right, one needs to * execute exactly the same kind of load. You could do it from a aligned * temporary location, but you would get the address wrong. * * So no matter what, it is not possible to emulate an advanced load * correctly. But is that really critical ? * * We will always convert ld.a into a normal load with ALAT invalidated. This * will enable compiler to do optimization where certain code path after ld.a * is not required to have ld.c/chk.a, e.g., code path with no intervening stores. * * If there is a store after the advanced load, one must either do a ld.c.* or * chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no * entry found in ALAT), and that's perfectly ok because: * * - ld.c.*, if the entry is not present a normal load is executed * - chk.a.*, if the entry is not present, execution jumps to recovery code * * In either case, the load can be potentially retried in another form. * * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick * up a stale entry later). The register base update MUST also be performed. */ /* * when the load has the .acq completer then * use ordering fence. */ if (ld.x6_op == 0x5 || ld.x6_op == 0xa) mb(); /* * invalidate ALAT entry in case of advanced load */ if (ld.x6_op == 0x2) invala_gr(ld.r1); return 0; } static int emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { unsigned long r2; unsigned int len = 1 << ld.x6_sz; /* * if we get to this handler, Nat bits on both r3 and r2 have already * been checked. so we don't need to do it * * extract the value to be stored */ getreg(ld.imm, &r2, NULL, regs); /* * we rely on the macros in unaligned.h for now i.e., * we let the compiler figure out how to read memory gracefully. * * We need this switch/case because the way the inline function * works. The code is optimized by the compiler and looks like * a single switch/case. */ DPRINT("st%d [%lx]=%lx\n", len, ifa, r2); if (len != 2 && len != 4 && len != 8) { DPRINT("unknown size: x6=%d\n", ld.x6_sz); return -1; } /* this assumes little-endian byte-order: */ if (emulate_store(ifa, &r2, len, kernel_mode)) return -1; /* * stX [r3]=r2,imm(9) * * NOTE: * ld.r3 can never be r0, because r0 would not generate an * unaligned access. 
*/ if (ld.op == 0x5) { unsigned long imm; /* * form imm9: [12:6] contain first 7bits */ imm = ld.x << 7 | ld.r1; /* * sign extend (8bits) if m set */ if (ld.m) imm |= SIGN_EXT9; /* * ifa == r3 (NaT is necessarily cleared) */ ifa += imm; DPRINT("imm=%lx r3=%lx\n", imm, ifa); setreg(ld.r3, ifa, 0, regs); } /* * we don't have alat_invalidate_multiple() so we need * to do the complete flush :-<< */ ia64_invala(); /* * stX.rel: use fence instead of release */ if (ld.x6_op == 0xd) mb(); return 0; } /* * floating point operations sizes in bytes */ static const unsigned char float_fsz[4]={ 10, /* extended precision (e) */ 8, /* integer (8) */ 4, /* single precision (s) */ 8 /* double precision (d) */ }; static inline void mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldfe(6, init); ia64_stop(); ia64_stf_spill(final, 6); } static inline void mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldf8(6, init); ia64_stop(); ia64_stf_spill(final, 6); } static inline void mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldfs(6, init); ia64_stop(); ia64_stf_spill(final, 6); } static inline void mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldfd(6, init); ia64_stop(); ia64_stf_spill(final, 6); } static inline void float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldf_fill(6, init); ia64_stop(); ia64_stfe(final, 6); } static inline void float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldf_fill(6, init); ia64_stop(); ia64_stf8(final, 6); } static inline void float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldf_fill(6, init); ia64_stop(); ia64_stfs(final, 6); } static inline void float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final) { ia64_ldf_fill(6, init); ia64_stop(); ia64_stfd(final, 6); } static int emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { struct ia64_fpreg fpr_init[2]; struct ia64_fpreg fpr_final[2]; unsigned long len = float_fsz[ld.x6_sz]; /* * fr0 & fr1 don't need to be checked because Illegal Instruction faults have * higher priority than unaligned faults. * * r0 cannot be found as the base as it would never generate an unaligned * reference. */ /* * make sure we get clean buffers */ memset(&fpr_init, 0, sizeof(fpr_init)); memset(&fpr_final, 0, sizeof(fpr_final)); /* * ldfpX.a: we don't try to emulate anything but we must * invalidate the ALAT entry and execute updates, if any. */ if (ld.x6_op != 0x2) { /* * This assumes little-endian byte-order. 
Note that there is no "ldfpe" * instruction: */ if (emulate_load(&fpr_init[0], ifa, len, kernel_mode) || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode)) return -1; DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz); DDUMP("frp_init =", &fpr_init, 2*len); /* * XXX fixme * Could optimize inlines by using ldfpX & 2 spills */ switch( ld.x6_sz ) { case 0: mem2float_extended(&fpr_init[0], &fpr_final[0]); mem2float_extended(&fpr_init[1], &fpr_final[1]); break; case 1: mem2float_integer(&fpr_init[0], &fpr_final[0]); mem2float_integer(&fpr_init[1], &fpr_final[1]); break; case 2: mem2float_single(&fpr_init[0], &fpr_final[0]); mem2float_single(&fpr_init[1], &fpr_final[1]); break; case 3: mem2float_double(&fpr_init[0], &fpr_final[0]); mem2float_double(&fpr_init[1], &fpr_final[1]); break; } DDUMP("fpr_final =", &fpr_final, 2*len); /* * XXX fixme * * A possible optimization would be to drop fpr_final and directly * use the storage from the saved context i.e., the actual final * destination (pt_regs, switch_stack or thread structure). */ setfpreg(ld.r1, &fpr_final[0], regs); setfpreg(ld.imm, &fpr_final[1], regs); } /* * Check for updates: only immediate updates are available for this * instruction. */ if (ld.m) { /* * the immediate is implicit given the ldsz of the operation: * single: 8 (2x4) and for all others it's 16 (2x8) */ ifa += len<<1; /* * IMPORTANT: * the fact that we force the NaT of r3 to zero is ONLY valid * as long as we don't come here with a ldfpX.s. * For this reason we keep this sanity check */ if (ld.x6_op == 1 || ld.x6_op == 3) printk(KERN_ERR "%s: register update on speculative load pair, error\n", __func__); setreg(ld.r3, ifa, 0, regs); } /* * Invalidate ALAT entries, if any, for both registers. */ if (ld.x6_op == 0x2) { invala_fr(ld.r1); invala_fr(ld.imm); } return 0; } static int emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { struct ia64_fpreg fpr_init; struct ia64_fpreg fpr_final; unsigned long len = float_fsz[ld.x6_sz]; /* * fr0 & fr1 don't need to be checked because Illegal Instruction * faults have higher priority than unaligned faults. * * r0 cannot be found as the base as it would never generate an * unaligned reference. */ /* * make sure we get clean buffers */ memset(&fpr_init,0, sizeof(fpr_init)); memset(&fpr_final,0, sizeof(fpr_final)); /* * ldfX.a we don't try to emulate anything but we must * invalidate the ALAT entry. * See comments in ldX for descriptions on how the various loads are handled. */ if (ld.x6_op != 0x2) { if (emulate_load(&fpr_init, ifa, len, kernel_mode)) return -1; DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz); DDUMP("fpr_init =", &fpr_init, len); /* * we only do something for x6_op={0,8,9} */ switch( ld.x6_sz ) { case 0: mem2float_extended(&fpr_init, &fpr_final); break; case 1: mem2float_integer(&fpr_init, &fpr_final); break; case 2: mem2float_single(&fpr_init, &fpr_final); break; case 3: mem2float_double(&fpr_init, &fpr_final); break; } DDUMP("fpr_final =", &fpr_final, len); /* * XXX fixme * * A possible optimization would be to drop fpr_final and directly * use the storage from the saved context i.e., the actual final * destination (pt_regs, switch_stack or thread structure). */ setfpreg(ld.r1, &fpr_final, regs); } /* * check for updates on any loads */ if (ld.op == 0x7 || ld.m) emulate_load_updates(ld.op == 0x7 ? 
UPD_IMMEDIATE: UPD_REG, ld, regs, ifa); /* * invalidate ALAT entry in case of advanced floating point loads */ if (ld.x6_op == 0x2) invala_fr(ld.r1); return 0; } static int emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode) { struct ia64_fpreg fpr_init; struct ia64_fpreg fpr_final; unsigned long len = float_fsz[ld.x6_sz]; /* * make sure we get clean buffers */ memset(&fpr_init,0, sizeof(fpr_init)); memset(&fpr_final,0, sizeof(fpr_final)); /* * if we get to this handler, Nat bits on both r3 and r2 have already * been checked. so we don't need to do it * * extract the value to be stored */ getfpreg(ld.imm, &fpr_init, regs); /* * during this step, we extract the spilled registers from the saved * context i.e., we refill. Then we store (no spill) to temporary * aligned location */ switch( ld.x6_sz ) { case 0: float2mem_extended(&fpr_init, &fpr_final); break; case 1: float2mem_integer(&fpr_init, &fpr_final); break; case 2: float2mem_single(&fpr_init, &fpr_final); break; case 3: float2mem_double(&fpr_init, &fpr_final); break; } DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz); DDUMP("fpr_init =", &fpr_init, len); DDUMP("fpr_final =", &fpr_final, len); if (emulate_store(ifa, &fpr_final, len, kernel_mode)) return -1; /* * stfX [r3]=r2,imm(9) * * NOTE: * ld.r3 can never be r0, because r0 would not generate an * unaligned access. */ if (ld.op == 0x7) { unsigned long imm; /* * form imm9: [12:6] contain first 7bits */ imm = ld.x << 7 | ld.r1; /* * sign extend (8bits) if m set */ if (ld.m) imm |= SIGN_EXT9; /* * ifa == r3 (NaT is necessarily cleared) */ ifa += imm; DPRINT("imm=%lx r3=%lx\n", imm, ifa); setreg(ld.r3, ifa, 0, regs); } /* * we don't have alat_invalidate_multiple() so we need * to do the complete flush :-<< */ ia64_invala(); return 0; } /* * Make sure we log the unaligned access, so that user/sysadmin can notice it and * eventually fix the program. However, we don't want to do that for every access so we * pace it with jiffies. */ static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5); void ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs) { struct ia64_psr *ipsr = ia64_psr(regs); unsigned long bundle[2]; unsigned long opcode; const struct exception_table_entry *eh = NULL; union { unsigned long l; load_store_t insn; } u; int ret = -1; bool kernel_mode = false; if (ia64_psr(regs)->be) { /* we don't support big-endian accesses */ if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0)) return; goto force_sigbus; } /* * Treat kernel accesses for which there is an exception handler entry the same as * user-level unaligned accesses. Otherwise, a clever program could trick this * handler into reading an arbitrary kernel addresses... */ if (!user_mode(regs)) eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri); if (user_mode(regs) || eh) { if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0) goto force_sigbus; if (!no_unaligned_warning && !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) && __ratelimit(&logging_rate_limit)) { char buf[200]; /* comm[] is at most 16 bytes... */ size_t len; len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, " "ip=0x%016lx\n\r", current->comm, task_pid_nr(current), ifa, regs->cr_iip + ipsr->ri); /* * Don't call tty_write_message() if we're in the kernel; we might * be holding locks... 
*/ if (user_mode(regs)) { struct tty_struct *tty = get_current_tty(); tty_write_message(tty, buf); tty_kref_put(tty); } buf[len-1] = '\0'; /* drop '\r' */ /* watch for command names containing %s */ printk(KERN_WARNING "%s", buf); } else { if (no_unaligned_warning) { printk_once(KERN_WARNING "%s(%d) encountered an " "unaligned exception which required\n" "kernel assistance, which degrades " "the performance of the application.\n" "Unaligned exception warnings have " "been disabled by the system " "administrator\n" "echo 0 > /proc/sys/kernel/ignore-" "unaligned-usertrap to re-enable\n", current->comm, task_pid_nr(current)); } } } else { if (__ratelimit(&logging_rate_limit)) { printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n", ifa, regs->cr_iip + ipsr->ri); if (unaligned_dump_stack) dump_stack(); } kernel_mode = true; } DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n", regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it); if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode)) goto failure; /* * extract the instruction from the bundle given the slot number */ switch (ipsr->ri) { default: case 0: u.l = (bundle[0] >> 5); break; case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break; case 2: u.l = (bundle[1] >> 23); break; } opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK; DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d " "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm, u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op); /* * IMPORTANT: * Notice that the switch statement DOES not cover all possible instructions * that DO generate unaligned references. This is made on purpose because for some * instructions it DOES NOT make sense to try and emulate the access. Sometimes it * is WRONG to try and emulate. Here is a list of instruction we don't emulate i.e., * the program will get a signal and die: * * load/store: * - ldX.spill * - stX.spill * Reason: RNATs are based on addresses * - ld16 * - st16 * Reason: ld16 and st16 are supposed to occur in a single * memory op * * synchronization: * - cmpxchg * - fetchadd * - xchg * Reason: ATOMIC operations cannot be emulated properly using multiple * instructions. * * speculative loads: * - ldX.sZ * Reason: side effects, code must be ready to deal with failure so simpler * to let the load fail. * --------------------------------------------------------------------------------- * XXX fixme * * I would like to get rid of this switch case and do something * more elegant. */ switch (opcode) { case LDS_OP: case LDSA_OP: if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; fallthrough; case LDS_IMM_OP: case LDSA_IMM_OP: case LDFS_OP: case LDFSA_OP: case LDFS_IMM_OP: /* * The instruction will be retried with deferred exceptions turned on, and * we should get Nat bit installed * * IMPORTANT: When PSR_ED is set, the register & immediate update forms * are actually executed even though the operation failed. So we don't * need to take care of this. 
*/ DPRINT("forcing PSR_ED\n"); regs->cr_ipsr |= IA64_PSR_ED; goto done; case LD_OP: case LDA_OP: case LDBIAS_OP: case LDACQ_OP: case LDCCLR_OP: case LDCNC_OP: case LDCCLRACQ_OP: if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; fallthrough; case LD_IMM_OP: case LDA_IMM_OP: case LDBIAS_IMM_OP: case LDACQ_IMM_OP: case LDCCLR_IMM_OP: case LDCNC_IMM_OP: case LDCCLRACQ_IMM_OP: ret = emulate_load_int(ifa, u.insn, regs, kernel_mode); break; case ST_OP: case STREL_OP: if (u.insn.x) /* oops, really a semaphore op (cmpxchg, etc) */ goto failure; fallthrough; case ST_IMM_OP: case STREL_IMM_OP: ret = emulate_store_int(ifa, u.insn, regs, kernel_mode); break; case LDF_OP: case LDFA_OP: case LDFCCLR_OP: case LDFCNC_OP: if (u.insn.x) ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode); else ret = emulate_load_float(ifa, u.insn, regs, kernel_mode); break; case LDF_IMM_OP: case LDFA_IMM_OP: case LDFCCLR_IMM_OP: case LDFCNC_IMM_OP: ret = emulate_load_float(ifa, u.insn, regs, kernel_mode); break; case STF_OP: case STF_IMM_OP: ret = emulate_store_float(ifa, u.insn, regs, kernel_mode); break; default: goto failure; } DPRINT("ret=%d\n", ret); if (ret) goto failure; if (ipsr->ri == 2) /* * given today's architecture this case is not likely to happen because a * memory access instruction (M) can never be in the last slot of a * bundle. But let's keep it for now. */ regs->cr_iip += 16; ipsr->ri = (ipsr->ri + 1) & 0x3; DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip); done: return; failure: /* something went wrong... */ if (!user_mode(regs)) { if (eh) { ia64_handle_exception(regs, eh); goto done; } if (die_if_kernel("error during unaligned kernel access\n", regs, ret)) return; /* NOT_REACHED */ } force_sigbus: force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) ifa, 0, 0, 0); goto done; }
linux-master
arch/ia64/kernel/unaligned.c
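The stX-update path in the record above assembles a 9-bit signed immediate from the x and r1 fields and ORs in SIGN_EXT9 when the m (sign) bit is set. Below is a minimal userspace sketch of that sign extension, assuming SIGN_EXT9 is simply the all-ones mask above bit 8 (the real constant is defined elsewhere in the file and is not shown in this record).

#include <stdio.h>

/* Illustrative sketch only: build and sign-extend the imm9 that the
 * stX-update emulation assembles (r1 holds bits 0..6, x is bit 7, m is
 * the sign).  SIGN_EXT9 is assumed here to be the mask above bit 8. */
#define SIGN_EXT9 (~0UL << 8)

static long form_imm9(unsigned x, unsigned r1, unsigned m)
{
	unsigned long imm = ((unsigned long)x << 7) | r1;	/* low 8 bits */
	if (m)							/* sign bit set */
		imm |= SIGN_EXT9;
	return (long)imm;
}

int main(void)
{
	printf("x=1 r1=0x10 m=0 -> %ld\n", form_imm9(1, 0x10, 0));	/* 144 */
	printf("x=1 r1=0x10 m=1 -> %ld\n", form_imm9(1, 0x10, 1));	/* -112 */
	return 0;
}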
// SPDX-License-Identifier: GPL-2.0 #include <linux/init.h> #include <linux/types.h> #include <linux/audit.h> #include <asm/unistd.h> static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U }; static unsigned read_class[] = { #include <asm-generic/audit_read.h> ~0U }; static unsigned write_class[] = { #include <asm-generic/audit_write.h> ~0U }; static unsigned chattr_class[] = { #include <asm-generic/audit_change_attr.h> ~0U }; static unsigned signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; int audit_classify_arch(int arch) { return 0; } int audit_classify_syscall(int abi, unsigned syscall) { switch(syscall) { case __NR_open: return AUDITSC_OPEN; case __NR_openat: return AUDITSC_OPENAT; case __NR_execve: return AUDITSC_EXECVE; case __NR_openat2: return AUDITSC_OPENAT2; default: return AUDITSC_NATIVE; } } static int __init audit_classes_init(void) { audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } __initcall(audit_classes_init);
linux-master
arch/ia64/kernel/audit.c
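The syscall class tables in the audit.c record above are pulled in from asm-generic headers and terminated with a ~0U sentinel before being handed to audit_register_class(). A self-contained sketch of how such a sentinel-terminated table can be scanned follows; the table contents are invented for the demo.

#include <stdio.h>

/* Illustrative only: walk a ~0U-terminated syscall-number table like the
 * dir_class/read_class/... arrays above.  The numbers here are made up. */
static const unsigned demo_class[] = { 3, 17, 38, ~0U };

static int class_contains(const unsigned *class, unsigned nr)
{
	for (int i = 0; class[i] != ~0U; i++)
		if (class[i] == nr)
			return 1;
	return 0;
}

int main(void)
{
	printf("38 -> %d, 99 -> %d\n",
	       class_contains(demo_class, 38), class_contains(demo_class, 99));
	return 0;
}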
// SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/errno.h> #include <linux/timex.h> #include <linux/clocksource.h> #include <linux/io.h> /* IBM Summit (EXA) Cyclone counter code*/ #define CYCLONE_CBAR_ADDR 0xFEB00CD0 #define CYCLONE_PMCC_OFFSET 0x51A0 #define CYCLONE_MPMC_OFFSET 0x51D0 #define CYCLONE_MPCS_OFFSET 0x51A8 #define CYCLONE_TIMER_FREQ 100000000 int use_cyclone; void __init cyclone_setup(void) { use_cyclone = 1; } static void __iomem *cyclone_mc; static u64 read_cyclone(struct clocksource *cs) { return (u64)readq((void __iomem *)cyclone_mc); } static struct clocksource clocksource_cyclone = { .name = "cyclone", .rating = 300, .read = read_cyclone, .mask = (1LL << 40) - 1, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; int __init init_cyclone_clock(void) { u64 __iomem *reg; u64 base; /* saved cyclone base address */ u64 offset; /* offset from pageaddr to cyclone_timer register */ int i; u32 __iomem *cyclone_timer; /* Cyclone MPMC0 register */ if (!use_cyclone) return 0; printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n"); /* find base address */ offset = (CYCLONE_CBAR_ADDR); reg = ioremap(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR" " register.\n"); use_cyclone = 0; return -ENODEV; } base = readq(reg); iounmap(reg); if(!base){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR" " value.\n"); use_cyclone = 0; return -ENODEV; } /* setup PMCC */ offset = (base + CYCLONE_PMCC_OFFSET); reg = ioremap(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid PMCC" " register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* setup MPCS */ offset = (base + CYCLONE_MPCS_OFFSET); reg = ioremap(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid MPCS" " register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* map in cyclone_timer */ offset = (base + CYCLONE_MPMC_OFFSET); cyclone_timer = ioremap(offset, sizeof(u32)); if(!cyclone_timer){ printk(KERN_ERR "Summit chipset: Could not find valid MPMC" " register.\n"); use_cyclone = 0; return -ENODEV; } /*quick test to make sure its ticking*/ for(i=0; i<3; i++){ u32 old = readl(cyclone_timer); int stall = 100; while(stall--) barrier(); if(readl(cyclone_timer) == old){ printk(KERN_ERR "Summit chipset: Counter not counting!" " DISABLED\n"); iounmap(cyclone_timer); cyclone_timer = NULL; use_cyclone = 0; return -ENODEV; } } /* initialize last tick */ cyclone_mc = cyclone_timer; clocksource_cyclone.archdata.fsys_mmio = cyclone_timer; clocksource_register_hz(&clocksource_cyclone, CYCLONE_TIMER_FREQ); return 0; } __initcall(init_cyclone_clock);
linux-master
arch/ia64/kernel/cyclone.c
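clocksource_cyclone above declares a 40-bit counter via .mask = (1LL << 40) - 1. A brief sketch of why that mask matters: differences taken modulo the mask stay correct even when the hardware register wraps.

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: with a 40-bit counter, deltas masked to 40 bits
 * remain correct across a wrap of the hardware register. */
#define CYCLONE_MASK ((1ULL << 40) - 1)

static uint64_t cyc_delta(uint64_t later, uint64_t earlier)
{
	return (later - earlier) & CYCLONE_MASK;
}

int main(void)
{
	uint64_t before = CYCLONE_MASK - 5;	/* shortly before the wrap */
	uint64_t after  = 10;			/* shortly after the wrap  */

	printf("delta across wrap = %llu ticks\n",
	       (unsigned long long)cyc_delta(after, before));	/* prints 16 */
	return 0;
}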
// SPDX-License-Identifier: GPL-2.0-only /* * linux/arch/ia64/kernel/time.c * * Copyright (C) 1998-2003 Hewlett-Packard Co * Stephane Eranian <[email protected]> * David Mosberger <[email protected]> * Copyright (C) 1999 Don Dugger <[email protected]> * Copyright (C) 1999-2000 VA Linux Systems * Copyright (C) 1999-2000 Walt Drummond <[email protected]> */ #include <linux/cpu.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/nmi.h> #include <linux/interrupt.h> #include <linux/efi.h> #include <linux/timex.h> #include <linux/timekeeper_internal.h> #include <linux/platform_device.h> #include <linux/sched/cputime.h> #include <asm/cputime.h> #include <asm/delay.h> #include <asm/efi.h> #include <asm/hw_irq.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/sections.h> #include "fsyscall_gtod_data.h" #include "irq.h" static u64 itc_get_cycles(struct clocksource *cs); struct fsyscall_gtod_data_t fsyscall_gtod_data; struct itc_jitter_data_t itc_jitter_data; volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ #ifdef CONFIG_IA64_DEBUG_IRQ unsigned long last_cli_ip; EXPORT_SYMBOL(last_cli_ip); #endif static struct clocksource clocksource_itc = { .name = "itc", .rating = 350, .read = itc_get_cycles, .mask = CLOCKSOURCE_MASK(64), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct clocksource *itc_clocksource; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE #include <linux/kernel_stat.h> extern u64 cycle_to_nsec(u64 cyc); void vtime_flush(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); u64 delta; if (ti->utime) account_user_time(tsk, cycle_to_nsec(ti->utime)); if (ti->gtime) account_guest_time(tsk, cycle_to_nsec(ti->gtime)); if (ti->idle_time) account_idle_time(cycle_to_nsec(ti->idle_time)); if (ti->stime) { delta = cycle_to_nsec(ti->stime); account_system_index_time(tsk, delta, CPUTIME_SYSTEM); } if (ti->hardirq_time) { delta = cycle_to_nsec(ti->hardirq_time); account_system_index_time(tsk, delta, CPUTIME_IRQ); } if (ti->softirq_time) { delta = cycle_to_nsec(ti->softirq_time); account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); } ti->utime = 0; ti->gtime = 0; ti->idle_time = 0; ti->stime = 0; ti->hardirq_time = 0; ti->softirq_time = 0; } /* * Called from the context switch with interrupts disabled, to charge all * accumulated times to the current process, and to prepare accounting on * the next process. */ void arch_vtime_task_switch(struct task_struct *prev) { struct thread_info *pi = task_thread_info(prev); struct thread_info *ni = task_thread_info(current); ni->ac_stamp = pi->ac_stamp; ni->ac_stime = ni->ac_utime = 0; } /* * Account time for a transition between system, hard irq or soft irq state. * Note that this function is called with interrupts enabled. 
*/ static __u64 vtime_delta(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); __u64 now, delta_stime; WARN_ON_ONCE(!irqs_disabled()); now = ia64_get_itc(); delta_stime = now - ti->ac_stamp; ti->ac_stamp = now; return delta_stime; } void vtime_account_kernel(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); __u64 stime = vtime_delta(tsk); if (tsk->flags & PF_VCPU) ti->gtime += stime; else ti->stime += stime; } EXPORT_SYMBOL_GPL(vtime_account_kernel); void vtime_account_idle(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); ti->idle_time += vtime_delta(tsk); } void vtime_account_softirq(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); ti->softirq_time += vtime_delta(tsk); } void vtime_account_hardirq(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); ti->hardirq_time += vtime_delta(tsk); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ static irqreturn_t timer_interrupt (int irq, void *dev_id) { unsigned long new_itm; if (cpu_is_offline(smp_processor_id())) { return IRQ_HANDLED; } new_itm = local_cpu_data->itm_next; if (!time_after(ia64_get_itc(), new_itm)) printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n", ia64_get_itc(), new_itm); while (1) { new_itm += local_cpu_data->itm_delta; legacy_timer_tick(smp_processor_id() == time_keeper_id); local_cpu_data->itm_next = new_itm; if (time_after(new_itm, ia64_get_itc())) break; /* * Allow IPIs to interrupt the timer loop. */ local_irq_enable(); local_irq_disable(); } do { /* * If we're too close to the next clock tick for * comfort, we increase the safety margin by * intentionally dropping the next tick(s). We do NOT * update itm.next because that would force us to call * xtime_update() which in turn would let our clock run * too fast (with the potentially devastating effect * of losing monotony of time). */ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2)) new_itm += local_cpu_data->itm_delta; ia64_set_itm(new_itm); /* double check, in case we got hit by a (slow) PMI: */ } while (time_after_eq(ia64_get_itc(), new_itm)); return IRQ_HANDLED; } /* * Encapsulate access to the itm structure for SMP. */ void ia64_cpu_local_tick (void) { int cpu = smp_processor_id(); unsigned long shift = 0, delta; /* arrange for the cycle counter to generate a timer interrupt: */ ia64_set_itv(IA64_TIMER_VECTOR); delta = local_cpu_data->itm_delta; /* * Stagger the timer tick for each CPU so they don't occur all at (almost) the * same time: */ if (cpu) { unsigned long hi = 1UL << ia64_fls(cpu); shift = (2*(cpu - hi) + 1) * delta/hi/2; } local_cpu_data->itm_next = ia64_get_itc() + delta + shift; ia64_set_itm(local_cpu_data->itm_next); } static int nojitter; static int __init nojitter_setup(char *str) { nojitter = 1; printk("Jitter checking for ITC timers disabled\n"); return 1; } __setup("nojitter", nojitter_setup); void ia64_init_itm(void) { unsigned long platform_base_freq, itc_freq; struct pal_freq_ratio itc_ratio, proc_ratio; long status, platform_base_drift, itc_drift; /* * According to SAL v2.6, we need to use a SAL call to determine the platform base * frequency and then a PAL call to determine the frequency ratio between the ITC * and the base frequency. 
*/ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM, &platform_base_freq, &platform_base_drift); if (status != 0) { printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status)); } else { status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio); if (status != 0) printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status); } if (status != 0) { /* invent "random" values */ printk(KERN_ERR "SAL/PAL failed to obtain frequency info---inventing reasonable values\n"); platform_base_freq = 100000000; platform_base_drift = -1; /* no drift info */ itc_ratio.num = 3; itc_ratio.den = 1; } if (platform_base_freq < 40000000) { printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n", platform_base_freq); platform_base_freq = 75000000; platform_base_drift = -1; } if (!proc_ratio.den) proc_ratio.den = 1; /* avoid division by zero */ if (!itc_ratio.den) itc_ratio.den = 1; /* avoid division by zero */ itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den; local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ; printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, " "ITC freq=%lu.%03luMHz", smp_processor_id(), platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000, itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000); if (platform_base_drift != -1) { itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den; printk("+/-%ldppm\n", itc_drift); } else { itc_drift = -1; printk("\n"); } local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den; local_cpu_data->itc_freq = itc_freq; local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC; local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT) + itc_freq/2)/itc_freq; if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { #ifdef CONFIG_SMP /* On IA64 in an SMP configuration ITCs are never accurately synchronized. * Jitter compensation requires a cmpxchg which may limit * the scalability of the syscalls for retrieving time. * The ITC synchronization is usually successful to within a few * ITC ticks but this is not a sure thing. If you need to improve * timer performance in SMP situations then boot the kernel with the * "nojitter" option. However, doing so may result in time fluctuating (maybe * even going backward) if the ITC offsets between the individual CPUs * are too large. */ if (!nojitter) itc_jitter_data.itc_jitter = 1; #endif } else /* * ITC is drifty and we have not synchronized the ITCs in smpboot.c. * ITC values may fluctuate significantly between processors. * Clock should not be used for hrtimers. Mark itc as only * useful for boot and testing. * * Note that jitter compensation is off! There is no point of * synchronizing ITCs since they may be large differentials * that change over time. * * The only way to fix this would be to repeatedly sync the * ITCs. Until that time we have to avoid ITC. */ clocksource_itc.rating = 50; /* avoid softlock up message when cpu is unplug and plugged again. 
*/ touch_softlockup_watchdog(); /* Setup the CPU local timer tick */ ia64_cpu_local_tick(); if (!itc_clocksource) { clocksource_register_hz(&clocksource_itc, local_cpu_data->itc_freq); itc_clocksource = &clocksource_itc; } } static u64 itc_get_cycles(struct clocksource *cs) { unsigned long lcycle, now, ret; if (!itc_jitter_data.itc_jitter) return get_cycles(); lcycle = itc_jitter_data.itc_lastcycle; now = get_cycles(); if (lcycle && time_after(lcycle, now)) return lcycle; /* * Keep track of the last timer value returned. * In an SMP environment, you could lose out in contention of * cmpxchg. If so, your cmpxchg returns new value which the * winner of contention updated to. Use the new value instead. */ ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now); if (unlikely(ret != lcycle)) return ret; return now; } void read_persistent_clock64(struct timespec64 *ts) { efi_gettimeofday(ts); } void __init time_init (void) { register_percpu_irq(IA64_TIMER_VECTOR, timer_interrupt, IRQF_IRQPOLL, "timer"); ia64_init_itm(); } /* * Generic udelay assumes that if preemption is allowed and the thread * migrates to another CPU, that the ITC values are synchronized across * all CPUs. */ static void ia64_itc_udelay (unsigned long usecs) { unsigned long start = ia64_get_itc(); unsigned long end = start + usecs*local_cpu_data->cyc_per_usec; while (time_before(ia64_get_itc(), end)) cpu_relax(); } void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay; void udelay (unsigned long usecs) { (*ia64_udelay)(usecs); } EXPORT_SYMBOL(udelay); /* IA64 doesn't cache the timezone */ void update_vsyscall_tz(void) { } void update_vsyscall(struct timekeeper *tk) { write_seqcount_begin(&fsyscall_gtod_data.seq); /* copy vsyscall data */ fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask; fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult; fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift; fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio; fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last; fsyscall_gtod_data.wall_time.sec = tk->xtime_sec; fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec; fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec + ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); /* normalize */ while (fsyscall_gtod_data.monotonic_time.snsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { fsyscall_gtod_data.monotonic_time.snsec -= ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; fsyscall_gtod_data.monotonic_time.sec++; } write_seqcount_end(&fsyscall_gtod_data.seq); }
linux-master
arch/ia64/kernel/time.c
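itc_get_cycles() in the record above works around per-CPU ITC jitter by remembering the last value handed out and resolving reader races with cmpxchg. Here is a hedged userspace sketch of the same idea using C11 atomics; read_raw_counter() is a stand-in for get_cycles() and just fakes a monotonically advancing counter.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

/* Illustrative sketch of the jitter workaround: never return a value older
 * than the last one returned, and if a concurrent reader wins the
 * compare-exchange, use the value it stored. */
static _Atomic uint64_t last_cycle;

static uint64_t read_raw_counter(void)
{
	static uint64_t fake = 1000;	/* pretend hardware counter */
	return fake += 7;
}

static uint64_t monotonic_read(void)
{
	uint64_t last = atomic_load(&last_cycle);
	uint64_t now  = read_raw_counter();

	if (last && (int64_t)(now - last) < 0)	/* raw counter went backwards */
		return last;

	/* If another reader raced us and stored a newer value, return that;
	 * on failure the compare-exchange writes the winner's value to 'last'. */
	if (!atomic_compare_exchange_strong(&last_cycle, &last, now))
		return last;

	return now;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)monotonic_read(),
	       (unsigned long long)monotonic_read());
	return 0;
}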
// SPDX-License-Identifier: GPL-2.0-only /* * arch/ia64/kernel/machine_kexec.c * * Handle transition of Linux booting another kernel * Copyright (C) 2005 Hewlett-Packard Development Comapny, L.P. * Copyright (C) 2005 Khalid Aziz <[email protected]> * Copyright (C) 2006 Intel Corp, Zou Nan hai <[email protected]> */ #include <linux/mm.h> #include <linux/kexec.h> #include <linux/cpu.h> #include <linux/irq.h> #include <linux/efi.h> #include <linux/numa.h> #include <linux/mmzone.h> #include <asm/efi.h> #include <asm/numa.h> #include <asm/mmu_context.h> #include <asm/setup.h> #include <asm/delay.h> #include <asm/meminit.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/mca.h> typedef void (*relocate_new_kernel_t)( unsigned long indirection_page, unsigned long start_address, struct ia64_boot_param *boot_param, unsigned long pal_addr) __noreturn; struct kimage *ia64_kimage; struct resource efi_memmap_res = { .name = "EFI Memory Map", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; struct resource boot_param_res = { .name = "Boot parameter", .start = 0, .end = 0, .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; /* * Do what every setup is needed on image and the * reboot code buffer to allow us to avoid allocations * later. */ int machine_kexec_prepare(struct kimage *image) { void *control_code_buffer; const unsigned long *func; func = (unsigned long *)&relocate_new_kernel; /* Pre-load control code buffer to minimize work in kexec path */ control_code_buffer = page_address(image->control_code_page); memcpy((void *)control_code_buffer, (const void *)func[0], relocate_new_kernel_size); flush_icache_range((unsigned long)control_code_buffer, (unsigned long)control_code_buffer + relocate_new_kernel_size); ia64_kimage = image; return 0; } void machine_kexec_cleanup(struct kimage *image) { } /* * Do not allocate memory (or fail in any way) in machine_kexec(). * We are past the point of no return, committed to rebooting now. 
*/ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) { struct kimage *image = arg; relocate_new_kernel_t rnk; void *pal_addr = efi_get_pal_addr(); unsigned long code_addr; int ii; u64 fp, gp; ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump; BUG_ON(!image); code_addr = (unsigned long)page_address(image->control_code_page); if (image->type == KEXEC_TYPE_CRASH) { crash_save_this_cpu(); current->thread.ksp = (__u64)info->sw - 16; /* Register noop init handler */ fp = ia64_tpa(init_handler->fp); gp = ia64_tpa(ia64_getreg(_IA64_REG_GP)); ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0); } else { /* Unregister init handlers of current kernel */ ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0); } /* Unregister mca handler - No more recovery on current kernel */ ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0); /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); /* Mask CMC and Performance Monitor interrupts */ ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* Mask ITV and Local Redirect Registers */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); /* terminate possible nested in-service interrupts */ for (ii = 0; ii < 16; ii++) ia64_eoi(); /* unmask TPR and clear any pending interrupts */ ia64_setreg(_IA64_REG_CR_TPR, 0); ia64_srlz_d(); while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); rnk = (relocate_new_kernel_t)&code_addr; (*rnk)(image->head, image->start, ia64_boot_param, GRANULEROUNDDOWN((unsigned long) pal_addr)); BUG(); } void machine_kexec(struct kimage *image) { BUG_ON(!image); unw_init_running(ia64_machine_kexec, image); for(;;); } void arch_crash_save_vmcoreinfo(void) { #if defined(CONFIG_SPARSEMEM) VMCOREINFO_SYMBOL(pgdat_list); VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES); #endif #ifdef CONFIG_NUMA VMCOREINFO_SYMBOL(node_memblk); VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS); VMCOREINFO_STRUCT_SIZE(node_memblk_s); VMCOREINFO_OFFSET(node_memblk_s, start_paddr); VMCOREINFO_OFFSET(node_memblk_s, size); #endif #if CONFIG_PGTABLE_LEVELS == 3 VMCOREINFO_CONFIG(PGTABLE_3); #elif CONFIG_PGTABLE_LEVELS == 4 VMCOREINFO_CONFIG(PGTABLE_4); #endif }
linux-master
arch/ia64/kernel/machine_kexec.c
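machine_kexec_prepare() above casts &relocate_new_kernel to unsigned long * and copies from func[0]; that works because on ia64 a function pointer designates a descriptor (entry address plus gp), not the code itself. The sketch below models that descriptor layout after ia64_fptr_t, but keeps a directly callable pointer in place of the raw 64-bit address so the demo runs on any architecture.

#include <stdio.h>

/* Illustrative only: an ia64-style function descriptor pairs the entry
 * point with the callee's global pointer; the real ia64_fptr_t stores both
 * as 64-bit addresses. */
struct fdesc {
	void (*fp)(void);	/* entry point */
	unsigned long gp;	/* global pointer for the callee's data segment */
};

static void demo_entry(void) { puts("jumped through descriptor"); }

int main(void)
{
	struct fdesc d = { .fp = demo_entry, .gp = 0 };

	d.fp();			/* analogous to calling (*rnk)(...) above */
	return 0;
}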
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/ia64/kernel/irq_ia64.c * * Copyright (C) 1998-2001 Hewlett-Packard Co * Stephane Eranian <[email protected]> * David Mosberger-Tang <[email protected]> * * 6/10/99: Updated to bring in sync with x86 version to facilitate * support for SMP and different interrupt controllers. * * 09/15/00 Goutham Rao <[email protected]> Implemented pci_irq_to_vector * PCI to vector allocation routine. * 04/14/2004 Ashok Raj <[email protected]> * Added CPU Hotplug handling for IPF. */ #include <linux/module.h> #include <linux/pgtable.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel_stat.h> #include <linux/ptrace.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/bitops.h> #include <linux/irq.h> #include <linux/ratelimit.h> #include <linux/acpi.h> #include <linux/sched.h> #include <asm/delay.h> #include <asm/intrinsics.h> #include <asm/io.h> #include <asm/hw_irq.h> #include <asm/tlbflush.h> #define IRQ_DEBUG 0 #define IRQ_VECTOR_UNASSIGNED (0) #define IRQ_UNUSED (0) #define IRQ_USED (1) #define IRQ_RSVD (2) int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR; int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR; /* default base addr of IPI table */ void __iomem *ipi_base_addr = ((void __iomem *) (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR)); static cpumask_t vector_allocation_domain(int cpu); /* * Legacy IRQ to IA-64 vector translation table. */ __u8 isa_irq_to_vector_map[16] = { /* 8259 IRQ translation, first 16 entries */ 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21 }; EXPORT_SYMBOL(isa_irq_to_vector_map); DEFINE_SPINLOCK(vector_lock); struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = { [0 ... NR_IRQS - 1] = { .vector = IRQ_VECTOR_UNASSIGNED, .domain = CPU_MASK_NONE } }; DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = { [0 ... IA64_NUM_VECTORS - 1] = -1 }; static cpumask_t vector_table[IA64_NUM_VECTORS] = { [0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE }; static int irq_status[NR_IRQS] = { [0 ... 
NR_IRQS -1] = IRQ_UNUSED }; static inline int find_unassigned_irq(void) { int irq; for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++) if (irq_status[irq] == IRQ_UNUSED) return irq; return -ENOSPC; } static inline int find_unassigned_vector(cpumask_t domain) { cpumask_t mask; int pos, vector; cpumask_and(&mask, &domain, cpu_online_mask); if (cpumask_empty(&mask)) return -EINVAL; for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) { vector = IA64_FIRST_DEVICE_VECTOR + pos; cpumask_and(&mask, &domain, &vector_table[vector]); if (!cpumask_empty(&mask)) continue; return vector; } return -ENOSPC; } static int __bind_irq_vector(int irq, int vector, cpumask_t domain) { cpumask_t mask; int cpu; struct irq_cfg *cfg = &irq_cfg[irq]; BUG_ON((unsigned)irq >= NR_IRQS); BUG_ON((unsigned)vector >= IA64_NUM_VECTORS); cpumask_and(&mask, &domain, cpu_online_mask); if (cpumask_empty(&mask)) return -EINVAL; if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) return 0; if (cfg->vector != IRQ_VECTOR_UNASSIGNED) return -EBUSY; for_each_cpu(cpu, &mask) per_cpu(vector_irq, cpu)[vector] = irq; cfg->vector = vector; cfg->domain = domain; irq_status[irq] = IRQ_USED; cpumask_or(&vector_table[vector], &vector_table[vector], &domain); return 0; } int bind_irq_vector(int irq, int vector, cpumask_t domain) { unsigned long flags; int ret; spin_lock_irqsave(&vector_lock, flags); ret = __bind_irq_vector(irq, vector, domain); spin_unlock_irqrestore(&vector_lock, flags); return ret; } static void __clear_irq_vector(int irq) { int vector, cpu; cpumask_t domain; struct irq_cfg *cfg = &irq_cfg[irq]; BUG_ON((unsigned)irq >= NR_IRQS); BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED); vector = cfg->vector; domain = cfg->domain; for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; irq_status[irq] = IRQ_UNUSED; cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain); } static void clear_irq_vector(int irq) { unsigned long flags; spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); spin_unlock_irqrestore(&vector_lock, flags); } int ia64_native_assign_irq_vector (int irq) { unsigned long flags; int vector, cpu; cpumask_t domain = CPU_MASK_NONE; vector = -ENOSPC; spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector >= 0) break; } if (vector < 0) goto out; if (irq == AUTO_ASSIGN) irq = vector; BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); return vector; } void ia64_native_free_irq_vector (int vector) { if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR) return; clear_irq_vector(vector); } int reserve_irq_vector (int vector) { if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR) return -EINVAL; return !!bind_irq_vector(vector, vector, CPU_MASK_ALL); } /* * Initialize vector_irq on a new cpu. This function must be called * with vector_lock held. 
*/ void __setup_vector_irq(int cpu) { int irq, vector; /* Clear vector_irq */ for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) per_cpu(vector_irq, cpu)[vector] = -1; /* Mark the inuse vectors */ for (irq = 0; irq < NR_IRQS; ++irq) { if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain)) continue; vector = irq_to_vector(irq); per_cpu(vector_irq, cpu)[vector] = irq; } } #ifdef CONFIG_SMP static enum vector_domain_type { VECTOR_DOMAIN_NONE, VECTOR_DOMAIN_PERCPU } vector_domain_type = VECTOR_DOMAIN_NONE; static cpumask_t vector_allocation_domain(int cpu) { if (vector_domain_type == VECTOR_DOMAIN_PERCPU) return *cpumask_of(cpu); return CPU_MASK_ALL; } static int __irq_prepare_move(int irq, int cpu) { struct irq_cfg *cfg = &irq_cfg[irq]; int vector; cpumask_t domain; if (cfg->move_in_progress || cfg->move_cleanup_count) return -EBUSY; if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu)) return -EINVAL; if (cpumask_test_cpu(cpu, &cfg->domain)) return 0; domain = vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector < 0) return -ENOSPC; cfg->move_in_progress = 1; cfg->old_domain = cfg->domain; cfg->vector = IRQ_VECTOR_UNASSIGNED; cfg->domain = CPU_MASK_NONE; BUG_ON(__bind_irq_vector(irq, vector, domain)); return 0; } int irq_prepare_move(int irq, int cpu) { unsigned long flags; int ret; spin_lock_irqsave(&vector_lock, flags); ret = __irq_prepare_move(irq, cpu); spin_unlock_irqrestore(&vector_lock, flags); return ret; } void irq_complete_move(unsigned irq) { struct irq_cfg *cfg = &irq_cfg[irq]; cpumask_t cleanup_mask; int i; if (likely(!cfg->move_in_progress)) return; if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain))) return; cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask); cfg->move_cleanup_count = cpumask_weight(&cleanup_mask); for_each_cpu(i, &cleanup_mask) ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0); cfg->move_in_progress = 0; } static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) { int me = smp_processor_id(); ia64_vector vector; unsigned long flags; for (vector = IA64_FIRST_DEVICE_VECTOR; vector < IA64_LAST_DEVICE_VECTOR; vector++) { int irq; struct irq_desc *desc; struct irq_cfg *cfg; irq = __this_cpu_read(vector_irq[vector]); if (irq < 0) continue; desc = irq_to_desc(irq); cfg = irq_cfg + irq; raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; if (!cpumask_test_cpu(me, &cfg->old_domain)) goto unlock; spin_lock_irqsave(&vector_lock, flags); __this_cpu_write(vector_irq[vector], -1); cpumask_clear_cpu(me, &vector_table[vector]); spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } static int __init parse_vector_domain(char *arg) { if (!arg) return -EINVAL; if (!strcmp(arg, "percpu")) { vector_domain_type = VECTOR_DOMAIN_PERCPU; no_int_routing = 1; } return 0; } early_param("vector", parse_vector_domain); #else static cpumask_t vector_allocation_domain(int cpu) { return CPU_MASK_ALL; } #endif void destroy_and_reserve_irq(unsigned int irq) { unsigned long flags; irq_init_desc(irq); spin_lock_irqsave(&vector_lock, flags); __clear_irq_vector(irq); irq_status[irq] = IRQ_RSVD; spin_unlock_irqrestore(&vector_lock, flags); } /* * Dynamic irq allocate and deallocation for MSI */ int create_irq(void) { unsigned long flags; int irq, vector, cpu; cpumask_t domain = CPU_MASK_NONE; irq = vector = -ENOSPC; spin_lock_irqsave(&vector_lock, flags); for_each_online_cpu(cpu) { domain = 
vector_allocation_domain(cpu); vector = find_unassigned_vector(domain); if (vector >= 0) break; } if (vector < 0) goto out; irq = find_unassigned_irq(); if (irq < 0) goto out; BUG_ON(__bind_irq_vector(irq, vector, domain)); out: spin_unlock_irqrestore(&vector_lock, flags); if (irq >= 0) irq_init_desc(irq); return irq; } void destroy_irq(unsigned int irq) { irq_init_desc(irq); clear_irq_vector(irq); } #ifdef CONFIG_SMP # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) # define IS_LOCAL_TLB_FLUSH(vec) (vec == IA64_IPI_LOCAL_TLB_FLUSH) #else # define IS_RESCHEDULE(vec) (0) # define IS_LOCAL_TLB_FLUSH(vec) (0) #endif /* * That's where the IVT branches when we get an external * interrupt. This branches to the correct hardware IRQ handler via * function ptr. */ void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); unsigned long saved_tpr; #if IRQ_DEBUG { unsigned long bsp, sp; /* * Note: if the interrupt happened while executing in * the context switch routine (ia64_switch_to), we may * get a spurious stack overflow here. This is * because the register and the memory stack are not * switched atomically. */ bsp = ia64_getreg(_IA64_REG_AR_BSP); sp = ia64_getreg(_IA64_REG_SP); if ((sp - bsp) < 1024) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); if (__ratelimit(&ratelimit)) { printk("ia64_handle_irq: DANGER: less than " "1KB of free stack space!!\n" "(bsp=0x%lx, sp=%lx)\n", bsp, sp); } } } #endif /* IRQ_DEBUG */ /* * Always set TPR to limit maximum interrupt nesting depth to * 16 (without this, it would be ~240, which could easily lead * to kernel stack overflows). */ irq_enter(); saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); while (vector != IA64_SPURIOUS_INT_VECTOR) { int irq = local_vector_to_irq(vector); if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); kstat_incr_irq_this_cpu(irq); } else if (unlikely(IS_RESCHEDULE(vector))) { scheduler_ipi(); kstat_incr_irq_this_cpu(irq); } else { ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); if (unlikely(irq < 0)) { printk(KERN_ERR "%s: Unexpected interrupt " "vector %d on CPU %d is not mapped " "to any IRQ!\n", __func__, vector, smp_processor_id()); } else generic_handle_irq(irq); /* * Disable interrupts and send EOI: */ local_irq_disable(); ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); } /* * This must be done *after* the ia64_eoi(). For example, the keyboard softirq * handler needs to be able to wait for further keyboard interrupts, which can't * come through until ia64_eoi() has been done. */ irq_exit(); set_irq_regs(old_regs); } #ifdef CONFIG_HOTPLUG_CPU /* * This function emulates a interrupt processing when a cpu is about to be * brought down. */ void ia64_process_pending_intr(void) { ia64_vector vector; unsigned long saved_tpr; extern unsigned int vectors_in_migration[NR_IRQS]; vector = ia64_get_ivr(); irq_enter(); saved_tpr = ia64_getreg(_IA64_REG_CR_TPR); ia64_srlz_d(); /* * Perform normal interrupt style processing */ while (vector != IA64_SPURIOUS_INT_VECTOR) { int irq = local_vector_to_irq(vector); if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) { smp_local_flush_tlb(); kstat_incr_irq_this_cpu(irq); } else if (unlikely(IS_RESCHEDULE(vector))) { kstat_incr_irq_this_cpu(irq); } else { struct pt_regs *old_regs = set_irq_regs(NULL); ia64_setreg(_IA64_REG_CR_TPR, vector); ia64_srlz_d(); /* * Now try calling normal ia64_handle_irq as it would have got called * from a real intr handler. 
Try passing null for pt_regs, hopefully * it will work. I hope it works!. * Probably could shared code. */ if (unlikely(irq < 0)) { printk(KERN_ERR "%s: Unexpected interrupt " "vector %d on CPU %d not being mapped " "to any IRQ!!\n", __func__, vector, smp_processor_id()); } else { vectors_in_migration[irq]=0; generic_handle_irq(irq); } set_irq_regs(old_regs); /* * Disable interrupts and send EOI */ local_irq_disable(); ia64_setreg(_IA64_REG_CR_TPR, saved_tpr); } ia64_eoi(); vector = ia64_get_ivr(); } irq_exit(); } #endif #ifdef CONFIG_SMP static irqreturn_t dummy_handler (int irq, void *dev_id) { BUG(); return IRQ_NONE; } /* * KVM uses this interrupt to force a cpu out of guest mode */ #endif void register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags, const char *name) { unsigned int irq; irq = vec; BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL)); irq_set_status_flags(irq, IRQ_PER_CPU); irq_set_chip(irq, &irq_type_ia64_lsapic); if (handler) if (request_irq(irq, handler, flags, name, NULL)) pr_err("Failed to request irq %u (%s)\n", irq, name); irq_set_handler(irq, handle_percpu_irq); } void __init ia64_native_register_ipi(void) { #ifdef CONFIG_SMP register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI"); register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched"); register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0, "tlb_flush"); #endif } void __init init_IRQ (void) { acpi_boot_init(); ia64_register_ipi(); register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL); #ifdef CONFIG_SMP if (vector_domain_type != VECTOR_DOMAIN_NONE) { register_percpu_irq(IA64_IRQ_MOVE_VECTOR, smp_irq_move_cleanup_interrupt, 0, "irq_move"); } #endif } void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) { void __iomem *ipi_addr; unsigned long ipi_data; unsigned long phys_cpu_id; phys_cpu_id = cpu_physical_id(cpu); /* * cpu number is in 8bit ID and 8bit EID */ ipi_data = (delivery_mode << 8) | (vector & 0xff); ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3)); writeq(ipi_data, ipi_addr); }
linux-master
arch/ia64/kernel/irq_ia64.c
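find_unassigned_vector() in the record above searches for a vector whose per-vector CPU mask does not intersect the requested allocation domain. A compact sketch of that search, using plain bitmasks in place of cpumask_t (vector count and mask contents are invented for the demo):

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: pick the first vector not already bound on any CPU
 * in the requested domain. */
#define NUM_VECTORS 8

static uint32_t vector_table[NUM_VECTORS];	/* CPUs already using each vector */

static int find_unassigned_vector(uint32_t domain)
{
	for (int v = 0; v < NUM_VECTORS; v++)
		if ((vector_table[v] & domain) == 0)
			return v;
	return -1;	/* -ENOSPC in the real code */
}

int main(void)
{
	vector_table[0] = 0x1;	/* vector 0 already bound on CPU0   */
	vector_table[1] = 0xf;	/* vector 1 already bound on CPUs 0-3 */

	printf("domain 0x1 -> vector %d\n", find_unassigned_vector(0x1));	/* 2 */
	printf("domain 0x4 -> vector %d\n", find_unassigned_vector(0x4));	/* 0 */
	return 0;
}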
// SPDX-License-Identifier: GPL-2.0-only /* * File: mca_drv.c * Purpose: Generic MCA handling layer * * Copyright (C) 2004 FUJITSU LIMITED * Copyright (C) 2004 Hidetoshi Seto <[email protected]> * Copyright (C) 2005 Silicon Graphics, Inc * Copyright (C) 2005 Keith Owens <[email protected]> * Copyright (C) 2006 Russ Anderson <[email protected]> */ #include <linux/types.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kallsyms.h> #include <linux/memblock.h> #include <linux/acpi.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/workqueue.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/delay.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/mca.h> #include <asm/irq.h> #include <asm/hw_irq.h> #include "mca_drv.h" /* max size of SAL error record (default) */ static int sal_rec_max = 10000; /* from mca_drv_asm.S */ extern void *mca_handler_bhhook(void); static DEFINE_SPINLOCK(mca_bh_lock); typedef enum { MCA_IS_LOCAL = 0, MCA_IS_GLOBAL = 1 } mca_type_t; #define MAX_PAGE_ISOLATE 1024 static struct page *page_isolate[MAX_PAGE_ISOLATE]; static int num_page_isolate = 0; typedef enum { ISOLATE_NG, ISOLATE_OK, ISOLATE_NONE } isolate_status_t; typedef enum { MCA_NOT_RECOVERED = 0, MCA_RECOVERED = 1 } recovery_status_t; /* * This pool keeps pointers to the section part of SAL error record */ static struct { slidx_list_t *buffer; /* section pointer list pool */ int cur_idx; /* Current index of section pointer list pool */ int max_idx; /* Maximum index of section pointer list pool */ } slidx_pool; static int fatal_mca(const char *fmt, ...) { va_list args; char buf[256]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); ia64_mca_printk(KERN_ALERT "MCA: %s\n", buf); return MCA_NOT_RECOVERED; } static int mca_recovered(const char *fmt, ...) { va_list args; char buf[256]; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); ia64_mca_printk(KERN_INFO "MCA: %s\n", buf); return MCA_RECOVERED; } /** * mca_page_isolate - isolate a poisoned page in order not to use it later * @paddr: poisoned memory location * * Return value: * one of isolate_status_t, ISOLATE_OK/NG/NONE. 
*/ static isolate_status_t mca_page_isolate(unsigned long paddr) { int i; struct page *p; /* whether physical address is valid or not */ if (!ia64_phys_addr_valid(paddr)) return ISOLATE_NONE; if (!pfn_valid(paddr >> PAGE_SHIFT)) return ISOLATE_NONE; /* convert physical address to physical page number */ p = pfn_to_page(paddr>>PAGE_SHIFT); /* check whether a page number have been already registered or not */ for (i = 0; i < num_page_isolate; i++) if (page_isolate[i] == p) return ISOLATE_OK; /* already listed */ /* limitation check */ if (num_page_isolate == MAX_PAGE_ISOLATE) return ISOLATE_NG; /* kick pages having attribute 'SLAB' or 'Reserved' */ if (PageSlab(p) || PageReserved(p)) return ISOLATE_NG; /* add attribute 'Reserved' and register the page */ get_page(p); SetPageReserved(p); page_isolate[num_page_isolate++] = p; return ISOLATE_OK; } /** * mca_hanlder_bh - Kill the process which occurred memory read error * @paddr: poisoned address received from MCA Handler */ void mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) { ia64_mlogbuf_dump(); printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, " "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n", raw_smp_processor_id(), current->pid, from_kuid(&init_user_ns, current_uid()), iip, ipsr, paddr, current->comm); spin_lock(&mca_bh_lock); switch (mca_page_isolate(paddr)) { case ISOLATE_OK: printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); break; case ISOLATE_NG: printk(KERN_CRIT "Page isolation: ( %lx ) failure.\n", paddr); break; default: break; } spin_unlock(&mca_bh_lock); /* This process is about to be killed itself */ make_task_dead(SIGKILL); } /** * mca_make_peidx - Make index of processor error section * @slpi: pointer to record of processor error section * @peidx: pointer to index of processor error section */ static void mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx) { /* * calculate the start address of * "struct cpuid_info" and "sal_processor_static_info_t". */ u64 total_check_num = slpi->valid.num_cache_check + slpi->valid.num_tlb_check + slpi->valid.num_bus_check + slpi->valid.num_reg_file_check + slpi->valid.num_ms_check; u64 head_size = sizeof(sal_log_mod_error_info_t) * total_check_num + sizeof(sal_log_processor_info_t); u64 mid_size = slpi->valid.cpuid_info * sizeof(struct sal_cpuid_info); peidx_head(peidx) = slpi; peidx_mid(peidx) = (struct sal_cpuid_info *) (slpi->valid.cpuid_info ? ((char*)slpi + head_size) : NULL); peidx_bottom(peidx) = (sal_processor_static_info_t *) (slpi->valid.psi_static_struct ? 
((char*)slpi + head_size + mid_size) : NULL); } /** * mca_make_slidx - Make index of SAL error record * @buffer: pointer to SAL error record * @slidx: pointer to index of SAL error record * * Return value: * 1 if record has platform error / 0 if not */ #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \ {slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \ hl->hdr = ptr; \ list_add(&hl->list, &(sect)); \ slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; } static int mca_make_slidx(void *buffer, slidx_table_t *slidx) { int platform_err = 0; int record_len = ((sal_log_record_header_t*)buffer)->len; u32 ercd_pos; int sects; sal_log_section_hdr_t *sp; /* * Initialize index referring current record */ INIT_LIST_HEAD(&(slidx->proc_err)); INIT_LIST_HEAD(&(slidx->mem_dev_err)); INIT_LIST_HEAD(&(slidx->sel_dev_err)); INIT_LIST_HEAD(&(slidx->pci_bus_err)); INIT_LIST_HEAD(&(slidx->smbios_dev_err)); INIT_LIST_HEAD(&(slidx->pci_comp_err)); INIT_LIST_HEAD(&(slidx->plat_specific_err)); INIT_LIST_HEAD(&(slidx->host_ctlr_err)); INIT_LIST_HEAD(&(slidx->plat_bus_err)); INIT_LIST_HEAD(&(slidx->unsupported)); /* * Extract a Record Header */ slidx->header = buffer; /* * Extract each section records * (arranged from "int ia64_log_platform_info_print()") */ for (ercd_pos = sizeof(sal_log_record_header_t), sects = 0; ercd_pos < record_len; ercd_pos += sp->len, sects++) { sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos); if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) { LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp); } else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) { platform_err = 1; LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp); } else { LOG_INDEX_ADD_SECT_PTR(slidx->unsupported, sp); } } slidx->n_sections = sects; return platform_err; } /** * init_record_index_pools - Initialize pool of lists for SAL record index * * Return value: * 0 on Success / -ENOMEM on Failure */ static int init_record_index_pools(void) { int i; int rec_max_size; /* Maximum size of SAL error records */ int sect_min_size; /* Minimum size of SAL error sections */ /* minimum size table of each section */ static int sal_log_sect_min_sizes[] = { sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t), sizeof(sal_log_mem_dev_err_info_t), sizeof(sal_log_sel_dev_err_info_t), sizeof(sal_log_pci_bus_err_info_t), sizeof(sal_log_smbios_dev_err_info_t), sizeof(sal_log_pci_comp_err_info_t), sizeof(sal_log_plat_specific_err_info_t), sizeof(sal_log_host_ctlr_err_info_t), sizeof(sal_log_plat_bus_err_info_t), }; /* * MCA handler cannot allocate new 
memory on flight, * so we preallocate enough memory to handle a SAL record. * * Initialize a handling set of slidx_pool: * 1. Pick up the max size of SAL error records * 2. Pick up the min size of SAL error sections * 3. Allocate the pool as enough to 2 SAL records * (now we can estimate the maxinum of section in a record.) */ /* - 1 - */ rec_max_size = sal_rec_max; /* - 2 - */ sect_min_size = sal_log_sect_min_sizes[0]; for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++) if (sect_min_size > sal_log_sect_min_sizes[i]) sect_min_size = sal_log_sect_min_sizes[i]; /* - 3 - */ slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; slidx_pool.buffer = kmalloc_array(slidx_pool.max_idx, sizeof(slidx_list_t), GFP_KERNEL); return slidx_pool.buffer ? 0 : -ENOMEM; } /***************************************************************************** * Recovery functions * *****************************************************************************/ /** * is_mca_global - Check whether this MCA is global or not * @peidx: pointer of index of processor error section * @pbci: pointer to pal_bus_check_info_t * @sos: pointer to hand off struct between SAL and OS * * Return value: * MCA_IS_LOCAL / MCA_IS_GLOBAL */ static mca_type_t is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci, struct ia64_sal_os_state *sos) { pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); /* * PAL can request a rendezvous, if the MCA has a global scope. * If "rz_always" flag is set, SAL requests MCA rendezvous * in spite of global MCA. * Therefore it is local MCA when rendezvous has not been requested. * Failed to rendezvous, the system must be down. */ switch (sos->rv_rc) { case -1: /* SAL rendezvous unsuccessful */ return MCA_IS_GLOBAL; case 0: /* SAL rendezvous not required */ return MCA_IS_LOCAL; case 1: /* SAL rendezvous successful int */ case 2: /* SAL rendezvous successful int with init */ default: break; } /* * If One or more Cache/TLB/Reg_File/Uarch_Check is here, * it would be a local MCA. (i.e. processor internal error) */ if (psp->tc || psp->cc || psp->rc || psp->uc) return MCA_IS_LOCAL; /* * Bus_Check structure with Bus_Check.ib (internal bus error) flag set * would be a global MCA. (e.g. a system bus address parity error) */ if (!pbci || pbci->ib) return MCA_IS_GLOBAL; /* * Bus_Check structure with Bus_Check.eb (external bus error) flag set * could be either a local MCA or a global MCA. * * Referring Bus_Check.bsi: * 0: Unknown/unclassified * 1: BERR# * 2: BINIT# * 3: Hard Fail * (FIXME: Are these SGI specific or generic bsi values?) */ if (pbci->eb) switch (pbci->bsi) { case 0: /* e.g. a load from poisoned memory */ return MCA_IS_LOCAL; case 1: case 2: case 3: return MCA_IS_GLOBAL; } return MCA_IS_GLOBAL; } /** * get_target_identifier - Get the valid Cache or Bus check target identifier. * @peidx: pointer of index of processor error section * * Return value: * target address on Success / 0 on Failure */ static u64 get_target_identifier(peidx_table_t *peidx) { u64 target_address = 0; sal_log_mod_error_info_t *smei; pal_cache_check_info_t *pcci; int i, level = 9; /* * Look through the cache checks for a valid target identifier * If more than one valid target identifier, return the one * with the lowest cache level. 
*/ for (i = 0; i < peidx_cache_check_num(peidx); i++) { smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i); if (smei->valid.target_identifier && smei->target_identifier) { pcci = (pal_cache_check_info_t *)&(smei->check_info); if (!target_address || (pcci->level < level)) { target_address = smei->target_identifier; level = pcci->level; continue; } } } if (target_address) return target_address; /* * Look at the bus check for a valid target identifier */ smei = peidx_bus_check(peidx, 0); if (smei && smei->valid.target_identifier) return smei->target_identifier; return 0; } /** * recover_from_read_error - Try to recover the errors which type are "read"s. * @slidx: pointer of index of SAL error record * @peidx: pointer of index of processor error section * @pbci: pointer of pal_bus_check_info * @sos: pointer to hand off struct between SAL and OS * * Return value: * 1 on Success / 0 on Failure */ static int recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, struct ia64_sal_os_state *sos) { u64 target_identifier; struct pal_min_state_area *pmsa; struct ia64_psr *psr1, *psr2; ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook; /* Is target address valid? */ target_identifier = get_target_identifier(peidx); if (!target_identifier) return fatal_mca("target address not valid"); /* * cpu read or memory-mapped io read * * offending process affected process OS MCA do * kernel mode kernel mode down system * kernel mode user mode kill the process * user mode kernel mode down system (*) * user mode user mode kill the process * * (*) You could terminate offending user-mode process * if (pbci->pv && pbci->pl != 0) *and* if you sure * the process not have any locks of kernel. */ /* Is minstate valid? */ if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate)) return fatal_mca("minstate not valid"); psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr); /* * Check the privilege level of interrupted context. * If it is user-mode, then terminate affected process. */ pmsa = sos->pal_min_state; if (psr1->cpl != 0 || ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) { /* * setup for resume to bottom half of MCA, * "mca_handler_bhhook" */ /* pass to bhhook as argument (gr8, ...) */ pmsa->pmsa_gr[8-1] = target_identifier; pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip; pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr; /* set interrupted return address (but no use) */ pmsa->pmsa_br0 = pmsa->pmsa_iip; /* change resume address to bottom half */ pmsa->pmsa_iip = mca_hdlr_bh->fp; pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp; /* set cpl with kernel mode */ psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; psr2->cpl = 0; psr2->ri = 0; psr2->bn = 1; psr2->i = 0; return mca_recovered("user memory corruption. " "kill affected process - recovered."); } return fatal_mca("kernel context not recovered, iip 0x%lx\n", pmsa->pmsa_iip); } /** * recover_from_platform_error - Recover from platform error. 
* @slidx: pointer of index of SAL error record * @peidx: pointer of index of processor error section * @pbci: pointer of pal_bus_check_info * @sos: pointer to hand off struct between SAL and OS * * Return value: * 1 on Success / 0 on Failure */ static int recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, struct ia64_sal_os_state *sos) { int status = 0; pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); if (psp->bc && pbci->eb && pbci->bsi == 0) { switch(pbci->type) { case 1: /* partial read */ case 3: /* full line(cpu) read */ case 9: /* I/O space read */ status = recover_from_read_error(slidx, peidx, pbci, sos); break; case 0: /* unknown */ case 2: /* partial write */ case 4: /* full line write */ case 5: /* implicit or explicit write-back operation */ case 6: /* snoop probe */ case 7: /* incoming or outgoing ptc.g */ case 8: /* write coalescing transactions */ case 10: /* I/O space write */ case 11: /* inter-processor interrupt message(IPI) */ case 12: /* interrupt acknowledge or external task priority cycle */ default: break; } } else if (psp->cc && !psp->bc) { /* Cache error */ status = recover_from_read_error(slidx, peidx, pbci, sos); } return status; } /* * recover_from_tlb_check * @peidx: pointer of index of processor error section * * Return value: * 1 on Success / 0 on Failure */ static int recover_from_tlb_check(peidx_table_t *peidx) { sal_log_mod_error_info_t *smei; pal_tlb_check_info_t *ptci; smei = (sal_log_mod_error_info_t *)peidx_tlb_check(peidx, 0); ptci = (pal_tlb_check_info_t *)&(smei->check_info); /* * Look for signature of a duplicate TLB DTC entry, which is * a SW bug and always fatal. */ if (ptci->op == PAL_TLB_CHECK_OP_PURGE && !(ptci->itr || ptci->dtc || ptci->itc)) return fatal_mca("Duplicate TLB entry"); return mca_recovered("TLB check recovered"); } /** * recover_from_processor_error * @platform: whether there are some platform error section or not * @slidx: pointer of index of SAL error record * @peidx: pointer of index of processor error section * @pbci: pointer of pal_bus_check_info * @sos: pointer to hand off struct between SAL and OS * * Return value: * 1 on Success / 0 on Failure */ static int recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci, struct ia64_sal_os_state *sos) { pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx); /* * Processor recovery status must key off of the PAL recovery * status in the Processor State Parameter. */ /* * The machine check is corrected. */ if (psp->cm == 1) return mca_recovered("machine check is already corrected."); /* * The error was not contained. Software must be reset. */ if (psp->us || psp->ci == 0) return fatal_mca("error not contained"); /* * Look for recoverable TLB check */ if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) return recover_from_tlb_check(peidx); /* * The cache check and bus check bits have four possible states * cc bc * 1 1 Memory error, attempt recovery * 1 0 Cache error, attempt recovery * 0 1 I/O error, attempt recovery * 0 0 Other error type, not recovered */ if (psp->cc == 0 && (psp->bc == 0 || pbci == NULL)) return fatal_mca("No cache or bus check"); /* * Cannot handle more than one bus check. 
 */
	if (peidx_bus_check_num(peidx) > 1)
		return fatal_mca("Too many bus checks");

	if (pbci->ib)
		return fatal_mca("Internal Bus error");
	if (pbci->eb && pbci->bsi > 0)
		return fatal_mca("External bus check fatal status");

	/*
	 * This is a local MCA and estimated as a recoverable error.
	 */
	if (platform)
		return recover_from_platform_error(slidx, peidx, pbci, sos);

	/*
	 * On account of strange SAL error record, we cannot recover.
	 */
	return fatal_mca("Strange SAL record");
}

/**
 * mca_try_to_recover - Try to recover from MCA
 * @rec:	pointer to a SAL error record
 * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */

static int
mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
{
	int platform_err;
	int n_proc_err;
	slidx_table_t slidx;
	peidx_table_t peidx;
	pal_bus_check_info_t pbci;

	/* Make index of SAL error record */
	platform_err = mca_make_slidx(rec, &slidx);

	/* Count processor error sections */
	n_proc_err = slidx_count(&slidx, proc_err);

	/* Now, OS can recover when there is one processor error section */
	if (n_proc_err > 1)
		return fatal_mca("Too Many Errors");
	else if (n_proc_err == 0)
		/* Weird SAL record ... We can't do anything */
		return fatal_mca("Weird SAL record");

	/* Make index of processor error section */
	mca_make_peidx((sal_log_processor_info_t*)
		slidx_first_entry(&slidx.proc_err)->hdr, &peidx);

	/* Extract Processor BUS_CHECK[0] */
	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);

	/* Check whether MCA is global or not */
	if (is_mca_global(&peidx, &pbci, sos))
		return fatal_mca("global MCA");

	/* Try to recover a processor error */
	return recover_from_processor_error(platform_err, &slidx, &peidx,
					    &pbci, sos);
}

/*
 * =============================================================================
 */

int __init mca_external_handler_init(void)
{
	if (init_record_index_pools())
		return -ENOMEM;

	/* register external mca handlers */
	if (ia64_reg_MCA_extension(mca_try_to_recover)) {
		printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
		kfree(slidx_pool.buffer);
		return -EFAULT;
	}
	return 0;
}

void __exit mca_external_handler_exit(void)
{
	/* unregister external mca handlers */
	ia64_unreg_MCA_extension();
	kfree(slidx_pool.buffer);
}

module_init(mca_external_handler_init);
module_exit(mca_external_handler_exit);

module_param(sal_rec_max, int, 0644);
MODULE_PARM_DESC(sal_rec_max, "Max size of SAL error record");

MODULE_DESCRIPTION("ia64 platform dependent mca handler driver");
MODULE_LICENSE("GPL");
linux-master
arch/ia64/kernel/mca_drv.c
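As a reading aid, here is a minimal userspace sketch (not part of the kernel source) of the cc/bc decision table described in recover_from_processor_error() above; classify_check() and the enum names are invented for the example.

/*
 * Illustrative sketch only: model the cache-check/bus-check table from the
 * comment in recover_from_processor_error():
 *   cc=1 bc=1 memory error, cc=1 bc=0 cache error,
 *   cc=0 bc=1 I/O error,    cc=0 bc=0 other error.
 */
#include <stdio.h>

enum check_class { CHECK_MEMORY, CHECK_CACHE, CHECK_IO, CHECK_OTHER };

static enum check_class classify_check(int cc, int bc)
{
	if (cc && bc)
		return CHECK_MEMORY;
	if (cc)
		return CHECK_CACHE;
	if (bc)
		return CHECK_IO;
	return CHECK_OTHER;
}

int main(void)
{
	static const char *names[] = { "memory", "cache", "I/O", "other" };

	for (int cc = 0; cc <= 1; cc++)
		for (int bc = 0; bc <= 1; bc++)
			printf("cc=%d bc=%d -> %s error\n",
			       cc, bc, names[classify_check(cc, bc)]);
	return 0;
}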
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * ia64 kernel NUMA specific stuff
 *
 * Copyright (C) 2002 Erich Focht <[email protected]>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *	  Jesse Barnes <[email protected]>
 */
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/smp.h>

u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);

cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
EXPORT_SYMBOL(node_to_cpu_mask);

void map_cpu_to_node(int cpu, int nid)
{
	int oldnid;

	if (nid < 0) { /* just initialize by zero */
		cpu_to_node_map[cpu] = 0;
		return;
	}
	/* sanity check first */
	oldnid = cpu_to_node_map[cpu];
	if (cpumask_test_cpu(cpu, &node_to_cpu_mask[oldnid])) {
		return; /* nothing to do */
	}
	/* we don't have cpu-driven node hot add yet...
	   In usual case, node is created from SRAT at boot time. */
	if (!node_online(nid))
		nid = first_online_node;
	cpu_to_node_map[cpu] = nid;
	cpumask_set_cpu(cpu, &node_to_cpu_mask[nid]);
	return;
}

void unmap_cpu_from_node(int cpu, int nid)
{
	WARN_ON(!cpumask_test_cpu(cpu, &node_to_cpu_mask[nid]));
	WARN_ON(cpu_to_node_map[cpu] != nid);
	cpu_to_node_map[cpu] = 0;
	cpumask_clear_cpu(cpu, &node_to_cpu_mask[nid]);
}

/**
 * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
 *
 * Build cpu to node mapping and initialize the per node cpu masks using
 * info from the node_cpuid array handed to us by ACPI.
 */
void __init build_cpu_to_node_map(void)
{
	int cpu, i, node;

	for(node=0; node < MAX_NUMNODES; node++)
		cpumask_clear(&node_to_cpu_mask[node]);

	for_each_possible_early_cpu(cpu) {
		node = NUMA_NO_NODE;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		map_cpu_to_node(cpu, node);
	}
}
linux-master
arch/ia64/kernel/numa.c
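A minimal userspace sketch of the bookkeeping numa.c maintains above, assuming toy sizes and a plain 64-bit mask in place of cpumask_t; all names here are example-only.

/*
 * Illustrative sketch: a cpu->node array plus a per-node CPU bitmask,
 * mirroring cpu_to_node_map[] and node_to_cpu_mask[]. Sizes and the
 * uint64_t mask type are assumptions for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_NR_CPUS	8
#define EX_MAX_NODES	2

static int cpu_to_node[EX_NR_CPUS];
static uint64_t node_cpu_mask[EX_MAX_NODES];	/* bit i set == CPU i on that node */

static void ex_map_cpu_to_node(int cpu, int nid)
{
	int oldnid = cpu_to_node[cpu];

	if (node_cpu_mask[oldnid] & (1ULL << cpu))
		return;				/* already mapped, nothing to do */
	cpu_to_node[cpu] = nid;
	node_cpu_mask[nid] |= 1ULL << cpu;
}

int main(void)
{
	for (int cpu = 0; cpu < EX_NR_CPUS; cpu++)
		ex_map_cpu_to_node(cpu, cpu % EX_MAX_NODES);	/* round-robin for the demo */

	for (int nid = 0; nid < EX_MAX_NODES; nid++)
		printf("node %d mask 0x%llx\n", nid,
		       (unsigned long long)node_cpu_mask[nid]);
	return 0;
}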
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/ia64/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>

static void
ia64_do_save_stack(struct unw_frame_info *info, void *arg)
{
	struct stack_trace *trace = arg;
	unsigned long ip;
	int skip = trace->skip;

	trace->nr_entries = 0;
	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (skip == 0) {
			trace->entries[trace->nr_entries++] = ip;
			if (trace->nr_entries == trace->max_entries)
				break;
		} else
			skip--;
	} while (unw_unwind(info) >= 0);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	unw_init_running(ia64_do_save_stack, trace);
}
EXPORT_SYMBOL(save_stack_trace);
linux-master
arch/ia64/kernel/stacktrace.c
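A stand-alone sketch of the skip/max collection pattern used by ia64_do_save_stack() above, fed by a synthetic list of return addresses instead of the real unwinder; struct ex_trace and ex_save_stack() are illustrative names only.

/*
 * Illustrative sketch: drop the first `skip` frames, then collect return
 * addresses until the buffer is full or the list ends.
 */
#include <stdio.h>
#include <stddef.h>

struct ex_trace {
	unsigned long *entries;
	unsigned int max_entries;
	unsigned int nr_entries;
	unsigned int skip;
};

static void ex_save_stack(struct ex_trace *trace,
			  const unsigned long *ips, size_t n)
{
	unsigned int skip = trace->skip;

	trace->nr_entries = 0;
	for (size_t i = 0; i < n && ips[i] != 0; i++) {
		if (skip) {		/* drop the innermost frames first */
			skip--;
			continue;
		}
		trace->entries[trace->nr_entries++] = ips[i];
		if (trace->nr_entries == trace->max_entries)
			break;
	}
}

int main(void)
{
	unsigned long buf[4];
	unsigned long ips[] = { 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0 };
	struct ex_trace trace = { buf, 4, 0, 1 };	/* skip one frame */

	ex_save_stack(&trace, ips, sizeof(ips) / sizeof(ips[0]));
	for (unsigned int i = 0; i < trace.nr_entries; i++)
		printf("frame %u: 0x%lx\n", i, trace.entries[i]);
	return 0;
}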
// SPDX-License-Identifier: GPL-2.0-only /* * palinfo.c * * Prints processor specific information reported by PAL. * This code is based on specification of PAL as of the * Intel IA-64 Architecture Software Developer's Manual v1.0. * * * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co * Stephane Eranian <[email protected]> * Copyright (C) 2004 Intel Corporation * Ashok Raj <[email protected]> * * 05/26/2000 S.Eranian initial release * 08/21/2000 S.Eranian updated to July 2000 PAL specs * 02/05/2001 S.Eranian fixed module support * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes * 03/24/2004 Ashok Raj updated to work with CPU Hotplug * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec */ #include <linux/types.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/efi.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <asm/pal.h> #include <asm/sal.h> #include <asm/page.h> #include <asm/processor.h> #include <linux/smp.h> MODULE_AUTHOR("Stephane Eranian <[email protected]>"); MODULE_DESCRIPTION("/proc interface to IA-64 PAL"); MODULE_LICENSE("GPL"); #define PALINFO_VERSION "0.5" typedef int (*palinfo_func_t)(struct seq_file *); typedef struct { const char *name; /* name of the proc entry */ palinfo_func_t proc_read; /* function to call for reading */ struct proc_dir_entry *entry; /* registered entry (removal) */ } palinfo_entry_t; /* * A bunch of string array to get pretty printing */ static const char *cache_types[] = { "", /* not used */ "Instruction", "Data", "Data/Instruction" /* unified */ }; static const char *cache_mattrib[]={ "WriteThrough", "WriteBack", "", /* reserved */ "" /* reserved */ }; static const char *cache_st_hints[]={ "Temporal, level 1", "Reserved", "Reserved", "Non-temporal, all levels", "Reserved", "Reserved", "Reserved", "Reserved" }; static const char *cache_ld_hints[]={ "Temporal, level 1", "Non-temporal, level 1", "Reserved", "Non-temporal, all levels", "Reserved", "Reserved", "Reserved", "Reserved" }; static const char *rse_hints[]={ "enforced lazy", "eager stores", "eager loads", "eager loads and stores" }; #define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints) static const char *mem_attrib[]={ "WB", /* 000 */ "SW", /* 001 */ "010", /* 010 */ "011", /* 011 */ "UC", /* 100 */ "UCE", /* 101 */ "WC", /* 110 */ "NaTPage" /* 111 */ }; /* * Take a 64bit vector and produces a string such that * if bit n is set then 2^n in clear text is generated. The adjustment * to the right unit is also done. * * Input: * - a pointer to a buffer to hold the string * - a 64-bit vector * Output: * - a pointer to the end of the buffer * */ static void bitvector_process(struct seq_file *m, u64 vector) { int i,j; static const char *units[]={ "", "K", "M", "G", "T" }; for (i=0, j=0; i < 64; i++ , j=i/10) { if (vector & 0x1) seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]); vector >>= 1; } } /* * Take a 64bit vector and produces a string such that * if bit n is set then register n is present. The function * takes into account consecutive registers and prints out ranges. 
* * Input: * - a pointer to a buffer to hold the string * - a 64-bit vector * Ouput: * - a pointer to the end of the buffer * */ static void bitregister_process(struct seq_file *m, u64 *reg_info, int max) { int i, begin, skip = 0; u64 value = reg_info[0]; value >>= i = begin = ffs(value) - 1; for(; i < max; i++ ) { if (i != 0 && (i%64) == 0) value = *++reg_info; if ((value & 0x1) == 0 && skip == 0) { if (begin <= i - 2) seq_printf(m, "%d-%d ", begin, i-1); else seq_printf(m, "%d ", i-1); skip = 1; begin = -1; } else if ((value & 0x1) && skip == 1) { skip = 0; begin = i; } value >>=1; } if (begin > -1) { if (begin < 127) seq_printf(m, "%d-127", begin); else seq_puts(m, "127"); } } static int power_info(struct seq_file *m) { s64 status; u64 halt_info_buffer[8]; pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer; int i; status = ia64_pal_halt_info(halt_info); if (status != 0) return 0; for (i=0; i < 8 ; i++ ) { if (halt_info[i].pal_power_mgmt_info_s.im == 1) { seq_printf(m, "Power level %d:\n" "\tentry_latency : %d cycles\n" "\texit_latency : %d cycles\n" "\tpower consumption : %d mW\n" "\tCache+TLB coherency : %s\n", i, halt_info[i].pal_power_mgmt_info_s.entry_latency, halt_info[i].pal_power_mgmt_info_s.exit_latency, halt_info[i].pal_power_mgmt_info_s.power_consumption, halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No"); } else { seq_printf(m,"Power level %d: not implemented\n", i); } } return 0; } static int cache_info(struct seq_file *m) { unsigned long i, levels, unique_caches; pal_cache_config_info_t cci; int j, k; long status; if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) { printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status); return 0; } seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches); for (i=0; i < levels; i++) { for (j=2; j >0 ; j--) { /* even without unification some level may not be present */ if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) continue; seq_printf(m, "%s Cache level %lu:\n" "\tSize : %u bytes\n" "\tAttributes : ", cache_types[j+cci.pcci_unified], i+1, cci.pcci_cache_size); if (cci.pcci_unified) seq_puts(m, "Unified "); seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]); seq_printf(m, "\tAssociativity : %d\n" "\tLine size : %d bytes\n" "\tStride : %d bytes\n", cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride); if (j == 1) seq_puts(m, "\tStore latency : N/A\n"); else seq_printf(m, "\tStore latency : %d cycle(s)\n", cci.pcci_st_latency); seq_printf(m, "\tLoad latency : %d cycle(s)\n" "\tStore hints : ", cci.pcci_ld_latency); for(k=0; k < 8; k++ ) { if ( cci.pcci_st_hints & 0x1) seq_printf(m, "[%s]", cache_st_hints[k]); cci.pcci_st_hints >>=1; } seq_puts(m, "\n\tLoad hints : "); for(k=0; k < 8; k++ ) { if (cci.pcci_ld_hints & 0x1) seq_printf(m, "[%s]", cache_ld_hints[k]); cci.pcci_ld_hints >>=1; } seq_printf(m, "\n\tAlias boundary : %d byte(s)\n" "\tTag LSB : %d\n" "\tTag MSB : %d\n", 1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb, cci.pcci_tag_msb); /* when unified, data(j=2) is enough */ if (cci.pcci_unified) break; } } return 0; } static int vm_info(struct seq_file *m) { u64 tr_pages =0, vw_pages=0, tc_pages; u64 attrib; pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; pal_tc_info_u_t tc_info; ia64_ptce_info_t ptce; const char *sep; int i, j; long status; if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); } else { seq_printf(m, "Physical Address Space : %d bits\n" "Virtual 
Address Space : %d bits\n" "Protection Key Registers(PKR) : %d\n" "Implemented bits in PKR.key : %d\n" "Hash Tag ID : 0x%x\n" "Size of RR.rid : %d\n" "Max Purges : ", vm_info_1.pal_vm_info_1_s.phys_add_size, vm_info_2.pal_vm_info_2_s.impl_va_msb+1, vm_info_1.pal_vm_info_1_s.max_pkr+1, vm_info_1.pal_vm_info_1_s.key_size, vm_info_1.pal_vm_info_1_s.hash_tag_id, vm_info_2.pal_vm_info_2_s.rid_size); if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES) seq_puts(m, "unlimited\n"); else seq_printf(m, "%d\n", vm_info_2.pal_vm_info_2_s.max_purges ? vm_info_2.pal_vm_info_2_s.max_purges : 1); } if (ia64_pal_mem_attrib(&attrib) == 0) { seq_puts(m, "Supported memory attributes : "); sep = ""; for (i = 0; i < 8; i++) { if (attrib & (1 << i)) { seq_printf(m, "%s%s", sep, mem_attrib[i]); sep = ", "; } } seq_putc(m, '\n'); } if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) { printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status); } else { seq_printf(m, "\nTLB walker : %simplemented\n" "Number of DTR : %d\n" "Number of ITR : %d\n" "TLB insertable page sizes : ", vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, vm_info_1.pal_vm_info_1_s.max_itr_entry+1); bitvector_process(m, tr_pages); seq_puts(m, "\nTLB purgeable page sizes : "); bitvector_process(m, vw_pages); } if ((status = ia64_get_ptce(&ptce)) != 0) { printk(KERN_ERR "ia64_get_ptce=%ld\n", status); } else { seq_printf(m, "\nPurge base address : 0x%016lx\n" "Purge outer loop count : %d\n" "Purge inner loop count : %d\n" "Purge outer loop stride : %d\n" "Purge inner loop stride : %d\n", ptce.base, ptce.count[0], ptce.count[1], ptce.stride[0], ptce.stride[1]); seq_printf(m, "TC Levels : %d\n" "Unique TC(s) : %d\n", vm_info_1.pal_vm_info_1_s.num_tc_levels, vm_info_1.pal_vm_info_1_s.max_unique_tcs); for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) { for (j=2; j>0 ; j--) { tc_pages = 0; /* just in case */ /* even without unification, some levels may not be present */ if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) continue; seq_printf(m, "\n%s Translation Cache Level %d:\n" "\tHash sets : %d\n" "\tAssociativity : %d\n" "\tNumber of entries : %d\n" "\tFlags : ", cache_types[j+tc_info.tc_unified], i+1, tc_info.tc_num_sets, tc_info.tc_associativity, tc_info.tc_num_entries); if (tc_info.tc_pf) seq_puts(m, "PreferredPageSizeOptimized "); if (tc_info.tc_unified) seq_puts(m, "Unified "); if (tc_info.tc_reduce_tr) seq_puts(m, "TCReduction"); seq_puts(m, "\n\tSupported page sizes: "); bitvector_process(m, tc_pages); /* when unified date (j=2) is enough */ if (tc_info.tc_unified) break; } } } seq_putc(m, '\n'); return 0; } static int register_info(struct seq_file *m) { u64 reg_info[2]; u64 info; unsigned long phys_stacked; pal_hints_u_t hints; unsigned long iregs, dregs; static const char * const info_type[] = { "Implemented AR(s)", "AR(s) with read side-effects", "Implemented CR(s)", "CR(s) with read side-effects", }; for(info=0; info < 4; info++) { if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0; seq_printf(m, "%-32s : ", info_type[info]); bitregister_process(m, reg_info, 128); seq_putc(m, '\n'); } if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) seq_printf(m, "RSE stacked physical registers : %ld\n" "RSE load/store hints : %ld (%s)\n", phys_stacked, hints.ph_data, hints.ph_data < RSE_HINTS_COUNT ? 
rse_hints[hints.ph_data]: "(??)"); if (ia64_pal_debug_info(&iregs, &dregs)) return 0; seq_printf(m, "Instruction debug register pairs : %ld\n" "Data debug register pairs : %ld\n", iregs, dregs); return 0; } static const char *const proc_features_0[]={ /* Feature set 0 */ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, "Unimplemented instruction address fault", "INIT, PMI, and LINT pins", "Simple unimplemented instr addresses", "Variable P-state performance", "Virtual machine features implemented", "XIP,XPSR,XFS implemented", "XR1-XR3 implemented", "Disable dynamic predicate prediction", "Disable processor physical number", "Disable dynamic data cache prefetch", "Disable dynamic inst cache prefetch", "Disable dynamic branch prediction", NULL, NULL, NULL, NULL, "Disable P-states", "Enable MCA on Data Poisoning", "Enable vmsw instruction", "Enable extern environmental notification", "Disable BINIT on processor time-out", "Disable dynamic power management (DPM)", "Disable coherency", "Disable cache", "Enable CMCI promotion", "Enable MCA to BINIT promotion", "Enable MCA promotion", "Enable BERR promotion" }; static const char *const proc_features_16[]={ /* Feature set 16 */ "Disable ETM", "Enable ETM", "Enable MCA on half-way timer", "Enable snoop WC", NULL, "Enable Fast Deferral", "Disable MCA on memory aliasing", "Enable RSB", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "DP system processor", "Low Voltage", "HT supported", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }; static const char *const *const proc_features[]={ proc_features_0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, proc_features_16, NULL, NULL, NULL, NULL, }; static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control, unsigned long set) { const char *const *vf, *const *v; int i; vf = v = proc_features[set]; for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) { if (!(control)) /* No remaining bits set */ break; if (!(avail & 0x1)) /* Print only bits that are available */ continue; if (vf) v = vf + i; if ( v && *v ) { seq_printf(m, "%-40s : %s %s\n", *v, avail & 0x1 ? (status & 0x1 ? "On " : "Off"): "", avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): ""); } else { seq_printf(m, "Feature set %2ld bit %2d\t\t\t" " : %s %s\n", set, i, avail & 0x1 ? (status & 0x1 ? "On " : "Off"): "", avail & 0x1 ? (control & 0x1 ? 
"Ctrl" : "NoCtrl"): ""); } } } static int processor_info(struct seq_file *m) { u64 avail=1, status=1, control=1, feature_set=0; s64 ret; do { ret = ia64_pal_proc_get_features(&avail, &status, &control, feature_set); if (ret < 0) return 0; if (ret == 1) { feature_set++; continue; } feature_set_info(m, avail, status, control, feature_set); feature_set++; } while(1); return 0; } static const char *const bus_features[]={ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL, "Request Bus Parking", "Bus Lock Mask", "Enable Half Transfer", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Enable Cache Line Repl. Shared", "Enable Cache Line Repl. Exclusive", "Disable Transaction Queuing", "Disable Response Error Checking", "Disable Bus Error Checking", "Disable Bus Requester Internal Error Signalling", "Disable Bus Requester Error Signalling", "Disable Bus Initialization Event Checking", "Disable Bus Initialization Event Signalling", "Disable Bus Address Error Checking", "Disable Bus Address Error Signalling", "Disable Bus Data Error Checking" }; static int bus_info(struct seq_file *m) { const char *const *v = bus_features; pal_bus_features_u_t av, st, ct; u64 avail, status, control; int i; s64 ret; if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0; avail = av.pal_bus_features_val; status = st.pal_bus_features_val; control = ct.pal_bus_features_val; for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) { if ( ! *v ) continue; seq_printf(m, "%-48s : %s%s %s\n", *v, avail & 0x1 ? "" : "NotImpl", avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "", avail & 0x1 ? (control & 0x1 ? 
"Ctrl" : "NoCtrl"): ""); } return 0; } static int version_info(struct seq_file *m) { pal_version_u_t min_ver, cur_ver; if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0; seq_printf(m, "PAL_vendor : 0x%02x (min=0x%02x)\n" "PAL_A : %02x.%02x (min=%02x.%02x)\n" "PAL_B : %02x.%02x (min=%02x.%02x)\n", cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor, cur_ver.pal_version_s.pv_pal_a_model, cur_ver.pal_version_s.pv_pal_a_rev, min_ver.pal_version_s.pv_pal_a_model, min_ver.pal_version_s.pv_pal_a_rev, cur_ver.pal_version_s.pv_pal_b_model, cur_ver.pal_version_s.pv_pal_b_rev, min_ver.pal_version_s.pv_pal_b_model, min_ver.pal_version_s.pv_pal_b_rev); return 0; } static int frequency_info(struct seq_file *m) { struct pal_freq_ratio proc, itc, bus; unsigned long base; if (ia64_pal_freq_base(&base) == -1) seq_puts(m, "Output clock : not implemented\n"); else seq_printf(m, "Output clock : %ld ticks/s\n", base); if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; seq_printf(m, "Processor/Clock ratio : %d/%d\n" "Bus/Clock ratio : %d/%d\n" "ITC/Clock ratio : %d/%d\n", proc.num, proc.den, bus.num, bus.den, itc.num, itc.den); return 0; } static int tr_info(struct seq_file *m) { long status; pal_tr_valid_u_t tr_valid; u64 tr_buffer[4]; pal_vm_info_1_u_t vm_info_1; pal_vm_info_2_u_t vm_info_2; unsigned long i, j; unsigned long max[3], pgm; struct ifa_reg { unsigned long valid:1; unsigned long ig:11; unsigned long vpn:52; } *ifa_reg; struct itir_reg { unsigned long rv1:2; unsigned long ps:6; unsigned long key:24; unsigned long rv2:32; } *itir_reg; struct gr_reg { unsigned long p:1; unsigned long rv1:1; unsigned long ma:3; unsigned long a:1; unsigned long d:1; unsigned long pl:2; unsigned long ar:3; unsigned long ppn:38; unsigned long rv2:2; unsigned long ed:1; unsigned long ig:11; } *gr_reg; struct rid_reg { unsigned long ig1:1; unsigned long rv1:1; unsigned long ig2:6; unsigned long rid:24; unsigned long rv2:32; } *rid_reg; if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) { printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status); return 0; } max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1; for (i=0; i < 2; i++ ) { for (j=0; j < max[i]; j++) { status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid); if (status != 0) { printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n", i, j, status); continue; } ifa_reg = (struct ifa_reg *)&tr_buffer[2]; if (ifa_reg->valid == 0) continue; gr_reg = (struct gr_reg *)tr_buffer; itir_reg = (struct itir_reg *)&tr_buffer[1]; rid_reg = (struct rid_reg *)&tr_buffer[3]; pgm = -1 << (itir_reg->ps - 12); seq_printf(m, "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" "\tppn : 0x%lx\n" "\tvpn : 0x%lx\n" "\tps : ", "ID"[i], j, tr_valid.pal_tr_valid_s.access_rights_valid, tr_valid.pal_tr_valid_s.priv_level_valid, tr_valid.pal_tr_valid_s.dirty_bit_valid, tr_valid.pal_tr_valid_s.mem_attr_valid, (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); bitvector_process(m, 1<< itir_reg->ps); seq_printf(m, "\n\tpl : %d\n" "\tar : %d\n" "\trid : %x\n" "\tp : %d\n" "\tma : %d\n" "\td : %d\n", gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, gr_reg->d); } } return 0; } /* * List {name,function} pairs for every entry in /proc/palinfo/cpu* */ static const palinfo_entry_t palinfo_entries[]={ { "version_info", version_info, }, { "vm_info", vm_info, }, { "cache_info", cache_info, }, { "power_info", power_info, }, { "register_info", register_info, }, { "processor_info", processor_info, }, { 
"frequency_info", frequency_info, }, { "bus_info", bus_info }, { "tr_info", tr_info, } }; #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) static struct proc_dir_entry *palinfo_dir; /* * This data structure is used to pass which cpu,function is being requested * It must fit in a 64bit quantity to be passed to the proc callback routine * * In SMP mode, when we get a request for another CPU, we must call that * other CPU using IPI and wait for the result before returning. */ typedef union { u64 value; struct { unsigned req_cpu: 32; /* for which CPU this info is */ unsigned func_id: 32; /* which function is requested */ } pal_func_cpu; } pal_func_cpu_u_t; #define req_cpu pal_func_cpu.req_cpu #define func_id pal_func_cpu.func_id #ifdef CONFIG_SMP /* * used to hold information about final function to call */ typedef struct { palinfo_func_t func; /* pointer to function to call */ struct seq_file *m; /* buffer to store results */ int ret; /* return value from call */ } palinfo_smp_data_t; /* * this function does the actual final call and he called * from the smp code, i.e., this is the palinfo callback routine */ static void palinfo_smp_call(void *info) { palinfo_smp_data_t *data = (palinfo_smp_data_t *)info; data->ret = (*data->func)(data->m); } /* * function called to trigger the IPI, we need to access a remote CPU * Return: * 0 : error or nothing to output * otherwise how many bytes in the "page" buffer were written */ static int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f) { palinfo_smp_data_t ptr; int ret; ptr.func = palinfo_entries[f->func_id].proc_read; ptr.m = m; ptr.ret = 0; /* just in case */ /* will send IPI to other CPU and wait for completion of remote call */ if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) { printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: " "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret); return 0; } return ptr.ret; } #else /* ! CONFIG_SMP */ static int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f) { printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n"); return 0; } #endif /* CONFIG_SMP */ /* * Entry point routine: all calls go through this function */ static int proc_palinfo_show(struct seq_file *m, void *v) { pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private; /* * in SMP mode, we may need to call another CPU to get correct * information. 
PAL, by definition, is processor specific */ if (f->req_cpu == get_cpu()) (*palinfo_entries[f->func_id].proc_read)(m); else palinfo_handle_smp(m, f); put_cpu(); return 0; } static int palinfo_add_proc(unsigned int cpu) { pal_func_cpu_u_t f; struct proc_dir_entry *cpu_dir; int j; char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ sprintf(cpustr, "cpu%d", cpu); cpu_dir = proc_mkdir(cpustr, palinfo_dir); if (!cpu_dir) return -EINVAL; f.req_cpu = cpu; for (j=0; j < NR_PALINFO_ENTRIES; j++) { f.func_id = j; proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir, proc_palinfo_show, (void *)f.value); } return 0; } static int palinfo_del_proc(unsigned int hcpu) { char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ sprintf(cpustr, "cpu%d", hcpu); remove_proc_subtree(cpustr, palinfo_dir); return 0; } static enum cpuhp_state hp_online; static int __init palinfo_init(void) { int i = 0; printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); palinfo_dir = proc_mkdir("pal", NULL); if (!palinfo_dir) return -ENOMEM; i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online", palinfo_add_proc, palinfo_del_proc); if (i < 0) { remove_proc_subtree("pal", NULL); return i; } hp_online = i; return 0; } static void __exit palinfo_exit(void) { cpuhp_remove_state(hp_online); remove_proc_subtree("pal", NULL); } module_init(palinfo_init); module_exit(palinfo_exit);
linux-master
arch/ia64/kernel/palinfo.c
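A userspace re-rendering of palinfo's bitvector_process() above, handy for checking the K/M/G scaling by hand; print_size_bits() is an example name and printf stands in for seq_printf.

/*
 * Illustrative sketch: for every set bit n in a 64-bit mask, print 2^n
 * scaled to K/M/G/T units, as palinfo does for the page-size bitvectors.
 */
#include <stdio.h>
#include <stdint.h>

static void print_size_bits(uint64_t vector)
{
	static const char *units[] = { "", "K", "M", "G", "T" };

	for (int i = 0, j = 0; i < 64; i++, j = i / 10) {
		if (vector & 0x1)
			printf("%d%s ", 1 << (i - j * 10), units[j]);
		vector >>= 1;
	}
	printf("\n");
}

int main(void)
{
	/* e.g. 4K, 16K, 64K, 1M, 4M and 16M pages supported */
	print_size_bits((1UL << 12) | (1UL << 14) | (1UL << 16) |
			(1UL << 20) | (1UL << 22) | (1UL << 24));
	return 0;
}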
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2001-2008 Silicon Graphics, Inc. All rights reserved. * * A simple uncached page allocator using the generic allocator. This * allocator first utilizes the spare (spill) pages found in the EFI * memmap and will then start converting cached pages to uncached ones * at a granule at a time. Node awareness is implemented by having a * pool of pages per node. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/efi.h> #include <linux/nmi.h> #include <linux/genalloc.h> #include <linux/gfp.h> #include <linux/pgtable.h> #include <asm/efi.h> #include <asm/page.h> #include <asm/pal.h> #include <linux/atomic.h> #include <asm/tlbflush.h> struct uncached_pool { struct gen_pool *pool; struct mutex add_chunk_mutex; /* serialize adding a converted chunk */ int nchunks_added; /* #of converted chunks added to pool */ atomic_t status; /* smp called function's return status*/ }; #define MAX_CONVERTED_CHUNKS_PER_NODE 2 struct uncached_pool uncached_pools[MAX_NUMNODES]; static void uncached_ipi_visibility(void *data) { int status; struct uncached_pool *uc_pool = (struct uncached_pool *)data; status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); if ((status != PAL_VISIBILITY_OK) && (status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) atomic_inc(&uc_pool->status); } static void uncached_ipi_mc_drain(void *data) { int status; struct uncached_pool *uc_pool = (struct uncached_pool *)data; status = ia64_pal_mc_drain(); if (status != PAL_STATUS_SUCCESS) atomic_inc(&uc_pool->status); } /* * Add a new chunk of uncached memory pages to the specified pool. * * @pool: pool to add new chunk of uncached memory to * @nid: node id of node to allocate memory from, or -1 * * This is accomplished by first allocating a granule of cached memory pages * and then converting them to uncached memory pages. 
*/ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) { struct page *page; int status, i, nchunks_added = uc_pool->nchunks_added; unsigned long c_addr, uc_addr; if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0) return -1; /* interrupted by a signal */ if (uc_pool->nchunks_added > nchunks_added) { /* someone added a new chunk while we were waiting */ mutex_unlock(&uc_pool->add_chunk_mutex); return 0; } if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) { mutex_unlock(&uc_pool->add_chunk_mutex); return -1; } /* attempt to allocate a granule's worth of cached memory pages */ page = __alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, IA64_GRANULE_SHIFT-PAGE_SHIFT); if (!page) { mutex_unlock(&uc_pool->add_chunk_mutex); return -1; } /* convert the memory pages from cached to uncached */ c_addr = (unsigned long)page_address(page); uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET; /* * There's a small race here where it's possible for someone to * access the page through /dev/mem halfway through the conversion * to uncached - not sure it's really worth bothering about */ for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) SetPageUncached(&page[i]); flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) { atomic_set(&uc_pool->status, 0); smp_call_function(uncached_ipi_visibility, uc_pool, 1); if (atomic_read(&uc_pool->status)) goto failed; } else if (status != PAL_VISIBILITY_OK) goto failed; preempt_disable(); flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE); /* flush the just introduced uncached translation from the TLB */ local_flush_tlb_all(); preempt_enable(); status = ia64_pal_mc_drain(); if (status != PAL_STATUS_SUCCESS) goto failed; atomic_set(&uc_pool->status, 0); smp_call_function(uncached_ipi_mc_drain, uc_pool, 1); if (atomic_read(&uc_pool->status)) goto failed; /* * The chunk of memory pages has been converted to uncached so now we * can add it to the pool. */ status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid); if (status) goto failed; uc_pool->nchunks_added++; mutex_unlock(&uc_pool->add_chunk_mutex); return 0; /* failed to convert or add the chunk so give it back to the kernel */ failed: for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++) ClearPageUncached(&page[i]); free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT); mutex_unlock(&uc_pool->add_chunk_mutex); return -1; } /* * uncached_alloc_page * * @starting_nid: node id of node to start with, or -1 * @n_pages: number of contiguous pages to allocate * * Allocate the specified number of contiguous uncached pages on the * requested node. If not enough contiguous uncached pages are available * on the requested node, roundrobin starting with the next higher node. 
*/ unsigned long uncached_alloc_page(int starting_nid, int n_pages) { unsigned long uc_addr; struct uncached_pool *uc_pool; int nid; if (unlikely(starting_nid >= MAX_NUMNODES)) return 0; if (starting_nid < 0) starting_nid = numa_node_id(); nid = starting_nid; do { if (!node_state(nid, N_HIGH_MEMORY)) continue; uc_pool = &uncached_pools[nid]; if (uc_pool->pool == NULL) continue; do { uc_addr = gen_pool_alloc(uc_pool->pool, n_pages * PAGE_SIZE); if (uc_addr != 0) return uc_addr; } while (uncached_add_chunk(uc_pool, nid) == 0); } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid); return 0; } EXPORT_SYMBOL(uncached_alloc_page); /* * uncached_free_page * * @uc_addr: uncached address of first page to free * @n_pages: number of contiguous pages to free * * Free the specified number of uncached pages. */ void uncached_free_page(unsigned long uc_addr, int n_pages) { int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET); struct gen_pool *pool = uncached_pools[nid].pool; if (unlikely(pool == NULL)) return; if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET) panic("uncached_free_page invalid address %lx\n", uc_addr); gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE); } EXPORT_SYMBOL(uncached_free_page); /* * uncached_build_memmap, * * @uc_start: uncached starting address of a chunk of uncached memory * @uc_end: uncached ending address of a chunk of uncached memory * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc()) * * Called at boot time to build a map of pages that can be used for * memory special operations. */ static int __init uncached_build_memmap(u64 uc_start, u64 uc_end, void *arg) { int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET); struct gen_pool *pool = uncached_pools[nid].pool; size_t size = uc_end - uc_start; touch_softlockup_watchdog(); if (pool != NULL) { memset((char *)uc_start, 0, size); (void) gen_pool_add(pool, uc_start, size, nid); } return 0; } static int __init uncached_init(void) { int nid; for_each_online_node(nid) { uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid); mutex_init(&uncached_pools[nid].add_chunk_mutex); } efi_memmap_walk_uc(uncached_build_memmap, NULL); return 0; } __initcall(uncached_init);
linux-master
arch/ia64/kernel/uncached.c
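A simplified sketch of the node round-robin in uncached_alloc_page() above, with a toy bump allocator standing in for the genalloc pool; the sizes and names are assumptions for illustration.

/*
 * Illustrative sketch: try the starting node's pool first, then walk the
 * remaining nodes until we wrap around to where we started.
 */
#include <stdio.h>

#define EX_MAX_NODES	4
#define EX_POOL_PAGES	2

static int pool_free[EX_MAX_NODES] = { 0, EX_POOL_PAGES, 0, EX_POOL_PAGES };

static long ex_alloc_page(int starting_nid)
{
	int nid = starting_nid;

	do {
		if (pool_free[nid] > 0) {
			pool_free[nid]--;
			return nid;		/* stands in for the uncached address */
		}
	} while ((nid = (nid + 1) % EX_MAX_NODES) != starting_nid);

	return -1;				/* nothing available on any node */
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("allocation %d served from node %ld\n", i, ex_alloc_page(0));
	return 0;
}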
// SPDX-License-Identifier: GPL-2.0-only /* * Extensible SAL Interface (ESI) support routines. * * Copyright (C) 2006 Hewlett-Packard Co * Alex Williamson <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <asm/esi.h> #include <asm/sal.h> MODULE_AUTHOR("Alex Williamson <[email protected]>"); MODULE_DESCRIPTION("Extensible SAL Interface (ESI) support"); MODULE_LICENSE("GPL"); #define MODULE_NAME "esi" enum esi_systab_entry_type { ESI_DESC_ENTRY_POINT = 0 }; /* * Entry type: Size: * 0 48 */ #define ESI_DESC_SIZE(type) "\060"[(unsigned) (type)] typedef struct ia64_esi_desc_entry_point { u8 type; u8 reserved1[15]; u64 esi_proc; u64 gp; efi_guid_t guid; } ia64_esi_desc_entry_point_t; struct pdesc { void *addr; void *gp; }; static struct ia64_sal_systab *esi_systab; extern unsigned long esi_phys; static int __init esi_init (void) { struct ia64_sal_systab *systab; char *p; int i; if (esi_phys == EFI_INVALID_TABLE_ADDR) return -ENODEV; systab = __va(esi_phys); if (strncmp(systab->signature, "ESIT", 4) != 0) { printk(KERN_ERR "bad signature in ESI system table!"); return -ENODEV; } p = (char *) (systab + 1); for (i = 0; i < systab->entry_count; i++) { /* * The first byte of each entry type contains the type * descriptor. */ switch (*p) { case ESI_DESC_ENTRY_POINT: break; default: printk(KERN_WARNING "Unknown table type %d found in " "ESI table, ignoring rest of table\n", *p); return -ENODEV; } p += ESI_DESC_SIZE(*p); } esi_systab = systab; return 0; } int ia64_esi_call (efi_guid_t guid, struct ia64_sal_retval *isrvp, enum esi_proc_type proc_type, u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) { struct ia64_fpreg fr[6]; unsigned long flags = 0; int i; char *p; if (!esi_systab) return -1; p = (char *) (esi_systab + 1); for (i = 0; i < esi_systab->entry_count; i++) { if (*p == ESI_DESC_ENTRY_POINT) { ia64_esi_desc_entry_point_t *esi = (void *)p; if (!efi_guidcmp(guid, esi->guid)) { ia64_sal_handler esi_proc; struct pdesc pdesc; pdesc.addr = __va(esi->esi_proc); pdesc.gp = __va(esi->gp); esi_proc = (ia64_sal_handler) &pdesc; ia64_save_scratch_fpregs(fr); if (proc_type == ESI_PROC_SERIALIZED) spin_lock_irqsave(&sal_lock, flags); else if (proc_type == ESI_PROC_MP_SAFE) local_irq_save(flags); else preempt_disable(); *isrvp = (*esi_proc)(func, arg1, arg2, arg3, arg4, arg5, arg6, arg7); if (proc_type == ESI_PROC_SERIALIZED) spin_unlock_irqrestore(&sal_lock, flags); else if (proc_type == ESI_PROC_MP_SAFE) local_irq_restore(flags); else preempt_enable(); ia64_load_scratch_fpregs(fr); return 0; } } p += ESI_DESC_SIZE(*p); } return -1; } EXPORT_SYMBOL_GPL(ia64_esi_call); int ia64_esi_call_phys (efi_guid_t guid, struct ia64_sal_retval *isrvp, u64 func, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7) { struct ia64_fpreg fr[6]; unsigned long flags; u64 esi_params[8]; char *p; int i; if (!esi_systab) return -1; p = (char *) (esi_systab + 1); for (i = 0; i < esi_systab->entry_count; i++) { if (*p == ESI_DESC_ENTRY_POINT) { ia64_esi_desc_entry_point_t *esi = (void *)p; if (!efi_guidcmp(guid, esi->guid)) { ia64_sal_handler esi_proc; struct pdesc pdesc; pdesc.addr = (void *)esi->esi_proc; pdesc.gp = (void *)esi->gp; esi_proc = (ia64_sal_handler) &pdesc; esi_params[0] = func; esi_params[1] = arg1; esi_params[2] = arg2; esi_params[3] = arg3; esi_params[4] = arg4; esi_params[5] = arg5; esi_params[6] = arg6; esi_params[7] = arg7; ia64_save_scratch_fpregs(fr); spin_lock_irqsave(&sal_lock, 
flags);
				*isrvp = esi_call_phys(esi_proc, esi_params);
				spin_unlock_irqrestore(&sal_lock, flags);
				ia64_load_scratch_fpregs(fr);
				return 0;
			}
		}
		p += ESI_DESC_SIZE(*p);
	}
	return -1;
}
EXPORT_SYMBOL_GPL(ia64_esi_call_phys);

static void __exit esi_exit (void)
{
}

module_init(esi_init);
module_exit(esi_exit);	/* makes module removable... */
linux-master
arch/ia64/kernel/esi.c
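An illustrative sketch of the type-byte descriptor walk esi_init() performs above; the table layout, entry size, and names below are invented for the example.

/*
 * Illustrative sketch: each entry starts with a type byte, and the type
 * determines how far to advance through the packed table.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_TYPE_ENTRY_POINT	0
#define EX_ENTRY_POINT_SIZE	48

static int ex_walk_table(const uint8_t *p, int entry_count)
{
	for (int i = 0; i < entry_count; i++) {
		switch (*p) {
		case EX_TYPE_ENTRY_POINT:
			printf("entry %d: entry point descriptor\n", i);
			p += EX_ENTRY_POINT_SIZE;
			break;
		default:
			printf("entry %d: unknown type %u, stopping\n", i, *p);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t table[2 * EX_ENTRY_POINT_SIZE] = { 0 };	/* two entry-point descriptors */

	return ex_walk_table(table, 2);
}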
// SPDX-License-Identifier: GPL-2.0 /* * Architecture-specific signal handling support. * * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <[email protected]> * * Derived from i386 and Alpha versions. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/unistd.h> #include <linux/wait.h> #include <asm/intrinsics.h> #include <linux/uaccess.h> #include <asm/rse.h> #include <asm/sigcontext.h> #include "sigframe.h" #define DEBUG_SIG 0 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */ #if _NSIG_WORDS > 1 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) # define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t)) #else # define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0]) # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif static long restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) { unsigned long ip, flags, nat, um, cfm, rsc; long err; /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; /* restore scratch that always needs gets updated during signal delivery: */ err = __get_user(flags, &sc->sc_flags); err |= __get_user(nat, &sc->sc_nat); err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */ err |= __get_user(cfm, &sc->sc_cfm); err |= __get_user(um, &sc->sc_um); /* user mask */ err |= __get_user(rsc, &sc->sc_ar_rsc); err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat); err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */ err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */ scr->pt.cr_ifs = cfm | (1UL << 63); scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */ /* establish new instruction pointer: */ scr->pt.cr_iip = ip & ~0x3UL; ia64_psr(&scr->pt)->ri = ip & 0x3; scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Restore most scratch-state only when not in syscall. */ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ } if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) { struct ia64_psr *psr = ia64_psr(&scr->pt); err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ preempt_disable(); if (psr->dfh) ia64_drop_fpu(current); else { /* We already own the local fph, otherwise psr->dfh wouldn't be 0. 
*/ __ia64_load_fpu(current->thread.fph); ia64_set_local_fpu_owner(current); } preempt_enable(); } return err; } long ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext __user *sc; sigset_t set; long retval; sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and r10 have already * been setup the way we want them. Indeed, if the signal wasn't delivered while * in a system call, we must not touch r8 or r10 as otherwise user-level state * could be corrupted. */ retval = (long) &ia64_leave_kernel; if (test_thread_flag(TIF_SYSCALL_TRACE) || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. * Thus, the return-value that strace displays for sigreturn is * meaningless. */ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif if (restore_altstack(&sc->sc_stack)) goto give_sigsegv; return retval; give_sigsegv: force_sig(SIGSEGV); return retval; } /* * This does just the minimum required setup of sigcontext. * Specifically, it only installs data that is either not knowable at * the user-level or that gets modified before execution in the * trampoline starts. Everything else is done at the user-level. */ static long setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr) { unsigned long flags = 0, ifs, cfm, nat; long err = 0; ifs = scr->pt.cr_ifs; if (on_sig_stack((unsigned long) sc)) flags |= IA64_SC_FLAG_ONSTACK; if ((ifs & (1UL << 63)) == 0) /* if cr_ifs doesn't have the valid bit set, we got here through a syscall */ flags |= IA64_SC_FLAG_IN_SYSCALL; cfm = ifs & ((1UL << 38) - 1); ia64_flush_fph(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { flags |= IA64_SC_FLAG_FPH_VALID; err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); } nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); err |= __put_user(flags, &sc->sc_flags); err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); err |= __put_user(cfm, &sc->sc_cfm); err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */ err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */ err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */ err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. 
*/ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */ err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */ } return err; } /* * Check whether the register-backing store is already on the signal stack. */ static inline int rbs_on_sig_stack (unsigned long bsp) { return (bsp - current->sas_ss_sp < current->sas_ss_size); } static long setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) { extern char __kernel_sigtramp[]; unsigned long tramp_addr, new_rbs = 0, new_sp; struct sigframe __user *frame; long err; new_sp = scr->pt.r12; tramp_addr = (unsigned long) __kernel_sigtramp; if (ksig->ka.sa.sa_flags & SA_ONSTACK) { int onstack = sas_ss_flags(new_sp); if (onstack == 0) { new_sp = current->sas_ss_sp + current->sas_ss_size; /* * We need to check for the register stack being on the * signal stack separately, because it's switched * separately (memory stack is switched in the kernel, * register stack is switched in the signal trampoline). */ if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) new_rbs = ALIGN(current->sas_ss_sp, sizeof(long)); } else if (onstack == SS_ONSTACK) { unsigned long check_sp; /* * If we are on the alternate signal stack and would * overflow it, don't. Return an always-bogus address * instead so we will die with SIGSEGV. */ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; if (!likely(on_sig_stack(check_sp))) { force_sigsegv(ksig->sig); return 1; } } } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); if (!access_ok(frame, sizeof(*frame))) { force_sigsegv(ksig->sig); return 1; } err = __put_user(ksig->sig, &frame->arg0); err |= __put_user(&frame->info, &frame->arg1); err |= __put_user(&frame->sc, &frame->arg2); err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ err |= __put_user(ksig->ka.sa.sa_handler, &frame->handler); err |= copy_siginfo_to_user(&frame->info, &ksig->info); err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12); err |= setup_sigcontext(&frame->sc, set, scr); if (unlikely(err)) { force_sigsegv(ksig->sig); return 1; } scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ scr->pt.cr_iip = tramp_addr; ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */ /* * Force the interruption function mask to zero. This has no effect when a * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is * ignored), but it has the desirable effect of making it possible to deliver a * signal with an incomplete register frame (which happens when a mandatory RSE * load faults). Furthermore, it has no negative effect on the getting the user's * dirty partition preserved, because that's governed by scr->pt.loadrs. */ scr->pt.cr_ifs = (1UL << 63); /* * Note: this affects only the NaT bits of the scratch regs (the ones saved in * pt_regs), which is exactly what we want. 
*/ scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */ #if DEBUG_SIG printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); #endif return 0; } static long handle_signal (struct ksignal *ksig, struct sigscratch *scr) { int ret = setup_frame(ksig, sigmask_to_save(), scr); if (!ret) signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); return ret; } /* * Note that `init' is a special process: it doesn't get signals it doesn't want to * handle. Thus you cannot kill init even with a SIGKILL even by mistake. */ void ia64_do_signal (struct sigscratch *scr, long in_syscall) { long restart = in_syscall; long errno = scr->pt.r8; struct ksignal ksig; /* * This only loops in the rare cases of handle_signal() failing, in which case we * need to push through a forced SIGSEGV. */ while (1) { if (!get_signal(&ksig)) break; /* * get_signal() may have run a debugger (via notify_parent()) * and the debugger may have modified the state (e.g., to arrange for an * inferior call), thus it's important to check for restarting _after_ * get_signal(). */ if ((long) scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * isn't -1 then r8 doesn't hold an error code and we don't need to * restart the syscall, so we can clear the "restart" flag here. */ restart = 0; if (ksig.sig <= 0) break; if (unlikely(restart)) { switch (errno) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; case ERESTARTSYS: if ((ksig.ka.sa.sa_flags & SA_RESTART) == 0) { scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; } fallthrough; case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ } } /* * Whee! Actually deliver the signal. If the delivery failed, we need to * continue to iterate in this loop so we can deliver the SIGSEGV... */ if (handle_signal(&ksig, scr)) return; } /* Did we come from a system call? */ if (restart) { /* Restart the system call - no handlers present */ if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) { /* * Note: the syscall number is in r15 which is saved in * pt_regs so all we need to do here is adjust ip so that * the "break" instruction gets re-executed. */ ia64_decrement_ip(&scr->pt); if (errno == ERESTART_RESTARTBLOCK) scr->pt.r15 = __NR_restart_syscall; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ restore_saved_sigmask(); }
linux-master
arch/ia64/kernel/signal.c
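A small sketch of the frame-placement arithmetic in setup_frame() above: carve the frame below the current stack pointer and round down to the 16-byte STACK_ALIGN boundary. The struct layout and names are example-only, and ~(align - 1) is used here as the power-of-two equivalent of the kernel's & -STACK_ALIGN.

/*
 * Illustrative sketch of downward-growing, aligned signal-frame placement.
 */
#include <stdio.h>

#define EX_STACK_ALIGN	16UL

struct ex_sigframe {
	unsigned long arg0, arg1, arg2;
	char payload[200];			/* stand-in for siginfo + sigcontext */
};

static unsigned long ex_place_frame(unsigned long sp)
{
	/* subtract the frame size, then round down to the alignment */
	return (sp - sizeof(struct ex_sigframe)) & ~(EX_STACK_ALIGN - 1);
}

int main(void)
{
	unsigned long sp = 0x7fff1234;
	unsigned long frame = ex_place_frame(sp);

	printf("sp=0x%lx frame=0x%lx (size %zu, aligned to %lu)\n",
	       sp, frame, sizeof(struct ex_sigframe), EX_STACK_ALIGN);
	return 0;
}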
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang <[email protected]> * * Generic IA-64 unwind info decoder. * * This file is used both by the Linux kernel and objdump. Please keep * the two copies of this file in sync. * * You need to customize the decoder by defining the following * macros/constants before including this file: * * Types: * unw_word Unsigned integer type with at least 64 bits * * Register names: * UNW_REG_BSP * UNW_REG_BSPSTORE * UNW_REG_FPSR * UNW_REG_LC * UNW_REG_PFS * UNW_REG_PR * UNW_REG_RNAT * UNW_REG_PSP * UNW_REG_RP * UNW_REG_UNAT * * Decoder action macros: * UNW_DEC_BAD_CODE(code) * UNW_DEC_ABI(fmt,abi,context,arg) * UNW_DEC_BR_GR(fmt,brmask,gr,arg) * UNW_DEC_BR_MEM(fmt,brmask,arg) * UNW_DEC_COPY_STATE(fmt,label,arg) * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) * UNW_DEC_FR_MEM(fmt,frmask,arg) * UNW_DEC_GR_GR(fmt,grmask,gr,arg) * UNW_DEC_GR_MEM(fmt,grmask,arg) * UNW_DEC_LABEL_STATE(fmt,label,arg) * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) * UNW_DEC_MEM_STACK_V(fmt,t,arg) * UNW_DEC_PRIUNAT_GR(fmt,r,arg) * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) * UNW_DEC_REG_REG(fmt,src,dst,arg) * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) * UNW_DEC_REG_WHEN(fmt,reg,t,arg) * UNW_DEC_RESTORE(fmt,t,abreg,arg) * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) */ static unw_word unw_decode_uleb128 (unsigned char **dpp) { unsigned shift = 0; unw_word byte, result = 0; unsigned char *bp = *dpp; while (1) { byte = *bp++; result |= (byte & 0x7f) << shift; if ((byte & 0x80) == 0) break; shift += 7; } *dpp = bp; return result; } static unsigned char * unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, abreg; unw_word t, off; byte1 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); if (byte1 & 0x80) UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); abreg = (byte1 & 0x7f); ytreg = byte2; x = (byte1 >> 7) & 1; if ((byte1 & 0x80) == 0 && ytreg == 0) UNW_DEC_RESTORE(X2, t, abreg, arg); else UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg); return dp; } static unsigned char * unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, byte2, abreg, qp; unw_word t, off; byte1 = *dp++; byte2 = *dp++; t = unw_decode_uleb128 (&dp); off = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); if (byte1 & 0x80) UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); else UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); return dp; } static unsigned char * unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg) { unsigned char 
byte1, byte2, byte3, qp, abreg, x, ytreg; unw_word t; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; t = unw_decode_uleb128 (&dp); qp = (byte1 & 0x3f); abreg = (byte2 & 0x7f); x = (byte2 >> 7) & 1; ytreg = byte3; if ((byte2 & 0x80) == 0 && byte3 == 0) UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); else UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); return dp; } static unsigned char * unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg) { int body = (code & 0x20) != 0; unw_word rlen; rlen = (code & 0x1f); UNW_DEC_PROLOGUE(R1, body, rlen, arg); return dp; } static unsigned char * unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg) { unsigned char byte1, mask, grsave; unw_word rlen; byte1 = *dp++; mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); grsave = (byte1 & 0x7f); rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg); return dp; } static unsigned char * unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg) { unw_word rlen; rlen = unw_decode_uleb128 (&dp); UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg); return dp; } static unsigned char * unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg) { unsigned char brmask = (code & 0x1f); UNW_DEC_BR_MEM(P1, brmask, arg); return dp; } static unsigned char * unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg) { if ((code & 0x10) == 0) { unsigned char byte1 = *dp++; UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1), (byte1 & 0x7f), arg); } else if ((code & 0x08) == 0) { unsigned char byte1 = *dp++, r, dst; r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1); dst = (byte1 & 0x7f); switch (r) { case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break; case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break; case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break; case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break; case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break; case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break; case 6: UNW_DEC_RP_BR(P3, dst, arg); break; case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break; case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break; case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break; case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break; case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else if ((code & 0x7) == 0) UNW_DEC_SPILL_MASK(P4, dp, arg); else if ((code & 0x7) == 1) { unw_word grmask, frmask, byte1, byte2, byte3; byte1 = *dp++; byte2 = *dp++; byte3 = *dp++; grmask = ((byte1 >> 4) & 0xf); frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3; UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg); } else UNW_DEC_BAD_CODE(code); return dp; } static unsigned char * unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg) { int gregs = (code & 0x10) != 0; unsigned char mask = (code & 0x0f); if (gregs) UNW_DEC_GR_MEM(P6, mask, arg); else UNW_DEC_FR_MEM(P6, mask, arg); return dp; } static unsigned char * unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg) { unsigned char r, byte1, byte2; unw_word t, size; if ((code & 0x10) == 0) { r = (code & 0xf); t = unw_decode_uleb128 (&dp); switch (r) { case 0: size = unw_decode_uleb128 (&dp); UNW_DEC_MEM_STACK_F(P7, t, size, arg); break; case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break; case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break; case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break; case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break; case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, 
t, arg); break; case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break; case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break; case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break; case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break; case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break; case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break; case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break; case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break; case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } else { switch (code & 0xf) { case 0x0: /* p8 */ { r = *dp++; t = unw_decode_uleb128 (&dp); switch (r) { case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break; case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break; case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break; case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break; case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break; case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break; case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break; case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break; case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break; case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break; case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break; case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break; case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break; case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break; case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break; case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break; case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break; case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break; default: UNW_DEC_BAD_CODE(r); break; } } break; case 0x1: byte1 = *dp++; byte2 = *dp++; UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg); break; case 0xf: /* p10 */ byte1 = *dp++; byte2 = *dp++; UNW_DEC_ABI(P10, byte1, byte2, arg); break; case 0x9: return unw_decode_x1 (dp, code, arg); case 0xa: return unw_decode_x2 (dp, code, arg); case 0xb: return unw_decode_x3 (dp, code, arg); case 0xc: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } } return dp; } static unsigned char * unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg) { unw_word label = (code & 0x1f); if ((code & 0x20) != 0) UNW_DEC_COPY_STATE(B1, label, arg); else UNW_DEC_LABEL_STATE(B1, label, arg); return dp; } static unsigned char * unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg) { unw_word t; t = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg); return dp; } static unsigned char * unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) { unw_word t, ecount, label; if ((code & 0x10) == 0) { t = unw_decode_uleb128 (&dp); ecount = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B3, t, ecount, arg); } else if ((code & 0x07) == 0) { label = unw_decode_uleb128 (&dp); if ((code & 0x08) != 0) UNW_DEC_COPY_STATE(B4, label, arg); else UNW_DEC_LABEL_STATE(B4, label, arg); } else switch (code & 0x7) { case 1: return unw_decode_x1 (dp, code, arg); case 2: return unw_decode_x2 (dp, code, arg); case 3: return unw_decode_x3 (dp, code, arg); case 4: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } return dp; } typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, 
void *); static unw_decoder unw_decode_table[2][8] = { /* prologue table: */ { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_p1, /* 4 */ unw_decode_p2_p5, unw_decode_p6, unw_decode_p7_p10 }, { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_b1, /* 4 */ unw_decode_b1, unw_decode_b2, unw_decode_b3_x4 } }; /* * Decode one descriptor and return address of next descriptor. */ static inline unsigned char * unw_decode (unsigned char *dp, int inside_body, void *arg) { unw_decoder decoder; unsigned char code; code = *dp++; decoder = unw_decode_table[inside_body][code >> 5]; dp = (*decoder) (dp, code, arg); return dp; }
linux-master
arch/ia64/kernel/unwind_decoder.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Kernel Probes (KProbes) * arch/ia64/kernel/kprobes.c * * Copyright (C) IBM Corporation, 2002, 2004 * Copyright (C) Intel Corporation, 2005 * * 2005-Apr Rusty Lynch <[email protected]> and Anil S Keshavamurthy * <[email protected]> adapted from i386 */ #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/preempt.h> #include <linux/extable.h> #include <linux/kdebug.h> #include <linux/pgtable.h> #include <asm/sections.h> #include <asm/exception.h> DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; enum instruction_type {A, I, M, F, B, L, X, u}; static enum instruction_type bundle_encoding[32][3] = { [0x00] = { M, I, I }, [0x01] = { M, I, I }, [0x02] = { M, I, I }, [0x03] = { M, I, I }, [0x04] = { M, L, X }, [0x05] = { M, L, X }, [0x06] = { u, u, u }, [0x07] = { u, u, u }, [0x08] = { M, M, I }, [0x09] = { M, M, I }, [0x0A] = { M, M, I }, [0x0B] = { M, M, I }, [0x0C] = { M, F, I }, [0x0D] = { M, F, I }, [0x0E] = { M, M, F }, [0x0F] = { M, M, F }, [0x10] = { M, I, B }, [0x11] = { M, I, B }, [0x12] = { M, B, B }, [0x13] = { M, B, B }, [0x14] = { u, u, u }, [0x15] = { u, u, u }, [0x16] = { B, B, B }, [0x17] = { B, B, B }, [0x18] = { M, M, B }, [0x19] = { M, M, B }, [0x1A] = { u, u, u }, [0x1B] = { u, u, u }, [0x1C] = { M, F, B }, [0x1D] = { M, F, B }, [0x1E] = { u, u, u }, [0x1F] = { u, u, u }, }; /* Insert a long branch code */ static void __kprobes set_brl_inst(void *from, void *to) { s64 rel = ((s64) to - (s64) from) >> 4; bundle_t *brl; brl = (bundle_t *) ((u64) from & ~0xf); brl->quad0.template = 0x05; /* [MLX](stop) */ brl->quad0.slot0 = NOP_M_INST; /* nop.m 0x0 */ brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2; brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46); /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff); } /* * In this function we check to see if the instruction * is IP relative instruction and update the kprobe * inst flag accordingly */ static void __kprobes update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p) { p->ainsn.inst_flag = 0; p->ainsn.target_br_reg = 0; p->ainsn.slot = slot; /* Check for Break instruction * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) { /* is a break instruction */ p->ainsn.inst_flag |= INST_FLAG_BREAK_INST; return; } if (bundle_encoding[template][slot] == B) { switch (major_opcode) { case INDIRECT_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; case IP_RELATIVE_PREDICT_OPCODE: case IP_RELATIVE_BRANCH_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; break; case IP_RELATIVE_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR; p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } else if (bundle_encoding[template][slot] == X) { switch (major_opcode) { case LONG_CALL_OPCODE: p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG; p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7); break; } } return; } /* * In this function we check to see if the instruction * (qp) cmpx.crel.ctype p1,p2=r2,r3 * on which we are inserting kprobe is cmp 
instruction * with ctype as unc. */ static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst) { cmp_inst_t cmp_inst; uint ctype_unc = 0; if (!((bundle_encoding[template][slot] == I) || (bundle_encoding[template][slot] == M))) goto out; if (!((major_opcode == 0xC) || (major_opcode == 0xD) || (major_opcode == 0xE))) goto out; cmp_inst.l = kprobe_inst; if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) { /* Integer compare - Register Register (A6 type)*/ if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } else if ((cmp_inst.f.x2 == 2)||(cmp_inst.f.x2 == 3)) { /* Integer compare - Immediate Register (A8 type)*/ if ((cmp_inst.f.ta == 0) &&(cmp_inst.f.c == 1)) ctype_unc = 1; } out: return ctype_unc; } /* * In this function we check to see if the instruction * on which we are inserting kprobe is supported. * Returns qp value if supported * Returns -EINVAL if unsupported */ static int __kprobes unsupported_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, unsigned long addr) { int qp; qp = kprobe_inst & 0x3f; if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) { if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on cmp unc " "instruction on slot 1 at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } else if (bundle_encoding[template][slot] == I) { if (major_opcode == 0) { /* * Check for Integer speculation instruction * - Bit 33-35 to be equal to 0x1 */ if (((kprobe_inst >> 33) & 0x7) == 1) { printk(KERN_WARNING "Kprobes on speculation inst at <0x%lx> not supported\n", addr); return -EINVAL; } /* * IP relative mov instruction * - Bit 27-35 to be equal to 0x30 */ if (((kprobe_inst >> 27) & 0x1FF) == 0x30) { printk(KERN_WARNING "Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n", addr); return -EINVAL; } } else if ((major_opcode == 5) && !(kprobe_inst & (0xFUl << 33)) && (kprobe_inst & (0x1UL << 12))) { /* test bit instructions, tbit,tnat,tf * bit 33-36 to be equal to 0 * bit 12 to be equal to 1 */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on test bit " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } } else if (bundle_encoding[template][slot] == B) { if (major_opcode == 7) { /* IP-Relative Predict major code is 7 */ printk(KERN_WARNING "Kprobes on IP-Relative" "Predict is not supported\n"); return -EINVAL; } else if (major_opcode == 2) { /* Indirect Predict, major code is 2 * bit 27-32 to be equal to 10 or 11 */ int x6=(kprobe_inst >> 27) & 0x3F; if ((x6 == 0x10) || (x6 == 0x11)) { printk(KERN_WARNING "Kprobes on " "Indirect Predict is not supported\n"); return -EINVAL; } } } /* kernel does not use float instruction, here for safety kprobe * will judge whether it is fcmp/flass/float approximation instruction */ else if (unlikely(bundle_encoding[template][slot] == F)) { if ((major_opcode == 4 || major_opcode == 5) && (kprobe_inst & (0x1 << 12))) { /* fcmp/fclass unc instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on fcmp/fclass " "instruction on slot at <0x%lx> " "is not supported\n", addr); return -EINVAL; } qp = 0; } if ((major_opcode == 0 || major_opcode == 1) && (kprobe_inst & (0x1UL << 33))) { /* float Approximation instruction */ if (slot == 1 && qp) { printk(KERN_WARNING "Kprobes on float Approx " "instr at <0x%lx> is not supported\n", addr); return -EINVAL; } qp = 0; } } return qp; } /* * In this function we override the bundle with * the break instruction at the 
given slot. */ static void __kprobes prepare_break_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p, int qp) { unsigned long break_inst = BREAK_INST; bundle_t *bundle = &p->opcode.bundle; /* * Copy the original kprobe_inst qualifying predicate(qp) * to the break instruction */ break_inst |= qp; switch (slot) { case 0: bundle->quad0.slot0 = break_inst; break; case 1: bundle->quad0.slot1_p0 = break_inst; bundle->quad1.slot1_p1 = break_inst >> (64-46); break; case 2: bundle->quad1.slot2 = break_inst; break; } /* * Update the instruction flag, so that we can * emulate the instruction properly after we * single step on original instruction */ update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p); } static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot, unsigned long *kprobe_inst, uint *major_opcode) { unsigned long kprobe_inst_p0, kprobe_inst_p1; unsigned int template; template = bundle->quad0.template; switch (slot) { case 0: *major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT); *kprobe_inst = bundle->quad0.slot0; break; case 1: *major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT); kprobe_inst_p0 = bundle->quad0.slot1_p0; kprobe_inst_p1 = bundle->quad1.slot1_p1; *kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46)); break; case 2: *major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT); *kprobe_inst = bundle->quad1.slot2; break; } } /* Returns non-zero if the addr is in the Interrupt Vector Table */ static int __kprobes in_ivt_functions(unsigned long addr) { return (addr >= (unsigned long)__start_ivt_text && addr < (unsigned long)__end_ivt_text); } static int __kprobes valid_kprobe_addr(int template, int slot, unsigned long addr) { if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { printk(KERN_WARNING "Attempting to insert unaligned kprobe " "at 0x%lx\n", addr); return -EINVAL; } if (in_ivt_functions(addr)) { printk(KERN_WARNING "Kprobes can't be inserted inside " "IVT functions at 0x%lx\n", addr); return -EINVAL; } return 0; } static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_add_return(1, &kcb->prev_kprobe_index); kcb->prev_kprobe[i-1].kp = kprobe_running(); kcb->prev_kprobe[i-1].status = kcb->kprobe_status; } static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) { unsigned int i; i = atomic_read(&kcb->prev_kprobe_index); __this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp); kcb->kprobe_status = kcb->prev_kprobe[i-1].status; atomic_sub(1, &kcb->prev_kprobe_index); } static void __kprobes set_current_kprobe(struct kprobe *p, struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, p); } void __kretprobe_trampoline(void) { } int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) { regs->cr_iip = __kretprobe_trampoline_handler(regs, NULL); /* * By returning a non-zero value, we are telling * kprobe_handler() that we don't want the post_handler * to run (and have re-enabled preemption) */ return 1; } void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { ri->ret_addr = (kprobe_opcode_t *)regs->b0; ri->fp = NULL; /* Replace the return addr with trampoline addr */ regs->b0 = (unsigned long)dereference_function_descriptor(__kretprobe_trampoline); } /* Check the instruction in the slot is break */ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot) { unsigned int major_opcode; unsigned int template = 
bundle->quad0.template; unsigned long kprobe_inst; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get Kprobe probe instruction at given slot*/ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); /* For break instruction, * Bits 37:40 Major opcode to be zero * Bits 27:32 X6 to be zero * Bits 32:35 X3 to be zero */ if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) { /* Not a break instruction */ return 0; } /* Is a break instruction */ return 1; } /* * In this function, we check whether the target bundle modifies IP or * it triggers an exception. If so, it cannot be boostable. */ static int __kprobes can_boost(bundle_t *bundle, uint slot, unsigned long bundle_addr) { unsigned int template = bundle->quad0.template; do { if (search_exception_tables(bundle_addr + slot) || __is_ia64_break_inst(bundle, slot)) return 0; /* exception may occur in this bundle*/ } while ((++slot) < 3); template &= 0x1e; if (template >= 0x10 /* including B unit */ || template == 0x04 /* including X unit */ || template == 0x06) /* undefined */ return 0; return 1; } /* Prepare long jump bundle and disables other boosters if need */ static void __kprobes prepare_booster(struct kprobe *p) { unsigned long addr = (unsigned long)p->addr & ~0xFULL; unsigned int slot = (unsigned long)p->addr & 0xf; struct kprobe *other_kp; if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) { set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1); p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE; } /* disables boosters in previous slots */ for (; addr < (unsigned long)p->addr; addr++) { other_kp = get_kprobe((void *)addr); if (other_kp) other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE; } } int __kprobes arch_prepare_kprobe(struct kprobe *p) { unsigned long addr = (unsigned long) p->addr; unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); unsigned long kprobe_inst=0; unsigned int slot = addr & 0xf, template, major_opcode = 0; bundle_t *bundle; int qp; bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle; template = bundle->quad0.template; if(valid_kprobe_addr(template, slot, addr)) return -EINVAL; /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */ if (slot == 1 && bundle_encoding[template][1] == L) slot++; /* Get kprobe_inst and major_opcode from the bundle */ get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode); qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr); if (qp < 0) return -EINVAL; p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) return -ENOMEM; memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t)); memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t)); prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); prepare_booster(p); return 0; } void __kprobes arch_arm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; dest = &((kprobe_opcode_t *)arm_addr)->bundle; src = &p->opcode.bundle; flush_icache_range((unsigned long)p->ainsn.insn, (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t) * MAX_INSN_SIZE); switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_disarm_kprobe(struct kprobe *p) { unsigned long arm_addr; bundle_t *src, *dest; arm_addr = ((unsigned long)p->addr) & ~0xFUL; 
dest = &((kprobe_opcode_t *)arm_addr)->bundle; /* p->ainsn.insn contains the original unaltered kprobe_opcode_t */ src = &p->ainsn.insn->bundle; switch (p->ainsn.slot) { case 0: dest->quad0.slot0 = src->quad0.slot0; break; case 1: dest->quad1.slot1_p1 = src->quad1.slot1_p1; break; case 2: dest->quad1.slot2 = src->quad1.slot2; break; } flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t)); } void __kprobes arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE); p->ainsn.insn = NULL; } } /* * We are resuming execution after a single step fault, so the pt_regs * structure reflects the register state after we executed the instruction * located in the kprobe (p->ainsn.insn->bundle). We still need to adjust * the ip to point back to the original stack address. To set the IP address * to original stack address, handle the case where we need to fixup the * relative IP address and/or fixup branch register. */ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle); unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; unsigned long template; int slot = ((unsigned long)p->addr & 0xf); template = p->ainsn.insn->bundle.quad0.template; if (slot == 1 && bundle_encoding[template][1] == L) slot = 2; if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) { if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) { /* Fix relative IP address */ regs->cr_iip = (regs->cr_iip - bundle_addr) + resume_addr; } if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) { /* * Fix target branch register, software convention is * to use either b0 or b6 or b7, so just checking * only those registers */ switch (p->ainsn.target_br_reg) { case 0: if ((regs->b0 == bundle_addr) || (regs->b0 == bundle_addr + 0x10)) { regs->b0 = (regs->b0 - bundle_addr) + resume_addr; } break; case 6: if ((regs->b6 == bundle_addr) || (regs->b6 == bundle_addr + 0x10)) { regs->b6 = (regs->b6 - bundle_addr) + resume_addr; } break; case 7: if ((regs->b7 == bundle_addr) || (regs->b7 == bundle_addr + 0x10)) { regs->b7 = (regs->b7 - bundle_addr) + resume_addr; } break; } /* end switch */ } goto turn_ss_off; } if (slot == 2) { if (regs->cr_iip == bundle_addr + 0x10) { regs->cr_iip = resume_addr + 0x10; } } else { if (regs->cr_iip == bundle_addr) { regs->cr_iip = resume_addr; } } turn_ss_off: /* Turn off Single Step bit */ ia64_psr(regs)->ss = 0; } static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs) { unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle; unsigned long slot = (unsigned long)p->addr & 0xf; /* single step inline if break instruction */ if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST) regs->cr_iip = (unsigned long)p->addr & ~0xFULL; else regs->cr_iip = bundle_addr & ~0xFULL; if (slot > 2) slot = 0; ia64_psr(regs)->ri = slot; /* turn on single stepping */ ia64_psr(regs)->ss = 1; } static int __kprobes is_ia64_break_inst(struct pt_regs *regs) { unsigned int slot = ia64_psr(regs)->ri; unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip; bundle_t bundle; memcpy(&bundle, kprobe_addr, sizeof(bundle_t)); return __is_ia64_break_inst(&bundle, slot); } static int __kprobes pre_kprobes_handler(struct die_args *args) { struct kprobe *p; int ret = 0; struct pt_regs *regs = args->regs; kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs); struct kprobe_ctlblk *kcb; /* * We don't want to be preempted for the entire * 
duration of kprobe processing */ preempt_disable(); kcb = get_kprobe_ctlblk(); /* Handle recursion cases */ if (kprobe_running()) { p = get_kprobe(addr); if (p) { if ((kcb->kprobe_status == KPROBE_HIT_SS) && (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) { ia64_psr(regs)->ss = 0; goto no_kprobe; } /* We have reentered the pre_kprobe_handler(), since * another probe was hit while within the handler. * We here save the original kprobes variables and * just single step on the instruction of the new probe * without calling any user handlers. */ save_previous_kprobe(kcb); set_current_kprobe(p, kcb); kprobes_inc_nmissed_count(p); prepare_ss(p, regs); kcb->kprobe_status = KPROBE_REENTER; return 1; } else if (!is_ia64_break_inst(regs)) { /* The breakpoint instruction was removed by * another cpu right after we hit, no further * handling of this interrupt is appropriate */ ret = 1; goto no_kprobe; } else { /* Not our break */ goto no_kprobe; } } p = get_kprobe(addr); if (!p) { if (!is_ia64_break_inst(regs)) { /* * The breakpoint instruction was removed right * after we hit it. Another cpu has removed * either a probepoint or a debugger breakpoint * at this address. In either case, no further * handling of this interrupt is appropriate. */ ret = 1; } /* Not one of our break, let kernel handle it */ goto no_kprobe; } set_current_kprobe(p, kcb); kcb->kprobe_status = KPROBE_HIT_ACTIVE; if (p->pre_handler && p->pre_handler(p, regs)) { reset_current_kprobe(); preempt_enable_no_resched(); return 1; } #if !defined(CONFIG_PREEMPTION) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL; /* turn single stepping off */ ia64_psr(regs)->ss = 0; reset_current_kprobe(); preempt_enable_no_resched(); return 1; } #endif prepare_ss(p, regs); kcb->kprobe_status = KPROBE_HIT_SS; return 1; no_kprobe: preempt_enable_no_resched(); return ret; } static int __kprobes post_kprobes_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); if (!cur) return 0; if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { kcb->kprobe_status = KPROBE_HIT_SSDONE; cur->post_handler(cur, regs, 0); } resume_execution(cur, regs); /*Restore back the original saved kprobes variables and continue. */ if (kcb->kprobe_status == KPROBE_REENTER) { restore_previous_kprobe(kcb); goto out; } reset_current_kprobe(); out: preempt_enable_no_resched(); return 1; } int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); switch(kcb->kprobe_status) { case KPROBE_HIT_SS: case KPROBE_REENTER: /* * We are here because the instruction being single * stepped caused a page fault. We reset the current * kprobe and the instruction pointer points back to * the probe address and allow the page fault handler * to continue as a normal page fault. */ regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else reset_current_kprobe(); preempt_enable_no_resched(); break; case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: /* * In case the user-specified fault handler returned * zero, try to fix up. */ if (ia64_done_with_exception(regs)) return 1; /* * Let ia64_do_page_fault() fix it. 
*/ break; default: break; } return 0; } int __kprobes kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; if (args->regs && user_mode(args->regs)) return ret; switch(val) { case DIE_BREAK: /* err is break number from ia64_bad_break() */ if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12) || args->err == 0) if (pre_kprobes_handler(args)) ret = NOTIFY_STOP; break; case DIE_FAULT: /* err is vector number from ia64_fault() */ if (args->err == 36) if (post_kprobes_handler(args->regs)) ret = NOTIFY_STOP; break; default: break; } return ret; } static struct kprobe trampoline_p = { .pre_handler = trampoline_probe_handler }; int __init arch_init_kprobes(void) { trampoline_p.addr = dereference_function_descriptor(__kretprobe_trampoline); return register_kprobe(&trampoline_p); } int __kprobes arch_trampoline_kprobe(struct kprobe *p) { if (p->addr == dereference_function_descriptor(__kretprobe_trampoline)) return 1; return 0; }
linux-master
arch/ia64/kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0-only /* * salinfo.c * * Creates entries in /proc/sal for various system features. * * Copyright (c) 2003, 2006 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2003 Hewlett-Packard Co * Bjorn Helgaas <[email protected]> * * 10/30/2001 [email protected] copied much of Stephane's palinfo * code to create this file * Oct 23 2003 [email protected] * Replace IPI with set_cpus_allowed() to read a record from the required cpu. * Redesign salinfo log processing to separate interrupt and user space * contexts. * Cache the record across multi-block reads from user space. * Support > 64 cpus. * Delete module_exit and MOD_INC/DEC_COUNT, salinfo cannot be a module. * * Jan 28 2004 [email protected] * Periodically check for outstanding MCA or INIT records. * * Dec 5 2004 [email protected] * Standardize which records are cleared automatically. * * Aug 18 2005 [email protected] * mca.c may not pass a buffer, a NULL buffer just indicates that a new * record is available in SAL. * Replace some NR_CPUS by cpus_online, for hotplug cpu. * * Jan 5 2006 [email protected] * Handle hotplug cpus coming online. * Handle hotplug cpus going offline while they still have outstanding records. * Use the cpu_* macros consistently. * Replace the counting semaphore with a mutex and a test if the cpumask is non-empty. * Modify the locking to make the test for "work to do" an atomic operation. */ #include <linux/capability.h> #include <linux/cpu.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/module.h> #include <linux/smp.h> #include <linux/timer.h> #include <linux/vmalloc.h> #include <linux/semaphore.h> #include <asm/sal.h> #include <linux/uaccess.h> MODULE_AUTHOR("Jesse Barnes <[email protected]>"); MODULE_DESCRIPTION("/proc interface to IA-64 SAL features"); MODULE_LICENSE("GPL"); typedef struct { const char *name; /* name of the proc entry */ unsigned long feature; /* feature bit */ struct proc_dir_entry *entry; /* registered entry (removal) */ } salinfo_entry_t; /* * List {name,feature} pairs for every entry in /proc/sal/<feature> * that this module exports */ static const salinfo_entry_t salinfo_entries[]={ { "bus_lock", IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, }, { "irq_redirection", IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, }, { "ipi_redirection", IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, }, { "itc_drift", IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT, }, }; #define NR_SALINFO_ENTRIES ARRAY_SIZE(salinfo_entries) static char *salinfo_log_name[] = { "mca", "init", "cmc", "cpe", }; static struct proc_dir_entry *salinfo_proc_entries[ ARRAY_SIZE(salinfo_entries) + /* /proc/sal/bus_lock */ ARRAY_SIZE(salinfo_log_name) + /* /proc/sal/{mca,...} */ (2 * ARRAY_SIZE(salinfo_log_name)) + /* /proc/sal/mca/{event,data} */ 1]; /* /proc/sal */ /* Some records we get ourselves, some are accessed as saved data in buffers * that are owned by mca.c. */ struct salinfo_data_saved { u8* buffer; u64 size; u64 id; int cpu; }; /* State transitions. Actions are :- * Write "read <cpunum>" to the data file. * Write "clear <cpunum>" to the data file. * Write "oemdata <cpunum> <offset> to the data file. * Read from the data file. * Close the data file. * * Start state is NO_DATA. * * NO_DATA * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. * write "oemdata <cpunum> <offset> -> return -EINVAL. * read data -> return EOF. * close -> unchanged. Free record areas. 
* * LOG_RECORD * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA. * read data -> return the INIT/MCA/CMC/CPE record. * close -> unchanged. Keep record areas. * * OEMDATA * write "read <cpunum>" -> NO_DATA or LOG_RECORD. * write "clear <cpunum>" -> NO_DATA or LOG_RECORD. * write "oemdata <cpunum> <offset> -> format the oem data, goto OEMDATA. * read data -> return the formatted oemdata. * close -> unchanged. Keep record areas. * * Closing the data file does not change the state. This allows shell scripts * to manipulate salinfo data, each shell redirection opens the file, does one * action then closes it again. The record areas are only freed at close when * the state is NO_DATA. */ enum salinfo_state { STATE_NO_DATA, STATE_LOG_RECORD, STATE_OEMDATA, }; struct salinfo_data { cpumask_t cpu_event; /* which cpus have outstanding events */ wait_queue_head_t read_wait; u8 *log_buffer; u64 log_size; u8 *oemdata; /* decoded oem data */ u64 oemdata_size; int open; /* single-open to prevent races */ u8 type; u8 saved_num; /* using a saved record? */ enum salinfo_state state :8; /* processing state */ u8 padding; int cpu_check; /* next CPU to check */ struct salinfo_data_saved data_saved[5];/* save last 5 records from mca.c, must be < 255 */ }; static struct salinfo_data salinfo_data[ARRAY_SIZE(salinfo_log_name)]; static DEFINE_SPINLOCK(data_lock); static DEFINE_SPINLOCK(data_saved_lock); /** salinfo_platform_oemdata - optional callback to decode oemdata from an error * record. * @sect_header: pointer to the start of the section to decode. * @oemdata: returns vmalloc area containing the decoded output. * @oemdata_size: returns length of decoded output (strlen). * * Description: If user space asks for oem data to be decoded by the kernel * and/or prom and the platform has set salinfo_platform_oemdata to the address * of a platform specific routine then call that routine. salinfo_platform_oemdata * vmalloc's and formats its output area, returning the address of the text * and its strlen. Returns 0 for success, -ve for error. The callback is * invoked on the cpu that generated the error record. */ int (*salinfo_platform_oemdata)(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size); struct salinfo_platform_oemdata_parms { const u8 *efi_guid; u8 **oemdata; u64 *oemdata_size; }; static long salinfo_platform_oemdata_cpu(void *context) { struct salinfo_platform_oemdata_parms *parms = context; return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size); } static void shift1_data_saved (struct salinfo_data *data, int shift) { memcpy(data->data_saved+shift, data->data_saved+shift+1, (ARRAY_SIZE(data->data_saved) - (shift+1)) * sizeof(data->data_saved[0])); memset(data->data_saved + ARRAY_SIZE(data->data_saved) - 1, 0, sizeof(data->data_saved[0])); } /* This routine is invoked in interrupt context. Note: mca.c enables * interrupts before calling this code for CMC/CPE. MCA and INIT events are * not irq safe, do not call any routines that use spinlocks, they may deadlock. * MCA and INIT records are recorded, a timer event will look for any * outstanding events and wake up the user space code. * * The buffer passed from mca.c points to the output from ia64_log_get. This is * a persistent buffer but its contents can change between the interrupt and * when user space processes the record. Save the record id to identify * changes. 
If the buffer is NULL then just update the bitmap. */ void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe) { struct salinfo_data *data = salinfo_data + type; struct salinfo_data_saved *data_saved; unsigned long flags = 0; int i; int saved_size = ARRAY_SIZE(data->data_saved); BUG_ON(type >= ARRAY_SIZE(salinfo_log_name)); if (irqsafe) spin_lock_irqsave(&data_saved_lock, flags); if (buffer) { for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (!data_saved->buffer) break; } if (i == saved_size) { if (!data->saved_num) { shift1_data_saved(data, 0); data_saved = data->data_saved + saved_size - 1; } else data_saved = NULL; } if (data_saved) { data_saved->cpu = smp_processor_id(); data_saved->id = ((sal_log_record_header_t *)buffer)->id; data_saved->size = size; data_saved->buffer = buffer; } } cpumask_set_cpu(smp_processor_id(), &data->cpu_event); if (irqsafe) { wake_up_interruptible(&data->read_wait); spin_unlock_irqrestore(&data_saved_lock, flags); } } /* Check for outstanding MCA/INIT records every minute (arbitrary) */ #define SALINFO_TIMER_DELAY (60*HZ) static struct timer_list salinfo_timer; extern void ia64_mlogbuf_dump(void); static void salinfo_timeout_check(struct salinfo_data *data) { if (!data->open) return; if (!cpumask_empty(&data->cpu_event)) wake_up_interruptible(&data->read_wait); } static void salinfo_timeout(struct timer_list *unused) { ia64_mlogbuf_dump(); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA); salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_INIT); salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; add_timer(&salinfo_timer); } static int salinfo_event_open(struct inode *inode, struct file *file) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return 0; } static ssize_t salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = pde_data(file_inode(file)); char cmd[32]; size_t size; int i, n, cpu = -1; retry: if (cpumask_empty(&data->cpu_event)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(data->read_wait, !cpumask_empty(&data->cpu_event))) return -EINTR; } n = data->cpu_check; for (i = 0; i < nr_cpu_ids; i++) { if (cpumask_test_cpu(n, &data->cpu_event)) { if (!cpu_online(n)) { cpumask_clear_cpu(n, &data->cpu_event); continue; } cpu = n; break; } if (++n == nr_cpu_ids) n = 0; } if (cpu == -1) goto retry; ia64_mlogbuf_dump(); /* for next read, start checking at next CPU */ data->cpu_check = cpu; if (++data->cpu_check == nr_cpu_ids) data->cpu_check = 0; snprintf(cmd, sizeof(cmd), "read %d\n", cpu); size = strlen(cmd); if (size > count) size = count; if (copy_to_user(buffer, cmd, size)) return -EFAULT; return size; } static const struct proc_ops salinfo_event_proc_ops = { .proc_open = salinfo_event_open, .proc_read = salinfo_event_read, .proc_lseek = noop_llseek, }; static int salinfo_log_open(struct inode *inode, struct file *file) { struct salinfo_data *data = pde_data(inode); if (!capable(CAP_SYS_ADMIN)) return -EPERM; spin_lock(&data_lock); if (data->open) { spin_unlock(&data_lock); return -EBUSY; } data->open = 1; spin_unlock(&data_lock); if (data->state == STATE_NO_DATA && !(data->log_buffer = vmalloc(ia64_sal_get_state_info_size(data->type)))) { data->open = 0; return -ENOMEM; } return 0; } static int salinfo_log_release(struct inode *inode, struct file *file) { struct salinfo_data *data = pde_data(inode); if (data->state == STATE_NO_DATA) { vfree(data->log_buffer); vfree(data->oemdata); data->log_buffer = NULL; 
data->oemdata = NULL; } spin_lock(&data_lock); data->open = 0; spin_unlock(&data_lock); return 0; } static long salinfo_log_read_cpu(void *context) { struct salinfo_data *data = context; sal_log_record_header_t *rh; data->log_size = ia64_sal_get_state_info(data->type, (u64 *) data->log_buffer); rh = (sal_log_record_header_t *)(data->log_buffer); /* Clear corrected errors as they are read from SAL */ if (rh->severity == sal_log_severity_corrected) ia64_sal_clear_state_info(data->type); return 0; } static void salinfo_log_new_read(int cpu, struct salinfo_data *data) { struct salinfo_data_saved *data_saved; unsigned long flags; int i; int saved_size = ARRAY_SIZE(data->data_saved); data->saved_num = 0; spin_lock_irqsave(&data_saved_lock, flags); retry: for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) { sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer); data->log_size = data_saved->size; memcpy(data->log_buffer, rh, data->log_size); barrier(); /* id check must not be moved */ if (rh->id == data_saved->id) { data->saved_num = i+1; break; } /* saved record changed by mca.c since interrupt, discard it */ shift1_data_saved(data, i); goto retry; } } spin_unlock_irqrestore(&data_saved_lock, flags); if (!data->saved_num) work_on_cpu_safe(cpu, salinfo_log_read_cpu, data); if (!data->log_size) { data->state = STATE_NO_DATA; cpumask_clear_cpu(cpu, &data->cpu_event); } else { data->state = STATE_LOG_RECORD; } } static ssize_t salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = pde_data(file_inode(file)); u8 *buf; u64 bufsize; if (data->state == STATE_LOG_RECORD) { buf = data->log_buffer; bufsize = data->log_size; } else if (data->state == STATE_OEMDATA) { buf = data->oemdata; bufsize = data->oemdata_size; } else { buf = NULL; bufsize = 0; } return simple_read_from_buffer(buffer, count, ppos, buf, bufsize); } static long salinfo_log_clear_cpu(void *context) { struct salinfo_data *data = context; ia64_sal_clear_state_info(data->type); return 0; } static int salinfo_log_clear(struct salinfo_data *data, int cpu) { sal_log_record_header_t *rh; unsigned long flags; spin_lock_irqsave(&data_saved_lock, flags); data->state = STATE_NO_DATA; if (!cpumask_test_cpu(cpu, &data->cpu_event)) { spin_unlock_irqrestore(&data_saved_lock, flags); return 0; } cpumask_clear_cpu(cpu, &data->cpu_event); if (data->saved_num) { shift1_data_saved(data, data->saved_num - 1); data->saved_num = 0; } spin_unlock_irqrestore(&data_saved_lock, flags); rh = (sal_log_record_header_t *)(data->log_buffer); /* Corrected errors have already been cleared from SAL */ if (rh->severity != sal_log_severity_corrected) work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data); /* clearing a record may make a new record visible */ salinfo_log_new_read(cpu, data); if (data->state == STATE_LOG_RECORD) { spin_lock_irqsave(&data_saved_lock, flags); cpumask_set_cpu(cpu, &data->cpu_event); wake_up_interruptible(&data->read_wait); spin_unlock_irqrestore(&data_saved_lock, flags); } return 0; } static ssize_t salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct salinfo_data *data = pde_data(file_inode(file)); char cmd[32]; size_t size; u32 offset; int cpu; size = sizeof(cmd); if (count < size) size = count; if (copy_from_user(cmd, buffer, size)) return -EFAULT; if (sscanf(cmd, "read %d", &cpu) == 1) { salinfo_log_new_read(cpu, data); } else if 
(sscanf(cmd, "clear %d", &cpu) == 1) { int ret; if ((ret = salinfo_log_clear(data, cpu))) count = ret; } else if (sscanf(cmd, "oemdata %d %d", &cpu, &offset) == 2) { if (data->state != STATE_LOG_RECORD && data->state != STATE_OEMDATA) return -EINVAL; if (offset > data->log_size - sizeof(efi_guid_t)) return -EINVAL; data->state = STATE_OEMDATA; if (salinfo_platform_oemdata) { struct salinfo_platform_oemdata_parms parms = { .efi_guid = data->log_buffer + offset, .oemdata = &data->oemdata, .oemdata_size = &data->oemdata_size }; count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu, &parms); } else data->oemdata_size = 0; } else return -EINVAL; return count; } static const struct proc_ops salinfo_data_proc_ops = { .proc_open = salinfo_log_open, .proc_release = salinfo_log_release, .proc_read = salinfo_log_read, .proc_write = salinfo_log_write, .proc_lseek = default_llseek, }; static int salinfo_cpu_online(unsigned int cpu) { unsigned int i, end = ARRAY_SIZE(salinfo_data); struct salinfo_data *data; spin_lock_irq(&data_saved_lock); for (i = 0, data = salinfo_data; i < end; ++i, ++data) { cpumask_set_cpu(cpu, &data->cpu_event); wake_up_interruptible(&data->read_wait); } spin_unlock_irq(&data_saved_lock); return 0; } static int salinfo_cpu_pre_down(unsigned int cpu) { unsigned int i, end = ARRAY_SIZE(salinfo_data); struct salinfo_data *data; spin_lock_irq(&data_saved_lock); for (i = 0, data = salinfo_data; i < end; ++i, ++data) { struct salinfo_data_saved *data_saved; int j = ARRAY_SIZE(data->data_saved) - 1; for (data_saved = data->data_saved + j; j >= 0; --j, --data_saved) { if (data_saved->buffer && data_saved->cpu == cpu) shift1_data_saved(data, j); } cpumask_clear_cpu(cpu, &data->cpu_event); } spin_unlock_irq(&data_saved_lock); return 0; } /* * 'data' contains an integer that corresponds to the feature we're * testing */ static int __maybe_unused proc_salinfo_show(struct seq_file *m, void *v) { unsigned long data = (unsigned long)v; seq_puts(m, (sal_platform_features & data) ? "1\n" : "0\n"); return 0; } static int __init salinfo_init(void) { struct proc_dir_entry *salinfo_dir; /* /proc/sal dir entry */ struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */ struct proc_dir_entry *dir, *entry; struct salinfo_data *data; int i; salinfo_dir = proc_mkdir("sal", NULL); if (!salinfo_dir) return 0; for (i=0; i < NR_SALINFO_ENTRIES; i++) { /* pass the feature bit in question as misc data */ *sdir++ = proc_create_single_data(salinfo_entries[i].name, 0, salinfo_dir, proc_salinfo_show, (void *)salinfo_entries[i].feature); } for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { data = salinfo_data + i; data->type = i; init_waitqueue_head(&data->read_wait); dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); if (!dir) continue; entry = proc_create_data("event", S_IRUSR, dir, &salinfo_event_proc_ops, data); if (!entry) continue; *sdir++ = entry; entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir, &salinfo_data_proc_ops, data); if (!entry) continue; *sdir++ = entry; *sdir++ = dir; } *sdir++ = salinfo_dir; timer_setup(&salinfo_timer, salinfo_timeout, 0); salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY; add_timer(&salinfo_timer); i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online", salinfo_cpu_online, salinfo_cpu_pre_down); WARN_ON(i < 0); return 0; } module_init(salinfo_init);
linux-master
arch/ia64/kernel/salinfo.c
// SPDX-License-Identifier: GPL-2.0 /* * I/O SAPIC support. * * Copyright (C) 1999 Intel Corp. * Copyright (C) 1999 Asit Mallick <[email protected]> * Copyright (C) 2000-2002 J.I. Lee <[email protected]> * Copyright (C) 1999-2000, 2002-2003 Hewlett-Packard Co. * David Mosberger-Tang <[email protected]> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999,2000 Walt Drummond <[email protected]> * * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O * APIC code. In particular, we now have separate * handlers for edge and level triggered * interrupts. * 00/10/27 Asit Mallick, Goutham Rao <[email protected]> IRQ vector * allocation PCI to vector mapping, shared PCI * interrupts. * 00/10/27 D. Mosberger Document things a bit more to make them more * understandable. Clean up much of the old * IOSAPIC cruft. * 01/07/27 J.I. Lee PCI irq routing, Platform/Legacy interrupts * and fixes for ACPI S5(SoftOff) support. * 02/01/23 J.I. Lee iosapic pgm fixes for PCI irq routing from _PRT * 02/01/07 E. Focht <[email protected]> Redirectable interrupt * vectors in iosapic_set_affinity(), * initializations for /proc/irq/#/smp_affinity * 02/04/02 P. Diefenbaugh Cleaned up ACPI PCI IRQ routing. * 02/04/18 J.I. Lee bug fix in iosapic_init_pci_irq * 02/04/30 J.I. Lee bug fix in find_iosapic to fix ACPI PCI IRQ to * IOSAPIC mapping error * 02/07/29 T. Kochi Allocate interrupt vectors dynamically * 02/08/04 T. Kochi Cleaned up terminology (irq, global system * interrupt, vector, etc.) * 02/09/20 D. Mosberger Simplified by taking advantage of ACPI's * pci_irq code. * 03/02/19 B. Helgaas Make pcat_compat system-wide, not per-IOSAPIC. * Remove iosapic_address & gsi_base from * external interfaces. Rationalize * __init/__devinit attributes. * 04/12/04 Ashok Raj <[email protected]> Intel Corporation 2004 * Updated to work with irq migration necessary * for CPU Hotplug */ /* * Here is what the interrupt logic between a PCI device and the kernel looks * like: * * (1) A PCI device raises one of the four interrupt pins (INTA, INTB, INTC, * INTD). The device is uniquely identified by its bus-, and slot-number * (the function number does not matter here because all functions share * the same interrupt lines). * * (2) The motherboard routes the interrupt line to a pin on a IOSAPIC * controller. Multiple interrupt lines may have to share the same * IOSAPIC pin (if they're level triggered and use the same polarity). * Each interrupt line has a unique Global System Interrupt (GSI) number * which can be calculated as the sum of the controller's base GSI number * and the IOSAPIC pin number to which the line connects. * * (3) The IOSAPIC uses an internal routing table entries (RTEs) to map the * IOSAPIC pin into the IA-64 interrupt vector. This interrupt vector is then * sent to the CPU. * * (4) The kernel recognizes an interrupt as an IRQ. The IRQ interface is * used as architecture-independent interrupt handling mechanism in Linux. * As an IRQ is a number, we have to have * IA-64 interrupt vector number <-> IRQ number mapping. On smaller * systems, we use one-to-one mapping between IA-64 vector and IRQ. * * To sum up, there are three levels of mappings involved: * * PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ * * Note: The term "IRQ" is loosely used everywhere in Linux kernel to * describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ * (isa_irq) is the only exception in this source code. 
*/ #include <linux/acpi.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/memblock.h> #include <asm/delay.h> #include <asm/hw_irq.h> #include <asm/io.h> #include <asm/iosapic.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/xtp.h> #undef DEBUG_INTERRUPT_ROUTING #ifdef DEBUG_INTERRUPT_ROUTING #define DBG(fmt...) printk(fmt) #else #define DBG(fmt...) #endif static DEFINE_SPINLOCK(iosapic_lock); /* * These tables map IA-64 vectors to the IOSAPIC pin that generates this * vector. */ #define NO_REF_RTE 0 static struct iosapic { char __iomem *addr; /* base address of IOSAPIC */ unsigned int gsi_base; /* GSI base */ unsigned short num_rte; /* # of RTEs on this IOSAPIC */ int rtes_inuse; /* # of RTEs in use on this IOSAPIC */ #ifdef CONFIG_NUMA unsigned short node; /* numa node association via pxm */ #endif spinlock_t lock; /* lock for indirect reg access */ } iosapic_lists[NR_IOSAPICS]; struct iosapic_rte_info { struct list_head rte_list; /* RTEs sharing the same vector */ char rte_index; /* IOSAPIC RTE index */ int refcnt; /* reference counter */ struct iosapic *iosapic; } ____cacheline_aligned; static struct iosapic_intr_info { struct list_head rtes; /* RTEs using this vector (empty => * not an IOSAPIC interrupt) */ int count; /* # of registered RTEs */ u32 low32; /* current value of low word of * Redirection table entry */ unsigned int dest; /* destination CPU physical ID */ unsigned char dmode : 3; /* delivery mode (see iosapic.h) */ unsigned char polarity: 1; /* interrupt polarity * (see iosapic.h) */ unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ } iosapic_intr_info[NR_IRQS]; static unsigned char pcat_compat; /* 8259 compatibility flag */ static inline void iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) { unsigned long flags; spin_lock_irqsave(&iosapic->lock, flags); __iosapic_write(iosapic->addr, reg, val); spin_unlock_irqrestore(&iosapic->lock, flags); } /* * Find an IOSAPIC associated with a GSI */ static inline int find_iosapic (unsigned int gsi) { int i; for (i = 0; i < NR_IOSAPICS; i++) { if ((unsigned) (gsi - iosapic_lists[i].gsi_base) < iosapic_lists[i].num_rte) return i; } return -1; } static inline int __gsi_to_irq(unsigned int gsi) { int irq; struct iosapic_intr_info *info; struct iosapic_rte_info *rte; for (irq = 0; irq < NR_IRQS; irq++) { info = &iosapic_intr_info[irq]; list_for_each_entry(rte, &info->rtes, rte_list) if (rte->iosapic->gsi_base + rte->rte_index == gsi) return irq; } return -1; } int gsi_to_irq (unsigned int gsi) { unsigned long flags; int irq; spin_lock_irqsave(&iosapic_lock, flags); irq = __gsi_to_irq(gsi); spin_unlock_irqrestore(&iosapic_lock, flags); return irq; } static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi) { struct iosapic_rte_info *rte; list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) if (rte->iosapic->gsi_base + rte->rte_index == gsi) return rte; return NULL; } static void set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask) { unsigned long pol, trigger, dmode; u32 low32, high32; int rte_index; char redir; struct iosapic_rte_info *rte; ia64_vector vector = irq_to_vector(irq); DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); rte = find_rte(irq, gsi); if (!rte) return; /* not an IOSAPIC interrupt */ rte_index = rte->rte_index; pol = 
iosapic_intr_info[irq].polarity; trigger = iosapic_intr_info[irq].trigger; dmode = iosapic_intr_info[irq].dmode; redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; #ifdef CONFIG_SMP set_irq_affinity_info(irq, (int)(dest & 0xffff), redir); #endif low32 = ((pol << IOSAPIC_POLARITY_SHIFT) | (trigger << IOSAPIC_TRIGGER_SHIFT) | (dmode << IOSAPIC_DELIVERY_SHIFT) | ((mask ? 1 : 0) << IOSAPIC_MASK_SHIFT) | vector); /* dest contains both id and eid */ high32 = (dest << IOSAPIC_DEST_SHIFT); iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32); iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32); iosapic_intr_info[irq].low32 = low32; iosapic_intr_info[irq].dest = dest; } static void iosapic_nop (struct irq_data *data) { /* do nothing... */ } #ifdef CONFIG_KEXEC void kexec_disable_iosapic(void) { struct iosapic_intr_info *info; struct iosapic_rte_info *rte; ia64_vector vec; int irq; for (irq = 0; irq < NR_IRQS; irq++) { info = &iosapic_intr_info[irq]; vec = irq_to_vector(irq); list_for_each_entry(rte, &info->rtes, rte_list) { iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), IOSAPIC_MASK|vec); iosapic_eoi(rte->iosapic->addr, vec); } } } #endif static void mask_irq (struct irq_data *data) { unsigned int irq = data->irq; u32 low32; int rte_index; struct iosapic_rte_info *rte; if (!iosapic_intr_info[irq].count) return; /* not an IOSAPIC interrupt! */ /* set only the mask bit */ low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK; list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) { rte_index = rte->rte_index; iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32); } } static void unmask_irq (struct irq_data *data) { unsigned int irq = data->irq; u32 low32; int rte_index; struct iosapic_rte_info *rte; if (!iosapic_intr_info[irq].count) return; /* not an IOSAPIC interrupt! */ low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK; list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) { rte_index = rte->rte_index; iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32); } } static int iosapic_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { #ifdef CONFIG_SMP unsigned int irq = data->irq; u32 high32, low32; int cpu, dest, rte_index; int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; struct iosapic_rte_info *rte; struct iosapic *iosapic; irq &= (~IA64_IRQ_REDIRECTED); cpu = cpumask_first_and(cpu_online_mask, mask); if (cpu >= nr_cpu_ids) return -1; if (irq_prepare_move(irq, cpu)) return -1; dest = cpu_physical_id(cpu); if (!iosapic_intr_info[irq].count) return -1; /* not an IOSAPIC interrupt */ set_irq_affinity_info(irq, dest, redir); /* dest contains both id and eid */ high32 = dest << IOSAPIC_DEST_SHIFT; low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT); if (redir) /* change delivery mode to lowest priority */ low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT); else /* change delivery mode to fixed */ low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); low32 &= IOSAPIC_VECTOR_MASK; low32 |= irq_to_vector(irq); iosapic_intr_info[irq].low32 = low32; iosapic_intr_info[irq].dest = dest; list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) { iosapic = rte->iosapic; rte_index = rte->rte_index; iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32); iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32); } #endif return 0; } /* * Handlers for level-triggered interrupts. 
*/ static unsigned int iosapic_startup_level_irq (struct irq_data *data) { unmask_irq(data); return 0; } static void iosapic_unmask_level_irq (struct irq_data *data) { unsigned int irq = data->irq; ia64_vector vec = irq_to_vector(irq); struct iosapic_rte_info *rte; int do_unmask_irq = 0; irq_complete_move(irq); if (unlikely(irqd_is_setaffinity_pending(data))) { do_unmask_irq = 1; mask_irq(data); } else unmask_irq(data); list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) iosapic_eoi(rte->iosapic->addr, vec); if (unlikely(do_unmask_irq)) { irq_move_masked_irq(data); unmask_irq(data); } } #define iosapic_shutdown_level_irq mask_irq #define iosapic_enable_level_irq unmask_irq #define iosapic_disable_level_irq mask_irq #define iosapic_ack_level_irq iosapic_nop static struct irq_chip irq_type_iosapic_level = { .name = "IO-SAPIC-level", .irq_startup = iosapic_startup_level_irq, .irq_shutdown = iosapic_shutdown_level_irq, .irq_enable = iosapic_enable_level_irq, .irq_disable = iosapic_disable_level_irq, .irq_ack = iosapic_ack_level_irq, .irq_mask = mask_irq, .irq_unmask = iosapic_unmask_level_irq, .irq_set_affinity = iosapic_set_affinity }; /* * Handlers for edge-triggered interrupts. */ static unsigned int iosapic_startup_edge_irq (struct irq_data *data) { unmask_irq(data); /* * IOSAPIC simply drops interrupts pended while the * corresponding pin was masked, so we can't know if an * interrupt is pending already. Let's hope not... */ return 0; } static void iosapic_ack_edge_irq (struct irq_data *data) { irq_complete_move(data->irq); irq_move_irq(data); } #define iosapic_enable_edge_irq unmask_irq #define iosapic_disable_edge_irq iosapic_nop static struct irq_chip irq_type_iosapic_edge = { .name = "IO-SAPIC-edge", .irq_startup = iosapic_startup_edge_irq, .irq_shutdown = iosapic_disable_edge_irq, .irq_enable = iosapic_enable_edge_irq, .irq_disable = iosapic_disable_edge_irq, .irq_ack = iosapic_ack_edge_irq, .irq_mask = mask_irq, .irq_unmask = unmask_irq, .irq_set_affinity = iosapic_set_affinity }; static unsigned int iosapic_version (char __iomem *addr) { /* * IOSAPIC Version Register return 32 bit structure like: * { * unsigned int version : 8; * unsigned int reserved1 : 8; * unsigned int max_redir : 8; * unsigned int reserved2 : 8; * } */ return __iosapic_read(addr, IOSAPIC_VERSION); } static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol) { int i, irq = -ENOSPC, min_count = -1; struct iosapic_intr_info *info; /* * shared vectors for edge-triggered interrupts are not * supported yet */ if (trigger == IOSAPIC_EDGE) return -EINVAL; for (i = 0; i < NR_IRQS; i++) { info = &iosapic_intr_info[i]; if (info->trigger == trigger && info->polarity == pol && (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY) && can_request_irq(i, IRQF_SHARED)) { if (min_count == -1 || info->count < min_count) { irq = i; min_count = info->count; } } } return irq; } /* * if the given vector is already owned by other, * assign a new vector for the other and make the vector available */ static void __init iosapic_reassign_vector (int irq) { int new_irq; if (iosapic_intr_info[irq].count) { new_irq = create_irq(); if (new_irq < 0) panic("%s: out of interrupt vectors!\n", __func__); printk(KERN_INFO "Reassigning vector %d to %d\n", irq_to_vector(irq), irq_to_vector(new_irq)); memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq], sizeof(struct iosapic_intr_info)); INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes); list_move(iosapic_intr_info[irq].rtes.next, 
&iosapic_intr_info[new_irq].rtes); memset(&iosapic_intr_info[irq], 0, sizeof(struct iosapic_intr_info)); iosapic_intr_info[irq].low32 = IOSAPIC_MASK; INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes); } } static inline int irq_is_shared (int irq) { return (iosapic_intr_info[irq].count > 1); } struct irq_chip* ia64_native_iosapic_get_irq_chip(unsigned long trigger) { if (trigger == IOSAPIC_EDGE) return &irq_type_iosapic_edge; else return &irq_type_iosapic_level; } static int register_intr (unsigned int gsi, int irq, unsigned char delivery, unsigned long polarity, unsigned long trigger) { struct irq_chip *chip, *irq_type; int index; struct iosapic_rte_info *rte; index = find_iosapic(gsi); if (index < 0) { printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __func__, gsi); return -ENODEV; } rte = find_rte(irq, gsi); if (!rte) { rte = kzalloc(sizeof (*rte), GFP_ATOMIC); if (!rte) { printk(KERN_WARNING "%s: cannot allocate memory\n", __func__); return -ENOMEM; } rte->iosapic = &iosapic_lists[index]; rte->rte_index = gsi - rte->iosapic->gsi_base; rte->refcnt++; list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes); iosapic_intr_info[irq].count++; iosapic_lists[index].rtes_inuse++; } else if (rte->refcnt == NO_REF_RTE) { struct iosapic_intr_info *info = &iosapic_intr_info[irq]; if (info->count > 0 && (info->trigger != trigger || info->polarity != polarity)){ printk (KERN_WARNING "%s: cannot override the interrupt\n", __func__); return -EINVAL; } rte->refcnt++; iosapic_intr_info[irq].count++; iosapic_lists[index].rtes_inuse++; } iosapic_intr_info[irq].polarity = polarity; iosapic_intr_info[irq].dmode = delivery; iosapic_intr_info[irq].trigger = trigger; irq_type = iosapic_get_irq_chip(trigger); chip = irq_get_chip(irq); if (irq_type != NULL && chip != irq_type) { if (chip != &no_irq_chip) printk(KERN_WARNING "%s: changing vector %d from %s to %s\n", __func__, irq_to_vector(irq), chip->name, irq_type->name); chip = irq_type; } irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip, trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq, NULL); return 0; } static unsigned int get_target_cpu (unsigned int gsi, int irq) { #ifdef CONFIG_SMP static int cpu = -1; extern int cpe_vector; cpumask_t domain = irq_to_domain(irq); /* * In case of vector shared by multiple RTEs, all RTEs that * share the vector need to use the same destination CPU. */ if (iosapic_intr_info[irq].count) return iosapic_intr_info[irq].dest; /* * If the platform supports redirection via XTP, let it * distribute interrupts. */ if (smp_int_redirect & SMP_IRQ_REDIRECTION) return cpu_physical_id(smp_processor_id()); /* * Some interrupts (ACPI SCI, for instance) are registered * before the BSP is marked as online. 
*/ if (!cpu_online(smp_processor_id())) return cpu_physical_id(smp_processor_id()); if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR) return get_cpei_target_cpu(); #ifdef CONFIG_NUMA { int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0; const struct cpumask *cpu_mask; iosapic_index = find_iosapic(gsi); if (iosapic_index < 0 || iosapic_lists[iosapic_index].node == MAX_NUMNODES) goto skip_numa_setup; cpu_mask = cpumask_of_node(iosapic_lists[iosapic_index].node); num_cpus = 0; for_each_cpu_and(numa_cpu, cpu_mask, &domain) { if (cpu_online(numa_cpu)) num_cpus++; } if (!num_cpus) goto skip_numa_setup; /* Use irq assignment to distribute across cpus in node */ cpu_index = irq % num_cpus; for_each_cpu_and(numa_cpu, cpu_mask, &domain) if (cpu_online(numa_cpu) && i++ >= cpu_index) break; if (numa_cpu < nr_cpu_ids) return cpu_physical_id(numa_cpu); } skip_numa_setup: #endif /* * Otherwise, round-robin interrupt vectors across all the * processors. (It'd be nice if we could be smarter in the * case of NUMA.) */ do { if (++cpu >= nr_cpu_ids) cpu = 0; } while (!cpu_online(cpu) || !cpumask_test_cpu(cpu, &domain)); return cpu_physical_id(cpu); #else /* CONFIG_SMP */ return cpu_physical_id(smp_processor_id()); #endif } static inline unsigned char choose_dmode(void) { #ifdef CONFIG_SMP if (smp_int_redirect & SMP_IRQ_REDIRECTION) return IOSAPIC_LOWEST_PRIORITY; #endif return IOSAPIC_FIXED; } /* * ACPI can describe IOSAPIC interrupts via static tables and namespace * methods. This provides an interface to register those interrupts and * program the IOSAPIC RTE. */ int iosapic_register_intr (unsigned int gsi, unsigned long polarity, unsigned long trigger) { int irq, mask = 1, err; unsigned int dest; unsigned long flags; struct iosapic_rte_info *rte; u32 low32; unsigned char dmode; struct irq_desc *desc; /* * If this GSI has already been registered (i.e., it's a * shared interrupt, or we lost a race to register it), * don't touch the RTE. */ spin_lock_irqsave(&iosapic_lock, flags); irq = __gsi_to_irq(gsi); if (irq > 0) { rte = find_rte(irq, gsi); if(iosapic_intr_info[irq].count == 0) { assign_irq_vector(irq); irq_init_desc(irq); } else if (rte->refcnt != NO_REF_RTE) { rte->refcnt++; goto unlock_iosapic_lock; } } else irq = create_irq(); /* If vector is running out, we try to find a sharable vector */ if (irq < 0) { irq = iosapic_find_sharable_irq(trigger, polarity); if (irq < 0) goto unlock_iosapic_lock; } desc = irq_to_desc(irq); raw_spin_lock(&desc->lock); dest = get_target_cpu(gsi, irq); dmode = choose_dmode(); err = register_intr(gsi, irq, dmode, polarity, trigger); if (err < 0) { raw_spin_unlock(&desc->lock); irq = err; goto unlock_iosapic_lock; } /* * If the vector is shared and already unmasked for other * interrupt sources, don't mask it. */ low32 = iosapic_intr_info[irq].low32; if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK)) mask = 0; set_rte(gsi, irq, dest, mask); printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); raw_spin_unlock(&desc->lock); unlock_iosapic_lock: spin_unlock_irqrestore(&iosapic_lock, flags); return irq; } void iosapic_unregister_intr (unsigned int gsi) { unsigned long flags; int irq, index; u32 low32; unsigned long trigger, polarity; unsigned int dest; struct iosapic_rte_info *rte; /* * If the irq associated with the gsi is not found, * iosapic_unregister_intr() is unbalanced. 
We need to check * this again after getting locks. */ irq = gsi_to_irq(gsi); if (irq < 0) { printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); WARN_ON(1); return; } spin_lock_irqsave(&iosapic_lock, flags); if ((rte = find_rte(irq, gsi)) == NULL) { printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); WARN_ON(1); goto out; } if (--rte->refcnt > 0) goto out; rte->refcnt = NO_REF_RTE; /* Mask the interrupt */ low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK; iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32); iosapic_intr_info[irq].count--; index = find_iosapic(gsi); iosapic_lists[index].rtes_inuse--; WARN_ON(iosapic_lists[index].rtes_inuse < 0); trigger = iosapic_intr_info[irq].trigger; polarity = iosapic_intr_info[irq].polarity; dest = iosapic_intr_info[irq].dest; printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n", gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); if (iosapic_intr_info[irq].count == 0) { #ifdef CONFIG_SMP /* Clear affinity */ irq_data_update_affinity(irq_get_irq_data(irq), cpu_all_mask); #endif /* Clear the interrupt information */ iosapic_intr_info[irq].dest = 0; iosapic_intr_info[irq].dmode = 0; iosapic_intr_info[irq].polarity = 0; iosapic_intr_info[irq].trigger = 0; iosapic_intr_info[irq].low32 |= IOSAPIC_MASK; /* Destroy and reserve IRQ */ destroy_and_reserve_irq(irq); } out: spin_unlock_irqrestore(&iosapic_lock, flags); } /* * ACPI calls this when it finds an entry for a platform interrupt. */ int __init iosapic_register_platform_intr (u32 int_type, unsigned int gsi, int iosapic_vector, u16 eid, u16 id, unsigned long polarity, unsigned long trigger) { static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"}; unsigned char delivery; int irq, vector, mask = 0; unsigned int dest = ((id << 8) | eid) & 0xffff; switch (int_type) { case ACPI_INTERRUPT_PMI: irq = vector = iosapic_vector; bind_irq_vector(irq, vector, CPU_MASK_ALL); /* * since PMI vector is alloc'd by FW(ACPI) not by kernel, * we need to make sure the vector is available */ iosapic_reassign_vector(irq); delivery = IOSAPIC_PMI; break; case ACPI_INTERRUPT_INIT: irq = create_irq(); if (irq < 0) panic("%s: out of interrupt vectors!\n", __func__); vector = irq_to_vector(irq); delivery = IOSAPIC_INIT; break; case ACPI_INTERRUPT_CPEI: irq = vector = IA64_CPE_VECTOR; BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL)); delivery = IOSAPIC_FIXED; mask = 1; break; default: printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__, int_type); return -1; } register_intr(gsi, irq, delivery, polarity, trigger); printk(KERN_INFO "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" " vector %d\n", int_type < ARRAY_SIZE(name) ? name[int_type] : "unknown", int_type, gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, vector); set_rte(gsi, irq, dest, mask); return vector; } /* * ACPI calls this when it finds an entry for a legacy ISA IRQ override. 
*/ void iosapic_override_isa_irq(unsigned int isa_irq, unsigned int gsi, unsigned long polarity, unsigned long trigger) { int vector, irq; unsigned int dest = cpu_physical_id(smp_processor_id()); unsigned char dmode; irq = vector = isa_irq_to_vector(isa_irq); BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL)); dmode = choose_dmode(); register_intr(gsi, irq, dmode, polarity, trigger); DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n", isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level", polarity == IOSAPIC_POL_HIGH ? "high" : "low", cpu_logical_id(dest), dest, vector); set_rte(gsi, irq, dest, 1); } void __init ia64_native_iosapic_pcat_compat_init(void) { if (pcat_compat) { /* * Disable the compatibility mode interrupts (8259 style), * needs IN/OUT support enabled. */ printk(KERN_INFO "%s: Disabling PC-AT compatible 8259 interrupts\n", __func__); outb(0xff, 0xA1); outb(0xff, 0x21); } } void __init iosapic_system_init (int system_pcat_compat) { int irq; for (irq = 0; irq < NR_IRQS; ++irq) { iosapic_intr_info[irq].low32 = IOSAPIC_MASK; /* mark as unused */ INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes); iosapic_intr_info[irq].count = 0; } pcat_compat = system_pcat_compat; if (pcat_compat) iosapic_pcat_compat_init(); } static inline int iosapic_alloc (void) { int index; for (index = 0; index < NR_IOSAPICS; index++) if (!iosapic_lists[index].addr) return index; printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__); return -1; } static inline void iosapic_free (int index) { memset(&iosapic_lists[index], 0, sizeof(iosapic_lists[0])); } static inline int iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver) { int index; unsigned int gsi_end, base, end; /* check gsi range */ gsi_end = gsi_base + ((ver >> 16) & 0xff); for (index = 0; index < NR_IOSAPICS; index++) { if (!iosapic_lists[index].addr) continue; base = iosapic_lists[index].gsi_base; end = base + iosapic_lists[index].num_rte - 1; if (gsi_end < base || end < gsi_base) continue; /* OK */ return -EBUSY; } return 0; } static int iosapic_delete_rte(unsigned int irq, unsigned int gsi) { struct iosapic_rte_info *rte, *temp; list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes, rte_list) { if (rte->iosapic->gsi_base + rte->rte_index == gsi) { if (rte->refcnt) return -EBUSY; list_del(&rte->rte_list); kfree(rte); return 0; } } return -EINVAL; } int iosapic_init(unsigned long phys_addr, unsigned int gsi_base) { int num_rte, err, index; unsigned int isa_irq, ver; char __iomem *addr; unsigned long flags; spin_lock_irqsave(&iosapic_lock, flags); index = find_iosapic(gsi_base); if (index >= 0) { spin_unlock_irqrestore(&iosapic_lock, flags); return -EBUSY; } addr = ioremap(phys_addr, 0); if (addr == NULL) { spin_unlock_irqrestore(&iosapic_lock, flags); return -ENOMEM; } ver = iosapic_version(addr); if ((err = iosapic_check_gsi_range(gsi_base, ver))) { iounmap(addr); spin_unlock_irqrestore(&iosapic_lock, flags); return err; } /* * The MAX_REDIR register holds the highest input pin number * (starting from 0). We add 1 so that we can use it for * number of pins (= RTEs) */ num_rte = ((ver >> 16) & 0xff) + 1; index = iosapic_alloc(); iosapic_lists[index].addr = addr; iosapic_lists[index].gsi_base = gsi_base; iosapic_lists[index].num_rte = num_rte; #ifdef CONFIG_NUMA iosapic_lists[index].node = MAX_NUMNODES; #endif spin_lock_init(&iosapic_lists[index].lock); spin_unlock_irqrestore(&iosapic_lock, flags); if ((gsi_base == 0) && pcat_compat) { /* * Map the legacy ISA devices into the IOSAPIC data. 
Some of * these may get reprogrammed later on with data from the ACPI * Interrupt Source Override table. */ for (isa_irq = 0; isa_irq < 16; ++isa_irq) iosapic_override_isa_irq(isa_irq, isa_irq, IOSAPIC_POL_HIGH, IOSAPIC_EDGE); } return 0; } int iosapic_remove(unsigned int gsi_base) { int i, irq, index, err = 0; unsigned long flags; spin_lock_irqsave(&iosapic_lock, flags); index = find_iosapic(gsi_base); if (index < 0) { printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", __func__, gsi_base); goto out; } if (iosapic_lists[index].rtes_inuse) { err = -EBUSY; printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n", __func__, gsi_base); goto out; } for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) { irq = __gsi_to_irq(i); if (irq < 0) continue; err = iosapic_delete_rte(irq, i); if (err) goto out; } iounmap(iosapic_lists[index].addr); iosapic_free(index); out: spin_unlock_irqrestore(&iosapic_lock, flags); return err; } #ifdef CONFIG_NUMA void map_iosapic_to_node(unsigned int gsi_base, int node) { int index; index = find_iosapic(gsi_base); if (index < 0) { printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", __func__, gsi_base); return; } iosapic_lists[index].node = node; return; } #endif
linux-master
arch/ia64/kernel/iosapic.c
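The set_rte() and iosapic_set_affinity() paths above build a redirection table entry by OR-ing shifted fields into a 32-bit word. The stand-alone sketch below (plain user-space C, not kernel code) repeats that packing with assumed shift values; the authoritative IOSAPIC_*_SHIFT constants live in arch/ia64/include/asm/iosapic.h and may differ.

/*
 * Illustrative sketch of the low-word packing done by set_rte() above.
 * The shift values are assumptions standing in for the asm/iosapic.h
 * definitions; everything here is for demonstration only.
 */
#include <stdint.h>
#include <stdio.h>

#define RTE_DELIVERY_SHIFT	8	/* assumed, see asm/iosapic.h */
#define RTE_POLARITY_SHIFT	13	/* assumed */
#define RTE_TRIGGER_SHIFT	15	/* assumed */
#define RTE_MASK_SHIFT		16	/* assumed */

static uint32_t pack_rte_low(unsigned vector, unsigned dmode,
			     unsigned pol, unsigned trigger, int masked)
{
	return (pol << RTE_POLARITY_SHIFT) |
	       (trigger << RTE_TRIGGER_SHIFT) |
	       (dmode << RTE_DELIVERY_SHIFT) |
	       ((masked ? 1u : 0u) << RTE_MASK_SHIFT) |
	       (vector & 0xff);
}

int main(void)
{
	/* example inputs: vector 0x49, delivery 0, polarity 1, trigger 1, unmasked */
	uint32_t low32 = pack_rte_low(0x49, 0, 1, 1, 0);

	printf("low32 = 0x%08x (vector 0x%02x, masked=%u)\n",
	       (unsigned)low32, (unsigned)(low32 & 0xff),
	       (unsigned)((low32 >> RTE_MASK_SHIFT) & 1));
	return 0;
}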
// SPDX-License-Identifier: GPL-2.0-only /* * SMP Support * * Copyright (C) 1999 Walt Drummond <[email protected]> * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <[email protected]> * * Lots of stuff stolen from arch/alpha/kernel/smp.c * * 01/05/16 Rohit Seth <[email protected]> IA64-SMP functions. Reorganized * the existing code (on the lines of x86 port). * 00/09/11 David Mosberger <[email protected]> Do loops_per_jiffy * calibration on each CPU. * 00/08/23 Asit Mallick <[email protected]> fixed logical processor id * 00/03/31 Rohit Seth <[email protected]> Fixes for Bootstrap Processor * & cpu_online_map now gets done here (instead of setup.c) * 99/10/05 davidm Update to bring it in sync with new command-line processing * scheme. * 10/13/00 Goutham Rao <[email protected]> Updated smp_call_function and * smp_call_function_single to resend IPI on timeouts */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/cache.h> #include <linux/delay.h> #include <linux/efi.h> #include <linux/bitops.h> #include <linux/kexec.h> #include <linux/atomic.h> #include <asm/current.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/sal.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/mca.h> #include <asm/xtp.h> /* * Note: alignment of 4 entries/cacheline was empirically determined * to be a good tradeoff between hot cachelines & spreading the array * across too many cacheline. */ static struct local_tlb_flush_counts { unsigned int count; } __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS]; static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS], shadow_flush_counts); #define IPI_CALL_FUNC 0 #define IPI_CPU_STOP 1 #define IPI_CALL_FUNC_SINGLE 2 #define IPI_KDUMP_CPU_STOP 3 /* This needs to be cacheline aligned because it is written to by *other* CPUs. */ static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation); extern void cpu_halt (void); static void stop_this_cpu(void) { /* * Remove this CPU: */ set_cpu_online(smp_processor_id(), false); max_xtp(); local_irq_disable(); cpu_halt(); } void cpu_die(void) { max_xtp(); local_irq_disable(); cpu_halt(); /* Should never be here */ BUG(); for (;;); } irqreturn_t handle_IPI (int irq, void *dev_id) { int this_cpu = get_cpu(); unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); unsigned long ops; mb(); /* Order interrupt and bit testing. */ while ((ops = xchg(pending_ipis, 0)) != 0) { mb(); /* Order bit clearing and data access. */ do { unsigned long which; which = ffz(~ops); ops &= ~(1 << which); switch (which) { case IPI_CPU_STOP: stop_this_cpu(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CALL_FUNC_SINGLE: generic_smp_call_function_single_interrupt(); break; #ifdef CONFIG_KEXEC case IPI_KDUMP_CPU_STOP: unw_init_running(kdump_cpu_freeze, NULL); break; #endif default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; } } while (ops); mb(); /* Order data access and bit testing. */ } put_cpu(); return IRQ_HANDLED; } /* * Called with preemption disabled. */ static inline void send_IPI_single (int dest_cpu, int op) { set_bit(op, &per_cpu(ipi_operation, dest_cpu)); ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0); } /* * Called with preemption disabled. 
*/ static inline void send_IPI_allbutself (int op) { unsigned int i; for_each_online_cpu(i) { if (i != smp_processor_id()) send_IPI_single(i, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_mask(const struct cpumask *mask, int op) { unsigned int cpu; for_each_cpu(cpu, mask) { send_IPI_single(cpu, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_all (int op) { int i; for_each_online_cpu(i) { send_IPI_single(i, op); } } /* * Called with preemption disabled. */ static inline void send_IPI_self (int op) { send_IPI_single(smp_processor_id(), op); } #ifdef CONFIG_KEXEC void kdump_smp_send_stop(void) { send_IPI_allbutself(IPI_KDUMP_CPU_STOP); } void kdump_smp_send_init(void) { unsigned int cpu, self_cpu; self_cpu = smp_processor_id(); for_each_online_cpu(cpu) { if (cpu != self_cpu) { if(kdump_status[cpu] == 0) ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0); } } } #endif /* * Called with preemption disabled. */ void arch_smp_send_reschedule (int cpu) { ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); /* * Called with preemption disabled. */ static void smp_send_local_flush_tlb (int cpu) { ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0); } void smp_local_flush_tlb(void) { /* * Use atomic ops. Otherwise, the load/increment/store sequence from * a "++" operation can have the line stolen between the load & store. * The overhead of the atomic op in negligible in this case & offers * significant benefit for the brief periods where lots of cpus * are simultaneously flushing TLBs. */ ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq); local_flush_tlb_all(); } #define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */ void smp_flush_tlb_cpumask(cpumask_t xcpumask) { unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts); cpumask_t cpumask = xcpumask; int mycpu, cpu, flush_mycpu = 0; preempt_disable(); mycpu = smp_processor_id(); for_each_cpu(cpu, &cpumask) counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff; mb(); for_each_cpu(cpu, &cpumask) { if (cpu == mycpu) flush_mycpu = 1; else smp_send_local_flush_tlb(cpu); } if (flush_mycpu) smp_local_flush_tlb(); for_each_cpu(cpu, &cpumask) while(counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff)) udelay(FLUSH_DELAY); preempt_enable(); } void smp_flush_tlb_all (void) { on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1); } void smp_flush_tlb_mm (struct mm_struct *mm) { cpumask_var_t cpus; preempt_disable(); /* this happens for the common case of a single-threaded fork(): */ if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) { local_finish_flush_tlb_mm(mm); preempt_enable(); return; } if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) { smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1); } else { cpumask_copy(cpus, mm_cpumask(mm)); smp_call_function_many(cpus, (void (*)(void *))local_finish_flush_tlb_mm, mm, 1); free_cpumask_var(cpus); } local_irq_disable(); local_finish_flush_tlb_mm(mm); local_irq_enable(); preempt_enable(); } void arch_send_call_function_single_ipi(int cpu) { send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) { send_IPI_mask(mask, IPI_CALL_FUNC); } /* * this function calls the 'stop' function on all other CPUs in the system. */ void smp_send_stop (void) { send_IPI_allbutself(IPI_CPU_STOP); }
linux-master
arch/ia64/kernel/smp.c
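send_IPI_single() and handle_IPI() above communicate through a per-CPU bitmask of pending operations: the sender sets one bit per IPI type, and the handler atomically swaps the word to zero and services every bit that was set. The following user-space sketch models that pattern with C11 atomics; send_op()/handle_ops() are invented names, and __builtin_ctzl stands in for the kernel's ffz(~ops).

/*
 * Minimal user-space model of the pending-IPI bitmask dispatch used in
 * handle_IPI() above.  Not kernel code; names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { IPI_CALL_FUNC, IPI_CPU_STOP, IPI_CALL_FUNC_SINGLE };

static _Atomic unsigned long pending_ops;

static void send_op(int op)
{
	atomic_fetch_or(&pending_ops, 1UL << op);	/* plays the role of set_bit() */
}

static void handle_ops(void)
{
	unsigned long ops;

	/* like xchg(pending_ipis, 0) in handle_IPI() */
	while ((ops = atomic_exchange(&pending_ops, 0)) != 0) {
		while (ops) {
			int which = __builtin_ctzl(ops);	/* lowest set bit */

			ops &= ~(1UL << which);
			printf("servicing IPI op %d\n", which);
		}
	}
}

int main(void)
{
	send_op(IPI_CALL_FUNC);
	send_op(IPI_CPU_STOP);
	handle_ops();
	return 0;
}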
// SPDX-License-Identifier: GPL-2.0 /* * Emulation of the "brl" instruction for IA64 processors that * don't support it in hardware. * Author: Stephan Zeisset, Intel Corp. <[email protected]> * * 02/22/02 D. Mosberger Clear si_flgs, si_isr, and si_imm to avoid * leaking kernel bits. */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> #include <asm/processor.h> extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5; struct illegal_op_return { unsigned long fkt, arg1, arg2, arg3; }; /* * The unimplemented bits of a virtual address must be set * to the value of the most significant implemented bit. * unimpl_va_mask includes all unimplemented bits and * the most significant implemented bit, so the result * of an and operation with the mask must be all 0's * or all 1's for the address to be valid. */ #define unimplemented_virtual_address(va) ( \ ((va) & local_cpu_data->unimpl_va_mask) != 0 && \ ((va) & local_cpu_data->unimpl_va_mask) != local_cpu_data->unimpl_va_mask \ ) /* * The unimplemented bits of a physical address must be 0. * unimpl_pa_mask includes all unimplemented bits, so the result * of an and operation with the mask must be all 0's for the * address to be valid. */ #define unimplemented_physical_address(pa) ( \ ((pa) & local_cpu_data->unimpl_pa_mask) != 0 \ ) /* * Handle an illegal operation fault that was caused by an * unimplemented "brl" instruction. * If we are not successful (e.g because the illegal operation * wasn't caused by a "brl" after all), we return -1. * If we are successful, we return either 0 or the address * of a "fixup" function for manipulating preserved register * state. */ struct illegal_op_return ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec) { unsigned long bundle[2]; unsigned long opcode, btype, qp, offset, cpl; unsigned long next_ip; struct illegal_op_return rv; long tmp_taken, unimplemented_address; rv.fkt = (unsigned long) -1; /* * Decode the instruction bundle. */ if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle))) return rv; next_ip = (unsigned long) regs->cr_iip + 16; /* "brl" must be in slot 2. */ if (ia64_psr(regs)->ri != 1) return rv; /* Must be "mlx" template */ if ((bundle[0] & 0x1e) != 0x4) return rv; opcode = (bundle[1] >> 60); btype = ((bundle[1] >> 29) & 0x7); qp = ((bundle[1] >> 23) & 0x3f); offset = ((bundle[1] & 0x0800000000000000L) << 4) | ((bundle[1] & 0x00fffff000000000L) >> 32) | ((bundle[1] & 0x00000000007fffffL) << 40) | ((bundle[0] & 0xffff000000000000L) >> 24); tmp_taken = regs->pr & (1L << qp); switch(opcode) { case 0xC: /* * Long Branch. */ if (btype != 0) return rv; rv.fkt = 0; if (!(tmp_taken)) { /* * Qualifying predicate is 0. * Skip instruction. */ regs->cr_iip = next_ip; ia64_psr(regs)->ri = 0; return rv; } break; case 0xD: /* * Long Call. */ rv.fkt = 0; if (!(tmp_taken)) { /* * Qualifying predicate is 0. * Skip instruction. 
*/ regs->cr_iip = next_ip; ia64_psr(regs)->ri = 0; return rv; } /* * BR[btype] = IP+16 */ switch(btype) { case 0: regs->b0 = next_ip; break; case 1: rv.fkt = (unsigned long) &ia64_set_b1; break; case 2: rv.fkt = (unsigned long) &ia64_set_b2; break; case 3: rv.fkt = (unsigned long) &ia64_set_b3; break; case 4: rv.fkt = (unsigned long) &ia64_set_b4; break; case 5: rv.fkt = (unsigned long) &ia64_set_b5; break; case 6: regs->b6 = next_ip; break; case 7: regs->b7 = next_ip; break; } rv.arg1 = next_ip; /* * AR[PFS].pfm = CFM * AR[PFS].pec = AR[EC] * AR[PFS].ppl = PSR.cpl */ cpl = ia64_psr(regs)->cpl; regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff) | (ar_ec << 52) | (cpl << 62)); /* * CFM.sof -= CFM.sol * CFM.sol = 0 * CFM.sor = 0 * CFM.rrb.gr = 0 * CFM.rrb.fr = 0 * CFM.rrb.pr = 0 */ regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f) - ((regs->cr_ifs >> 7) & 0x7f)); break; default: /* * Unknown opcode. */ return rv; } regs->cr_iip += offset; ia64_psr(regs)->ri = 0; if (ia64_psr(regs)->it == 0) unimplemented_address = unimplemented_physical_address(regs->cr_iip); else unimplemented_address = unimplemented_virtual_address(regs->cr_iip); if (unimplemented_address) { /* * The target address contains unimplemented bits. */ printk(KERN_DEBUG "Woah! Unimplemented Instruction Address Trap!\n"); force_sig_fault(SIGILL, ILL_BADIADDR, (void __user *)NULL, 0, 0, 0); } else if (ia64_psr(regs)->tb) { /* * Branch Tracing is enabled. * Force a taken branch signal. */ force_sig_fault(SIGTRAP, TRAP_BRANCH, (void __user *)NULL, 0, 0, 0); } else if (ia64_psr(regs)->ss) { /* * Single Step is enabled. * Force a trace signal. */ force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)NULL, 0, 0, 0); } return rv; }
linux-master
arch/ia64/kernel/brl_emu.c
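ia64_emulate_brl() reassembles the 60-bit branch displacement from the two 64-bit words of an MLX bundle. The sketch below applies the same masks and shifts outside the kernel; the sample bundle words are hypothetical and exist only to exercise the bit shuffling.

/*
 * Stand-alone sketch of the "brl" offset reassembly performed by
 * ia64_emulate_brl() above.  The masks and shifts mirror that code;
 * the example bundle is made up and is not a real instruction.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t brl_offset(uint64_t b0, uint64_t b1)
{
	return ((b1 & 0x0800000000000000ULL) << 4)  |	/* pieces of the   */
	       ((b1 & 0x00fffff000000000ULL) >> 32) |	/* 60-bit          */
	       ((b1 & 0x00000000007fffffULL) << 40) |	/* immediate,      */
	       ((b0 & 0xffff000000000000ULL) >> 24);	/* reassembled     */
}

int main(void)
{
	/* hypothetical bundle words; low bits 0x4 select the "mlx" template */
	uint64_t b0 = 0x1234000000000004ULL;
	uint64_t b1 = 0xC000001000800000ULL;

	if ((b0 & 0x1e) != 0x4) {
		puts("not an mlx template");
		return 1;
	}
	printf("opcode=%llu btype=%llu qp=%llu offset=0x%016llx\n",
	       (unsigned long long)(b1 >> 60),
	       (unsigned long long)((b1 >> 29) & 0x7),
	       (unsigned long long)((b1 >> 23) & 0x3f),
	       (unsigned long long)brl_offset(b0, b1));
	return 0;
}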
// SPDX-License-Identifier: GPL-2.0-or-later /* * acpi.c - Architecture-Specific Low-Level ACPI Support * * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999,2000 Walt Drummond <[email protected]> * Copyright (C) 2000, 2002-2003 Hewlett-Packard Co. * David Mosberger-Tang <[email protected]> * Copyright (C) 2000 Intel Corp. * Copyright (C) 2000,2001 J.I. Lee <[email protected]> * Copyright (C) 2001 Paul Diefenbaugh <[email protected]> * Copyright (C) 2001 Jenna Hall <[email protected]> * Copyright (C) 2001 Takayoshi Kochi <[email protected]> * Copyright (C) 2002 Erich Focht <[email protected]> * Copyright (C) 2004 Ashok Raj <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/string.h> #include <linux/types.h> #include <linux/irq.h> #include <linux/acpi.h> #include <linux/efi.h> #include <linux/mmzone.h> #include <linux/nodemask.h> #include <linux/slab.h> #include <acpi/processor.h> #include <asm/io.h> #include <asm/iosapic.h> #include <asm/page.h> #include <asm/numa.h> #include <asm/sal.h> #include <asm/cyclone.h> #define PREFIX "ACPI: " int acpi_lapic; unsigned int acpi_cpei_override; unsigned int acpi_cpei_phys_cpuid; #define ACPI_MAX_PLATFORM_INTERRUPTS 256 /* Array to record platform interrupt vectors for generic interrupt routing. */ int platform_intr_list[ACPI_MAX_PLATFORM_INTERRUPTS] = { [0 ... ACPI_MAX_PLATFORM_INTERRUPTS - 1] = -1 }; enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC; /* * Interrupt routing API for device drivers. Provides interrupt vector for * a generic platform event. Currently only CPEI is implemented. */ int acpi_request_vector(u32 int_type) { int vector = -1; if (int_type < ACPI_MAX_PLATFORM_INTERRUPTS) { /* corrected platform error interrupt */ vector = platform_intr_list[int_type]; } else printk(KERN_ERR "acpi_request_vector(): invalid interrupt type\n"); return vector; } void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size) { return __va(phys); } void __init __acpi_unmap_table(void __iomem *map, unsigned long size) { } /* -------------------------------------------------------------------------- Boot-time Table Parsing -------------------------------------------------------------------------- */ static int available_cpus __initdata; struct acpi_table_madt *acpi_madt __initdata; static u8 has_8259; static int __init acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_override *lapic; lapic = (struct acpi_madt_local_apic_override *)header; if (BAD_MADT_ENTRY(lapic, end)) return -EINVAL; if (lapic->address) { iounmap(ipi_base_addr); ipi_base_addr = ioremap(lapic->address, 0); } return 0; } static int __init acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_local_sapic *lsapic; lsapic = (struct acpi_madt_local_sapic *)header; /*Skip BAD_MADT_ENTRY check, as lsapic size could vary */ if (lsapic->lapic_flags & ACPI_MADT_ENABLED) { #ifdef CONFIG_SMP smp_boot_data.cpu_phys_id[available_cpus] = (lsapic->id << 8) | lsapic->eid; #endif ++available_cpus; } total_cpus++; return 0; } static int __init acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_local_apic_nmi *lacpi_nmi; lacpi_nmi = (struct acpi_madt_local_apic_nmi *)header; if (BAD_MADT_ENTRY(lacpi_nmi, end)) return -EINVAL; /* TBD: Support lapic_nmi entries */ return 0; } static int __init 
acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_io_sapic *iosapic; iosapic = (struct acpi_madt_io_sapic *)header; if (BAD_MADT_ENTRY(iosapic, end)) return -EINVAL; return iosapic_init(iosapic->address, iosapic->global_irq_base); } static unsigned int __initdata acpi_madt_rev; static int __init acpi_parse_plat_int_src(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_source *plintsrc; int vector; plintsrc = (struct acpi_madt_interrupt_source *)header; if (BAD_MADT_ENTRY(plintsrc, end)) return -EINVAL; /* * Get vector assignment for this interrupt, set attributes, * and program the IOSAPIC routing table. */ vector = iosapic_register_platform_intr(plintsrc->type, plintsrc->global_irq, plintsrc->io_sapic_vector, plintsrc->eid, plintsrc->id, ((plintsrc->inti_flags & ACPI_MADT_POLARITY_MASK) == ACPI_MADT_POLARITY_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, ((plintsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) == ACPI_MADT_TRIGGER_EDGE) ? IOSAPIC_EDGE : IOSAPIC_LEVEL); platform_intr_list[plintsrc->type] = vector; if (acpi_madt_rev > 1) { acpi_cpei_override = plintsrc->flags & ACPI_MADT_CPEI_OVERRIDE; } /* * Save the physical id, so we can check when its being removed */ acpi_cpei_phys_cpuid = ((plintsrc->id << 8) | (plintsrc->eid)) & 0xffff; return 0; } #ifdef CONFIG_HOTPLUG_CPU unsigned int can_cpei_retarget(void) { extern int cpe_vector; extern unsigned int force_cpei_retarget; /* * Only if CPEI is supported and the override flag * is present, otherwise return that its re-targettable * if we are in polling mode. */ if (cpe_vector > 0) { if (acpi_cpei_override || force_cpei_retarget) return 1; else return 0; } return 1; } unsigned int is_cpu_cpei_target(unsigned int cpu) { unsigned int logical_id; logical_id = cpu_logical_id(acpi_cpei_phys_cpuid); if (logical_id == cpu) return 1; else return 0; } void set_cpei_target_cpu(unsigned int cpu) { acpi_cpei_phys_cpuid = cpu_physical_id(cpu); } #endif unsigned int get_cpei_target_cpu(void) { return acpi_cpei_phys_cpuid; } static int __init acpi_parse_int_src_ovr(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_interrupt_override *p; p = (struct acpi_madt_interrupt_override *)header; if (BAD_MADT_ENTRY(p, end)) return -EINVAL; iosapic_override_isa_irq(p->source_irq, p->global_irq, ((p->inti_flags & ACPI_MADT_POLARITY_MASK) == ACPI_MADT_POLARITY_ACTIVE_LOW) ? IOSAPIC_POL_LOW : IOSAPIC_POL_HIGH, ((p->inti_flags & ACPI_MADT_TRIGGER_MASK) == ACPI_MADT_TRIGGER_LEVEL) ? IOSAPIC_LEVEL : IOSAPIC_EDGE); return 0; } static int __init acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end) { struct acpi_madt_nmi_source *nmi_src; nmi_src = (struct acpi_madt_nmi_source *)header; if (BAD_MADT_ENTRY(nmi_src, end)) return -EINVAL; /* TBD: Support nimsrc entries */ return 0; } static void __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (!strncmp(oem_id, "IBM", 3) && (!strncmp(oem_table_id, "SERMOW", 6))) { /* * Unfortunately ITC_DRIFT is not yet part of the * official SAL spec, so the ITC_DRIFT bit is not * set by the BIOS on this hardware. 
*/ sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT; cyclone_setup(); } } static int __init acpi_parse_madt(struct acpi_table_header *table) { acpi_madt = (struct acpi_table_madt *)table; acpi_madt_rev = acpi_madt->header.revision; /* remember the value for reference after free_initmem() */ #ifdef CONFIG_ITANIUM has_8259 = 1; /* Firmware on old Itanium systems is broken */ #else has_8259 = acpi_madt->flags & ACPI_MADT_PCAT_COMPAT; #endif iosapic_system_init(has_8259); /* Get base address of IPI Message Block */ if (acpi_madt->address) ipi_base_addr = ioremap(acpi_madt->address, 0); printk(KERN_INFO PREFIX "Local APIC address %p\n", ipi_base_addr); acpi_madt_oem_check(acpi_madt->header.oem_id, acpi_madt->header.oem_table_id); return 0; } #ifdef CONFIG_ACPI_NUMA #undef SLIT_DEBUG #define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32) static int __initdata srat_num_cpus; /* number of cpus */ static u32 pxm_flag[PXM_FLAG_LEN]; #define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag)) #define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag)) static struct acpi_table_slit __initdata *slit_table; cpumask_t early_cpu_possible_map = CPU_MASK_NONE; static int __init get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa) { int pxm; pxm = pa->proximity_domain_lo; if (acpi_srat_revision >= 2) pxm += pa->proximity_domain_hi[0] << 8; return pxm; } static int __init get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma) { int pxm; pxm = ma->proximity_domain; if (acpi_srat_revision <= 1) pxm &= 0xff; return pxm; } /* * ACPI 2.0 SLIT (System Locality Information Table) * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf */ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) { u32 len; len = sizeof(struct acpi_table_header) + 8 + slit->locality_count * slit->locality_count; if (slit->header.length != len) { printk(KERN_ERR "ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n", len, slit->header.length); return; } slit_table = slit; } void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { int pxm; if (!(pa->flags & ACPI_SRAT_CPU_ENABLED)) return; if (srat_num_cpus >= ARRAY_SIZE(node_cpuid)) { printk_once(KERN_WARNING "node_cpuid[%ld] is too small, may not be able to use all cpus\n", ARRAY_SIZE(node_cpuid)); return; } pxm = get_processor_proximity_domain(pa); /* record this node in proximity bitmap */ pxm_bit_set(pxm); node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->local_sapic_eid); /* nid should be overridden as logical node id later */ node_cpuid[srat_num_cpus].nid = pxm; cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map); srat_num_cpus++; } int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { unsigned long paddr, size; int pxm; struct node_memblk_s *p, *q, *pend; pxm = get_memory_proximity_domain(ma); /* fill node memory chunk structure */ paddr = ma->base_address; size = ma->length; /* Ignore disabled entries */ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) return -1; if (num_node_memblks >= NR_NODE_MEMBLKS) { pr_err("NUMA: too many memblk ranges\n"); return -EINVAL; } /* record this node in proximity bitmap */ pxm_bit_set(pxm); /* Insertion sort based on base address */ pend = &node_memblk[num_node_memblks]; for (p = &node_memblk[0]; p < pend; p++) { if (paddr < p->start_paddr) break; } if (p < pend) { for (q = pend - 1; q >= p; q--) *(q + 1) = *q; } p->start_paddr = paddr; p->size = size; p->nid = pxm; num_node_memblks++; return 0; } void __init acpi_numa_fixup(void) { int i, j, 
node_from, node_to; /* If there's no SRAT, fix the phys_id and mark node 0 online */ if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); slit_distance(0, 0) = LOCAL_DISTANCE; goto out; } /* * MCD - This can probably be dropped now. No need for pxm ID to node ID * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES. */ nodes_clear(node_online_map); for (i = 0; i < MAX_PXM_DOMAINS; i++) { if (pxm_bit_test(i)) { int nid = acpi_map_pxm_to_node(i); node_set_online(nid); } } /* set logical node id in memory chunk structure */ for (i = 0; i < num_node_memblks; i++) node_memblk[i].nid = pxm_to_node(node_memblk[i].nid); /* assign memory bank numbers for each chunk on each node */ for_each_online_node(i) { int bank; bank = 0; for (j = 0; j < num_node_memblks; j++) if (node_memblk[j].nid == i) node_memblk[j].bank = bank++; } /* set logical node id in cpu structure */ for_each_possible_early_cpu(i) node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid); printk(KERN_INFO "Number of logical nodes in system = %d\n", num_online_nodes()); printk(KERN_INFO "Number of memory chunks in system = %d\n", num_node_memblks); if (!slit_table) { for (i = 0; i < MAX_NUMNODES; i++) for (j = 0; j < MAX_NUMNODES; j++) slit_distance(i, j) = i == j ? LOCAL_DISTANCE : REMOTE_DISTANCE; goto out; } memset(numa_slit, -1, sizeof(numa_slit)); for (i = 0; i < slit_table->locality_count; i++) { if (!pxm_bit_test(i)) continue; node_from = pxm_to_node(i); for (j = 0; j < slit_table->locality_count; j++) { if (!pxm_bit_test(j)) continue; node_to = pxm_to_node(j); slit_distance(node_from, node_to) = slit_table->entry[i * slit_table->locality_count + j]; } } #ifdef SLIT_DEBUG printk("ACPI 2.0 SLIT locality table:\n"); for_each_online_node(i) { for_each_online_node(j) printk("%03d ", node_distance(i, j)); printk("\n"); } #endif out: node_possible_map = node_online_map; } #endif /* CONFIG_ACPI_NUMA */ /* * success: return IRQ number (>=0) * failure: return < 0 */ int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity) { if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) return gsi; if (has_8259 && gsi < 16) return isa_irq_to_vector(gsi); return iosapic_register_intr(gsi, (polarity == ACPI_ACTIVE_HIGH) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW, (triggering == ACPI_EDGE_SENSITIVE) ? 
IOSAPIC_EDGE : IOSAPIC_LEVEL); } EXPORT_SYMBOL_GPL(acpi_register_gsi); void acpi_unregister_gsi(u32 gsi) { if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM) return; if (has_8259 && gsi < 16) return; iosapic_unregister_intr(gsi); } EXPORT_SYMBOL_GPL(acpi_unregister_gsi); static int __init acpi_parse_fadt(struct acpi_table_header *table) { struct acpi_table_header *fadt_header; struct acpi_table_fadt *fadt; fadt_header = (struct acpi_table_header *)table; if (fadt_header->revision != 3) return -ENODEV; /* Only deal with ACPI 2.0 FADT */ fadt = (struct acpi_table_fadt *)fadt_header; acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW); return 0; } int __init early_acpi_boot_init(void) { int ret; /* * do a partial walk of MADT to determine how many CPUs * we have including offline CPUs */ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { printk(KERN_ERR PREFIX "Can't find MADT\n"); return 0; } ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS); if (ret < 1) printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n"); else acpi_lapic = 1; #ifdef CONFIG_SMP if (available_cpus == 0) { printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n"); printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id()); smp_boot_data.cpu_phys_id[available_cpus] = hard_smp_processor_id(); available_cpus = 1; /* We've got at least one of these, no? */ } smp_boot_data.cpu_count = available_cpus; #endif /* Make boot-up look pretty */ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus); return 0; } int __init acpi_boot_init(void) { /* * MADT * ---- * Parse the Multiple APIC Description Table (MADT), if exists. * Note that this table provides platform SMP configuration * information -- the successor to MPS tables. */ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) { printk(KERN_ERR PREFIX "Can't find MADT\n"); goto skip_madt; } /* Local APIC */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE, acpi_parse_lapic_addr_ovr, 0) < 0) printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n"); if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0) < 0) printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n"); /* I/O APIC */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_IO_SAPIC, acpi_parse_iosapic, NR_IOSAPICS) < 1) { printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n"); } /* System-Level Interrupt Routing */ if (acpi_table_parse_madt (ACPI_MADT_TYPE_INTERRUPT_SOURCE, acpi_parse_plat_int_src, ACPI_MAX_PLATFORM_INTERRUPTS) < 0) printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n"); if (acpi_table_parse_madt (ACPI_MADT_TYPE_INTERRUPT_OVERRIDE, acpi_parse_int_src_ovr, 0) < 0) printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n"); if (acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE, acpi_parse_nmi_src, 0) < 0) printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n"); skip_madt: /* * FADT says whether a legacy keyboard controller is present. * The FADT also contains an SCI_INT line, by which the system * gets interrupts such as power and sleep buttons. If it's not * on a Legacy interrupt, it needs to be setup. 
*/ if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) printk(KERN_ERR PREFIX "Can't find FADT\n"); #ifdef CONFIG_ACPI_NUMA #ifdef CONFIG_SMP if (srat_num_cpus == 0) { int cpu, i = 1; for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id()) node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu]; } #endif build_cpu_to_node_map(); #endif return 0; } int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) { int tmp; if (has_8259 && gsi < 16) *irq = isa_irq_to_vector(gsi); else { tmp = gsi_to_irq(gsi); if (tmp == -1) return -1; *irq = tmp; } return 0; } int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) { if (isa_irq >= 16) return -1; *gsi = isa_irq; return 0; } /* * ACPI based hotplug CPU support */ #ifdef CONFIG_ACPI_HOTPLUG_CPU int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA /* * We don't have cpu-only-node hotadd. But if the system equips * SRAT table, pxm is already found and node is ready. * So, just pxm_to_nid(pxm) is OK. * This code here is for the system which doesn't have full SRAT * table for possible cpus. */ node_cpuid[cpu].phys_id = physid; node_cpuid[cpu].nid = acpi_get_node(handle); #endif return 0; } int additional_cpus __initdata = -1; static __init int setup_additional_cpus(char *s) { if (s) additional_cpus = simple_strtol(s, NULL, 0); return 0; } early_param("additional_cpus", setup_additional_cpus); /* * cpu_possible_mask should be static, it cannot change as CPUs * are onlined, or offlined. The reason is per-cpu data-structures * are allocated by some modules at init time, and dont expect to * do this dynamically on cpu arrival/departure. * cpu_present_mask on the other hand can change dynamically. * In case when cpu_hotplug is not compiled, then we resort to current * behaviour, which is cpu_possible == cpu_present. * - Ashok Raj * * Three ways to find out the number of additional hotplug CPUs: * - If the BIOS specified disabled CPUs in ACPI/mptables use that. * - The user can overwrite it with additional_cpus=NUM * - Otherwise don't reserve additional CPUs. 
*/ __init void prefill_possible_map(void) { int i; int possible, disabled_cpus; disabled_cpus = total_cpus - available_cpus; if (additional_cpus == -1) { if (disabled_cpus > 0) additional_cpus = disabled_cpus; else additional_cpus = 0; } possible = available_cpus + additional_cpus; if (possible > nr_cpu_ids) possible = nr_cpu_ids; printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", possible, max((possible - available_cpus), 0)); for (i = 0; i < possible; i++) set_cpu_possible(i, true); } static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) { int cpu; cpu = cpumask_first_zero(cpu_present_mask); if (cpu >= nr_cpu_ids) return -EINVAL; acpi_map_cpu2node(handle, cpu, physid); set_cpu_present(cpu, true); ia64_cpu_to_sapicid[cpu] = physid; acpi_processor_set_pdc(handle); *pcpu = cpu; return (0); } /* wrapper to silence section mismatch warning */ int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } EXPORT_SYMBOL(acpi_map_cpu); int acpi_unmap_cpu(int cpu) { ia64_cpu_to_sapicid[cpu] = -1; set_cpu_present(cpu, false); #ifdef CONFIG_ACPI_NUMA /* NUMA specific cleanup's */ #endif return (0); } EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ #ifdef CONFIG_ACPI_NUMA static acpi_status acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_madt_io_sapic *iosapic; unsigned int gsi_base; int node; /* Only care about objects w/ a method that returns the MADT */ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) return AE_OK; if (!buffer.length || !buffer.pointer) return AE_OK; obj = buffer.pointer; if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < sizeof(*iosapic)) { kfree(buffer.pointer); return AE_OK; } iosapic = (struct acpi_madt_io_sapic *)obj->buffer.pointer; if (iosapic->header.type != ACPI_MADT_TYPE_IO_SAPIC) { kfree(buffer.pointer); return AE_OK; } gsi_base = iosapic->global_irq_base; kfree(buffer.pointer); /* OK, it's an IOSAPIC MADT entry; associate it with a node */ node = acpi_get_node(handle); if (node == NUMA_NO_NODE || !node_online(node) || cpumask_empty(cpumask_of_node(node))) return AE_OK; /* We know a gsi to node mapping! */ map_iosapic_to_node(gsi_base, node); return AE_OK; } static int __init acpi_map_iosapics (void) { acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL); return 0; } fs_initcall(acpi_map_iosapics); #endif /* CONFIG_ACPI_NUMA */ int __ref acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) { int err; if ((err = iosapic_init(phys_addr, gsi_base))) return err; #ifdef CONFIG_ACPI_NUMA acpi_map_iosapic(handle, 0, NULL, NULL); #endif /* CONFIG_ACPI_NUMA */ return 0; } EXPORT_SYMBOL(acpi_register_ioapic); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base) { return iosapic_remove(gsi_base); } EXPORT_SYMBOL(acpi_unregister_ioapic); /* * acpi_suspend_lowlevel() - save kernel state and suspend. * * TBD when IA64 starts to support suspend... */ int acpi_suspend_lowlevel(void) { return 0; } void acpi_proc_quirk_mwait_check(void) { }
linux-master
arch/ia64/kernel/acpi.c
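prefill_possible_map() above sizes cpu_possible_mask from the MADT CPU counts plus the optional additional_cpus= boot parameter, clamped to nr_cpu_ids. A minimal user-space sketch of that arithmetic, with made-up example inputs:

/*
 * User-space sketch of the sizing logic in prefill_possible_map()
 * above.  The input numbers below are examples only.
 */
#include <stdio.h>

static int possible_cpus(int available, int total, int additional, int nr_cpu_ids)
{
	int disabled = total - available;
	int possible;

	if (additional == -1)		/* no additional_cpus= on the command line */
		additional = disabled > 0 ? disabled : 0;

	possible = available + additional;
	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;
	return possible;
}

int main(void)
{
	/* e.g. 4 CPUs enabled, 2 more listed as disabled in the MADT */
	printf("possible = %d\n", possible_cpus(4, 6, -1, 64));	/* -> 6 */
	printf("possible = %d\n", possible_cpus(4, 6, 16, 8));	/* -> 8, clamped */
	return 0;
}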
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/xtensa/platforms/xt2000/setup.c * * Platform specific functions for the XT2000 board. * * Authors: Chris Zankel <[email protected]> * Joe Taylor <[email protected]> * * Copyright 2001 - 2004 Tensilica Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/stringify.h> #include <linux/platform_device.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/timer.h> #include <asm/processor.h> #include <asm/platform.h> #include <asm/bootparam.h> #include <platform/hardware.h> #include <platform/serial.h> /* Assumes s points to an 8-chr string. No checking for NULL. */ static void led_print (int f, char *s) { unsigned long* led_addr = (unsigned long*) (XT2000_LED_ADDR + 0xE0) + f; int i; for (i = f; i < 8; i++) if ((*led_addr++ = *s++) == 0) break; } static int xt2000_power_off(struct sys_off_data *unused) { led_print (0, "POWEROFF"); local_irq_disable(); while (1); return NOTIFY_DONE; } static int xt2000_restart(struct notifier_block *this, unsigned long event, void *ptr) { /* Flush and reset the mmu, simulate a processor reset, and * jump to the reset vector. */ cpu_reset(); return NOTIFY_DONE; } static struct notifier_block xt2000_restart_block = { .notifier_call = xt2000_restart, }; void __init platform_setup(char** cmdline) { led_print (0, "LINUX "); } /* Heartbeat. Let the LED blink. */ static void xt2000_heartbeat(struct timer_list *unused); static DEFINE_TIMER(heartbeat_timer, xt2000_heartbeat); static void xt2000_heartbeat(struct timer_list *unused) { static int i; led_print(7, i ? "." : " "); i ^= 1; mod_timer(&heartbeat_timer, jiffies + HZ / 2); } //#define RS_TABLE_SIZE 2 #define _SERIAL_PORT(_base,_irq) \ { \ .mapbase = (_base), \ .membase = (void*)(_base), \ .irq = (_irq), \ .uartclk = DUART16552_XTAL_FREQ, \ .iotype = UPIO_MEM, \ .flags = UPF_BOOT_AUTOCONF, \ .regshift = 2, \ } static struct plat_serial8250_port xt2000_serial_data[] = { #if XCHAL_HAVE_BE _SERIAL_PORT(DUART16552_1_ADDR + 3, DUART16552_1_INTNUM), _SERIAL_PORT(DUART16552_2_ADDR + 3, DUART16552_2_INTNUM), #else _SERIAL_PORT(DUART16552_1_ADDR, DUART16552_1_INTNUM), _SERIAL_PORT(DUART16552_2_ADDR, DUART16552_2_INTNUM), #endif { } }; static struct platform_device xt2000_serial8250_device = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = xt2000_serial_data, }, }; static struct resource xt2000_sonic_res[] = { { .start = SONIC83934_ADDR, .end = SONIC83934_ADDR + 0xff, .flags = IORESOURCE_MEM, }, { .start = SONIC83934_INTNUM, .end = SONIC83934_INTNUM, .flags = IORESOURCE_IRQ, }, }; static struct platform_device xt2000_sonic_device = { .name = "xtsonic", .num_resources = ARRAY_SIZE(xt2000_sonic_res), .resource = xt2000_sonic_res, }; static int __init xt2000_setup_devinit(void) { platform_device_register(&xt2000_serial8250_device); platform_device_register(&xt2000_sonic_device); mod_timer(&heartbeat_timer, jiffies + HZ / 2); register_restart_handler(&xt2000_restart_block); register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT, xt2000_power_off, NULL); return 0; } device_initcall(xt2000_setup_devinit);
linux-master
arch/xtensa/platforms/xt2000/setup.c
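led_print() in the XT2000 code above writes one character per LED register, starting at slot f and stopping at the terminating NUL or after eight slots. The host-side sketch below reproduces that loop against a plain array standing in for the memory-mapped LED registers; it is illustrative only.

/*
 * Host-side sketch of led_print() from the XT2000 setup code above.
 * A plain array replaces the XT2000_LED_ADDR-based register window.
 */
#include <stdio.h>

static unsigned long led_regs[8];	/* stand-in for the LED register block */

static void led_print(int f, const char *s)
{
	unsigned long *led_addr = led_regs + f;
	int i;

	for (i = f; i < 8; i++)
		if ((*led_addr++ = *s++) == 0)
			break;
}

int main(void)
{
	led_print(0, "LINUX   ");	/* fills all eight positions */
	led_print(7, ".");		/* heartbeat: touches only the last slot */

	for (int i = 0; i < 8; i++)
		printf("%c", led_regs[i] ? (char)led_regs[i] : ' ');
	printf("\n");
	return 0;
}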
// SPDX-License-Identifier: GPL-2.0-or-later /* * * arch/xtensa/platforms/iss/network.c * * Platform specific initialization. * * Authors: Chris Zankel <[email protected]> * Based on work form the UML team. * * Copyright 2005 Tensilica Inc. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/list.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/if_ether.h> #include <linux/inetdevice.h> #include <linux/init.h> #include <linux/if_tun.h> #include <linux/etherdevice.h> #include <linux/interrupt.h> #include <linux/ioctl.h> #include <linux/memblock.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/platform_device.h> #include <platform/simcall.h> #define DRIVER_NAME "iss-netdev" #define ETH_MAX_PACKET 1500 #define ETH_HEADER_OTHER 14 #define ISS_NET_TIMER_VALUE (HZ / 10) /* ------------------------------------------------------------------------- */ /* We currently only support the TUNTAP transport protocol. */ #define TRANSPORT_TUNTAP_NAME "tuntap" #define TRANSPORT_TUNTAP_MTU ETH_MAX_PACKET struct tuntap_info { char dev_name[IFNAMSIZ]; int fd; }; /* ------------------------------------------------------------------------- */ struct iss_net_private; struct iss_net_ops { int (*open)(struct iss_net_private *lp); void (*close)(struct iss_net_private *lp); int (*read)(struct iss_net_private *lp, struct sk_buff **skb); int (*write)(struct iss_net_private *lp, struct sk_buff **skb); unsigned short (*protocol)(struct sk_buff *skb); int (*poll)(struct iss_net_private *lp); }; /* This structure contains out private information for the driver. */ struct iss_net_private { spinlock_t lock; struct net_device *dev; struct platform_device pdev; struct timer_list tl; struct rtnl_link_stats64 stats; struct timer_list timer; unsigned int timer_val; int index; int mtu; struct { union { struct tuntap_info tuntap; } info; const struct iss_net_ops *net_ops; } tp; }; /* ================================ HELPERS ================================ */ static char *split_if_spec(char *str, ...) { char **arg, *end; va_list ap; va_start(ap, str); while ((arg = va_arg(ap, char**)) != NULL) { if (*str == '\0') { va_end(ap); return NULL; } end = strchr(str, ','); if (end != str) *arg = str; if (end == NULL) { va_end(ap); return NULL; } *end++ = '\0'; str = end; } va_end(ap); return str; } /* Set Ethernet address of the specified device. 
*/ static void setup_etheraddr(struct net_device *dev, char *str) { u8 addr[ETH_ALEN]; if (str == NULL) goto random; if (!mac_pton(str, addr)) { pr_err("%s: failed to parse '%s' as an ethernet address\n", dev->name, str); goto random; } if (is_multicast_ether_addr(addr)) { pr_err("%s: attempt to assign a multicast ethernet address\n", dev->name); goto random; } if (!is_valid_ether_addr(addr)) { pr_err("%s: attempt to assign an invalid ethernet address\n", dev->name); goto random; } if (!is_local_ether_addr(addr)) pr_warn("%s: assigning a globally valid ethernet address\n", dev->name); eth_hw_addr_set(dev, addr); return; random: pr_info("%s: choosing a random ethernet address\n", dev->name); eth_hw_addr_random(dev); } /* ======================= TUNTAP TRANSPORT INTERFACE ====================== */ static int tuntap_open(struct iss_net_private *lp) { struct ifreq ifr; char *dev_name = lp->tp.info.tuntap.dev_name; int err = -EINVAL; int fd; fd = simc_open("/dev/net/tun", 02, 0); /* O_RDWR */ if (fd < 0) { pr_err("%s: failed to open /dev/net/tun, returned %d (errno = %d)\n", lp->dev->name, fd, errno); return fd; } memset(&ifr, 0, sizeof(ifr)); ifr.ifr_flags = IFF_TAP | IFF_NO_PI; strscpy(ifr.ifr_name, dev_name, sizeof(ifr.ifr_name)); err = simc_ioctl(fd, TUNSETIFF, &ifr); if (err < 0) { pr_err("%s: failed to set interface %s, returned %d (errno = %d)\n", lp->dev->name, dev_name, err, errno); simc_close(fd); return err; } lp->tp.info.tuntap.fd = fd; return err; } static void tuntap_close(struct iss_net_private *lp) { simc_close(lp->tp.info.tuntap.fd); lp->tp.info.tuntap.fd = -1; } static int tuntap_read(struct iss_net_private *lp, struct sk_buff **skb) { return simc_read(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->dev->mtu + ETH_HEADER_OTHER); } static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb) { return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len); } unsigned short tuntap_protocol(struct sk_buff *skb) { return eth_type_trans(skb, skb->dev); } static int tuntap_poll(struct iss_net_private *lp) { return simc_poll(lp->tp.info.tuntap.fd); } static const struct iss_net_ops tuntap_ops = { .open = tuntap_open, .close = tuntap_close, .read = tuntap_read, .write = tuntap_write, .protocol = tuntap_protocol, .poll = tuntap_poll, }; /* * ethX=tuntap,[mac address],device name */ static int tuntap_probe(struct iss_net_private *lp, int index, char *init) { struct net_device *dev = lp->dev; char *dev_name = NULL, *mac_str = NULL, *rem = NULL; /* Transport should be 'tuntap': ethX=tuntap,mac,dev_name */ if (strncmp(init, TRANSPORT_TUNTAP_NAME, sizeof(TRANSPORT_TUNTAP_NAME) - 1)) return 0; init += sizeof(TRANSPORT_TUNTAP_NAME) - 1; if (*init == ',') { rem = split_if_spec(init + 1, &mac_str, &dev_name, NULL); if (rem != NULL) { pr_err("%s: extra garbage on specification : '%s'\n", dev->name, rem); return 0; } } else if (*init != '\0') { pr_err("%s: invalid argument: %s. Skipping device!\n", dev->name, init); return 0; } if (!dev_name) { pr_err("%s: missing tuntap device name\n", dev->name); return 0; } strscpy(lp->tp.info.tuntap.dev_name, dev_name, sizeof(lp->tp.info.tuntap.dev_name)); setup_etheraddr(dev, mac_str); lp->mtu = TRANSPORT_TUNTAP_MTU; lp->tp.info.tuntap.fd = -1; lp->tp.net_ops = &tuntap_ops; return 1; } /* ================================ ISS NET ================================ */ static int iss_net_rx(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); int pkt_len; struct sk_buff *skb; /* Check if there is any new data. 
*/ if (lp->tp.net_ops->poll(lp) == 0) return 0; /* Try to allocate memory, if it fails, try again next round. */ skb = dev_alloc_skb(dev->mtu + 2 + ETH_HEADER_OTHER); if (skb == NULL) { spin_lock_bh(&lp->lock); lp->stats.rx_dropped++; spin_unlock_bh(&lp->lock); return 0; } skb_reserve(skb, 2); /* Setup skb */ skb->dev = dev; skb_reset_mac_header(skb); pkt_len = lp->tp.net_ops->read(lp, &skb); skb_put(skb, pkt_len); if (pkt_len > 0) { skb_trim(skb, pkt_len); skb->protocol = lp->tp.net_ops->protocol(skb); spin_lock_bh(&lp->lock); lp->stats.rx_bytes += skb->len; lp->stats.rx_packets++; spin_unlock_bh(&lp->lock); netif_rx(skb); return pkt_len; } kfree_skb(skb); return pkt_len; } static int iss_net_poll(struct iss_net_private *lp) { int err, ret = 0; if (!netif_running(lp->dev)) return 0; while ((err = iss_net_rx(lp->dev)) > 0) ret++; if (err < 0) { pr_err("Device '%s' read returned %d, shutting it down\n", lp->dev->name, err); dev_close(lp->dev); } else { /* FIXME reactivate_fd(lp->fd, ISS_ETH_IRQ); */ } return ret; } static void iss_net_timer(struct timer_list *t) { struct iss_net_private *lp = from_timer(lp, t, timer); iss_net_poll(lp); mod_timer(&lp->timer, jiffies + lp->timer_val); } static int iss_net_open(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); int err; err = lp->tp.net_ops->open(lp); if (err < 0) return err; netif_start_queue(dev); /* clear buffer - it can happen that the host side of the interface * is full when we get here. In this case, new data is never queued, * SIGIOs never arrive, and the net never works. */ while ((err = iss_net_rx(dev)) > 0) ; timer_setup(&lp->timer, iss_net_timer, 0); lp->timer_val = ISS_NET_TIMER_VALUE; mod_timer(&lp->timer, jiffies + lp->timer_val); return err; } static int iss_net_close(struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); netif_stop_queue(dev); del_timer_sync(&lp->timer); lp->tp.net_ops->close(lp); return 0; } static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct iss_net_private *lp = netdev_priv(dev); int len; netif_stop_queue(dev); len = lp->tp.net_ops->write(lp, &skb); if (len == skb->len) { spin_lock_bh(&lp->lock); lp->stats.tx_packets++; lp->stats.tx_bytes += skb->len; spin_unlock_bh(&lp->lock); netif_trans_update(dev); netif_start_queue(dev); /* this is normally done in the interrupt when tx finishes */ netif_wake_queue(dev); } else if (len == 0) { netif_start_queue(dev); spin_lock_bh(&lp->lock); lp->stats.tx_dropped++; spin_unlock_bh(&lp->lock); } else { netif_start_queue(dev); pr_err("%s: %s failed(%d)\n", dev->name, __func__, len); } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void iss_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct iss_net_private *lp = netdev_priv(dev); spin_lock_bh(&lp->lock); *stats = lp->stats; spin_unlock_bh(&lp->lock); } static void iss_net_set_multicast_list(struct net_device *dev) { } static void iss_net_tx_timeout(struct net_device *dev, unsigned int txqueue) { } static int iss_net_change_mtu(struct net_device *dev, int new_mtu) { return -EINVAL; } void iss_net_user_timer_expire(struct timer_list *unused) { } static struct platform_driver iss_net_driver = { .driver = { .name = DRIVER_NAME, }, }; static int driver_registered; static const struct net_device_ops iss_netdev_ops = { .ndo_open = iss_net_open, .ndo_stop = iss_net_close, .ndo_get_stats64 = iss_net_get_stats64, .ndo_start_xmit = iss_net_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = iss_net_change_mtu, 
.ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = iss_net_tx_timeout, .ndo_set_rx_mode = iss_net_set_multicast_list, }; static void iss_net_pdev_release(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct iss_net_private *lp = container_of(pdev, struct iss_net_private, pdev); free_netdev(lp->dev); } static void iss_net_configure(int index, char *init) { struct net_device *dev; struct iss_net_private *lp; dev = alloc_etherdev(sizeof(*lp)); if (dev == NULL) { pr_err("eth_configure: failed to allocate device\n"); return; } /* Initialize private element. */ lp = netdev_priv(dev); *lp = (struct iss_net_private) { .dev = dev, .index = index, }; spin_lock_init(&lp->lock); /* * If this name ends up conflicting with an existing registered * netdevice, that is OK, register_netdev{,ice}() will notice this * and fail. */ snprintf(dev->name, sizeof(dev->name), "eth%d", index); /* * Try all transport protocols. * Note: more protocols can be added by adding '&& !X_init(lp, eth)'. */ if (!tuntap_probe(lp, index, init)) { pr_err("%s: invalid arguments. Skipping device!\n", dev->name); goto err_free_netdev; } pr_info("Netdevice %d (%pM)\n", index, dev->dev_addr); /* sysfs register */ if (!driver_registered) { if (platform_driver_register(&iss_net_driver)) goto err_free_netdev; driver_registered = 1; } lp->pdev.id = index; lp->pdev.name = DRIVER_NAME; lp->pdev.dev.release = iss_net_pdev_release; if (platform_device_register(&lp->pdev)) goto err_free_netdev; SET_NETDEV_DEV(dev, &lp->pdev.dev); dev->netdev_ops = &iss_netdev_ops; dev->mtu = lp->mtu; dev->watchdog_timeo = (HZ >> 1); dev->irq = -1; rtnl_lock(); if (register_netdevice(dev)) { rtnl_unlock(); pr_err("%s: error registering net device!\n", dev->name); platform_device_unregister(&lp->pdev); /* dev is freed by the iss_net_pdev_release callback */ return; } rtnl_unlock(); timer_setup(&lp->tl, iss_net_user_timer_expire, 0); return; err_free_netdev: free_netdev(dev); } /* ------------------------------------------------------------------------- */ /* Filled in during early boot */ struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line); struct iss_net_init { struct list_head list; char *init; /* init string */ int index; }; /* * Parse the command line and look for 'ethX=...' fields, and register all * those fields. They will be later initialized in iss_net_init. */ static int __init iss_net_setup(char *str) { struct iss_net_init *device = NULL; struct iss_net_init *new; struct list_head *ele; char *end; int rc; unsigned n; end = strchr(str, '='); if (!end) { pr_err("Expected '=' after device number\n"); return 1; } *end = 0; rc = kstrtouint(str, 0, &n); *end = '='; if (rc < 0) { pr_err("Failed to parse '%s'\n", str); return 1; } str = end; list_for_each(ele, &eth_cmd_line) { device = list_entry(ele, struct iss_net_init, list); if (device->index == n) break; } if (device && device->index == n) { pr_err("Device %u already configured\n", n); return 1; } new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); if (new == NULL) { pr_err("Alloc_bootmem failed\n"); return 1; } INIT_LIST_HEAD(&new->list); new->index = n; new->init = str + 1; list_add_tail(&new->list, &eth_cmd_line); return 1; } __setup("eth", iss_net_setup); /* * Initialize all ISS Ethernet devices previously registered in iss_net_setup. */ static int iss_net_init(void) { struct list_head *ele, *next; /* Walk through all Ethernet devices specified in the command line. 
*/ list_for_each_safe(ele, next, &eth_cmd_line) { struct iss_net_init *eth; eth = list_entry(ele, struct iss_net_init, list); iss_net_configure(eth->index, eth->init); } return 1; } device_initcall(iss_net_init);
linux-master
arch/xtensa/platforms/iss/network.c
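/*
 * Illustrative sketch (not part of the kernel sources above): a minimal
 * user-space parser for the "ethX=tuntap,[mac],device" specification that
 * tuntap_probe()/setup_etheraddr() handle. Field splitting here uses plain
 * strchr() instead of the kernel's split_if_spec() helper, which is assumed
 * to behave similarly; the function names and the sample spec are invented
 * for the example.
 */
#include <stdio.h>
#include <string.h>

static int parse_tuntap_spec(char *spec, char **mac, char **dev)
{
	char *p;

	if (strncmp(spec, "tuntap", 6) != 0)
		return -1;		/* unknown transport */
	spec += 6;
	if (*spec != ',')
		return -1;		/* no parameters given */

	*mac = spec + 1;		/* may be empty -> random MAC */
	p = strchr(*mac, ',');
	if (!p)
		return -1;		/* device name is mandatory */
	*p = '\0';
	*dev = p + 1;
	return 0;
}

int main(void)
{
	char spec[] = "tuntap,52:54:00:12:34:56,tap0";	/* example value */
	char *mac, *dev;

	if (parse_tuntap_spec(spec, &mac, &dev) == 0)
		printf("mac='%s' dev='%s'\n", mac[0] ? mac : "(random)", dev);
	return 0;
}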
/* * arch/xtensa/platforms/iss/simdisk.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2013 Tensilica Inc. * Authors Victor Prupis */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/proc_fs.h> #include <linux/uaccess.h> #include <platform/simcall.h> #define SIMDISK_MAJOR 240 #define SIMDISK_MINORS 1 #define MAX_SIMDISK_COUNT 10 struct simdisk { const char *filename; spinlock_t lock; struct gendisk *gd; struct proc_dir_entry *procfile; int users; unsigned long size; int fd; }; static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT; module_param(simdisk_count, int, S_IRUGO); MODULE_PARM_DESC(simdisk_count, "Number of simdisk units."); static int n_files; static const char *filename[MAX_SIMDISK_COUNT] = { #ifdef CONFIG_SIMDISK0_FILENAME CONFIG_SIMDISK0_FILENAME, #ifdef CONFIG_SIMDISK1_FILENAME CONFIG_SIMDISK1_FILENAME, #endif #endif }; static int simdisk_param_set_filename(const char *val, const struct kernel_param *kp) { if (n_files < ARRAY_SIZE(filename)) filename[n_files++] = val; else return -EINVAL; return 0; } static const struct kernel_param_ops simdisk_param_ops_filename = { .set = simdisk_param_set_filename, }; module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0); MODULE_PARM_DESC(filename, "Backing storage filename."); static int simdisk_major = SIMDISK_MAJOR; static void simdisk_transfer(struct simdisk *dev, unsigned long sector, unsigned long nsect, char *buffer, int write) { unsigned long offset = sector << SECTOR_SHIFT; unsigned long nbytes = nsect << SECTOR_SHIFT; if (offset > dev->size || dev->size - offset < nbytes) { pr_notice("Beyond-end %s (%ld %ld)\n", write ? 
"write" : "read", offset, nbytes); return; } spin_lock(&dev->lock); while (nbytes > 0) { unsigned long io; simc_lseek(dev->fd, offset, SEEK_SET); READ_ONCE(*buffer); if (write) io = simc_write(dev->fd, buffer, nbytes); else io = simc_read(dev->fd, buffer, nbytes); if (io == -1) { pr_err("SIMDISK: IO error %d\n", errno); break; } buffer += io; offset += io; nbytes -= io; } spin_unlock(&dev->lock); } static void simdisk_submit_bio(struct bio *bio) { struct simdisk *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; struct bvec_iter iter; sector_t sector = bio->bi_iter.bi_sector; bio_for_each_segment(bvec, bio, iter) { char *buffer = bvec_kmap_local(&bvec); unsigned len = bvec.bv_len >> SECTOR_SHIFT; simdisk_transfer(dev, sector, len, buffer, bio_data_dir(bio) == WRITE); sector += len; kunmap_local(buffer); } bio_endio(bio); } static int simdisk_open(struct gendisk *disk, blk_mode_t mode) { struct simdisk *dev = disk->private_data; spin_lock(&dev->lock); ++dev->users; spin_unlock(&dev->lock); return 0; } static void simdisk_release(struct gendisk *disk) { struct simdisk *dev = disk->private_data; spin_lock(&dev->lock); --dev->users; spin_unlock(&dev->lock); } static const struct block_device_operations simdisk_ops = { .owner = THIS_MODULE, .submit_bio = simdisk_submit_bio, .open = simdisk_open, .release = simdisk_release, }; static struct simdisk *sddev; static struct proc_dir_entry *simdisk_procdir; static int simdisk_attach(struct simdisk *dev, const char *filename) { int err = 0; filename = kstrdup(filename, GFP_KERNEL); if (filename == NULL) return -ENOMEM; spin_lock(&dev->lock); if (dev->fd != -1) { err = -EBUSY; goto out; } dev->fd = simc_open(filename, O_RDWR, 0); if (dev->fd == -1) { pr_err("SIMDISK: Can't open %s: %d\n", filename, errno); err = -ENODEV; goto out; } dev->size = simc_lseek(dev->fd, 0, SEEK_END); set_capacity(dev->gd, dev->size >> SECTOR_SHIFT); dev->filename = filename; pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename); out: if (err) kfree(filename); spin_unlock(&dev->lock); return err; } static int simdisk_detach(struct simdisk *dev) { int err = 0; spin_lock(&dev->lock); if (dev->users != 0) { err = -EBUSY; } else if (dev->fd != -1) { if (simc_close(dev->fd)) { pr_err("SIMDISK: error closing %s: %d\n", dev->filename, errno); err = -EIO; } else { pr_info("SIMDISK: %s detached from %s\n", dev->gd->disk_name, dev->filename); dev->fd = -1; kfree(dev->filename); dev->filename = NULL; } } spin_unlock(&dev->lock); return err; } static ssize_t proc_read_simdisk(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct simdisk *dev = pde_data(file_inode(file)); const char *s = dev->filename; if (s) { ssize_t len = strlen(s); char *temp = kmalloc(len + 2, GFP_KERNEL); if (!temp) return -ENOMEM; len = scnprintf(temp, len + 2, "%s\n", s); len = simple_read_from_buffer(buf, size, ppos, temp, len); kfree(temp); return len; } return simple_read_from_buffer(buf, size, ppos, "\n", 1); } static ssize_t proc_write_simdisk(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char *tmp = memdup_user_nul(buf, count); struct simdisk *dev = pde_data(file_inode(file)); int err; if (IS_ERR(tmp)) return PTR_ERR(tmp); err = simdisk_detach(dev); if (err != 0) goto out_free; if (count > 0 && tmp[count - 1] == '\n') tmp[count - 1] = 0; if (tmp[0]) err = simdisk_attach(dev, tmp); if (err == 0) err = count; out_free: kfree(tmp); return err; } static const struct proc_ops simdisk_proc_ops = { .proc_read = proc_read_simdisk, .proc_write = 
proc_write_simdisk, .proc_lseek = default_llseek, }; static int __init simdisk_setup(struct simdisk *dev, int which, struct proc_dir_entry *procdir) { char tmp[2] = { '0' + which, 0 }; int err = -ENOMEM; dev->fd = -1; dev->filename = NULL; spin_lock_init(&dev->lock); dev->users = 0; dev->gd = blk_alloc_disk(NUMA_NO_NODE); if (!dev->gd) goto out; dev->gd->major = simdisk_major; dev->gd->first_minor = which; dev->gd->minors = SIMDISK_MINORS; dev->gd->fops = &simdisk_ops; dev->gd->private_data = dev; snprintf(dev->gd->disk_name, 32, "simdisk%d", which); set_capacity(dev->gd, 0); err = add_disk(dev->gd); if (err) goto out_cleanup_disk; dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev); return 0; out_cleanup_disk: put_disk(dev->gd); out: return err; } static int __init simdisk_init(void) { int i; if (register_blkdev(simdisk_major, "simdisk") < 0) { pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major); return -EIO; } pr_info("SIMDISK: major: %d\n", simdisk_major); if (n_files > simdisk_count) simdisk_count = n_files; if (simdisk_count > MAX_SIMDISK_COUNT) simdisk_count = MAX_SIMDISK_COUNT; sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL); if (sddev == NULL) goto out_unregister; simdisk_procdir = proc_mkdir("simdisk", 0); if (simdisk_procdir == NULL) goto out_free_unregister; for (i = 0; i < simdisk_count; ++i) { if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) { if (filename[i] != NULL && filename[i][0] != 0 && (n_files == 0 || i < n_files)) simdisk_attach(sddev + i, filename[i]); } } return 0; out_free_unregister: kfree(sddev); out_unregister: unregister_blkdev(simdisk_major, "simdisk"); return -ENOMEM; } module_init(simdisk_init); static void simdisk_teardown(struct simdisk *dev, int which, struct proc_dir_entry *procdir) { char tmp[2] = { '0' + which, 0 }; simdisk_detach(dev); if (dev->gd) { del_gendisk(dev->gd); put_disk(dev->gd); } remove_proc_entry(tmp, procdir); } static void __exit simdisk_exit(void) { int i; for (i = 0; i < simdisk_count; ++i) simdisk_teardown(sddev + i, i, simdisk_procdir); remove_proc_entry("simdisk", 0); kfree(sddev); unregister_blkdev(simdisk_major, "simdisk"); } module_exit(simdisk_exit); MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR); MODULE_LICENSE("GPL");
linux-master
arch/xtensa/platforms/iss/simdisk.c
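/*
 * Illustrative sketch (not part of the kernel sources above): driving the
 * /proc/simdisk/<n> interface implemented by proc_write_simdisk() and
 * proc_read_simdisk(). Writing a path attaches that file as backing storage
 * (after detaching the current one); reading returns the currently attached
 * filename. The proc path and backing file name below are example values.
 */
#include <stdio.h>

int main(void)
{
	const char *proc = "/proc/simdisk/0";	/* first simdisk unit */
	char current[256];
	FILE *f;

	/* Attach a new backing file (the driver detaches the old one first). */
	f = fopen(proc, "w");
	if (!f || fputs("/tmp/simdisk0.img\n", f) == EOF)
		perror("attach");
	if (f)
		fclose(f);

	/* Read back which file is currently attached. */
	f = fopen(proc, "r");
	if (f && fgets(current, sizeof(current), f))
		printf("backing file: %s", current);
	if (f)
		fclose(f);
	return 0;
}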
// SPDX-License-Identifier: GPL-2.0-or-later /* * * arch/xtensa/platform-iss/setup.c * * Platform specific initialization. * * Authors: Chris Zankel <[email protected]> * Joe Taylor <[email protected]> * * Copyright 2001 - 2005 Tensilica Inc. * Copyright 2017 Cadence Design Systems Inc. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/panic_notifier.h> #include <linux/printk.h> #include <linux/reboot.h> #include <linux/string.h> #include <asm/platform.h> #include <asm/setup.h> #include <platform/simcall.h> static int iss_power_off(struct sys_off_data *unused) { pr_info(" ** Called platform_power_off() **\n"); simc_exit(0); return NOTIFY_DONE; } static int iss_restart(struct notifier_block *this, unsigned long event, void *ptr) { /* Flush and reset the mmu, simulate a processor reset, and * jump to the reset vector. */ cpu_reset(); return NOTIFY_DONE; } static struct notifier_block iss_restart_block = { .notifier_call = iss_restart, }; static int iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { simc_exit(1); return NOTIFY_DONE; } static struct notifier_block iss_panic_block = { .notifier_call = iss_panic_event, }; void __init platform_setup(char **p_cmdline) { static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata; static char cmdline[COMMAND_LINE_SIZE] __initdata; int argc = simc_argc(); int argv_size = simc_argv_size(); if (argc > 1) { if (argv_size > sizeof(argv)) { pr_err("%s: command line too long: argv_size = %d\n", __func__, argv_size); } else { int i; cmdline[0] = 0; simc_argv((void *)argv); for (i = 1; i < argc; ++i) { if (i > 1) strcat(cmdline, " "); strcat(cmdline, argv[i]); } *p_cmdline = cmdline; } } atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block); register_restart_handler(&iss_restart_block); register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_PLATFORM, iss_power_off, NULL); }
linux-master
arch/xtensa/platforms/iss/setup.c
/* * arch/xtensa/platforms/iss/console.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2005 Tensilica Inc. * Authors Christian Zankel, Joe Taylor */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/console.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/major.h> #include <linux/param.h> #include <linux/seq_file.h> #include <linux/serial.h> #include <linux/uaccess.h> #include <asm/irq.h> #include <platform/simcall.h> #include <linux/tty.h> #include <linux/tty_flip.h> #define SERIAL_MAX_NUM_LINES 1 #define SERIAL_TIMER_VALUE (HZ / 10) static void rs_poll(struct timer_list *); static struct tty_driver *serial_driver; static struct tty_port serial_port; static DEFINE_TIMER(serial_timer, rs_poll); static int rs_open(struct tty_struct *tty, struct file * filp) { if (tty->count == 1) mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); return 0; } static void rs_close(struct tty_struct *tty, struct file * filp) { if (tty->count == 1) del_timer_sync(&serial_timer); } static ssize_t rs_write(struct tty_struct * tty, const u8 *buf, size_t count) { /* see drivers/char/serialX.c to reference original version */ simc_write(1, buf, count); return count; } static void rs_poll(struct timer_list *unused) { struct tty_port *port = &serial_port; int i = 0; int rd = 1; unsigned char c; while (simc_poll(0)) { rd = simc_read(0, &c, 1); if (rd <= 0) break; tty_insert_flip_char(port, c, TTY_NORMAL); i++; } if (i) tty_flip_buffer_push(port); if (rd) mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); } static unsigned int rs_write_room(struct tty_struct *tty) { /* Let's say iss can always accept 2K characters.. */ return 2 * 1024; } static int rs_proc_show(struct seq_file *m, void *v) { seq_printf(m, "serinfo:1.0 driver:0.1\n"); return 0; } static const struct tty_operations serial_ops = { .open = rs_open, .close = rs_close, .write = rs_write, .write_room = rs_write_room, .proc_show = rs_proc_show, }; static int __init rs_init(void) { struct tty_driver *driver; int ret; driver = tty_alloc_driver(SERIAL_MAX_NUM_LINES, TTY_DRIVER_REAL_RAW); if (IS_ERR(driver)) return PTR_ERR(driver); tty_port_init(&serial_port); /* Initialize the tty_driver structure */ driver->driver_name = "iss_serial"; driver->name = "ttyS"; driver->major = TTY_MAJOR; driver->minor_start = 64; driver->type = TTY_DRIVER_TYPE_SERIAL; driver->subtype = SERIAL_TYPE_NORMAL; driver->init_termios = tty_std_termios; driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; tty_set_operations(driver, &serial_ops); tty_port_link_device(&serial_port, driver, 0); ret = tty_register_driver(driver); if (ret) { pr_err("Couldn't register serial driver\n"); tty_driver_kref_put(driver); tty_port_destroy(&serial_port); return ret; } serial_driver = driver; return 0; } static __exit void rs_exit(void) { tty_unregister_driver(serial_driver); tty_driver_kref_put(serial_driver); tty_port_destroy(&serial_port); } /* We use `late_initcall' instead of just `__initcall' as a workaround for * the fact that (1) simcons_tty_init can't be called before tty_init, * (2) tty_init is called via `module_init', (3) if statically linked, * module_init == device_init, and (4) there's no ordering of init lists. 
* We can do this easily because simcons is always statically linked, but * other tty drivers that depend on tty_init and which must use * `module_init' to declare their init routines are likely to be broken. */ late_initcall(rs_init); #ifdef CONFIG_SERIAL_CONSOLE static void iss_console_write(struct console *co, const char *s, unsigned count) { if (s && *s != 0) { int len = strlen(s); simc_write(1, s, count < len ? count : len); } } static struct tty_driver* iss_console_device(struct console *c, int *index) { *index = c->index; return serial_driver; } static struct console sercons = { .name = "ttyS", .write = iss_console_write, .device = iss_console_device, .flags = CON_PRINTBUFFER, .index = -1 }; static int __init iss_console_init(void) { register_console(&sercons); return 0; } console_initcall(iss_console_init); #endif /* CONFIG_SERIAL_CONSOLE */
linux-master
arch/xtensa/platforms/iss/console.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * arch/xtensa/platform/xtavnet/setup.c * * ... * * Authors: Chris Zankel <[email protected]> * Joe Taylor <[email protected]> * * Copyright 2001 - 2006 Tensilica Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/kdev_t.h> #include <linux/types.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/clk-provider.h> #include <linux/of_address.h> #include <linux/slab.h> #include <asm/timex.h> #include <asm/processor.h> #include <asm/platform.h> #include <asm/bootparam.h> #include <platform/lcd.h> #include <platform/hardware.h> static int xtfpga_power_off(struct sys_off_data *unused) { lcd_disp_at_pos("POWEROFF", 0); local_irq_disable(); while (1) cpu_relax(); return NOTIFY_DONE; } static int xtfpga_restart(struct notifier_block *this, unsigned long event, void *ptr) { /* Try software reset first. */ WRITE_ONCE(*(u32 *)XTFPGA_SWRST_VADDR, 0xdead); /* If software reset did not work, flush and reset the mmu, * simulate a processor reset, and jump to the reset vector. */ cpu_reset(); return NOTIFY_DONE; } static struct notifier_block xtfpga_restart_block = { .notifier_call = xtfpga_restart, }; #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT void __init platform_calibrate_ccount(void) { ccount_freq = *(long *)XTFPGA_CLKFRQ_VADDR; } #endif static void __init xtfpga_register_handlers(void) { register_restart_handler(&xtfpga_restart_block); register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_DEFAULT, xtfpga_power_off, NULL); } #ifdef CONFIG_USE_OF static void __init xtfpga_clk_setup(struct device_node *np) { void __iomem *base = of_iomap(np, 0); struct clk *clk; u32 freq; if (!base) { pr_err("%pOFn: invalid address\n", np); return; } freq = __raw_readl(base); iounmap(base); clk = clk_register_fixed_rate(NULL, np->name, NULL, 0, freq); if (IS_ERR(clk)) { pr_err("%pOFn: clk registration failed\n", np); return; } if (of_clk_add_provider(np, of_clk_src_simple_get, clk)) { pr_err("%pOFn: clk provider registration failed\n", np); return; } } CLK_OF_DECLARE(xtfpga_clk, "cdns,xtfpga-clock", xtfpga_clk_setup); #define MAC_LEN 6 static void __init update_local_mac(struct device_node *node) { struct property *newmac; const u8* macaddr; int prop_len; macaddr = of_get_property(node, "local-mac-address", &prop_len); if (macaddr == NULL || prop_len != MAC_LEN) return; newmac = kzalloc(sizeof(*newmac) + MAC_LEN, GFP_KERNEL); if (newmac == NULL) return; newmac->value = newmac + 1; newmac->length = MAC_LEN; newmac->name = kstrdup("local-mac-address", GFP_KERNEL); if (newmac->name == NULL) { kfree(newmac); return; } memcpy(newmac->value, macaddr, MAC_LEN); ((u8*)newmac->value)[5] = (*(u32*)DIP_SWITCHES_VADDR) & 0x3f; of_update_property(node, newmac); } static int __init machine_setup(void) { struct device_node *eth = NULL; if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) update_local_mac(eth); of_node_put(eth); xtfpga_register_handlers(); return 0; } arch_initcall(machine_setup); #else #include <linux/serial_8250.h> #include <linux/if.h> #include <net/ethoc.h> #include <linux/usb/c67x00.h> /*---------------------------------------------------------------------------- * Ethernet -- OpenCores Ethernet MAC (ethoc driver) */ static struct resource ethoc_res[] = { [0] = { /* register space */ .start = OETH_REGS_PADDR, .end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1, .flags = 
IORESOURCE_MEM, }, [1] = { /* buffer space */ .start = OETH_SRAMBUFF_PADDR, .end = OETH_SRAMBUFF_PADDR + OETH_SRAMBUFF_SIZE - 1, .flags = IORESOURCE_MEM, }, [2] = { /* IRQ number */ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), .flags = IORESOURCE_IRQ, }, }; static struct ethoc_platform_data ethoc_pdata = { /* * The MAC address for these boards is 00:50:c2:13:6f:xx. * The last byte (here as zero) is read from the DIP switches on the * board. */ .hwaddr = { 0x00, 0x50, 0xc2, 0x13, 0x6f, 0 }, .phy_id = -1, .big_endian = XCHAL_HAVE_BE, }; static struct platform_device ethoc_device = { .name = "ethoc", .id = -1, .num_resources = ARRAY_SIZE(ethoc_res), .resource = ethoc_res, .dev = { .platform_data = &ethoc_pdata, }, }; /*---------------------------------------------------------------------------- * USB Host/Device -- Cypress CY7C67300 */ static struct resource c67x00_res[] = { [0] = { /* register space */ .start = C67X00_PADDR, .end = C67X00_PADDR + C67X00_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { /* IRQ number */ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), .flags = IORESOURCE_IRQ, }, }; static struct c67x00_platform_data c67x00_pdata = { .sie_config = C67X00_SIE1_HOST | C67X00_SIE2_UNUSED, .hpi_regstep = 4, }; static struct platform_device c67x00_device = { .name = "c67x00", .id = -1, .num_resources = ARRAY_SIZE(c67x00_res), .resource = c67x00_res, .dev = { .platform_data = &c67x00_pdata, }, }; /*---------------------------------------------------------------------------- * UART */ static struct resource serial_resource = { .start = DUART16552_PADDR, .end = DUART16552_PADDR + 0x1f, .flags = IORESOURCE_MEM, }; static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, .regshift = 2, .uartclk = 0, /* set in xtavnet_init() */ }, { }, }; static struct platform_device xtavnet_uart = { .name = "serial8250", .id = PLAT8250_DEV_PLATFORM, .dev = { .platform_data = serial_platform_data, }, .num_resources = 1, .resource = &serial_resource, }; /* platform devices */ static struct platform_device *platform_devices[] __initdata = { &ethoc_device, &c67x00_device, &xtavnet_uart, }; static int __init xtavnet_init(void) { /* Ethernet MAC address. */ ethoc_pdata.hwaddr[5] = *(u32 *)DIP_SWITCHES_VADDR; /* Clock rate varies among FPGA bitstreams; board specific FPGA register * reports the actual clock rate. */ serial_platform_data[0].uartclk = *(long *)XTFPGA_CLKFRQ_VADDR; /* register platform devices */ platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); /* ETHOC driver is a bit quiet; at least display Ethernet MAC, so user * knows whether they set it correctly on the DIP switches. */ pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR; xtfpga_register_handlers(); return 0; } /* * Register to be done during do_initcalls(). */ arch_initcall(xtavnet_init); #endif /* CONFIG_USE_OF */
linux-master
arch/xtensa/platforms/xtfpga/setup.c
/* * Driver for the LCD display on the Tensilica XTFPGA board family. * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001, 2006 Tensilica Inc. * Copyright (C) 2015 Cadence Design Systems Inc. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <platform/hardware.h> #include <platform/lcd.h> /* LCD instruction and data addresses. */ #define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR)) #define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4) #define LCD_CLEAR 0x1 #define LCD_DISPLAY_ON 0xc /* 8bit and 2 lines display */ #define LCD_DISPLAY_MODE8BIT 0x38 #define LCD_DISPLAY_MODE4BIT 0x28 #define LCD_DISPLAY_POS 0x80 #define LCD_SHIFT_LEFT 0x18 #define LCD_SHIFT_RIGHT 0x1c static void lcd_put_byte(u8 *addr, u8 data) { #ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS WRITE_ONCE(*addr, data); #else WRITE_ONCE(*addr, data & 0xf0); WRITE_ONCE(*addr, (data << 4) & 0xf0); #endif } static int __init lcd_init(void) { WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); mdelay(5); WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); udelay(200); WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE8BIT); udelay(50); #ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS WRITE_ONCE(*LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); udelay(50); lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT); udelay(50); #endif lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON); udelay(50); lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR); mdelay(10); lcd_disp_at_pos("XTENSA LINUX", 0); return 0; } void lcd_disp_at_pos(char *str, unsigned char pos) { lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos); udelay(100); while (*str != 0) { lcd_put_byte(LCD_DATA_ADDR, *str); udelay(200); str++; } } void lcd_shiftleft(void) { lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT); udelay(50); } void lcd_shiftright(void) { lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT); udelay(50); } arch_initcall(lcd_init);
linux-master
arch/xtensa/platforms/xtfpga/lcd.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/zlib.h> /* bits taken from ppc */ extern void *avail_ram, *end_avail; void exit (void) { for (;;); } void *zalloc(unsigned size) { void *p = avail_ram; size = (size + 7) & -8; avail_ram += size; if (avail_ram > end_avail) { //puts("oops... out of memory\n"); //pause(); exit (); } return p; } #define HEAD_CRC 2 #define EXTRA_FIELD 4 #define ORIG_NAME 8 #define COMMENT 0x10 #define RESERVED 0xe0 #define DEFLATED 8 void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp) { z_stream s; int r, i, flags; /* skip header */ i = 10; flags = src[3]; if (src[2] != DEFLATED || (flags & RESERVED) != 0) { //puts("bad gzipped data\n"); exit(); } if ((flags & EXTRA_FIELD) != 0) i = 12 + src[10] + (src[11] << 8); if ((flags & ORIG_NAME) != 0) while (src[i++] != 0) ; if ((flags & COMMENT) != 0) while (src[i++] != 0) ; if ((flags & HEAD_CRC) != 0) i += 2; if (i >= *lenp) { //puts("gunzip: ran out of data in header\n"); exit(); } s.workspace = zalloc(zlib_inflate_workspacesize()); r = zlib_inflateInit2(&s, -MAX_WBITS); if (r != Z_OK) { //puts("inflateInit2 returned "); puthex(r); puts("\n"); exit(); } s.next_in = src + i; s.avail_in = *lenp - i; s.next_out = dst; s.avail_out = dstlen; r = zlib_inflate(&s, Z_FINISH); if (r != Z_OK && r != Z_STREAM_END) { //puts("inflate returned "); puthex(r); puts("\n"); exit(); } *lenp = s.next_out - (unsigned char *) dst; zlib_inflateEnd(&s); }
linux-master
arch/xtensa/boot/lib/zmem.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/xtensa/lib/pci-auto.c * * PCI autoconfiguration library * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Chris Zankel <[email protected], [email protected]> * * Based on work from Matt Porter <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/pci-bridge.h> /* * * Setting up a PCI * * pci_ctrl->first_busno = <first bus number (0)> * pci_ctrl->last_busno = <last bus number (0xff)> * pci_ctrl->ops = <PCI config operations> * pci_ctrl->map_irq = <function to return the interrupt number for a device> * * pci_ctrl->io_space.start = <IO space start address (PCI view)> * pci_ctrl->io_space.end = <IO space end address (PCI view)> * pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space> * pci_ctrl->mem_space.start = <MEM space start address (PCI view)> * pci_ctrl->mem_space.end = <MEM space end address (PCI view)> * pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space> * * pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>, * <IO space end>, IORESOURCE_IO, "PCI host bridge"); * pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>, * <MEM space end>, IORESOURCE_MEM, "PCI host bridge"); * * pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno); * * int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) * */ static int pciauto_upper_iospc; static int pciauto_upper_memspc; static struct pci_dev pciauto_dev; static struct pci_bus pciauto_bus; /* * Helper functions */ /* Initialize the bars of a PCI device. */ static void __init pciauto_setup_bars(struct pci_dev *dev, int bar_limit) { int bar_size; int bar, bar_nr; int *upper_limit; int found_mem64 = 0; for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0; bar <= bar_limit; bar+=4, bar_nr++) { /* Tickle the BAR and get the size */ pci_write_config_dword(dev, bar, 0xffffffff); pci_read_config_dword(dev, bar, &bar_size); /* If BAR is not implemented go to the next BAR */ if (!bar_size) continue; /* Check the BAR type and set our address mask */ if (bar_size & PCI_BASE_ADDRESS_SPACE_IO) { bar_size &= PCI_BASE_ADDRESS_IO_MASK; upper_limit = &pciauto_upper_iospc; pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr); } else { if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) found_mem64 = 1; bar_size &= PCI_BASE_ADDRESS_MEM_MASK; upper_limit = &pciauto_upper_memspc; pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr); } /* Allocate a base address (bar_size is negative!) */ *upper_limit = (*upper_limit + bar_size) & bar_size; /* Write it out and update our limit */ pci_write_config_dword(dev, bar, *upper_limit); /* * If we are a 64-bit decoder then increment to the * upper 32 bits of the bar and force it to locate * in the lower 4GB of memory. */ if (found_mem64) pci_write_config_dword(dev, (bar+=4), 0x00000000); pr_debug("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit); } } /* Initialize the interrupt number. */ static void __init pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn) { u8 pin; int irq = 0; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); /* Fix illegal pin numbers. 
*/ if (pin == 0 || pin > 4) pin = 1; if (pci_ctrl->map_irq) irq = pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin); if (irq == -1) irq = 0; pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin); pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); } static void __init pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus, int *iosave, int *memsave) { /* Configure bus number registers */ pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus); pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1); pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff); /* Round memory allocator to 1MB boundary */ pciauto_upper_memspc &= ~(0x100000 - 1); *memsave = pciauto_upper_memspc; /* Round I/O allocator to 4KB boundary */ pciauto_upper_iospc &= ~(0x1000 - 1); *iosave = pciauto_upper_iospc; /* Set up memory and I/O filter limits, assume 32-bit I/O space */ pci_write_config_word(dev, PCI_MEMORY_LIMIT, ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16); pci_write_config_byte(dev, PCI_IO_LIMIT, ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8); pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16, ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16); } static void __init pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus, int *iosave, int *memsave) { int cmdstat; /* Configure bus number registers */ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus); /* * Round memory allocator to 1MB boundary. * If no space used, allocate minimum. */ pciauto_upper_memspc &= ~(0x100000 - 1); if (*memsave == pciauto_upper_memspc) pciauto_upper_memspc -= 0x00100000; pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16); /* Allocate 1MB for pre-fretch */ pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT, ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16); pciauto_upper_memspc -= 0x100000; pci_write_config_word(dev, PCI_PREF_MEMORY_BASE, pciauto_upper_memspc >> 16); /* Round I/O allocator to 4KB boundary */ pciauto_upper_iospc &= ~(0x1000 - 1); if (*iosave == pciauto_upper_iospc) pciauto_upper_iospc -= 0x1000; pci_write_config_byte(dev, PCI_IO_BASE, (pciauto_upper_iospc & 0x0000f000) >> 8); pci_write_config_word(dev, PCI_IO_BASE_UPPER16, pciauto_upper_iospc >> 16); /* Enable memory and I/O accesses, enable bus master */ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat); pci_write_config_dword(dev, PCI_COMMAND, cmdstat | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); } /* * Scan the current PCI bus. */ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus) { int sub_bus, pci_devfn, pci_class, cmdstat, found_multi=0; unsigned short vid; unsigned char header_type; struct pci_dev *dev = &pciauto_dev; pciauto_dev.bus = &pciauto_bus; pciauto_dev.sysdata = pci_ctrl; pciauto_bus.ops = pci_ctrl->ops; /* * Fetch our I/O and memory space upper boundaries used * to allocated base addresses on this pci_controller. 
*/ if (current_bus == pci_ctrl->first_busno) { pciauto_upper_iospc = pci_ctrl->io_resource.end + 1; pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1; } sub_bus = current_bus; for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) { /* Skip our host bridge */ if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0)) continue; if (PCI_FUNC(pci_devfn) && !found_multi) continue; pciauto_bus.number = current_bus; pciauto_dev.devfn = pci_devfn; /* If config space read fails from this device, move on */ if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type)) continue; if (!PCI_FUNC(pci_devfn)) found_multi = header_type & 0x80; pci_read_config_word(dev, PCI_VENDOR_ID, &vid); if (vid == 0xffff || vid == 0x0000) { found_multi = 0; continue; } pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class); if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) { int iosave, memsave; pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n", PCI_SLOT(pci_devfn)); /* Allocate PCI I/O and/or memory space */ pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1); pciauto_prescan_setup_bridge(dev, current_bus, sub_bus, &iosave, &memsave); sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus+1); pciauto_postscan_setup_bridge(dev, current_bus, sub_bus, &iosave, &memsave); pciauto_bus.number = current_bus; continue; } /* * Found a peripheral, enable some standard * settings */ pci_read_config_dword(dev, PCI_COMMAND, &cmdstat); pci_write_config_dword(dev, PCI_COMMAND, cmdstat | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80); /* Allocate PCI I/O and/or memory space */ pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n", current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn)); pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5); pciauto_setup_irq(pci_ctrl, dev, pci_devfn); } return sub_bus; }
linux-master
arch/xtensa/lib/pci-auto.c
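/*
 * Illustrative sketch (not part of the kernel sources above): the BAR sizing
 * trick used by pciauto_setup_bars(). After writing all-ones to a BAR, the
 * value read back has the size-alignment bits forced to zero, so the decoded
 * size is ~(readback & type_mask) + 1. The macro names and the sample
 * read-back value are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define BAR_SPACE_IO	0x1u		/* bit 0 set -> I/O space BAR */
#define BAR_IO_MASK	(~0x3u)		/* mask off I/O BAR flag bits */
#define BAR_MEM_MASK	(~0xfu)		/* mask off memory BAR flag bits */

static uint32_t bar_size(uint32_t readback)
{
	uint32_t mask = (readback & BAR_SPACE_IO) ? BAR_IO_MASK : BAR_MEM_MASK;

	return ~(readback & mask) + 1;
}

int main(void)
{
	uint32_t readback = 0xfffff008;	/* example: 4 KiB prefetchable memory BAR */

	printf("BAR size: %u bytes\n", bar_size(readback));
	return 0;
}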
/* * Xtensa KASAN shadow map initialization * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2017 Cadence Design Systems Inc. */ #include <linux/memblock.h> #include <linux/init_task.h> #include <linux/kasan.h> #include <linux/kernel.h> #include <asm/initialize_mmu.h> #include <asm/tlbflush.h> void __init kasan_early_init(void) { unsigned long vaddr = KASAN_SHADOW_START; pmd_t *pmd = pmd_off_k(vaddr); int i; for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, mk_pte(virt_to_page(kasan_early_shadow_page), PAGE_KERNEL)); for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { BUG_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte)); } } static void __init populate(void *start, void *end) { unsigned long n_pages = (end - start) / PAGE_SIZE; unsigned long n_pmds = n_pages / PTRS_PER_PTE; unsigned long i, j; unsigned long vaddr = (unsigned long)start; pmd_t *pmd = pmd_off_k(vaddr); pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); if (!pte) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, n_pages * sizeof(pte_t), PAGE_SIZE); pr_debug("%s: %p - %p\n", __func__, start, end); for (i = j = 0; i < n_pmds; ++i) { int k; for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { phys_addr_t phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, MEMBLOCK_ALLOC_ANYWHERE); if (!phys) panic("Failed to allocate page table page\n"); set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); } } for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE) set_pmd(pmd + i, __pmd((unsigned long)pte)); local_flush_tlb_all(); memset(start, 0, end - start); } void __init kasan_init(void) { int i; BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START - (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)); BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR); /* * Replace shadow map pages that cover addresses from VMALLOC area * start to the end of KSEG with clean writable pages. */ populate(kasan_mem_to_shadow((void *)VMALLOC_START), kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); /* * Write protect kasan_early_shadow_page and zero-initialize it again. */ for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, mk_pte(virt_to_page(kasan_early_shadow_page), PAGE_KERNEL_RO)); local_flush_tlb_all(); memset(kasan_early_shadow_page, 0, PAGE_SIZE); /* At this point kasan is fully initialized. Enable error messages. */ current->kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); }
linux-master
arch/xtensa/mm/kasan_init.c
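/*
 * Illustrative sketch (not part of the kernel sources above): the address
 * arithmetic implied by the BUILD_BUG_ON() in kasan_init(), i.e.
 * shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * The constants below are made-up placeholders; the real values come from
 * the Xtensa memory-layout headers and depend on the kernel configuration.
 */
#include <stdio.h>

#define SHADOW_SCALE_SHIFT	3		/* one shadow byte covers 8 bytes of memory */
#define SHADOW_OFFSET		0x40000000UL	/* placeholder, not the real value */

static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	unsigned long addr = 0xd0001000UL;	/* arbitrary example address */

	printf("shadow(%#lx) = %#lx\n", addr, mem_to_shadow(addr));
	return 0;
}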
/* * arch/xtensa/mm/init.c * * Derived from MIPS, PPC. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. * * Chris Zankel <[email protected]> * Joe Taylor <[email protected], [email protected]> * Marc Gauthier * Kevin Chea */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/memblock.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/mm.h> #include <linux/of_fdt.h> #include <linux/dma-map-ops.h> #include <asm/bootparam.h> #include <asm/page.h> #include <asm/sections.h> #include <asm/sysmem.h> /* * Initialize the bootmem system and give it all low memory we have available. */ void __init bootmem_init(void) { /* Reserve all memory below PHYS_OFFSET, as memory * accounting doesn't work for pages below that address. * * If PHYS_OFFSET is zero reserve page at address 0: * successfull allocations should never return NULL. */ memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1); early_init_fdt_scan_reserved_mem(); if (!memblock_phys_mem_size()) panic("No memory found!\n"); min_low_pfn = PFN_UP(memblock_start_of_DRAM()); min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET)); max_pfn = PFN_DOWN(memblock_end_of_DRAM()); max_low_pfn = min(max_pfn, MAX_LOW_PFN); early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT, (phys_addr_t)max_low_pfn << PAGE_SHIFT); memblock_set_current_limit(PFN_PHYS(max_low_pfn)); dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); memblock_dump_all(); } void __init zones_init(void) { /* All pages are DMA-able, so we put them all in the DMA zone. */ unsigned long max_zone_pfn[MAX_NR_ZONES] = { [ZONE_NORMAL] = max_low_pfn, #ifdef CONFIG_HIGHMEM [ZONE_HIGHMEM] = max_pfn, #endif }; free_area_init(max_zone_pfn); } static void __init free_highpages(void) { #ifdef CONFIG_HIGHMEM unsigned long max_low = max_low_pfn; phys_addr_t range_start, range_end; u64 i; /* set highmem page free */ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &range_start, &range_end, NULL) { unsigned long start = PFN_UP(range_start); unsigned long end = PFN_DOWN(range_end); /* Ignore complete lowmem entries */ if (end <= max_low) continue; /* Truncate partial highmem entries */ if (start < max_low) start = max_low; for (; start < end; start++) free_highmem_page(pfn_to_page(start)); } #endif } /* * Initialize memory pages. 
*/ void __init mem_init(void) { free_highpages(); max_mapnr = max_pfn - ARCH_PFN_OFFSET; high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT); memblock_free_all(); pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n" #endif #ifdef CONFIG_MMU " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n" #endif #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n" " fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n" #endif " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n" " .text : 0x%08lx - 0x%08lx (%5lu kB)\n" " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n" " .data : 0x%08lx - 0x%08lx (%5lu kB)\n" " .init : 0x%08lx - 0x%08lx (%5lu kB)\n" " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n", #ifdef CONFIG_KASAN KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE, KASAN_SHADOW_SIZE >> 20, #endif #ifdef CONFIG_MMU VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, FIXADDR_START, FIXADDR_END, (FIXADDR_END - FIXADDR_START) >> 10, #endif PAGE_OFFSET, PAGE_OFFSET + (max_low_pfn - min_low_pfn) * PAGE_SIZE, #else min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE, #endif ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20, (unsigned long)_text, (unsigned long)_etext, (unsigned long)(_etext - _text) >> 10, (unsigned long)__start_rodata, (unsigned long)__end_rodata, (unsigned long)(__end_rodata - __start_rodata) >> 10, (unsigned long)_sdata, (unsigned long)_edata, (unsigned long)(_edata - _sdata) >> 10, (unsigned long)__init_begin, (unsigned long)__init_end, (unsigned long)(__init_end - __init_begin) >> 10, (unsigned long)__bss_start, (unsigned long)__bss_stop, (unsigned long)(__bss_stop - __bss_start) >> 10); } static void __init parse_memmap_one(char *p) { char *oldp; unsigned long start_at, mem_size; if (!p) return; oldp = p; mem_size = memparse(p, &p); if (p == oldp) return; switch (*p) { case '@': start_at = memparse(p + 1, &p); memblock_add(start_at, mem_size); break; case '$': start_at = memparse(p + 1, &p); memblock_reserve(start_at, mem_size); break; case 0: memblock_reserve(mem_size, -mem_size); break; default: pr_warn("Unrecognized memmap syntax: %s\n", p); break; } } static int __init parse_memmap_opt(char *str) { while (str) { char *k = strchr(str, ','); if (k) *k++ = 0; parse_memmap_one(str); str = k; } return 0; } early_param("memmap", parse_memmap_opt); #ifdef CONFIG_MMU static const pgprot_t protection_map[16] = { [VM_NONE] = PAGE_NONE, [VM_READ] = PAGE_READONLY, [VM_WRITE] = PAGE_COPY, [VM_WRITE | VM_READ] = PAGE_COPY, [VM_EXEC] = PAGE_READONLY_EXEC, [VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READONLY, [VM_SHARED | VM_WRITE] = PAGE_SHARED, [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, [VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC, [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC, [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC, [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC }; DECLARE_VM_GET_PAGE_PROT #endif
linux-master
arch/xtensa/mm/init.c
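/*
 * Illustrative sketch (not part of the kernel sources above): the grammar
 * accepted by the "memmap=" early parameter. parse_memmap_one() treats
 * "<size>@<start>" as memory to add, "<size>$<start>" as a region to
 * reserve, and a bare "<size>" as a cap on usable memory. The suffix
 * handling below is a simplified stand-in for the kernel's memparse()
 * helper, and the sample argument is invented.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	char arg[] = "16M$0x1000000";	/* example: reserve 16 MiB at 16 MiB */
	char *p;
	unsigned long long size = parse_size(arg, &p);

	if (*p == '@')
		printf("add %#llx bytes at %#llx\n", size, parse_size(p + 1, &p));
	else if (*p == '$')
		printf("reserve %#llx bytes at %#llx\n", size, parse_size(p + 1, &p));
	else
		printf("limit usable memory to %#llx bytes\n", size);
	return 0;
}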
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ioremap implementation.
 *
 * Copyright (C) 2015 Cadence Design Systems Inc.
 */

#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	unsigned long pfn = __phys_to_pfn((phys_addr));

	WARN_ON(pfn_valid(pfn));

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	unsigned long va = (unsigned long) addr;

	if ((va >= XCHAL_KIO_CACHED_VADDR &&
	     va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
	    (va >= XCHAL_KIO_BYPASS_VADDR &&
	     va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
		return;

	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
linux-master
arch/xtensa/mm/ioremap.c
/* * arch/xtensa/mm/cache.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001-2006 Tensilica Inc. * * Chris Zankel <[email protected]> * Joe Taylor * Marc Gauthier * */ #include <linux/init.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/memblock.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/pgtable.h> #include <asm/bootparam.h> #include <asm/mmu_context.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/page.h> /* * Note: * The kernel provides one architecture bit PG_arch_1 in the page flags that * can be used for cache coherency. * * I$-D$ coherency. * * The Xtensa architecture doesn't keep the instruction cache coherent with * the data cache. We use the architecture bit to indicate if the caches * are coherent. The kernel clears this bit whenever a page is added to the * page cache. At that time, the caches might not be in sync. We, therefore, * define this flag as 'clean' if set. * * D-cache aliasing. * * With cache aliasing, we have to always flush the cache when pages are * unmapped (see tlb_start_vma(). So, we use this flag to indicate a dirty * page. * * * */ #if (DCACHE_WAY_SIZE > PAGE_SIZE) static inline void kmap_invalidate_coherent(struct page *page, unsigned long vaddr) { if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { unsigned long kvaddr; if (!PageHighMem(page)) { kvaddr = (unsigned long)page_to_virt(page); __invalidate_dcache_page(kvaddr); } else { kvaddr = TLBTEMP_BASE_1 + (page_to_phys(page) & DCACHE_ALIAS_MASK); preempt_disable(); __invalidate_dcache_page_alias(kvaddr, page_to_phys(page)); preempt_enable(); } } } static inline void *coherent_kvaddr(struct page *page, unsigned long base, unsigned long vaddr, unsigned long *paddr) { *paddr = page_to_phys(page); return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); } void clear_user_highpage(struct page *page, unsigned long vaddr) { unsigned long paddr; void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); preempt_disable(); kmap_invalidate_coherent(page, vaddr); set_bit(PG_arch_1, &page->flags); clear_page_alias(kvaddr, paddr); preempt_enable(); } EXPORT_SYMBOL(clear_user_highpage); void copy_user_highpage(struct page *dst, struct page *src, unsigned long vaddr, struct vm_area_struct *vma) { unsigned long dst_paddr, src_paddr; void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, &dst_paddr); void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, &src_paddr); preempt_disable(); kmap_invalidate_coherent(dst, vaddr); set_bit(PG_arch_1, &dst->flags); copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); preempt_enable(); } EXPORT_SYMBOL(copy_user_highpage); /* * Any time the kernel writes to a user page cache page, or it is about to * read from a page cache page this routine is called. * */ void flush_dcache_folio(struct folio *folio) { struct address_space *mapping = folio_flush_mapping(folio); /* * If we have a mapping but the page is not mapped to user-space * yet, we simply mark this page dirty and defer flushing the * caches until update_mmu(). 
*/ if (mapping && !mapping_mapped(mapping)) { if (!test_bit(PG_arch_1, &folio->flags)) set_bit(PG_arch_1, &folio->flags); return; } else { unsigned long phys = folio_pfn(folio) * PAGE_SIZE; unsigned long temp = folio_pos(folio); unsigned int i, nr = folio_nr_pages(folio); unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys)); unsigned long virt; /* * Flush the page in kernel space and user space. * Note that we can omit that step if aliasing is not * an issue, but we do have to synchronize I$ and D$ * if we have a mapping. */ if (!alias && !mapping) return; preempt_disable(); for (i = 0; i < nr; i++) { virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(virt, phys); virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); if (alias) __flush_invalidate_dcache_page_alias(virt, phys); if (mapping) __invalidate_icache_page_alias(virt, phys); phys += PAGE_SIZE; temp += PAGE_SIZE; } preempt_enable(); } /* There shouldn't be an entry in the cache for this page anymore. */ } EXPORT_SYMBOL(flush_dcache_folio); /* * For now, flush the whole cache. FIXME?? */ void local_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { __flush_invalidate_dcache_all(); __invalidate_icache_all(); } EXPORT_SYMBOL(local_flush_cache_range); /* * Remove any entry in the cache for this page. * * Note that this function is only called for user pages, so use the * alias versions of the cache flush functions. */ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) { /* Note that we have to use the 'alias' address to avoid multi-hit */ unsigned long phys = page_to_phys(pfn_to_page(pfn)); unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK); preempt_disable(); __flush_invalidate_dcache_page_alias(virt, phys); __invalidate_icache_page_alias(virt, phys); preempt_enable(); } EXPORT_SYMBOL(local_flush_cache_page); #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned int nr) { unsigned long pfn = pte_pfn(*ptep); struct folio *folio; unsigned int i; if (!pfn_valid(pfn)) return; folio = page_folio(pfn_to_page(pfn)); /* Invalidate old entries in TLBs */ for (i = 0; i < nr; i++) flush_tlb_page(vma, addr + i * PAGE_SIZE); nr = folio_nr_pages(folio); #if (DCACHE_WAY_SIZE > PAGE_SIZE) if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) { unsigned long phys = folio_pfn(folio) * PAGE_SIZE; unsigned long tmp; preempt_disable(); for (i = 0; i < nr; i++) { tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); __invalidate_icache_page_alias(tmp, phys); phys += PAGE_SIZE; } preempt_enable(); clear_bit(PG_arch_1, &folio->flags); } #else if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags) && (vma->vm_flags & VM_EXEC) != 0) { for (i = 0; i < nr; i++) { void *paddr = kmap_local_folio(folio, i * PAGE_SIZE); __flush_dcache_page((unsigned long)paddr); __invalidate_icache_page((unsigned long)paddr); kunmap_local(paddr); } set_bit(PG_arch_1, &folio->flags); } #endif } /* * access_process_vm() has called get_user_pages(), which has done a * flush_dcache_page() on the page. 
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();
	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}
#endif
linux-master
arch/xtensa/mm/cache.c
/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

#if DCACHE_WAY_SIZE > PAGE_SIZE

unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static void __init kmap_waitqueues_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
		init_waitqueue_head(pkmap_map_wait_arr + i);
}

static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;

	/*
	 * The fixmap operates top down, so the color offset needs to be
	 * reverse as well.
	 */
	return idx + DCACHE_N_COLORS - 1 - color;
}

enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
{
	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
}

enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
{
	return kmap_idx(type, DCACHE_ALIAS(addr));
}

#else

static inline void kmap_waitqueues_init(void)
{
}

#endif

void __init kmap_init(void)
{
	/* Check if this memory layout is broken because PKMAP overlaps
	 * page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	kmap_waitqueues_init();
}
linux-master
arch/xtensa/mm/highmem.c
// TODO VM_EXEC flag work-around, cache aliasing /* * arch/xtensa/mm/fault.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2010 Tensilica Inc. * * Chris Zankel <[email protected]> * Joe Taylor <[email protected], [email protected]> */ #include <linux/mm.h> #include <linux/extable.h> #include <linux/hardirq.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/hardirq.h> void bad_page_fault(struct pt_regs*, unsigned long, int); static void vmalloc_fault(struct pt_regs *regs, unsigned int address) { #ifdef CONFIG_MMU /* Synchronize this task's top level page-table * with the 'reference' page table. */ struct mm_struct *act_mm = current->active_mm; int index = pgd_index(address); pgd_t *pgd, *pgd_k; p4d_t *p4d, *p4d_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pte_t *pte_k; if (act_mm == NULL) goto bad_page_fault; pgd = act_mm->pgd + index; pgd_k = init_mm.pgd + index; if (!pgd_present(*pgd_k)) goto bad_page_fault; pgd_val(*pgd) = pgd_val(*pgd_k); p4d = p4d_offset(pgd, address); p4d_k = p4d_offset(pgd_k, address); if (!p4d_present(*p4d) || !p4d_present(*p4d_k)) goto bad_page_fault; pud = pud_offset(p4d, address); pud_k = pud_offset(p4d_k, address); if (!pud_present(*pud) || !pud_present(*pud_k)) goto bad_page_fault; pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd) || !pmd_present(*pmd_k)) goto bad_page_fault; pmd_val(*pmd) = pmd_val(*pmd_k); pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) goto bad_page_fault; return; bad_page_fault: bad_page_fault(regs, address, SIGKILL); #else WARN_ONCE(1, "%s in noMMU configuration\n", __func__); #endif } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * Note: does not handle Miss and MultiHit. */ void do_page_fault(struct pt_regs *regs) { struct vm_area_struct * vma; struct mm_struct *mm = current->mm; unsigned int exccause = regs->exccause; unsigned int address = regs->excvaddr; int code; int is_write, is_exec; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; code = SEGV_MAPERR; /* We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. */ if (address >= TASK_SIZE && !user_mode(regs)) { vmalloc_fault(regs, address); return; } /* If we're in an interrupt or have no user * context, we must not take the fault.. */ if (faulthandler_disabled() || !mm) { bad_page_fault(regs, address, SIGSEGV); return; } is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0; is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE || exccause == EXCCAUSE_ITLB_MISS || exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0; pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n", current->comm, current->pid, address, exccause, regs->pc, is_write ? "w" : "", is_exec ? "x" : ""); if (user_mode(regs)) flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) goto bad_area_nosemaphore; /* Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ code = SEGV_ACCERR; if (is_write) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; flags |= FAULT_FLAG_WRITE; } else if (is_exec) { if (!(vma->vm_flags & VM_EXEC)) goto bad_area; } else /* Allow read even from write-only pages. */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) goto bad_area; /* If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) bad_page_fault(regs, address, SIGKILL); return; } /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) return; if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGSEGV) goto bad_area; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_RETRY) { flags |= FAULT_FLAG_TRIED; /* No need to mmap_read_unlock(mm) as we would * have already released it in __lock_page_or_retry * in mm/filemap.c. */ goto retry; } mmap_read_unlock(mm); return; /* Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: mmap_read_unlock(mm); bad_area_nosemaphore: if (user_mode(regs)) { force_sig_fault(SIGSEGV, code, (void *) address); return; } bad_page_fault(regs, address, SIGSEGV); return; /* We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: mmap_read_unlock(mm); if (!user_mode(regs)) bad_page_fault(regs, address, SIGKILL); else pagefault_out_of_memory(); return; do_sigbus: mmap_read_unlock(mm); /* Send a sigbus, regardless of whether we were in kernel * or user mode. */ force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) bad_page_fault(regs, address, SIGBUS); return; } void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) { extern void __noreturn die(const char*, struct pt_regs*, long); const struct exception_table_entry *entry; /* Are we prepared to handle this kernel fault? */ if ((entry = search_exception_tables(regs->pc)) != NULL) { pr_debug("%s: Exception at pc=%#010lx (%lx)\n", current->comm, regs->pc, entry->fixup); regs->pc = entry->fixup; return; } /* Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ pr_alert("Unable to handle kernel paging request at virtual " "address %08lx\n pc = %08lx, ra = %08lx\n", address, regs->pc, regs->areg[0]); die("Oops", regs, sig); }
linux-master
arch/xtensa/mm/fault.c
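bad_page_fault() in fault.c above recovers from faults in whitelisted kernel instructions (typically user accessors) by looking the faulting PC up in the exception table and resuming at the recorded fixup address; only when no entry exists does it die(). A simplified, hypothetical model of that lookup, with invented addresses and a linear search standing in for the kernel's sorted table and search_exception_tables():

#include <stdio.h>
#include <stddef.h>

/* Simplified model of an exception table entry: if the CPU faults at
 * 'insn', execution resumes at 'fixup' instead of killing the task. */
struct exception_table_entry {
        unsigned long insn;
        unsigned long fixup;
};

/* Hypothetical table; in the kernel it is generated by the uaccess
 * macros and sorted at build/boot time. */
static const struct exception_table_entry extable[] = {
        { 0xd0001000, 0xd0004f00 },     /* get_user() load  */
        { 0xd0001010, 0xd0004f20 },     /* put_user() store */
};

static const struct exception_table_entry *search_extable(unsigned long pc)
{
        size_t i;

        for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
                if (extable[i].insn == pc)
                        return &extable[i];
        return NULL;    /* genuine kernel bug -> die() in the real code */
}

int main(void)
{
        unsigned long pc = 0xd0001010;
        const struct exception_table_entry *e = search_extable(pc);

        if (e)
                printf("fault at %#lx -> resume at fixup %#lx\n", pc, e->fixup);
        else
                printf("fault at %#lx -> unhandled, would die()\n", pc);
        return 0;
}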
/* * arch/xtensa/mm/tlb.c * * Logic that manipulates the Xtensa MMU. Derived from MIPS. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2003 Tensilica Inc. * * Joe Taylor * Chris Zankel <[email protected]> * Marc Gauthier */ #include <linux/mm.h> #include <asm/processor.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> static inline void __flush_itlb_all (void) { int w, i; for (w = 0; w < ITLB_ARF_WAYS; w++) { for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) { int e = w + (i << PAGE_SHIFT); invalidate_itlb_entry_no_isync(e); } } asm volatile ("isync\n"); } static inline void __flush_dtlb_all (void) { int w, i; for (w = 0; w < DTLB_ARF_WAYS; w++) { for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) { int e = w + (i << PAGE_SHIFT); invalidate_dtlb_entry_no_isync(e); } } asm volatile ("isync\n"); } void local_flush_tlb_all(void) { __flush_itlb_all(); __flush_dtlb_all(); } /* If mm is current, we simply assign the current task a new ASID, thus, * invalidating all previous tlb entries. If mm is someone else's user mapping, * wie invalidate the context, thus, when that user mapping is swapped in, * a new context will be assigned to it. */ void local_flush_tlb_mm(struct mm_struct *mm) { int cpu = smp_processor_id(); if (mm == current->active_mm) { unsigned long flags; local_irq_save(flags); mm->context.asid[cpu] = NO_CONTEXT; activate_context(mm, cpu); local_irq_restore(flags); } else { mm->context.asid[cpu] = NO_CONTEXT; mm->context.cpu = -1; } } #define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) #define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) #if _ITLB_ENTRIES > _DTLB_ENTRIES # define _TLB_ENTRIES _ITLB_ENTRIES #else # define _TLB_ENTRIES _DTLB_ENTRIES #endif void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { int cpu = smp_processor_id(); struct mm_struct *mm = vma->vm_mm; unsigned long flags; if (mm->context.asid[cpu] == NO_CONTEXT) return; pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n", (unsigned long)mm->context.asid[cpu], start, end); local_irq_save(flags); if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { int oldpid = get_rasid_register(); set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); start &= PAGE_MASK; if (vma->vm_flags & VM_EXEC) while(start < end) { invalidate_itlb_mapping(start); invalidate_dtlb_mapping(start); start += PAGE_SIZE; } else while(start < end) { invalidate_dtlb_mapping(start); start += PAGE_SIZE; } set_rasid_register(oldpid); } else { local_flush_tlb_mm(mm); } local_irq_restore(flags); } void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { int cpu = smp_processor_id(); struct mm_struct* mm = vma->vm_mm; unsigned long flags; int oldpid; if (mm->context.asid[cpu] == NO_CONTEXT) return; local_irq_save(flags); oldpid = get_rasid_register(); set_rasid_register(ASID_INSERT(mm->context.asid[cpu])); if (vma->vm_flags & VM_EXEC) invalidate_itlb_mapping(page); invalidate_dtlb_mapping(page); set_rasid_register(oldpid); local_irq_restore(flags); } void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) { if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET && end - start < _TLB_ENTRIES << PAGE_SHIFT) { start &= PAGE_MASK; while (start < end) { invalidate_itlb_mapping(start); invalidate_dtlb_mapping(start); start += PAGE_SIZE; } } else { 
local_flush_tlb_all(); } } void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { local_flush_tlb_page(vma, address); } #ifdef CONFIG_DEBUG_TLB_SANITY static unsigned get_pte_for_vaddr(unsigned vaddr) { struct task_struct *task = get_current(); struct mm_struct *mm = task->mm; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned int pteval; if (!mm) mm = task->active_mm; pgd = pgd_offset(mm, vaddr); if (pgd_none_or_clear_bad(pgd)) return 0; p4d = p4d_offset(pgd, vaddr); if (p4d_none_or_clear_bad(p4d)) return 0; pud = pud_offset(p4d, vaddr); if (pud_none_or_clear_bad(pud)) return 0; pmd = pmd_offset(pud, vaddr); if (pmd_none_or_clear_bad(pmd)) return 0; pte = pte_offset_map(pmd, vaddr); if (!pte) return 0; pteval = pte_val(*pte); pte_unmap(pte); return pteval; } enum { TLB_SUSPICIOUS = 1, TLB_INSANE = 2, }; static void tlb_insane(void) { BUG_ON(1); } static void tlb_suspicious(void) { WARN_ON(1); } /* * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE), * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE. * * Check that valid TLB entries either have the same PA as the PTE, or PTE is * marked as non-present. Non-present PTE and the page with non-zero refcount * and zero mapcount is normal for batched TLB flush operation. Zero refcount * means that the page was freed prematurely. Non-zero mapcount is unusual, * but does not necessary means an error, thus marked as suspicious. */ static int check_tlb_entry(unsigned w, unsigned e, bool dtlb) { unsigned tlbidx = w | (e << PAGE_SHIFT); unsigned r0 = dtlb ? read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx); unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx); unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT); unsigned pte = get_pte_for_vaddr(vpn); unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK; unsigned tlb_asid = r0 & ASID_MASK; bool kernel = tlb_asid == 1; int rc = 0; if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) { pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n", dtlb ? 'D' : 'I', w, e, vpn, kernel ? "kernel" : "user"); rc |= TLB_INSANE; } if (tlb_asid == mm_asid) { if ((pte ^ r1) & PAGE_MASK) { pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n", dtlb ? 'D' : 'I', w, e, r0, r1, pte); if (pte == 0 || !pte_present(__pte(pte))) { struct page *p = pfn_to_page(r1 >> PAGE_SHIFT); pr_err("page refcount: %d, mapcount: %d\n", page_count(p), page_mapcount(p)); if (!page_count(p)) rc |= TLB_INSANE; else if (page_mapcount(p)) rc |= TLB_SUSPICIOUS; } else { rc |= TLB_INSANE; } } } return rc; } void check_tlb_sanity(void) { unsigned long flags; unsigned w, e; int bug = 0; local_irq_save(flags); for (w = 0; w < DTLB_ARF_WAYS; ++w) for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e) bug |= check_tlb_entry(w, e, true); for (w = 0; w < ITLB_ARF_WAYS; ++w) for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e) bug |= check_tlb_entry(w, e, false); if (bug & TLB_INSANE) tlb_insane(); if (bug & TLB_SUSPICIOUS) tlb_suspicious(); local_irq_restore(flags); } #endif /* CONFIG_DEBUG_TLB_SANITY */
linux-master
arch/xtensa/mm/tlb.c
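local_flush_tlb_range() above only invalidates page by page when the range is small enough to plausibly fit in the auto-refill TLB; anything larger is handled by dropping the mm's ASID via local_flush_tlb_mm(), which is cheaper than touching more entries than the TLB can hold. A small sketch of that threshold decision, with a made-up TLB_ENTRIES and PAGE_SHIFT standing in for the XCHAL-derived values:

#include <stdio.h>

/* Hypothetical TLB geometry, for illustration only; the real numbers
 * come from ITLB/DTLB_ARF_WAYS and XCHAL_*_ARF_ENTRIES_LOG2. */
#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define TLB_ENTRIES  8

/* Mirrors the heuristic in local_flush_tlb_range(): invalidating more
 * pages than the TLB can hold is pointless, so fall back to flushing
 * the whole address space by dropping its ASID. */
static const char *flush_strategy(unsigned long start, unsigned long end)
{
        if (end - start + (PAGE_SIZE - 1) <= TLB_ENTRIES << PAGE_SHIFT)
                return "per-page invalidate";
        return "flush whole mm (new ASID)";
}

int main(void)
{
        printf("4 KiB range: %s\n", flush_strategy(0x10000000, 0x10001000));
        printf("1 MiB range: %s\n", flush_strategy(0x10000000, 0x10100000));
        return 0;
}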
// SPDX-License-Identifier: GPL-2.0 /* * xtensa mmu stuff * * Extracted from init.c */ #include <linux/memblock.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/cache.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/initialize_mmu.h> #include <asm/io.h> DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; #if defined(CONFIG_HIGHMEM) static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) { pmd_t *pmd = pmd_off_k(vaddr); pte_t *pte; unsigned long i; n_pages = ALIGN(n_pages, PTRS_PER_PTE); pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", __func__, vaddr, n_pages); pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); if (!pte) panic("%s: Failed to allocate %lu bytes align=%lx\n", __func__, n_pages * sizeof(pte_t), PAGE_SIZE); for (i = 0; i < n_pages; ++i) pte_clear(NULL, 0, pte + i); for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { pte_t *cur_pte = pte + i; BUG_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", __func__, pmd, cur_pte); } return pte; } static void __init fixedrange_init(void) { BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE); init_pmd(FIXADDR_START, __end_of_fixed_addresses); } #endif void __init paging_init(void) { #ifdef CONFIG_HIGHMEM fixedrange_init(); pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); kmap_init(); #endif } /* * Flush the mmu and reset associated register to default values. */ void init_mmu(void) { #if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) /* * Writing zeros to the instruction and data TLBCFG special * registers ensure that valid values exist in the register. * * For existing PGSZID<w> fields, zero selects the first element * of the page-size array. For nonexistent PGSZID<w> fields, * zero is the best value to write. Also, when changing PGSZID<w> * fields, the corresponding TLB must be flushed. */ set_itlbcfg_register(0); set_dtlbcfg_register(0); #endif init_kio(); local_flush_tlb_all(); /* Set rasid register to a known value. */ set_rasid_register(ASID_INSERT(ASID_USER_FIRST)); /* Set PTEVADDR special register to the start of the page * table, which is in kernel mappable space (ie. not * statically mapped). This register's value is undefined on * reset. */ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR); } void init_kio(void) { #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF) /* * Update the IO area mapping in case xtensa_kio_paddr has changed */ write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), XCHAL_KIO_CACHED_VADDR + 6); write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK), XCHAL_KIO_CACHED_VADDR + 6); write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), XCHAL_KIO_BYPASS_VADDR + 6); write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS), XCHAL_KIO_BYPASS_VADDR + 6); #endif }
linux-master
arch/xtensa/mm/mmu.c
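init_pmd() in mmu.c above rounds the requested number of pages up to a whole page table (PTRS_PER_PTE entries) and wires one pmd entry per table. A back-of-the-envelope sketch of that sizing, assuming a 4 KiB page and 1024-entry page tables; the vaddr and n_pages values are only examples:

#include <stdio.h>

/* Hypothetical 4 KiB page / 1024-entry page table layout, only to
 * illustrate the arithmetic done by init_pmd() above. */
#define PAGE_SHIFT      12
#define PTRS_PER_PTE    1024
#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned long vaddr   = 0xc8000000;     /* e.g. PKMAP_BASE */
        unsigned long n_pages = 512;            /* e.g. LAST_PKMAP */
        unsigned long covered = ALIGN_UP(n_pages, PTRS_PER_PTE);

        /* One pte per mapped page, one pmd entry per PTRS_PER_PTE ptes. */
        printf("mapping %lu pages at %#lx allocates %lu pte entries "
               "and fills %lu pmd slot(s)\n",
               n_pages, vaddr, covered, covered / PTRS_PER_PTE);
        return 0;
}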
// SPDX-License-Identifier: GPL-2.0-or-later /* * DMA coherent memory allocation. * * Copyright (C) 2002 - 2005 Tensilica Inc. * Copyright (C) 2015 Cadence Design Systems Inc. * * Based on version for i386. * * Chris Zankel <[email protected]> * Joe Taylor <[email protected], [email protected]> */ #include <linux/dma-map-ops.h> #include <linux/dma-direct.h> #include <linux/gfp.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/platform.h> static void do_cache_op(phys_addr_t paddr, size_t size, void (*fn)(unsigned long, unsigned long)) { unsigned long off = paddr & (PAGE_SIZE - 1); unsigned long pfn = PFN_DOWN(paddr); struct page *page = pfn_to_page(pfn); if (!PageHighMem(page)) fn((unsigned long)phys_to_virt(paddr), size); else while (size > 0) { size_t sz = min_t(size_t, size, PAGE_SIZE - off); void *vaddr = kmap_atomic(page); fn((unsigned long)vaddr + off, sz); kunmap_atomic(vaddr); off = 0; ++page; size -= sz; } } void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_BIDIRECTIONAL: case DMA_FROM_DEVICE: do_cache_op(paddr, size, __invalidate_dcache_range); break; case DMA_NONE: BUG(); break; default: break; } } void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) { switch (dir) { case DMA_BIDIRECTIONAL: case DMA_TO_DEVICE: if (XCHAL_DCACHE_IS_WRITEBACK) do_cache_op(paddr, size, __flush_dcache_range); break; case DMA_NONE: BUG(); break; default: break; } } void arch_dma_prep_coherent(struct page *page, size_t size) { __invalidate_dcache_range((unsigned long)page_address(page), size); } /* * Memory caching is platform-dependent in noMMU xtensa configurations. * This function should be implemented in platform code in order to enable * coherent DMA memory operations when CONFIG_MMU is not enabled. */ #ifdef CONFIG_MMU void *arch_dma_set_uncached(void *p, size_t size) { return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; } #endif /* CONFIG_MMU */
linux-master
arch/xtensa/kernel/pci-dma.c
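do_cache_op() above has to cope with highmem pages, so it cannot simply operate on one linear virtual range: it walks the physical range page by page, shortening the first chunk by the offset into the page and kmapping each page in turn. A standalone sketch of just that chunking, with an assumed 4 KiB PAGE_SIZE and printf() standing in for the cache operation and kmap_atomic():

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size for the example */

static void cache_op_chunks(unsigned long paddr, unsigned long size)
{
        unsigned long off = paddr & (PAGE_SIZE - 1);

        while (size > 0) {
                unsigned long sz = size < PAGE_SIZE - off ? size
                                                          : PAGE_SIZE - off;

                printf("  op on page %#lx, offset %lu, length %lu\n",
                       paddr & ~(PAGE_SIZE - 1), off, sz);
                paddr += sz;
                size -= sz;
                off = 0;        /* later chunks start at the page head */
        }
}

int main(void)
{
        /* A 10000-byte buffer starting 100 bytes into a page spans
         * three pages and therefore three map/flush operations. */
        cache_op_chunks(0x20000064, 10000);
        return 0;
}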
/* * arch/xtensa/kernel/process.c * * Xtensa Processor version. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Joe Taylor <[email protected], [email protected]> * Chris Zankel <[email protected]> * Marc Gauthier <[email protected], [email protected]> * Kevin Chea */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/debug.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/elf.h> #include <linux/hw_breakpoint.h> #include <linux/init.h> #include <linux/prctl.h> #include <linux/init_task.h> #include <linux/module.h> #include <linux/mqueue.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/rcupdate.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/platform.h> #include <asm/mmu.h> #include <asm/irq.h> #include <linux/atomic.h> #include <asm/asm-offsets.h> #include <asm/regs.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); #ifdef CONFIG_STACKPROTECTOR #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; EXPORT_SYMBOL(__stack_chk_guard); #endif #if XTENSA_HAVE_COPROCESSORS void local_coprocessors_flush_release_all(void) { struct thread_info **coprocessor_owner; struct thread_info *unique_owner[XCHAL_CP_MAX]; int n = 0; int i, j; coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner; xtensa_set_sr(XCHAL_CP_MASK, cpenable); for (i = 0; i < XCHAL_CP_MAX; i++) { struct thread_info *ti = coprocessor_owner[i]; if (ti) { coprocessor_flush(ti, i); for (j = 0; j < n; j++) if (unique_owner[j] == ti) break; if (j == n) unique_owner[n++] = ti; coprocessor_owner[i] = NULL; } } for (i = 0; i < n; i++) { /* pairs with memw (1) in fast_coprocessor and memw in switch_to */ smp_wmb(); unique_owner[i]->cpenable = 0; } xtensa_set_sr(0, cpenable); } static void local_coprocessor_release_all(void *info) { struct thread_info *ti = info; struct thread_info **coprocessor_owner; int i; coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner; /* Walk through all cp owners and release it for the requested one. 
*/ for (i = 0; i < XCHAL_CP_MAX; i++) { if (coprocessor_owner[i] == ti) coprocessor_owner[i] = NULL; } /* pairs with memw (1) in fast_coprocessor and memw in switch_to */ smp_wmb(); ti->cpenable = 0; if (ti == current_thread_info()) xtensa_set_sr(0, cpenable); } void coprocessor_release_all(struct thread_info *ti) { if (ti->cpenable) { /* pairs with memw (2) in fast_coprocessor */ smp_rmb(); smp_call_function_single(ti->cp_owner_cpu, local_coprocessor_release_all, ti, true); } } static void local_coprocessor_flush_all(void *info) { struct thread_info *ti = info; struct thread_info **coprocessor_owner; unsigned long old_cpenable; int i; coprocessor_owner = this_cpu_ptr(&exc_table)->coprocessor_owner; old_cpenable = xtensa_xsr(ti->cpenable, cpenable); for (i = 0; i < XCHAL_CP_MAX; i++) { if (coprocessor_owner[i] == ti) coprocessor_flush(ti, i); } xtensa_set_sr(old_cpenable, cpenable); } void coprocessor_flush_all(struct thread_info *ti) { if (ti->cpenable) { /* pairs with memw (2) in fast_coprocessor */ smp_rmb(); smp_call_function_single(ti->cp_owner_cpu, local_coprocessor_flush_all, ti, true); } } static void local_coprocessor_flush_release_all(void *info) { local_coprocessor_flush_all(info); local_coprocessor_release_all(info); } void coprocessor_flush_release_all(struct thread_info *ti) { if (ti->cpenable) { /* pairs with memw (2) in fast_coprocessor */ smp_rmb(); smp_call_function_single(ti->cp_owner_cpu, local_coprocessor_flush_release_all, ti, true); } } #endif /* * Powermanagement idle function, if any is provided by the platform. */ void arch_cpu_idle(void) { platform_idle(); raw_local_irq_disable(); } /* * This is called when the thread calls exit(). */ void exit_thread(struct task_struct *tsk) { #if XTENSA_HAVE_COPROCESSORS coprocessor_release_all(task_thread_info(tsk)); #endif } /* * Flush thread state. This is called when a thread does an execve() * Note that we flush coprocessor registers for the case execve fails. */ void flush_thread(void) { #if XTENSA_HAVE_COPROCESSORS struct thread_info *ti = current_thread_info(); coprocessor_flush_release_all(ti); #endif flush_ptrace_hw_breakpoint(current); } /* * this gets called so that we can store coprocessor state into memory and * copy the current task into the new thread. */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { #if XTENSA_HAVE_COPROCESSORS coprocessor_flush_all(task_thread_info(src)); #endif *dst = *src; return 0; } /* * Copy thread. * * There are two modes in which this function is called: * 1) Userspace thread creation, * regs != NULL, usp_thread_fn is userspace stack pointer. * It is expected to copy parent regs (in case CLONE_VM is not set * in the clone_flags) and set up passed usp in the childregs. * 2) Kernel thread creation, * regs == NULL, usp_thread_fn is the function to run in the new thread * and thread_fn_arg is its parameter. * childregs are not used for the kernel threads. * * The stack layout for the new thread looks like this: * * +------------------------+ * | childregs | * +------------------------+ <- thread.sp = sp in dummy-frame * | dummy-frame | (saved in dummy-frame spill-area) * +------------------------+ * * We create a dummy frame to return to either ret_from_fork or * ret_from_kernel_thread: * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4) * sp points to itself (thread.sp) * a2, a3 are unused for userspace threads, * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads. 
* * Note: This is a pristine frame, so we don't need any spill region on top of * childregs. * * The fun part: if we're keeping the same VM (i.e. cloning a thread, * not an entire process), we're normally given a new usp, and we CANNOT share * any live address register windows. If we just copy those live frames over, * the two threads (parent and child) will overflow the same frames onto the * parent stack at different times, likely corrupting the parent stack (esp. * if the parent returns from functions that called clone() and calls new * ones, before the child overflows its now old copies of its parent windows). * One solution is to spill windows to the parent stack, but that's fairly * involved. Much simpler to just not copy those live frames across. */ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) { unsigned long clone_flags = args->flags; unsigned long usp_thread_fn = args->stack; unsigned long tls = args->tls; struct pt_regs *childregs = task_pt_regs(p); #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) struct thread_info *ti; #endif #if defined(__XTENSA_WINDOWED_ABI__) /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */ SPILL_SLOT(childregs, 1) = (unsigned long)childregs; SPILL_SLOT(childregs, 0) = 0; p->thread.sp = (unsigned long)childregs; #elif defined(__XTENSA_CALL0_ABI__) /* Reserve 16 bytes for the _switch_to stack frame. */ p->thread.sp = (unsigned long)childregs - 16; #else #error Unsupported Xtensa ABI #endif if (!args->fn) { struct pt_regs *regs = current_pt_regs(); unsigned long usp = usp_thread_fn ? usp_thread_fn : regs->areg[1]; p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_fork, 0x1); *childregs = *regs; childregs->areg[1] = usp; childregs->areg[2] = 0; /* When sharing memory with the parent thread, the child usually starts on a pristine stack, so we have to reset windowbase, windowstart and wmask. (Note that such a new thread is required to always create an initial call4 frame) The exception is vfork, where the new thread continues to run on the parent's stack until it calls execve. This could be a call8 or call12, which requires a legal stack frame of the previous caller for the overflow handlers to work. (Note that it's always legal to overflow live registers). In this case, ensure to spill at least the stack pointer of that frame. */ if (clone_flags & CLONE_VM) { /* check that caller window is live and same stack */ int len = childregs->wmask & ~0xf; if (regs->areg[1] == usp && len != 0) { int callinc = (regs->areg[0] >> 30) & 3; int caller_ars = XCHAL_NUM_AREGS - callinc * 4; put_user(regs->areg[caller_ars+1], (unsigned __user*)(usp - 12)); } childregs->wmask = 1; childregs->windowstart = 1; childregs->windowbase = 0; } if (clone_flags & CLONE_SETTLS) childregs->threadptr = tls; } else { p->thread.ra = MAKE_RA_FOR_CALL( (unsigned long)ret_from_kernel_thread, 1); /* pass parameters to ret_from_kernel_thread: */ #if defined(__XTENSA_WINDOWED_ABI__) /* * a2 = thread_fn, a3 = thread_fn arg. * Window underflow will load registers from the * spill slots on the stack on return from _switch_to. */ SPILL_SLOT(childregs, 2) = (unsigned long)args->fn; SPILL_SLOT(childregs, 3) = (unsigned long)args->fn_arg; #elif defined(__XTENSA_CALL0_ABI__) /* * a12 = thread_fn, a13 = thread_fn arg. * _switch_to epilogue will load registers from the stack. 
*/ ((unsigned long *)p->thread.sp)[0] = (unsigned long)args->fn; ((unsigned long *)p->thread.sp)[1] = (unsigned long)args->fn_arg; #else #error Unsupported Xtensa ABI #endif /* Childregs are only used when we're going to userspace * in which case start_thread will set them up. */ } #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) ti = task_thread_info(p); ti->cpenable = 0; #endif clear_ptrace_hw_breakpoint(p); return 0; } /* * These bracket the sleeping functions.. */ unsigned long __get_wchan(struct task_struct *p) { unsigned long sp, pc; unsigned long stack_page = (unsigned long) task_stack_page(p); int count = 0; sp = p->thread.sp; pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp); do { if (sp < stack_page + sizeof(struct task_struct) || sp >= (stack_page + THREAD_SIZE) || pc == 0) return 0; if (!in_sched_functions(pc)) return pc; /* Stack layout: sp-4: ra, sp-3: sp' */ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp); sp = SPILL_SLOT(sp, 1); } while (count++ < 16); return 0; }
linux-master
arch/xtensa/kernel/process.c
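One subtle detail in copy_thread() above is the windowed-ABI expression (regs->areg[0] >> 30) & 3: on windowed Xtensa the top two bits of the return address in a0 encode whether the frame was created by call4, call8 or call12, i.e. by how many registers the window rotated, and copy_thread() uses that to locate the caller's stack pointer in the register file. A tiny sketch of that decoding; the example return addresses are invented:

#include <stdio.h>

/* The top two bits of a windowed return address give the window
 * increment of the call that created the frame: 1 = call4,
 * 2 = call8, 3 = call12. */
static int call_increment(unsigned long a0)
{
        return (a0 >> 30) & 3;
}

int main(void)
{
        /* Hypothetical return addresses with the tag bits set. */
        unsigned long ras[] = { 0x40001234, 0x80001234, 0xc0001234 };
        int i;

        for (i = 0; i < 3; i++) {
                int inc = call_increment(ras[i]);

                printf("a0=%#010lx -> created by call%d (window increment %d)\n",
                       ras[i], inc * 4, inc);
        }
        return 0;
}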
// SPDX-License-Identifier: GPL-2.0 /* * linux/arch/xtensa/kernel/irq.c * * Xtensa built-in interrupt controller and some generic functions copied * from i386. * * Copyright (C) 2002 - 2013 Tensilica, Inc. * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar * * * Chris Zankel <[email protected]> * Kevin Chea * */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel_stat.h> #include <linux/irqchip.h> #include <linux/irqchip/xtensa-mx.h> #include <linux/irqchip/xtensa-pic.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <asm/mxregs.h> #include <linux/uaccess.h> #include <asm/platform.h> DECLARE_PER_CPU(unsigned long, nmi_count); asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { unsigned long sp = current_stack_pointer; sp &= THREAD_SIZE - 1; if (unlikely(sp < (sizeof(thread_info) + 1024))) printk("Stack overflow in do_IRQ: %ld\n", sp - sizeof(struct thread_info)); } #endif generic_handle_domain_irq(NULL, hwirq); } int arch_show_interrupts(struct seq_file *p, int prec) { unsigned cpu __maybe_unused; #ifdef CONFIG_SMP show_ipi_list(p, prec); #endif #if XTENSA_FAKE_NMI seq_printf(p, "%*s:", prec, "NMI"); for_each_online_cpu(cpu) seq_printf(p, " %10lu", per_cpu(nmi_count, cpu)); seq_puts(p, " Non-maskable interrupts\n"); #endif return 0; } int xtensa_irq_domain_xlate(const u32 *intspec, unsigned int intsize, unsigned long int_irq, unsigned long ext_irq, unsigned long *out_hwirq, unsigned int *out_type) { if (WARN_ON(intsize < 1 || intsize > 2)) return -EINVAL; if (intsize == 2 && intspec[1] == 1) { int_irq = xtensa_map_ext_irq(ext_irq); if (int_irq < XCHAL_NUM_INTERRUPTS) *out_hwirq = int_irq; else return -EINVAL; } else { *out_hwirq = int_irq; } *out_type = IRQ_TYPE_NONE; return 0; } int xtensa_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct irq_chip *irq_chip = d->host_data; u32 mask = 1 << hw; if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) { irq_set_chip_and_handler_name(irq, irq_chip, handle_simple_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) { irq_set_chip_and_handler_name(irq, irq_chip, handle_edge_irq, "edge"); irq_clear_status_flags(irq, IRQ_LEVEL); } else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) { irq_set_chip_and_handler_name(irq, irq_chip, handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } else if (mask & XCHAL_INTTYPE_MASK_TIMER) { irq_set_chip_and_handler_name(irq, irq_chip, handle_percpu_irq, "timer"); irq_clear_status_flags(irq, IRQ_LEVEL); #ifdef XCHAL_INTTYPE_MASK_PROFILING } else if (mask & XCHAL_INTTYPE_MASK_PROFILING) { irq_set_chip_and_handler_name(irq, irq_chip, handle_percpu_irq, "profiling"); irq_set_status_flags(irq, IRQ_LEVEL); #endif } else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */ /* XCHAL_INTTYPE_MASK_NMI */ irq_set_chip_and_handler_name(irq, irq_chip, handle_level_irq, "level"); irq_set_status_flags(irq, IRQ_LEVEL); } return 0; } unsigned xtensa_map_ext_irq(unsigned ext_irq) { unsigned mask = XCHAL_INTTYPE_MASK_EXTERN_EDGE | XCHAL_INTTYPE_MASK_EXTERN_LEVEL; unsigned i; for (i = 0; mask; ++i, mask >>= 1) { if ((mask & 1) && ext_irq-- == 0) return i; } return XCHAL_NUM_INTERRUPTS; } unsigned xtensa_get_ext_irq_no(unsigned irq) { unsigned mask = (XCHAL_INTTYPE_MASK_EXTERN_EDGE | XCHAL_INTTYPE_MASK_EXTERN_LEVEL) & ((1u << irq) - 1); return hweight32(mask); } void __init 
init_IRQ(void) { #ifdef CONFIG_USE_OF irqchip_init(); #else #ifdef CONFIG_HAVE_SMP xtensa_mx_init_legacy(NULL); #else xtensa_pic_init_legacy(NULL); #endif #endif #ifdef CONFIG_SMP ipi_init(); #endif } #ifdef CONFIG_HOTPLUG_CPU /* * The CPU has been marked offline. Migrate IRQs off this CPU. If * the affinity settings do not allow other CPUs, force them onto any * available CPU. */ void migrate_irqs(void) { unsigned int i, cpu = smp_processor_id(); for_each_active_irq(i) { struct irq_data *data = irq_get_irq_data(i); const struct cpumask *mask; unsigned int newcpu; if (irqd_is_per_cpu(data)) continue; mask = irq_data_get_affinity_mask(data); if (!cpumask_test_cpu(cpu, mask)) continue; newcpu = cpumask_any_and(mask, cpu_online_mask); if (newcpu >= nr_cpu_ids) { pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", i, cpu); irq_set_affinity(i, cpu_all_mask); } else { irq_set_affinity(i, mask); } } } #endif /* CONFIG_HOTPLUG_CPU */
linux-master
arch/xtensa/kernel/irq.c
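xtensa_map_ext_irq() and xtensa_get_ext_irq_no() above translate between external interrupt numbers and hardware interrupt lines by scanning, respectively counting bits in, the mask of externally wired interrupts. The following sketch reproduces both directions with a hypothetical EXTERN_IRQ_MASK in place of the XCHAL_INTTYPE_MASK_EXTERN_* configuration constants:

#include <stdio.h>

/* Hypothetical mask of hardware interrupt lines wired to external
 * pins; bits 2, 4, 5 and 7 are set. */
#define EXTERN_IRQ_MASK 0x000000b4u
#define NUM_INTERRUPTS  32

/* Same scan as xtensa_map_ext_irq(): the N-th external IRQ is the
 * N-th set bit of the mask. */
static unsigned map_ext_irq(unsigned ext_irq)
{
        unsigned mask = EXTERN_IRQ_MASK;
        unsigned i;

        for (i = 0; mask; ++i, mask >>= 1)
                if ((mask & 1) && ext_irq-- == 0)
                        return i;
        return NUM_INTERRUPTS;  /* not found */
}

/* Inverse mapping, as in xtensa_get_ext_irq_no(): count how many
 * external lines sit below the given hardware IRQ. */
static unsigned get_ext_irq_no(unsigned hwirq)
{
        return __builtin_popcount(EXTERN_IRQ_MASK & ((1u << hwirq) - 1));
}

int main(void)
{
        unsigned e;

        for (e = 0; e < 4; e++) {
                unsigned hw = map_ext_irq(e);

                printf("ext %u -> hw %u -> ext %u\n", e, hw, get_ext_irq_no(hw));
        }
        return 0;
}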
/* * arch/xtensa/kernel/xtensa_ksyms.c * * Export Xtensa-specific functions for loadable modules. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Joe Taylor <[email protected]> */ #include <linux/module.h> #include <asm/pgtable.h> EXPORT_SYMBOL(empty_zero_page); unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_or_4);
linux-master
arch/xtensa/kernel/xtensa_ksyms.c
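The two stubs exported by xtensa_ksyms.c above exist, presumably, because the compiler can lower __sync_* builtins to out-of-line helper calls named __sync_fetch_and_and_4()/__sync_fetch_and_or_4() when it does not inline them; exporting BUG() versions lets modules link while making any unexpected call loud. A small userspace reminder of the builtin involved (on most hosts it is simply inlined to an atomic instruction):

#include <stdio.h>

int main(void)
{
        unsigned int flags = 0xffu;

        /* On cores without suitable atomic instructions GCC may emit a
         * call to __sync_fetch_and_and_4() for this builtin; otherwise
         * it is inlined. */
        unsigned int old = __sync_fetch_and_and(&flags, 0x0fu);

        printf("old=%#x new=%#x\n", old, flags);
        return 0;
}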
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2007 Tensilica Inc. * * Joe Taylor <[email protected], [email protected]> * Chris Zankel <[email protected]> * Scott Foehner<[email protected]>, * Kevin Chea * Marc Gauthier<[email protected]> <[email protected]> */ #include <linux/audit.h> #include <linux/errno.h> #include <linux/hw_breakpoint.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/perf_event.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/seccomp.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/uaccess.h> #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> #include <asm/coprocessor.h> #include <asm/elf.h> #include <asm/page.h> #include <asm/ptrace.h> static int gpr_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { struct pt_regs *regs = task_pt_regs(target); struct user_pt_regs newregs = { .pc = regs->pc, .ps = regs->ps & ~(1 << PS_EXCM_BIT), .lbeg = regs->lbeg, .lend = regs->lend, .lcount = regs->lcount, .sar = regs->sar, .threadptr = regs->threadptr, .windowbase = regs->windowbase, .windowstart = regs->windowstart, .syscall = regs->syscall, }; memcpy(newregs.a, regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4, regs->windowbase * 16); memcpy(newregs.a + regs->windowbase * 4, regs->areg, (WSBITS - regs->windowbase) * 16); return membuf_write(&to, &newregs, sizeof(newregs)); } static int gpr_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct user_pt_regs newregs = {0}; struct pt_regs *regs; const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); if (ret) return ret; if (newregs.windowbase >= XCHAL_NUM_AREGS / 4) return -EINVAL; regs = task_pt_regs(target); regs->pc = newregs.pc; regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask); regs->lbeg = newregs.lbeg; regs->lend = newregs.lend; regs->lcount = newregs.lcount; regs->sar = newregs.sar; regs->threadptr = newregs.threadptr; if (newregs.syscall) regs->syscall = newregs.syscall; if (newregs.windowbase != regs->windowbase || newregs.windowstart != regs->windowstart) { u32 rotws, wmask; rotws = (((newregs.windowstart | (newregs.windowstart << WSBITS)) >> newregs.windowbase) & ((1 << WSBITS) - 1)) & ~1; wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) | (rotws & 0xF) | 1; regs->windowbase = newregs.windowbase; regs->windowstart = newregs.windowstart; regs->wmask = wmask; } memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4, newregs.a, newregs.windowbase * 16); memcpy(regs->areg, newregs.a + newregs.windowbase * 4, (WSBITS - newregs.windowbase) * 16); return 0; } static int tie_get(struct task_struct *target, const struct user_regset *regset, struct membuf to) { int ret; struct pt_regs *regs = task_pt_regs(target); struct thread_info *ti = task_thread_info(target); elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL); if (!newregs) return -ENOMEM; newregs->opt = regs->xtregs_opt; newregs->user = ti->xtregs_user; #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessor registers to memory. 
*/ coprocessor_flush_all(ti); newregs->cp0 = ti->xtregs_cp.cp0; newregs->cp1 = ti->xtregs_cp.cp1; newregs->cp2 = ti->xtregs_cp.cp2; newregs->cp3 = ti->xtregs_cp.cp3; newregs->cp4 = ti->xtregs_cp.cp4; newregs->cp5 = ti->xtregs_cp.cp5; newregs->cp6 = ti->xtregs_cp.cp6; newregs->cp7 = ti->xtregs_cp.cp7; #endif ret = membuf_write(&to, newregs, sizeof(*newregs)); kfree(newregs); return ret; } static int tie_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret; struct pt_regs *regs = task_pt_regs(target); struct thread_info *ti = task_thread_info(target); elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL); if (!newregs) return -ENOMEM; ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, newregs, 0, -1); if (ret) goto exit; regs->xtregs_opt = newregs->opt; ti->xtregs_user = newregs->user; #if XTENSA_HAVE_COPROCESSORS /* Flush all coprocessors before we overwrite them. */ coprocessor_flush_release_all(ti); ti->xtregs_cp.cp0 = newregs->cp0; ti->xtregs_cp.cp1 = newregs->cp1; ti->xtregs_cp.cp2 = newregs->cp2; ti->xtregs_cp.cp3 = newregs->cp3; ti->xtregs_cp.cp4 = newregs->cp4; ti->xtregs_cp.cp5 = newregs->cp5; ti->xtregs_cp.cp6 = newregs->cp6; ti->xtregs_cp.cp7 = newregs->cp7; #endif exit: kfree(newregs); return ret; } enum xtensa_regset { REGSET_GPR, REGSET_TIE, }; static const struct user_regset xtensa_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_pt_regs) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = gpr_get, .set = gpr_set, }, [REGSET_TIE] = { .core_note_type = NT_PRFPREG, .n = sizeof(elf_xtregs_t) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .regset_get = tie_get, .set = tie_set, }, }; static const struct user_regset_view user_xtensa_view = { .name = "xtensa", .e_machine = EM_XTENSA, .regsets = xtensa_regsets, .n = ARRAY_SIZE(xtensa_regsets) }; const struct user_regset_view *task_user_regset_view(struct task_struct *task) { return &user_xtensa_view; } void user_enable_single_step(struct task_struct *child) { set_tsk_thread_flag(child, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *child) { clear_tsk_thread_flag(child, TIF_SINGLESTEP); } /* * Called by kernel/ptrace.c when detaching to disable single stepping. */ void ptrace_disable(struct task_struct *child) { /* Nothing to do.. */ } static int ptrace_getregs(struct task_struct *child, void __user *uregs) { return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR, 0, sizeof(xtensa_gregset_t), uregs); } static int ptrace_setregs(struct task_struct *child, void __user *uregs) { return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR, 0, sizeof(xtensa_gregset_t), uregs); } static int ptrace_getxregs(struct task_struct *child, void __user *uregs) { return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE, 0, sizeof(elf_xtregs_t), uregs); } static int ptrace_setxregs(struct task_struct *child, void __user *uregs) { return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE, 0, sizeof(elf_xtregs_t), uregs); } static int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret) { struct pt_regs *regs; unsigned long tmp; regs = task_pt_regs(child); tmp = 0; /* Default return value. */ switch(regno) { case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: tmp = regs->areg[regno - REG_AR_BASE]; break; case REG_A_BASE ... 
REG_A_BASE + 15: tmp = regs->areg[regno - REG_A_BASE]; break; case REG_PC: tmp = regs->pc; break; case REG_PS: /* Note: PS.EXCM is not set while user task is running; * its being set in regs is for exception handling * convenience. */ tmp = (regs->ps & ~(1 << PS_EXCM_BIT)); break; case REG_WB: break; /* tmp = 0 */ case REG_WS: { unsigned long wb = regs->windowbase; unsigned long ws = regs->windowstart; tmp = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1); break; } case REG_LBEG: tmp = regs->lbeg; break; case REG_LEND: tmp = regs->lend; break; case REG_LCOUNT: tmp = regs->lcount; break; case REG_SAR: tmp = regs->sar; break; case SYSCALL_NR: tmp = regs->syscall; break; default: return -EIO; } return put_user(tmp, ret); } static int ptrace_pokeusr(struct task_struct *child, long regno, long val) { struct pt_regs *regs; regs = task_pt_regs(child); switch (regno) { case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1: regs->areg[regno - REG_AR_BASE] = val; break; case REG_A_BASE ... REG_A_BASE + 15: regs->areg[regno - REG_A_BASE] = val; break; case REG_PC: regs->pc = val; break; case SYSCALL_NR: regs->syscall = val; break; default: return -EIO; } return 0; } #ifdef CONFIG_HAVE_HW_BREAKPOINT static void ptrace_hbptriggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { int i; struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); if (bp->attr.bp_type & HW_BREAKPOINT_X) { for (i = 0; i < XCHAL_NUM_IBREAK; ++i) if (current->thread.ptrace_bp[i] == bp) break; i <<= 1; } else { for (i = 0; i < XCHAL_NUM_DBREAK; ++i) if (current->thread.ptrace_wp[i] == bp) break; i = (i << 1) | 1; } force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address); } static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type) { struct perf_event_attr attr; ptrace_breakpoint_init(&attr); /* Initialise fields to sane defaults. */ attr.bp_addr = 0; attr.bp_len = 1; attr.bp_type = type; attr.disabled = 1; return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); } /* * Address bit 0 choose instruction (0) or data (1) break register, bits * 31..1 are the register number. * Both PTRACE_GETHBPREGS and PTRACE_SETHBPREGS transfer two 32-bit words: * address (0) and control (1). * Instruction breakpoint contorl word is 0 to clear breakpoint, 1 to set. * Data breakpoint control word bit 31 is 'trigger on store', bit 30 is * 'trigger on load, bits 29..0 are length. Length 0 is used to clear a * breakpoint. To set a breakpoint length must be a power of 2 in the range * 1..64 and the address must be length-aligned. */ static long ptrace_gethbpregs(struct task_struct *child, long addr, long __user *datap) { struct perf_event *bp; u32 user_data[2] = {0}; bool dbreak = addr & 1; unsigned idx = addr >> 1; if ((!dbreak && idx >= XCHAL_NUM_IBREAK) || (dbreak && idx >= XCHAL_NUM_DBREAK)) return -EINVAL; if (dbreak) bp = child->thread.ptrace_wp[idx]; else bp = child->thread.ptrace_bp[idx]; if (bp) { user_data[0] = bp->attr.bp_addr; user_data[1] = bp->attr.disabled ? 
0 : bp->attr.bp_len; if (dbreak) { if (bp->attr.bp_type & HW_BREAKPOINT_R) user_data[1] |= DBREAKC_LOAD_MASK; if (bp->attr.bp_type & HW_BREAKPOINT_W) user_data[1] |= DBREAKC_STOR_MASK; } } if (copy_to_user(datap, user_data, sizeof(user_data))) return -EFAULT; return 0; } static long ptrace_sethbpregs(struct task_struct *child, long addr, long __user *datap) { struct perf_event *bp; struct perf_event_attr attr; u32 user_data[2]; bool dbreak = addr & 1; unsigned idx = addr >> 1; int bp_type = 0; if ((!dbreak && idx >= XCHAL_NUM_IBREAK) || (dbreak && idx >= XCHAL_NUM_DBREAK)) return -EINVAL; if (copy_from_user(user_data, datap, sizeof(user_data))) return -EFAULT; if (dbreak) { bp = child->thread.ptrace_wp[idx]; if (user_data[1] & DBREAKC_LOAD_MASK) bp_type |= HW_BREAKPOINT_R; if (user_data[1] & DBREAKC_STOR_MASK) bp_type |= HW_BREAKPOINT_W; } else { bp = child->thread.ptrace_bp[idx]; bp_type = HW_BREAKPOINT_X; } if (!bp) { bp = ptrace_hbp_create(child, bp_type ? bp_type : HW_BREAKPOINT_RW); if (IS_ERR(bp)) return PTR_ERR(bp); if (dbreak) child->thread.ptrace_wp[idx] = bp; else child->thread.ptrace_bp[idx] = bp; } attr = bp->attr; attr.bp_addr = user_data[0]; attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK); attr.bp_type = bp_type; attr.disabled = !attr.bp_len; return modify_user_hw_breakpoint(bp, &attr); } #endif long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret = -EPERM; void __user *datap = (void __user *) data; switch (request) { case PTRACE_PEEKUSR: /* read register specified by addr. */ ret = ptrace_peekusr(child, addr, datap); break; case PTRACE_POKEUSR: /* write register specified by addr. */ ret = ptrace_pokeusr(child, addr, data); break; case PTRACE_GETREGS: ret = ptrace_getregs(child, datap); break; case PTRACE_SETREGS: ret = ptrace_setregs(child, datap); break; case PTRACE_GETXTREGS: ret = ptrace_getxregs(child, datap); break; case PTRACE_SETXTREGS: ret = ptrace_setxregs(child, datap); break; #ifdef CONFIG_HAVE_HW_BREAKPOINT case PTRACE_GETHBPREGS: ret = ptrace_gethbpregs(child, addr, datap); break; case PTRACE_SETHBPREGS: ret = ptrace_sethbpregs(child, addr, datap); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } void do_syscall_trace_leave(struct pt_regs *regs); int do_syscall_trace_enter(struct pt_regs *regs) { if (regs->syscall == NO_SYSCALL) regs->areg[2] = -ENOSYS; if (test_thread_flag(TIF_SYSCALL_TRACE) && ptrace_report_syscall_entry(regs)) { regs->areg[2] = -ENOSYS; regs->syscall = NO_SYSCALL; return 0; } if (regs->syscall == NO_SYSCALL || secure_computing() == -1) { do_syscall_trace_leave(regs); return 0; } if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, syscall_get_nr(current, regs)); audit_syscall_entry(regs->syscall, regs->areg[6], regs->areg[3], regs->areg[4], regs->areg[5]); return 1; } void do_syscall_trace_leave(struct pt_regs *regs) { int step; audit_syscall_exit(regs); if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, regs_return_value(regs)); step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) ptrace_report_syscall_exit(regs, step); }
linux-master
arch/xtensa/kernel/ptrace.c
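The PTRACE_GETHBPREGS/PTRACE_SETHBPREGS interface in ptrace.c above packs its arguments tightly: bit 0 of addr selects data (1) versus instruction (0) break registers and the remaining bits are the register index, while the control word carries the trigger-on-load/store bits and the watch length. A small sketch of that encoding; CTRL_LOAD and CTRL_STORE are placeholders for the kernel's DBREAKC_LOAD_MASK and DBREAKC_STOR_MASK:

#include <stdio.h>
#include <stdbool.h>

/* Placeholder control-word bits, matching the bit positions described
 * in the ptrace.c comment above (bit 31 store, bit 30 load). */
#define CTRL_LOAD  (1u << 30)
#define CTRL_STORE (1u << 31)

/* Build the addr argument: bit 0 = data/instruction, rest = index. */
static long encode_hbp_addr(bool data, unsigned idx)
{
        return (long)(idx << 1 | (data ? 1 : 0));
}

int main(void)
{
        long addr = encode_hbp_addr(true, 1);   /* DBREAK register 1 */
        unsigned ctrl = CTRL_STORE | 4;         /* watch 4 bytes on store */

        printf("ptrace addr word: %#lx (data=%ld idx=%ld)\n",
               addr, addr & 1, addr >> 1);
        printf("control word:     %#x (store=%d load=%d len=%u)\n",
               ctrl, !!(ctrl & CTRL_STORE), !!(ctrl & CTRL_LOAD),
               ctrl & ~(CTRL_LOAD | CTRL_STORE));
        return 0;
}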
/* * arch/xtensa/kernel/traps.c * * Exception handling. * * Derived from code with the following copyrights: * Copyright (C) 1994 - 1999 by Ralf Baechle * Modified for R3000 by Paul M. Antoine, 1995, 1996 * Complete output from die() by Ulf Carlsson, 1998 * Copyright (C) 1999 Silicon Graphics, Inc. * * Essentially rewritten for the Xtensa architecture port. * * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor <[email protected], [email protected]> * Chris Zankel <[email protected]> * Marc Gauthier<[email protected], [email protected]> * Kevin Chea * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/sched/debug.h> #include <linux/sched/task_stack.h> #include <linux/init.h> #include <linux/module.h> #include <linux/stringify.h> #include <linux/kallsyms.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> #include <asm/stacktrace.h> #include <asm/ptrace.h> #include <asm/timex.h> #include <linux/uaccess.h> #include <asm/processor.h> #include <asm/traps.h> #include <asm/hw_breakpoint.h> /* * Machine specific interrupt handlers */ static void do_illegal_instruction(struct pt_regs *regs); static void do_div0(struct pt_regs *regs); static void do_interrupt(struct pt_regs *regs); #if XTENSA_FAKE_NMI static void do_nmi(struct pt_regs *regs); #endif #ifdef CONFIG_XTENSA_LOAD_STORE static void do_load_store(struct pt_regs *regs); #endif static void do_unaligned_user(struct pt_regs *regs); static void do_multihit(struct pt_regs *regs); #if XTENSA_HAVE_COPROCESSORS static void do_coprocessor(struct pt_regs *regs); #endif static void do_debug(struct pt_regs *regs); /* * The vector table must be preceded by a save area (which * implies it must be in RAM, unless one places RAM immediately * before a ROM and puts the vector at the start of the ROM (!)) */ #define KRNL 0x01 #define USER 0x02 #define COPROCESSOR(x) \ { EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\ { EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor } typedef struct { int cause; int fast; void* handler; } dispatch_init_table_t; static dispatch_init_table_t __initdata dispatch_init_table[] = { #ifdef CONFIG_USER_ABI_CALL0_PROBE { EXCCAUSE_ILLEGAL_INSTRUCTION, USER, fast_illegal_instruction_user }, #endif { EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction}, { EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }, { EXCCAUSE_SYSTEM_CALL, 0, system_call }, /* EXCCAUSE_INSTRUCTION_FETCH unhandled */ #ifdef CONFIG_XTENSA_LOAD_STORE { EXCCAUSE_LOAD_STORE_ERROR, USER|KRNL, fast_load_store }, { EXCCAUSE_LOAD_STORE_ERROR, 0, do_load_store }, #endif { EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt }, #ifdef SUPPORT_WINDOWED { EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca }, #endif { EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0, do_div0 }, /* EXCCAUSE_PRIVILEGED unhandled */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \ IS_ENABLED(CONFIG_XTENSA_LOAD_STORE) #ifdef CONFIG_XTENSA_UNALIGNED_USER { EXCCAUSE_UNALIGNED, USER, fast_unaligned }, #endif { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, #endif { EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, #ifdef CONFIG_MMU { EXCCAUSE_ITLB_MISS, 0, do_page_fault }, { EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss}, { EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss}, { EXCCAUSE_DTLB_MISS, 0, do_page_fault }, { 
EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited }, #endif /* CONFIG_MMU */ #ifdef CONFIG_PFAULT { EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit }, { EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault }, { EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit }, { EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault }, { EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault }, { EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault }, #endif /* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */ #if XTENSA_HAVE_COPROCESSOR(0) COPROCESSOR(0), #endif #if XTENSA_HAVE_COPROCESSOR(1) COPROCESSOR(1), #endif #if XTENSA_HAVE_COPROCESSOR(2) COPROCESSOR(2), #endif #if XTENSA_HAVE_COPROCESSOR(3) COPROCESSOR(3), #endif #if XTENSA_HAVE_COPROCESSOR(4) COPROCESSOR(4), #endif #if XTENSA_HAVE_COPROCESSOR(5) COPROCESSOR(5), #endif #if XTENSA_HAVE_COPROCESSOR(6) COPROCESSOR(6), #endif #if XTENSA_HAVE_COPROCESSOR(7) COPROCESSOR(7), #endif #if XTENSA_FAKE_NMI { EXCCAUSE_MAPPED_NMI, 0, do_nmi }, #endif { EXCCAUSE_MAPPED_DEBUG, 0, do_debug }, { -1, -1, 0 } }; /* The exception table <exc_table> serves two functions: * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c) * 2. it is a temporary memory buffer for the exception handlers. */ DEFINE_PER_CPU(struct exc_table, exc_table); DEFINE_PER_CPU(struct debug_table, debug_table); void die(const char*, struct pt_regs*, long); static inline void __die_if_kernel(const char *str, struct pt_regs *regs, long err) { if (!user_mode(regs)) die(str, regs, err); } #ifdef CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION static inline void dump_user_code(struct pt_regs *regs) { char buf[32]; if (copy_from_user(buf, (void __user *)(regs->pc & -16), sizeof(buf)) == 0) { print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE, 32, 1, buf, sizeof(buf), false); } } #else static inline void dump_user_code(struct pt_regs *regs) { } #endif /* * Unhandled Exceptions. Kill user task or panic if in kernel space. */ void do_unhandled(struct pt_regs *regs) { __die_if_kernel("Caught unhandled exception - should not happen", regs, SIGKILL); /* If in user mode, send SIGILL signal to current process */ pr_info_ratelimited("Caught unhandled exception in '%s' " "(pid = %d, pc = %#010lx) - should not happen\n" "\tEXCCAUSE is %ld\n", current->comm, task_pid_nr(current), regs->pc, regs->exccause); dump_user_code(regs); force_sig(SIGILL); } /* * Multi-hit exception. This if fatal! */ static void do_multihit(struct pt_regs *regs) { die("Caught multihit exception", regs, SIGKILL); } /* * IRQ handler. */ #if XTENSA_FAKE_NMI #define IS_POW2(v) (((v) & ((v) - 1)) == 0) #if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \ IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))) #warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level." #warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire." 
static inline void check_valid_nmi(void) { unsigned intread = xtensa_get_sr(interrupt); unsigned intenable = xtensa_get_sr(intenable); BUG_ON(intread & intenable & ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^ XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^ BIT(XCHAL_PROFILING_INTERRUPT))); } #else static inline void check_valid_nmi(void) { } #endif irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id); DEFINE_PER_CPU(unsigned long, nmi_count); static void do_nmi(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); nmi_enter(); ++*this_cpu_ptr(&nmi_count); check_valid_nmi(); xtensa_pmu_irq_handler(0, NULL); nmi_exit(); set_irq_regs(old_regs); } #endif static void do_interrupt(struct pt_regs *regs) { static const unsigned int_level_mask[] = { 0, XCHAL_INTLEVEL1_MASK, XCHAL_INTLEVEL2_MASK, XCHAL_INTLEVEL3_MASK, XCHAL_INTLEVEL4_MASK, XCHAL_INTLEVEL5_MASK, XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; struct pt_regs *old_regs = set_irq_regs(regs); unsigned unhandled = ~0u; irq_enter(); for (;;) { unsigned intread = xtensa_get_sr(interrupt); unsigned intenable = xtensa_get_sr(intenable); unsigned int_at_level = intread & intenable; unsigned level; for (level = LOCKLEVEL; level > 0; --level) { if (int_at_level & int_level_mask[level]) { int_at_level &= int_level_mask[level]; if (int_at_level & unhandled) int_at_level &= unhandled; else unhandled |= int_level_mask[level]; break; } } if (level == 0) break; /* clear lowest pending irq in the unhandled mask */ unhandled ^= (int_at_level & -int_at_level); do_IRQ(__ffs(int_at_level), regs); } irq_exit(); set_irq_regs(old_regs); } static bool check_div0(struct pt_regs *regs) { static const u8 pattern[] = {'D', 'I', 'V', '0'}; const u8 *p; u8 buf[5]; if (user_mode(regs)) { if (copy_from_user(buf, (void __user *)regs->pc + 2, 5)) return false; p = buf; } else { p = (const u8 *)regs->pc + 2; } return memcmp(p, pattern, sizeof(pattern)) == 0 || memcmp(p + 1, pattern, sizeof(pattern)) == 0; } /* * Illegal instruction. Fatal if in kernel space. */ static void do_illegal_instruction(struct pt_regs *regs) { #ifdef CONFIG_USER_ABI_CALL0_PROBE /* * When call0 application encounters an illegal instruction fast * exception handler will attempt to set PS.WOE and retry failing * instruction. * If we get here we know that that instruction is also illegal * with PS.WOE set, so it's not related to the windowed option * hence PS.WOE may be cleared. */ if (regs->pc == current_thread_info()->ps_woe_fix_addr) regs->ps &= ~PS_WOE_MASK; #endif if (check_div0(regs)) { do_div0(regs); return; } __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL); /* If in user mode, send SIGILL signal to current process. */ pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n", current->comm, task_pid_nr(current), regs->pc); force_sig(SIGILL); } static void do_div0(struct pt_regs *regs) { __die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL); force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc); } #ifdef CONFIG_XTENSA_LOAD_STORE static void do_load_store(struct pt_regs *regs) { __die_if_kernel("Unhandled load/store exception in kernel", regs, SIGKILL); pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n", regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr); } #endif /* * Handle unaligned memory accesses from user space. Kill task. 
* * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory * accesses causes from user space. */ static void do_unaligned_user(struct pt_regs *regs) { __die_if_kernel("Unhandled unaligned exception in kernel", regs, SIGKILL); pr_info_ratelimited("Unaligned memory access to %08lx in '%s' " "(pid = %d, pc = %#010lx)\n", regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr); } #if XTENSA_HAVE_COPROCESSORS static void do_coprocessor(struct pt_regs *regs) { coprocessor_flush_release_all(current_thread_info()); } #endif /* Handle debug events. * When CONFIG_HAVE_HW_BREAKPOINT is on this handler is called with * preemption disabled to avoid rescheduling and keep mapping of hardware * breakpoint structures to debug registers intact, so that * DEBUGCAUSE.DBNUM could be used in case of data breakpoint hit. */ static void do_debug(struct pt_regs *regs) { #ifdef CONFIG_HAVE_HW_BREAKPOINT int ret = check_hw_breakpoint(regs); preempt_enable(); if (ret == 0) return; #endif __die_if_kernel("Breakpoint in kernel", regs, SIGKILL); /* If in user mode, send SIGTRAP signal to current process */ force_sig(SIGTRAP); } #define set_handler(type, cause, handler) \ do { \ unsigned int cpu; \ \ for_each_possible_cpu(cpu) \ per_cpu(exc_table, cpu).type[cause] = (handler);\ } while (0) /* Set exception C handler - for temporary use when probing exceptions */ xtensa_exception_handler * __init trap_set_handler(int cause, xtensa_exception_handler *handler) { void *previous = per_cpu(exc_table, 0).default_handler[cause]; set_handler(default_handler, cause, handler); return previous; } static void trap_init_excsave(void) { xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1); } static void trap_init_debug(void) { unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table); this_cpu_ptr(&debug_table)->debug_exception = debug_exception; __asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL) :: "a"(debugsave)); } /* * Initialize dispatch tables. * * The exception vectors are stored compressed the __init section in the * dispatch_init_table. This function initializes the following three tables * from that compressed table: * - fast user first dispatch table for user exceptions * - fast kernel first dispatch table for kernel exceptions * - default C-handler C-handler called by the default fast handler. * * See vectors.S for more details. */ void __init trap_init(void) { int i; /* Setup default vectors. */ for (i = 0; i < EXCCAUSE_N; i++) { set_handler(fast_user_handler, i, user_exception); set_handler(fast_kernel_handler, i, kernel_exception); set_handler(default_handler, i, do_unhandled); } /* Setup specific handlers. */ for(i = 0; dispatch_init_table[i].cause >= 0; i++) { int fast = dispatch_init_table[i].fast; int cause = dispatch_init_table[i].cause; void *handler = dispatch_init_table[i].handler; if (fast == 0) set_handler(default_handler, cause, handler); if ((fast & USER) != 0) set_handler(fast_user_handler, cause, handler); if ((fast & KRNL) != 0) set_handler(fast_kernel_handler, cause, handler); } /* Initialize EXCSAVE_1 to hold the address of the exception table. */ trap_init_excsave(); trap_init_debug(); } #ifdef CONFIG_SMP void secondary_trap_init(void) { trap_init_excsave(); trap_init_debug(); } #endif /* * This function dumps the current valid window frame and other base registers. 
*/ void show_regs(struct pt_regs * regs) { int i; show_regs_print_info(KERN_DEFAULT); for (i = 0; i < 16; i++) { if ((i % 8) == 0) pr_info("a%02d:", i); pr_cont(" %08lx", regs->areg[i]); } pr_cont("\n"); pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", regs->pc, regs->ps, regs->depc, regs->excvaddr); pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", regs->lbeg, regs->lend, regs->lcount, regs->sar); if (user_mode(regs)) pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", regs->windowbase, regs->windowstart, regs->wmask, regs->syscall); } static int show_trace_cb(struct stackframe *frame, void *data) { const char *loglvl = data; if (kernel_text_address(frame->pc)) printk("%s [<%08lx>] %pB\n", loglvl, frame->pc, (void *)frame->pc); return 0; } static void show_trace(struct task_struct *task, unsigned long *sp, const char *loglvl) { if (!sp) sp = stack_pointer(task); printk("%sCall Trace:\n", loglvl); walk_stackframe(sp, show_trace_cb, (void *)loglvl); } #define STACK_DUMP_ENTRY_SIZE 4 #define STACK_DUMP_LINE_SIZE 16 static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; struct stack_fragment { size_t len; size_t off; u8 *sp; const char *loglvl; }; static int show_stack_fragment_cb(struct stackframe *frame, void *data) { struct stack_fragment *sf = data; while (sf->off < sf->len) { u8 line[STACK_DUMP_LINE_SIZE]; size_t line_len = sf->len - sf->off > STACK_DUMP_LINE_SIZE ? STACK_DUMP_LINE_SIZE : sf->len - sf->off; bool arrow = sf->off == 0; if (frame && frame->sp == (unsigned long)(sf->sp + sf->off)) arrow = true; __memcpy(line, sf->sp + sf->off, line_len); print_hex_dump(sf->loglvl, arrow ? "> " : " ", DUMP_PREFIX_NONE, STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE, line, line_len, false); sf->off += STACK_DUMP_LINE_SIZE; if (arrow) return 0; } return 1; } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { struct stack_fragment sf; if (!sp) sp = stack_pointer(task); sf.len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE), kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE); sf.off = 0; sf.sp = (u8 *)sp; sf.loglvl = loglvl; printk("%sStack:\n", loglvl); walk_stackframe(sp, show_stack_fragment_cb, &sf); while (sf.off < sf.len) show_stack_fragment_cb(NULL, &sf); show_trace(task, sp, loglvl); } DEFINE_SPINLOCK(die_lock); void __noreturn die(const char * str, struct pt_regs * regs, long err) { static int die_counter; const char *pr = ""; if (IS_ENABLED(CONFIG_PREEMPTION)) pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; console_verbose(); spin_lock_irq(&die_lock); pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr); show_regs(regs); if (!user_mode(regs)) show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irq(&die_lock); if (in_interrupt()) panic("Fatal exception in interrupt"); if (panic_on_oops) panic("Fatal exception"); make_task_dead(err); }
linux-master
arch/xtensa/kernel/traps.c
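The show_stack() routine in the traps.c record above clamps the hexdump length with (-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE), i.e. the bytes remaining up to the end of the THREAD_SIZE-aligned stack region, rounded down to whole dump entries. A minimal userspace sketch of just that arithmetic; THREAD_SIZE and the entry size below are illustrative values, not taken from any particular Xtensa configuration.

/* Standalone sketch of the show_stack() length clamp. */
#include <stdio.h>
#include <stddef.h>

#define THREAD_SIZE            8192UL   /* power of two, as in the kernel */
#define STACK_DUMP_ENTRY_SIZE  4UL

/* Bytes from sp up to the next THREAD_SIZE boundary, rounded down to a
 * multiple of STACK_DUMP_ENTRY_SIZE. */
static size_t bytes_to_stack_top(size_t sp)
{
	return (-sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE);
}

int main(void)
{
	printf("%zu\n", bytes_to_stack_top(0xA1FF0));  /* 16: just below 0xA2000 */
	printf("%zu\n", bytes_to_stack_top(0xA0000));  /* 0: already on a boundary */
	return 0;
}

show_stack() then takes the min() of this value and kstack_depth_to_print entries, so a dump started near the top of the thread stack never reads past the stack region.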
/* * arch/xtensa/kernel/asm-offsets.c * * Generates definitions from c-type structures used by assembly sources. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 Tensilica Inc. * * Chris Zankel <[email protected]> */ #include <asm/processor.h> #include <asm/coprocessor.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/thread_info.h> #include <linux/ptrace.h> #include <linux/mm.h> #include <linux/kbuild.h> #include <linux/suspend.h> #include <asm/ptrace.h> #include <asm/traps.h> #include <linux/uaccess.h> int main(void) { /* struct pt_regs */ DEFINE(PT_PC, offsetof (struct pt_regs, pc)); DEFINE(PT_PS, offsetof (struct pt_regs, ps)); DEFINE(PT_DEPC, offsetof (struct pt_regs, depc)); DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause)); DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr)); DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause)); DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask)); DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg)); DEFINE(PT_LEND, offsetof (struct pt_regs, lend)); DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount)); DEFINE(PT_SAR, offsetof (struct pt_regs, sar)); DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel)); DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall)); DEFINE(PT_SCOMPARE1, offsetof(struct pt_regs, scompare1)); DEFINE(PT_THREADPTR, offsetof(struct pt_regs, threadptr)); DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0])); DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1])); DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2])); DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3])); DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4])); DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5])); DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6])); DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7])); DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8])); DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9])); DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10])); DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11])); DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12])); DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13])); DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14])); DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15])); DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase)); DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart)); DEFINE(PT_KERNEL_SIZE, offsetof(struct pt_regs, areg[16])); DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS])); DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt)); DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t)); /* struct task_struct */ DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace)); DEFINE(TASK_MM, offsetof (struct task_struct, mm)); DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm)); DEFINE(TASK_PID, offsetof (struct task_struct, pid)); DEFINE(TASK_THREAD, offsetof (struct task_struct, thread)); DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack)); #ifdef CONFIG_STACKPROTECTOR DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary)); #endif DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct)); /* offsets in thread_info struct */ OFFSET(TI_TASK, thread_info, task); OFFSET(TI_FLAGS, thread_info, flags); 
OFFSET(TI_STSTUS, thread_info, status); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); #ifdef CONFIG_USER_ABI_CALL0_PROBE OFFSET(TI_PS_WOE_FIX_ADDR, thread_info, ps_woe_fix_addr); #endif /* struct thread_info (offset from start_struct) */ DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra)); DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); #if XCHAL_HAVE_EXCLUSIVE DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8)); #endif DEFINE(THREAD_CPENABLE, offsetof(struct thread_info, cpenable)); DEFINE(THREAD_CPU, offsetof(struct thread_info, cpu)); DEFINE(THREAD_CP_OWNER_CPU, offsetof(struct thread_info, cp_owner_cpu)); #if XTENSA_HAVE_COPROCESSORS DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2)); DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3)); DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4)); DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5)); DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6)); DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7)); #endif DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); /* struct mm_struct */ DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users)); DEFINE(MM_PGD, offsetof (struct mm_struct, pgd)); DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context)); /* struct page */ DEFINE(PAGE_FLAGS, offsetof(struct page, flags)); /* constants */ DEFINE(_CLONE_VM, CLONE_VM); DEFINE(_CLONE_UNTRACED, CLONE_UNTRACED); DEFINE(PG_ARCH_1, PG_arch_1); /* struct debug_table */ DEFINE(DT_DEBUG_EXCEPTION, offsetof(struct debug_table, debug_exception)); DEFINE(DT_DEBUG_SAVE, offsetof(struct debug_table, debug_save)); #ifdef CONFIG_HAVE_HW_BREAKPOINT DEFINE(DT_DBREAKC_SAVE, offsetof(struct debug_table, dbreakc_save)); DEFINE(DT_ICOUNT_SAVE, offsetof(struct debug_table, icount_save)); DEFINE(DT_ICOUNT_LEVEL_SAVE, offsetof(struct debug_table, icount_level_save)); #endif /* struct exc_table */ DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk)); DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save)); DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup)); DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param)); #if XTENSA_HAVE_COPROCESSORS DEFINE(EXC_TABLE_COPROCESSOR_OWNER, offsetof(struct exc_table, coprocessor_owner)); #endif DEFINE(EXC_TABLE_FAST_USER, offsetof(struct exc_table, fast_user_handler)); DEFINE(EXC_TABLE_FAST_KERNEL, offsetof(struct exc_table, fast_kernel_handler)); DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler)); #ifdef CONFIG_HIBERNATION DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address)); DEFINE(PBE_NEXT, offsetof(struct pbe, next)); DEFINE(PBE_SIZE, sizeof(struct pbe)); #endif return 0; }
linux-master
arch/xtensa/kernel/asm-offsets.c
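The asm-offsets.c record above is never linked into the kernel image: it is only compiled to assembly, and every DEFINE()/OFFSET() invocation plants a marker string that the build scripts scrape into generated constants (asm-offsets.h) so hand-written assembly can address struct fields by name. The compile-only sketch below only approximates that idea; the real macros live in include/linux/kbuild.h, and pt_regs_demo is an invented stand-in for struct pt_regs.

/* Approximate sketch of the asm-offsets mechanism (not the kernel macros).
 * Build with "cc -S offsets_demo.c" and look for the .ascii marker lines in
 * the generated assembly; the kernel build turns such lines into
 * "#define PT_PC <offset>" entries. Not meant to do anything at run time. */
#include <stddef.h>

struct pt_regs_demo {                   /* invented, for illustration only */
	unsigned long pc;
	unsigned long ps;
	unsigned long areg[16];
};

#define DEFINE(sym, val) \
	__asm__ __volatile__("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

/* Never called; it exists only so the compiler emits the marker strings. */
void emit_offsets(void)
{
	DEFINE(PT_PC,    offsetof(struct pt_regs_demo, pc));
	DEFINE(PT_PS,    offsetof(struct pt_regs_demo, ps));
	DEFINE(PT_AREG0, offsetof(struct pt_regs_demo, areg[0]));
}

int main(void)
{
	return 0;
}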
// SPDX-License-Identifier: GPL-2.0-or-later /* * arch/xtensa/kernel/pci.c * * PCI bios-type initialisation for PCI machines * * Copyright (C) 2001-2005 Tensilica Inc. * * Based largely on work from Cort (ppc/kernel/pci.c) * IO functions copied from sparc. * * Chris Zankel <[email protected]> */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/memblock.h> #include <asm/pci-bridge.h> #include <asm/platform.h> /* * We need to avoid collisions with `mirrored' VGA ports * and other strange ISA hardware, so we always want the * addresses to be allocated in the 0x000-0x0ff region * modulo 0x400. * * Why? Because some silly external IO cards only decode * the low 10 bits of the IO address. The 0x00-0xff region * is reserved for motherboard devices that decode all 16 * bits, so it's ok to allocate at, say, 0x2800-0x28ff, * but we want to try to avoid allocating at 0x2900-0x2bff * which might have be mirrored at 0x0100-0x03ff.. */ resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { struct pci_dev *dev = data; resource_size_t start = res->start; if (res->flags & IORESOURCE_IO) { if (size > 0x100) { pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n", pci_name(dev), dev->resource - res, size); } if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; } return start; } void pcibios_fixup_bus(struct pci_bus *bus) { if (bus->parent) { /* This is a subordinate bridge */ pci_read_bridge_bases(bus); } } /* * Platform support for /proc/bus/pci/X/Y mmap()s. * -- paulus. */ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma) { struct pci_controller *pci_ctrl = (struct pci_controller*) pdev->sysdata; resource_size_t ioaddr = pci_resource_start(pdev, bar); if (!pci_ctrl) return -EINVAL; /* should never happen */ /* Convert to an offset within this PCI controller */ ioaddr -= (unsigned long)pci_ctrl->io_space.base; vma->vm_pgoff += (ioaddr + pci_ctrl->io_space.start) >> PAGE_SHIFT; return 0; }
linux-master
arch/xtensa/kernel/pci.c
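pcibios_align_resource() in the pci.c record above keeps I/O allocations inside the 0x000-0x0ff window of every 0x400-byte block so that ISA-style cards decoding only 10 address bits cannot mirror them. A small standalone check of that rounding rule, with made-up example addresses:

/* Standalone sketch of the pcibios_align_resource() I/O rounding rule. */
#include <stdio.h>

/* If the proposed start lands in the 0x100-0x3ff part of a 0x400 block,
 * push it up to the next 0x400 boundary, back into the unambiguous
 * 0x000-0x0ff window. */
static unsigned long align_io_start(unsigned long start)
{
	if (start & 0x300)
		start = (start + 0x3ff) & ~0x3ffUL;
	return start;
}

int main(void)
{
	printf("%#lx -> %#lx\n", 0x2800UL, align_io_start(0x2800)); /* kept */
	printf("%#lx -> %#lx\n", 0x2900UL, align_io_start(0x2900)); /* moved to 0x2c00 */
	printf("%#lx -> %#lx\n", 0x28f0UL, align_io_start(0x28f0)); /* kept */
	return 0;
}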
/* * arch/xtensa/kernel/module.c * * Module support. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2006 Tensilica Inc. * * Chris Zankel <[email protected]> * */ #include <linux/module.h> #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/cache.h> static int decode_calln_opcode (unsigned char *location) { #ifdef __XTENSA_EB__ return (location[0] & 0xf0) == 0x50; #endif #ifdef __XTENSA_EL__ return (location[0] & 0xf) == 0x5; #endif } static int decode_l32r_opcode (unsigned char *location) { #ifdef __XTENSA_EB__ return (location[0] & 0xf0) == 0x10; #endif #ifdef __XTENSA_EL__ return (location[0] & 0xf) == 0x1; #endif } int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *mod) { unsigned int i; Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; Elf32_Sym *sym; unsigned char *location; uint32_t value; pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rela[i].r_offset; sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + ELF32_R_SYM(rela[i].r_info); value = sym->st_value + rela[i].r_addend; switch (ELF32_R_TYPE(rela[i].r_info)) { case R_XTENSA_NONE: case R_XTENSA_DIFF8: case R_XTENSA_DIFF16: case R_XTENSA_DIFF32: case R_XTENSA_ASM_EXPAND: break; case R_XTENSA_32: case R_XTENSA_PLT: *(uint32_t *)location += value; break; case R_XTENSA_SLOT0_OP: if (decode_calln_opcode(location)) { value -= ((unsigned long)location & -4) + 4; if ((value & 3) != 0 || ((value + (1 << 19)) >> 20) != 0) { pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, strtab + sym->st_name); return -ENOEXEC; } value = (signed int)value >> 2; #ifdef __XTENSA_EB__ location[0] = ((location[0] & ~0x3) | ((value >> 16) & 0x3)); location[1] = (value >> 8) & 0xff; location[2] = value & 0xff; #endif #ifdef __XTENSA_EL__ location[0] = ((location[0] & ~0xc0) | ((value << 6) & 0xc0)); location[1] = (value >> 2) & 0xff; location[2] = (value >> 10) & 0xff; #endif } else if (decode_l32r_opcode(location)) { value -= (((unsigned long)location + 3) & -4); if ((value & 3) != 0 || (signed int)value >> 18 != -1) { pr_err("%s: relocation out of range, " "section %d reloc %d " "sym '%s'\n", mod->name, relsec, i, strtab + sym->st_name); return -ENOEXEC; } value = (signed int)value >> 2; #ifdef __XTENSA_EB__ location[1] = (value >> 8) & 0xff; location[2] = value & 0xff; #endif #ifdef __XTENSA_EL__ location[1] = value & 0xff; location[2] = (value >> 8) & 0xff; #endif } /* FIXME: Ignore any other opcodes. The Xtensa assembler currently assumes that the linker will always do relaxation and so all PC-relative operands need relocations. (The assembler also writes out the tentative PC-relative values, assuming no link-time relaxation, so it is usually safe to ignore the relocations.) If the assembler's "--no-link-relax" flag can be made to work, and if all kernel modules can be assembled with that flag, then unexpected relocations could be detected here. 
*/ break; case R_XTENSA_SLOT1_OP: case R_XTENSA_SLOT2_OP: case R_XTENSA_SLOT3_OP: case R_XTENSA_SLOT4_OP: case R_XTENSA_SLOT5_OP: case R_XTENSA_SLOT6_OP: case R_XTENSA_SLOT7_OP: case R_XTENSA_SLOT8_OP: case R_XTENSA_SLOT9_OP: case R_XTENSA_SLOT10_OP: case R_XTENSA_SLOT11_OP: case R_XTENSA_SLOT12_OP: case R_XTENSA_SLOT13_OP: case R_XTENSA_SLOT14_OP: pr_err("%s: unexpected FLIX relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; case R_XTENSA_SLOT0_ALT: case R_XTENSA_SLOT1_ALT: case R_XTENSA_SLOT2_ALT: case R_XTENSA_SLOT3_ALT: case R_XTENSA_SLOT4_ALT: case R_XTENSA_SLOT5_ALT: case R_XTENSA_SLOT6_ALT: case R_XTENSA_SLOT7_ALT: case R_XTENSA_SLOT8_ALT: case R_XTENSA_SLOT9_ALT: case R_XTENSA_SLOT10_ALT: case R_XTENSA_SLOT11_ALT: case R_XTENSA_SLOT12_ALT: case R_XTENSA_SLOT13_ALT: case R_XTENSA_SLOT14_ALT: pr_err("%s: unexpected ALT relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; default: pr_err("%s: unexpected relocation: %u\n", mod->name, ELF32_R_TYPE(rela[i].r_info)); return -ENOEXEC; } } return 0; }
linux-master
arch/xtensa/kernel/module.c
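The R_XTENSA_SLOT0_OP branch in the module.c record above encodes a PC-relative CALLn target: the displacement is measured from the call's word-aligned PC plus 4, must itself be word aligned, and must fit in a signed 20-bit value before it is shifted right by 2 into the 18-bit instruction field. A standalone sketch of just that range computation (no opcode bytes are patched here; addresses are illustrative):

/* Sketch of the CALLn displacement check used by apply_relocate_add().
 * Returns the 18-bit field value, or -1 when out of range or misaligned. */
#include <stdio.h>
#include <stdint.h>

static long calln_field(unsigned long location, unsigned long target)
{
	uint32_t value = (uint32_t)(target - ((location & ~3UL) + 4));

	if ((value & 3) != 0 ||                 /* target must be word aligned */
	    ((value + (1 << 19)) >> 20) != 0)   /* must fit in signed 20 bits  */
		return -1;

	return (int32_t)value >> 2;             /* signed 18-bit call offset */
}

int main(void)
{
	printf("%ld\n", calln_field(0x10000000, 0x10000400)); /* 255 */
	printf("%ld\n", calln_field(0x10000000, 0x11000000)); /* -1: too far */
	return 0;
}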
/* * arch/xtensa/kernel/platform.c * * Default platform functions. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 Tensilica Inc. * * Chris Zankel <[email protected]> */ #include <linux/printk.h> #include <linux/types.h> #include <asm/platform.h> #include <asm/timex.h> /* * Default functions that are used if no platform specific function is defined. * (Please, refer to arch/xtensa/include/asm/platform.h for more information) */ void __weak __init platform_init(bp_tag_t *first) { } void __weak __init platform_setup(char **cmd) { } void __weak platform_idle(void) { __asm__ __volatile__ ("waiti 0" ::: "memory"); } #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT void __weak platform_calibrate_ccount(void) { pr_err("ERROR: Cannot calibrate cpu frequency! Assuming 10MHz.\n"); ccount_freq = 10 * 1000000UL; } #endif
linux-master
arch/xtensa/kernel/platform.c
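The platform.c record above relies entirely on __weak linkage: each hook has a default definition, and a board file overrides it simply by providing a strong symbol with the same name, no registration call needed. A tiny generic illustration of that link-time behaviour; the function name below is a stand-in, not a real kernel hook.

/* Minimal sketch of the weak-default-plus-strong-override pattern. */
#include <stdio.h>

__attribute__((weak)) void platform_demo_idle(void)
{
	puts("default idle");
}

int main(void)
{
	/* Prints "default idle" unless another object file linked into the
	 * program supplies a strong definition, e.g.
	 *     void platform_demo_idle(void) { puts("board idle"); }
	 * in which case the linker silently picks the strong one. */
	platform_demo_idle();
	return 0;
}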
/* * arch/xtensa/kernel/syscall.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2000 Silicon Graphics, Inc. * Copyright (C) 1995 - 2000 by Ralf Baechle * * Joe Taylor <[email protected], [email protected]> * Marc Gauthier <[email protected], [email protected]> * Chris Zankel <[email protected]> * Kevin Chea * */ #include <linux/uaccess.h> #include <asm/syscall.h> #include <linux/linkage.h> #include <linux/stringify.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched/mm.h> #include <linux/shm.h> syscall_t sys_call_table[] /* FIXME __cacheline_aligned */= { #define __SYSCALL(nr, entry) (syscall_t)entry, #include <asm/syscall_table.h> }; #define COLOUR_ALIGN(addr, pgoff) \ ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; return (long)ret; } asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len) { return ksys_fadvise64_64(fd, offset, len, advice); } #ifdef CONFIG_MMU unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vmm; struct vma_iterator vmi; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ if ((flags & MAP_SHARED) && ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; } if (len > TASK_SIZE) return -ENOMEM; if (!addr) addr = TASK_UNMAPPED_BASE; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); vma_iter_init(&vmi, current->mm, addr); for_each_vma(vmi, vmm) { /* At this point: (addr < vmm->vm_end). */ if (addr + len <= vm_start_gap(vmm)) break; addr = vmm->vm_end; if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr, pgoff); } if (TASK_SIZE - len < addr) return -ENOMEM; return addr; } #endif
linux-master
arch/xtensa/kernel/syscall.c
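arch_get_unmapped_area() in the syscall.c record above uses COLOUR_ALIGN() so shared mappings land on addresses whose cache colour matches the file offset, i.e. (addr - (pgoff << PAGE_SHIFT)) ends up SHMLBA-aligned, which is exactly the condition the MAP_FIXED branch rejects when violated. A standalone sketch of the macro; PAGE_SHIFT and SHMLBA below are assumed values for illustration, not a real Xtensa configuration.

/* Standalone sketch of the COLOUR_ALIGN() arithmetic from syscall.c. */
#include <stdio.h>
#include <assert.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)   /* assume a 16 KiB aliasing unit */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x2000a123, pgoff = 3;
	unsigned long aligned = COLOUR_ALIGN(addr, pgoff);

	/* The result shares its cache colour with the file offset, so the
	 * mapping cannot alias with other mappings of the same pages. */
	assert(((aligned - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0);
	printf("%#lx (pgoff %lu) -> %#lx\n", addr, pgoff, aligned);
	return 0;
}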
/* * S32C1I selftest. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2016 Cadence Design Systems Inc. */ #include <linux/init.h> #include <linux/kernel.h> #include <asm/traps.h> #if XCHAL_HAVE_S32C1I static int __initdata rcw_word, rcw_probe_pc, rcw_exc; /* * Basic atomic compare-and-swap, that records PC of S32C1I for probing. * * If *v == cmp, set *v = set. Return previous *v. */ static inline int probed_compare_swap(int *v, int cmp, int set) { int tmp; __asm__ __volatile__( " movi %1, 1f\n" " s32i %1, %4, 0\n" " wsr %2, scompare1\n" "1: s32c1i %0, %3, 0\n" : "=a" (set), "=&a" (tmp) : "a" (cmp), "a" (v), "a" (&rcw_probe_pc), "0" (set) : "memory" ); return set; } /* Handle probed exception */ static void __init do_probed_exception(struct pt_regs *regs) { if (regs->pc == rcw_probe_pc) { /* exception on s32c1i ? */ regs->pc += 3; /* skip the s32c1i instruction */ rcw_exc = regs->exccause; } else { do_unhandled(regs); } } /* Simple test of S32C1I (soc bringup assist) */ static int __init check_s32c1i(void) { int n, cause1, cause2; void *handbus, *handdata, *handaddr; /* temporarily saved handlers */ rcw_probe_pc = 0; handbus = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, do_probed_exception); handdata = trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, do_probed_exception); handaddr = trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, do_probed_exception); /* First try an S32C1I that does not store: */ rcw_exc = 0; rcw_word = 1; n = probed_compare_swap(&rcw_word, 0, 2); cause1 = rcw_exc; /* took exception? */ if (cause1 != 0) { /* unclean exception? */ if (n != 2 || rcw_word != 1) panic("S32C1I exception error"); } else if (rcw_word != 1 || n != 1) { panic("S32C1I compare error"); } /* Then an S32C1I that stores: */ rcw_exc = 0; rcw_word = 0x1234567; n = probed_compare_swap(&rcw_word, 0x1234567, 0xabcde); cause2 = rcw_exc; if (cause2 != 0) { /* unclean exception? */ if (n != 0xabcde || rcw_word != 0x1234567) panic("S32C1I exception error (b)"); } else if (rcw_word != 0xabcde || n != 0x1234567) { panic("S32C1I store error"); } /* Verify consistency of exceptions: */ if (cause1 || cause2) { pr_warn("S32C1I took exception %d, %d\n", cause1, cause2); /* If emulation of S32C1I upon bus error gets implemented, * we can get rid of this panic for single core (not SMP) */ panic("S32C1I exceptions not currently supported"); } if (cause1 != cause2) panic("inconsistent S32C1I exceptions"); trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, handbus); trap_set_handler(EXCCAUSE_LOAD_STORE_DATA_ERROR, handdata); trap_set_handler(EXCCAUSE_LOAD_STORE_ADDR_ERROR, handaddr); return 0; } #else /* XCHAL_HAVE_S32C1I */ /* This condition should not occur with a commercially deployed processor. * Display reminder for early engr test or demo chips / FPGA bitstreams */ static int __init check_s32c1i(void) { pr_warn("Processor configuration lacks atomic compare-and-swap support!\n"); return 0; } #endif /* XCHAL_HAVE_S32C1I */ early_initcall(check_s32c1i);
linux-master
arch/xtensa/kernel/s32c1i_selftest.c
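probed_compare_swap() in the selftest record above wraps the S32C1I instruction: *v is replaced by set only when it currently equals cmp, and the previous value is returned either way. Minus the PC probing, that is the same contract as a C11 strong compare-exchange, sketched below as a userspace analogue (this is not the kernel helper itself):

/* Userspace sketch of the compare-and-swap contract S32C1I provides. */
#include <stdatomic.h>
#include <stdio.h>

/* If *v == cmp, store set; in all cases return the value *v held before. */
static int compare_swap(atomic_int *v, int cmp, int set)
{
	atomic_compare_exchange_strong(v, &cmp, set);
	return cmp;   /* on failure C11 writes the observed value into cmp */
}

int main(void)
{
	atomic_int word = 1;

	printf("%d\n", compare_swap(&word, 0, 2));  /* 1: mismatch, no store */
	printf("%d\n", compare_swap(&word, 1, 2));  /* 1: matched, stored 2 */
	printf("%d\n", atomic_load(&word));         /* 2 */
	return 0;
}

These are the same expectations the selftest checks (n == 1 with the word unchanged for the non-storing case, n equal to the old value with the word updated for the storing case).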
/* * arch/xtensa/kernel/setup.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2001 - 2005 Tensilica Inc. * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. * * Chris Zankel <[email protected]> * Joe Taylor <[email protected], [email protected]> * Kevin Chea * Marc Gauthier<[email protected]> <[email protected]> */ #include <linux/errno.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/screen_info.h> #include <linux/kernel.h> #include <linux/percpu.h> #include <linux/reboot.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/of_fdt.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) # include <linux/console.h> #endif #ifdef CONFIG_PROC_FS # include <linux/seq_file.h> #endif #include <asm/bootparam.h> #include <asm/kasan.h> #include <asm/mmu_context.h> #include <asm/page.h> #include <asm/param.h> #include <asm/platform.h> #include <asm/processor.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/sysmem.h> #include <asm/timex.h> #include <asm/traps.h> #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) struct screen_info screen_info = { .orig_x = 0, .orig_y = 24, .orig_video_cols = 80, .orig_video_lines = 24, .orig_video_isVGA = 1, .orig_video_points = 16, }; #endif #ifdef CONFIG_BLK_DEV_INITRD extern unsigned long initrd_start; extern unsigned long initrd_end; extern int initrd_below_start_ok; #endif #ifdef CONFIG_USE_OF void *dtb_start = __dtb_start; #endif extern unsigned long loops_per_jiffy; /* Command line specified as configuration option. */ static char __initdata command_line[COMMAND_LINE_SIZE]; #ifdef CONFIG_CMDLINE_BOOL static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; #endif #ifdef CONFIG_PARSE_BOOTPARAM /* * Boot parameter parsing. * * The Xtensa port uses a list of variable-sized tags to pass data to * the kernel. The first tag must be a BP_TAG_FIRST tag for the list * to be recognised. The list is terminated with a zero-sized * BP_TAG_LAST tag. 
*/ typedef struct tagtable { u32 tag; int (*parse)(const bp_tag_t*); } tagtable_t; #define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \ __section(".taglist") __attribute__((used)) = { tag, fn } /* parse current tag */ static int __init parse_tag_mem(const bp_tag_t *tag) { struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); if (mi->type != MEMORY_TYPE_CONVENTIONAL) return -1; return memblock_add(mi->start, mi->end - mi->start); } __tagtable(BP_TAG_MEMORY, parse_tag_mem); #ifdef CONFIG_BLK_DEV_INITRD static int __init parse_tag_initrd(const bp_tag_t* tag) { struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data); initrd_start = (unsigned long)__va(mi->start); initrd_end = (unsigned long)__va(mi->end); return 0; } __tagtable(BP_TAG_INITRD, parse_tag_initrd); #endif /* CONFIG_BLK_DEV_INITRD */ #ifdef CONFIG_USE_OF static int __init parse_tag_fdt(const bp_tag_t *tag) { dtb_start = __va(tag->data[0]); return 0; } __tagtable(BP_TAG_FDT, parse_tag_fdt); #endif /* CONFIG_USE_OF */ static int __init parse_tag_cmdline(const bp_tag_t* tag) { strscpy(command_line, (char *)(tag->data), COMMAND_LINE_SIZE); return 0; } __tagtable(BP_TAG_COMMAND_LINE, parse_tag_cmdline); static int __init parse_bootparam(const bp_tag_t* tag) { extern tagtable_t __tagtable_begin, __tagtable_end; tagtable_t *t; /* Boot parameters must start with a BP_TAG_FIRST tag. */ if (tag->id != BP_TAG_FIRST) { pr_warn("Invalid boot parameters!\n"); return 0; } tag = (bp_tag_t*)((unsigned long)tag + sizeof(bp_tag_t) + tag->size); /* Parse all tags. */ while (tag != NULL && tag->id != BP_TAG_LAST) { for (t = &__tagtable_begin; t < &__tagtable_end; t++) { if (tag->id == t->tag) { t->parse(tag); break; } } if (t == &__tagtable_end) pr_warn("Ignoring tag 0x%08x\n", tag->id); tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size); } return 0; } #else static int __init parse_bootparam(const bp_tag_t *tag) { pr_info("Ignoring boot parameters at %p\n", tag); return 0; } #endif #ifdef CONFIG_USE_OF #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR; EXPORT_SYMBOL(xtensa_kio_paddr); static int __init xtensa_dt_io_area(unsigned long node, const char *uname, int depth, void *data) { const __be32 *ranges; int len; if (depth > 1) return 0; if (!of_flat_dt_is_compatible(node, "simple-bus")) return 0; ranges = of_get_flat_dt_prop(node, "ranges", &len); if (!ranges) return 1; if (len == 0) return 1; xtensa_kio_paddr = of_read_ulong(ranges+1, 1); /* round down to nearest 256MB boundary */ xtensa_kio_paddr &= 0xf0000000; init_kio(); return 1; } #else static int __init xtensa_dt_io_area(unsigned long node, const char *uname, int depth, void *data) { return 1; } #endif void __init early_init_devtree(void *params) { early_init_dt_scan(params); of_scan_flat_dt(xtensa_dt_io_area, NULL); if (!command_line[0]) strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); } #endif /* CONFIG_USE_OF */ /* * Initialize architecture. (Early stage) */ void __init init_arch(bp_tag_t *bp_start) { /* Initialize basic exception handling if configuration may need it */ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)) early_trap_init(); /* Initialize MMU. 
*/ init_mmu(); /* Initialize initial KASAN shadow map */ kasan_early_init(); /* Parse boot parameters */ if (bp_start) parse_bootparam(bp_start); #ifdef CONFIG_USE_OF early_init_devtree(dtb_start); #endif #ifdef CONFIG_CMDLINE_BOOL if (!command_line[0]) strscpy(command_line, default_command_line, COMMAND_LINE_SIZE); #endif /* Early hook for platforms */ platform_init(bp_start); } /* * Initialize system. Setup memory and reserve regions. */ static inline int __init_memblock mem_reserve(unsigned long start, unsigned long end) { return memblock_reserve(start, end - start); } void __init setup_arch(char **cmdline_p) { pr_info("config ID: %08x:%08x\n", xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE)); if (xtensa_get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 || xtensa_get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1) pr_info("built for config ID: %08x:%08x\n", XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1); *cmdline_p = command_line; platform_setup(cmdline_p); strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); /* Reserve some memory regions */ #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start < initrd_end && !mem_reserve(__pa(initrd_start), __pa(initrd_end))) initrd_below_start_ok = 1; else initrd_start = 0; #endif mem_reserve(__pa(_stext), __pa(_end)); #ifdef CONFIG_XIP_KERNEL #ifdef CONFIG_VECTORS_ADDR mem_reserve(__pa(_xip_text_start), __pa(_xip_text_end)); #endif mem_reserve(__pa(_xip_start), __pa(_xip_end)); #endif #ifdef CONFIG_VECTORS_ADDR #ifdef SUPPORT_WINDOWED mem_reserve(__pa(_WindowVectors_text_start), __pa(_WindowVectors_text_end)); #endif mem_reserve(__pa(_DebugInterruptVector_text_start), __pa(_DebugInterruptVector_text_end)); mem_reserve(__pa(_KernelExceptionVector_text_start), __pa(_KernelExceptionVector_text_end)); mem_reserve(__pa(_UserExceptionVector_text_start), __pa(_UserExceptionVector_text_end)); mem_reserve(__pa(_DoubleExceptionVector_text_start), __pa(_DoubleExceptionVector_text_end)); mem_reserve(__pa(_exception_text_start), __pa(_exception_text_end)); #if XCHAL_EXCM_LEVEL >= 2 mem_reserve(__pa(_Level2InterruptVector_text_start), __pa(_Level2InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 3 mem_reserve(__pa(_Level3InterruptVector_text_start), __pa(_Level3InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 4 mem_reserve(__pa(_Level4InterruptVector_text_start), __pa(_Level4InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 5 mem_reserve(__pa(_Level5InterruptVector_text_start), __pa(_Level5InterruptVector_text_end)); #endif #if XCHAL_EXCM_LEVEL >= 6 mem_reserve(__pa(_Level6InterruptVector_text_start), __pa(_Level6InterruptVector_text_end)); #endif #endif /* CONFIG_VECTORS_ADDR */ #ifdef CONFIG_SECONDARY_RESET_VECTOR mem_reserve(__pa(_SecondaryResetVector_text_start), __pa(_SecondaryResetVector_text_end)); #endif parse_early_param(); bootmem_init(); kasan_init(); unflatten_and_copy_device_tree(); #ifdef CONFIG_SMP smp_init_cpus(); #endif paging_init(); zones_init(); #ifdef CONFIG_VT # if defined(CONFIG_VGA_CONSOLE) conswitchp = &vga_con; # endif #endif } static DEFINE_PER_CPU(struct cpu, cpu_data); static int __init topology_init(void) { int i; for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_data, i); cpu->hotpluggable = !!i; register_cpu(cpu, i); } return 0; } subsys_initcall(topology_init); void cpu_reset(void) { #if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU) local_irq_disable(); /* * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must * be flushed. * Way 4 is not currently used by linux. 
* Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired. * Way 5 shall be flushed and way 6 shall be set to identity mapping * on MMUv3. */ local_flush_tlb_all(); invalidate_page_directory(); #if XCHAL_HAVE_SPANNING_WAY /* MMU v3 */ { unsigned long vaddr = (unsigned long)cpu_reset; unsigned long paddr = __pa(vaddr); unsigned long tmpaddr = vaddr + SZ_512M; unsigned long tmp0, tmp1, tmp2, tmp3; /* * Find a place for the temporary mapping. It must not be * in the same 512MB region with vaddr or paddr, otherwise * there may be multihit exception either on entry to the * temporary mapping, or on entry to the identity mapping. * (512MB is the biggest page size supported by TLB.) */ while (((tmpaddr ^ paddr) & -SZ_512M) == 0) tmpaddr += SZ_512M; /* Invalidate mapping in the selected temporary area */ if (itlb_probe(tmpaddr) & BIT(ITLB_HIT_BIT)) invalidate_itlb_entry(itlb_probe(tmpaddr)); if (itlb_probe(tmpaddr + PAGE_SIZE) & BIT(ITLB_HIT_BIT)) invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE)); /* * Map two consecutive pages starting at the physical address * of this function to the temporary mapping area. */ write_itlb_entry(__pte((paddr & PAGE_MASK) | _PAGE_HW_VALID | _PAGE_HW_EXEC | _PAGE_CA_BYPASS), tmpaddr & PAGE_MASK); write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) | _PAGE_HW_VALID | _PAGE_HW_EXEC | _PAGE_CA_BYPASS), (tmpaddr & PAGE_MASK) + PAGE_SIZE); /* Reinitialize TLB */ __asm__ __volatile__ ("movi %0, 1f\n\t" "movi %3, 2f\n\t" "add %0, %0, %4\n\t" "add %3, %3, %5\n\t" "jx %0\n" /* * No literal, data or stack access * below this point */ "1:\n\t" /* Initialize *tlbcfg */ "movi %0, 0\n\t" "wsr %0, itlbcfg\n\t" "wsr %0, dtlbcfg\n\t" /* Invalidate TLB way 5 */ "movi %0, 4\n\t" "movi %1, 5\n" "1:\n\t" "iitlb %1\n\t" "idtlb %1\n\t" "add %1, %1, %6\n\t" "addi %0, %0, -1\n\t" "bnez %0, 1b\n\t" /* Initialize TLB way 6 */ "movi %0, 7\n\t" "addi %1, %9, 3\n\t" "addi %2, %9, 6\n" "1:\n\t" "witlb %1, %2\n\t" "wdtlb %1, %2\n\t" "add %1, %1, %7\n\t" "add %2, %2, %7\n\t" "addi %0, %0, -1\n\t" "bnez %0, 1b\n\t" "isync\n\t" /* Jump to identity mapping */ "jx %3\n" "2:\n\t" /* Complete way 6 initialization */ "witlb %1, %2\n\t" "wdtlb %1, %2\n\t" /* Invalidate temporary mapping */ "sub %0, %9, %7\n\t" "iitlb %0\n\t" "add %0, %0, %8\n\t" "iitlb %0" : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2), "=&a"(tmp3) : "a"(tmpaddr - vaddr), "a"(paddr - vaddr), "a"(SZ_128M), "a"(SZ_512M), "a"(PAGE_SIZE), "a"((tmpaddr + SZ_512M) & PAGE_MASK) : "memory"); } #endif #endif __asm__ __volatile__ ("movi a2, 0\n\t" "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" #if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" #endif #if XCHAL_HAVE_LOOPS "wsr a2, lcount\n\t" #endif "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" "isync\n\t" "jx %0\n\t" : : "a" (XCHAL_RESET_VECTOR_VADDR) : "a2"); for (;;) ; } void machine_restart(char * cmd) { local_irq_disable(); smp_send_stop(); do_kernel_restart(cmd); pr_err("Reboot failed -- System halted\n"); while (1) cpu_relax(); } void machine_halt(void) { local_irq_disable(); smp_send_stop(); do_kernel_power_off(); while (1) cpu_relax(); } void machine_power_off(void) { local_irq_disable(); smp_send_stop(); do_kernel_power_off(); while (1) cpu_relax(); } #ifdef CONFIG_PROC_FS /* * Display some core information through /proc/cpuinfo. 
*/ static int c_show(struct seq_file *f, void *slot) { /* high-level stuff */ seq_printf(f, "CPU count\t: %u\n" "CPU list\t: %*pbl\n" "vendor_id\t: Tensilica\n" "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" "core ID\t\t: " XCHAL_CORE_ID "\n" "build ID\t: 0x%x\n" "config ID\t: %08x:%08x\n" "byte order\t: %s\n" "cpu MHz\t\t: %lu.%02lu\n" "bogomips\t: %lu.%02lu\n", num_online_cpus(), cpumask_pr_args(cpu_online_mask), XCHAL_BUILD_UNIQUE_ID, xtensa_get_sr(SREG_EPC), xtensa_get_sr(SREG_EXCSAVE), XCHAL_HAVE_BE ? "big" : "little", ccount_freq/1000000, (ccount_freq/10000) % 100, loops_per_jiffy/(500000/HZ), (loops_per_jiffy/(5000/HZ)) % 100); seq_puts(f, "flags\t\t: " #if XCHAL_HAVE_NMI "nmi " #endif #if XCHAL_HAVE_DEBUG "debug " # if XCHAL_HAVE_OCD "ocd " # endif #if XCHAL_HAVE_TRAX "trax " #endif #if XCHAL_NUM_PERF_COUNTERS "perf " #endif #endif #if XCHAL_HAVE_DENSITY "density " #endif #if XCHAL_HAVE_BOOLEANS "boolean " #endif #if XCHAL_HAVE_LOOPS "loop " #endif #if XCHAL_HAVE_NSA "nsa " #endif #if XCHAL_HAVE_MINMAX "minmax " #endif #if XCHAL_HAVE_SEXT "sext " #endif #if XCHAL_HAVE_CLAMPS "clamps " #endif #if XCHAL_HAVE_MAC16 "mac16 " #endif #if XCHAL_HAVE_MUL16 "mul16 " #endif #if XCHAL_HAVE_MUL32 "mul32 " #endif #if XCHAL_HAVE_MUL32_HIGH "mul32h " #endif #if XCHAL_HAVE_FP "fpu " #endif #if XCHAL_HAVE_S32C1I "s32c1i " #endif #if XCHAL_HAVE_EXCLUSIVE "exclusive " #endif "\n"); /* Registers. */ seq_printf(f,"physical aregs\t: %d\n" "misc regs\t: %d\n" "ibreak\t\t: %d\n" "dbreak\t\t: %d\n" "perf counters\t: %d\n", XCHAL_NUM_AREGS, XCHAL_NUM_MISC_REGS, XCHAL_NUM_IBREAK, XCHAL_NUM_DBREAK, XCHAL_NUM_PERF_COUNTERS); /* Interrupt. */ seq_printf(f,"num ints\t: %d\n" "ext ints\t: %d\n" "int levels\t: %d\n" "timers\t\t: %d\n" "debug level\t: %d\n", XCHAL_NUM_INTERRUPTS, XCHAL_NUM_EXTINTERRUPTS, XCHAL_NUM_INTLEVELS, XCHAL_NUM_TIMERS, XCHAL_DEBUGLEVEL); /* Cache */ seq_printf(f,"icache line size: %d\n" "icache ways\t: %d\n" "icache size\t: %d\n" "icache flags\t: " #if XCHAL_ICACHE_LINE_LOCKABLE "lock " #endif "\n" "dcache line size: %d\n" "dcache ways\t: %d\n" "dcache size\t: %d\n" "dcache flags\t: " #if XCHAL_DCACHE_IS_WRITEBACK "writeback " #endif #if XCHAL_DCACHE_LINE_LOCKABLE "lock " #endif "\n", XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, XCHAL_ICACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, XCHAL_DCACHE_SIZE); return 0; } /* * We show only CPU #0 info. */ static void * c_start(struct seq_file *f, loff_t *pos) { return (*pos == 0) ? (void *)1 : NULL; } static void * c_next(struct seq_file *f, void *v, loff_t *pos) { ++*pos; return c_start(f, pos); } static void c_stop(struct seq_file *f, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show, }; #endif /* CONFIG_PROC_FS */
linux-master
arch/xtensa/kernel/setup.c
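parse_bootparam() in the setup.c record above walks a list of variable-sized tags: each entry is a fixed header followed by size payload bytes, and the next tag starts immediately after that payload. The sketch below mirrors the pointer arithmetic with an invented tag layout; the real bp_tag_t is defined in asm/bootparam.h, so the field names, widths and tag ids here are assumptions made only for illustration.

/* Standalone sketch of variable-sized boot-tag walking, in the spirit of
 * parse_bootparam(). Assumes no inter-member padding, which holds for
 * these sizes on common ABIs. */
#include <stdio.h>
#include <stdint.h>

struct demo_tag {
	uint16_t id;
	uint16_t size;          /* payload bytes that follow this header */
};

#define TAG_FIRST   0x1001   /* invented ids */
#define TAG_CMDLINE 0x1002
#define TAG_LAST    0x10ff

/* Hand-built list: FIRST (no payload), CMDLINE with 8 payload bytes, LAST. */
static const struct {
	struct demo_tag first;
	struct demo_tag cmdline;
	char payload[8];
	struct demo_tag last;
} list = {
	{ TAG_FIRST,   0 },
	{ TAG_CMDLINE, 8 },
	"console",
	{ TAG_LAST,    0 },
};

/* Step over the header plus its payload, mirroring
 * (bp_tag_t *)((unsigned long)(tag + 1) + tag->size) above. */
static const struct demo_tag *next_tag(const struct demo_tag *tag)
{
	return (const struct demo_tag *)((const char *)(tag + 1) + tag->size);
}

int main(void)
{
	const struct demo_tag *tag;

	for (tag = &list.first; tag->id != TAG_LAST; tag = next_tag(tag))
		printf("tag 0x%04x, %u payload bytes\n",
		       (unsigned)tag->id, (unsigned)tag->size);
	return 0;
}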
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/suspend.h> #include <asm/coprocessor.h> int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); } void notrace save_processor_state(void) { WARN_ON(num_online_cpus() != 1); #if XTENSA_HAVE_COPROCESSORS local_coprocessors_flush_release_all(); #endif } void notrace restore_processor_state(void) { }
linux-master
arch/xtensa/kernel/hibernate.c
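pfn_is_nosave() in the hibernate.c record above turns the byte range [__nosave_begin, __nosave_end) into a page-frame range by rounding the start down and the end up to page boundaries. The sketch below restates PFN_DOWN/PFN_UP with a 4 KiB page size chosen only for illustration:

/* Standalone restatement of the PFN_DOWN/PFN_UP rounding in pfn_is_nosave(). */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static int pfn_in_range(unsigned long pfn, unsigned long begin, unsigned long end)
{
	return pfn >= PFN_DOWN(begin) && pfn < PFN_UP(end);
}

int main(void)
{
	/* A nosave region from 0x3000 to 0x5800 covers frames 3, 4 and 5. */
	printf("%d %d %d\n",
	       pfn_in_range(3, 0x3000, 0x5800),   /* 1 */
	       pfn_in_range(5, 0x3000, 0x5800),   /* 1: 0x5800 rounds up to 6 */
	       pfn_in_range(6, 0x3000, 0x5800));  /* 0 */
	return 0;
}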
/* * arch/xtensa/kernel/time.c * * Timer and clock support. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2005 Tensilica Inc. * * Chris Zankel <[email protected]> */ #include <linux/clk.h> #include <linux/of_clk.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/profile.h> #include <linux/delay.h> #include <linux/irqdomain.h> #include <linux/sched_clock.h> #include <asm/timex.h> #include <asm/platform.h> unsigned long ccount_freq; /* ccount Hz */ EXPORT_SYMBOL(ccount_freq); static u64 ccount_read(struct clocksource *cs) { return (u64)get_ccount(); } static u64 notrace ccount_sched_clock_read(void) { return get_ccount(); } static struct clocksource ccount_clocksource = { .name = "ccount", .rating = 200, .read = ccount_read, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; struct ccount_timer { struct clock_event_device evt; int irq_enabled; char name[24]; }; static int ccount_timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { unsigned long flags, next; int ret = 0; local_irq_save(flags); next = get_ccount() + delta; set_linux_timer(next); if (next - get_ccount() > delta) ret = -ETIME; local_irq_restore(flags); return ret; } /* * There is no way to disable the timer interrupt at the device level, * only at the intenable register itself. Since enable_irq/disable_irq * calls are nested, we need to make sure that these calls are * balanced. */ static int ccount_timer_shutdown(struct clock_event_device *evt) { struct ccount_timer *timer = container_of(evt, struct ccount_timer, evt); if (timer->irq_enabled) { disable_irq_nosync(evt->irq); timer->irq_enabled = 0; } return 0; } static int ccount_timer_set_oneshot(struct clock_event_device *evt) { struct ccount_timer *timer = container_of(evt, struct ccount_timer, evt); if (!timer->irq_enabled) { enable_irq(evt->irq); timer->irq_enabled = 1; } return 0; } static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = { .evt = { .features = CLOCK_EVT_FEAT_ONESHOT, .rating = 300, .set_next_event = ccount_timer_set_next_event, .set_state_shutdown = ccount_timer_shutdown, .set_state_oneshot = ccount_timer_set_oneshot, .tick_resume = ccount_timer_set_oneshot, }, }; static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt; set_linux_timer(get_linux_timer()); evt->event_handler(evt); return IRQ_HANDLED; } void local_timer_setup(unsigned cpu) { struct ccount_timer *timer = &per_cpu(ccount_timer, cpu); struct clock_event_device *clockevent = &timer->evt; timer->irq_enabled = 1; snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu); clockevent->name = timer->name; clockevent->cpumask = cpumask_of(cpu); clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT); if (WARN(!clockevent->irq, "error: can't map timer irq")) return; clockevents_config_and_register(clockevent, ccount_freq, 0xf, 0xffffffff); } #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_OF static void __init calibrate_ccount(void) { struct device_node *cpu; struct clk *clk; cpu = of_find_compatible_node(NULL, NULL, "cdns,xtensa-cpu"); if (cpu) { clk = of_clk_get(cpu, 0); of_node_put(cpu); if (!IS_ERR(clk)) { ccount_freq = 
clk_get_rate(clk); return; } else { pr_warn("%s: CPU input clock not found\n", __func__); } } else { pr_warn("%s: CPU node not found in the device tree\n", __func__); } platform_calibrate_ccount(); } #else static inline void calibrate_ccount(void) { platform_calibrate_ccount(); } #endif #endif void __init time_init(void) { int irq; of_clk_init(NULL); #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT pr_info("Calibrating CPU frequency "); calibrate_ccount(); pr_cont("%d.%02d MHz\n", (int)ccount_freq / 1000000, (int)(ccount_freq / 10000) % 100); #else ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; #endif WARN(!ccount_freq, "%s: CPU clock frequency is not set up correctly\n", __func__); clocksource_register_hz(&ccount_clocksource, ccount_freq); local_timer_setup(0); irq = this_cpu_ptr(&ccount_timer)->evt.irq; if (request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL)) pr_err("Failed to request irq %d (timer)\n", irq); sched_clock_register(ccount_sched_clock_read, 32, ccount_freq); timer_probe(); } #ifndef CONFIG_GENERIC_CALIBRATE_DELAY void calibrate_delay(void) { loops_per_jiffy = ccount_freq / HZ; pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n", loops_per_jiffy / (1000000 / HZ), (loops_per_jiffy / (10000 / HZ)) % 100); } #endif
linux-master
arch/xtensa/kernel/time.c
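ccount_timer_set_next_event() in the time.c record above programs the compare register and then checks next - get_ccount() > delta to detect that the free-running 32-bit cycle counter has already run past the deadline, returning -ETIME so the clockevents core retries. A standalone sketch of why that single unsigned comparison covers wraparound; the counter values are made up.

/* Sketch of the wrap-safe "did we miss the deadline" test. */
#include <stdio.h>
#include <stdint.h>

/* Returns 1 when the re-read counter is no longer between next - delta and
 * next (mod 2^32), i.e. it already passed the programmed compare value. */
static int missed_deadline(uint32_t now, uint32_t next, uint32_t delta)
{
	return next - now > delta;
}

int main(void)
{
	uint32_t now = 0xfffffff0, delta = 0x20;
	uint32_t next = now + delta;                     /* wraps to 0x10 */

	printf("%d\n", missed_deadline(now + 5, next, delta));  /* 0: still ahead */
	printf("%d\n", missed_deadline(next + 1, next, delta)); /* 1: passed next */
	return 0;
}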
// SPDX-License-Identifier: GPL-2.0-only /* * Xtensa Performance Monitor Module driver * See Tensilica Debug User's Guide for PMU registers documentation. * * Copyright (C) 2015 Cadence Design Systems Inc. */ #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #include <asm/core.h> #include <asm/processor.h> #include <asm/stacktrace.h> #define XTENSA_HWVERSION_RG_2015_0 260000 #if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0 #define XTENSA_PMU_ERI_BASE 0x00101000 #else #define XTENSA_PMU_ERI_BASE 0x00001000 #endif /* Global control/status for all perf counters */ #define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE /* Perf counter values */ #define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4) /* Perf counter control registers */ #define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4) /* Perf counter status registers */ #define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4) #define XTENSA_PMU_PMG_PMEN 0x1 #define XTENSA_PMU_COUNTER_MASK 0xffffffffULL #define XTENSA_PMU_COUNTER_MAX 0x7fffffff #define XTENSA_PMU_PMCTRL_INTEN 0x00000001 #define XTENSA_PMU_PMCTRL_KRNLCNT 0x00000008 #define XTENSA_PMU_PMCTRL_TRACELEVEL 0x000000f0 #define XTENSA_PMU_PMCTRL_SELECT_SHIFT 8 #define XTENSA_PMU_PMCTRL_SELECT 0x00001f00 #define XTENSA_PMU_PMCTRL_MASK_SHIFT 16 #define XTENSA_PMU_PMCTRL_MASK 0xffff0000 #define XTENSA_PMU_MASK(select, mask) \ (((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \ ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \ XTENSA_PMU_PMCTRL_TRACELEVEL | \ XTENSA_PMU_PMCTRL_INTEN) #define XTENSA_PMU_PMSTAT_OVFL 0x00000001 #define XTENSA_PMU_PMSTAT_INTASRT 0x00000010 struct xtensa_pmu_events { /* Array of events currently on this core */ struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; /* Bitmap of used hardware counters */ unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)]; }; static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events); static const u32 xtensa_hw_ctl[] = { [PERF_COUNT_HW_CPU_CYCLES] = XTENSA_PMU_MASK(0, 0x1), [PERF_COUNT_HW_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0xffff), [PERF_COUNT_HW_CACHE_REFERENCES] = XTENSA_PMU_MASK(10, 0x1), [PERF_COUNT_HW_CACHE_MISSES] = XTENSA_PMU_MASK(12, 0x1), /* Taken and non-taken branches + taken loop ends */ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XTENSA_PMU_MASK(2, 0x490), /* Instruction-related + other global stall cycles */ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XTENSA_PMU_MASK(4, 0x1ff), /* Data-related global stall cycles */ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = XTENSA_PMU_MASK(3, 0x1ff), }; #define C(_x) PERF_COUNT_HW_CACHE_##_x static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(10, 0x1), [C(RESULT_MISS)] = XTENSA_PMU_MASK(10, 0x2), }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(11, 0x1), [C(RESULT_MISS)] = XTENSA_PMU_MASK(11, 0x2), }, }, [C(L1I)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(8, 0x1), [C(RESULT_MISS)] = XTENSA_PMU_MASK(8, 0x2), }, }, [C(DTLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(9, 0x1), [C(RESULT_MISS)] = XTENSA_PMU_MASK(9, 0x8), }, }, [C(ITLB)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = XTENSA_PMU_MASK(7, 0x1), [C(RESULT_MISS)] = XTENSA_PMU_MASK(7, 0x8), }, }, }; static int xtensa_pmu_cache_event(u64 config) { unsigned int cache_type, cache_op, cache_result; int ret; cache_type = (config >> 0) & 0xff; cache_op = (config >> 8) 
& 0xff; cache_result = (config >> 16) & 0xff; if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) || cache_op >= C(OP_MAX) || cache_result >= C(RESULT_MAX)) return -EINVAL; ret = xtensa_cache_ctl[cache_type][cache_op][cache_result]; if (ret == 0) return -EINVAL; return ret; } static inline uint32_t xtensa_pmu_read_counter(int idx) { return get_er(XTENSA_PMU_PM(idx)); } static inline void xtensa_pmu_write_counter(int idx, uint32_t v) { set_er(v, XTENSA_PMU_PM(idx)); } static void xtensa_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { uint64_t prev_raw_count, new_raw_count; int64_t delta; do { prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = xtensa_pmu_read_counter(event->hw.idx); } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count); delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK; local64_add(delta, &event->count); local64_sub(delta, &hwc->period_left); } static bool xtensa_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { bool rc = false; s64 left; if (!is_sampling_event(event)) { left = XTENSA_PMU_COUNTER_MAX; } else { s64 period = hwc->sample_period; left = local64_read(&hwc->period_left); if (left <= -period) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; rc = true; } else if (left <= 0) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; rc = true; } if (left > XTENSA_PMU_COUNTER_MAX) left = XTENSA_PMU_COUNTER_MAX; } local64_set(&hwc->prev_count, -left); xtensa_pmu_write_counter(idx, -left); perf_event_update_userpage(event); return rc; } static void xtensa_pmu_enable(struct pmu *pmu) { set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); } static void xtensa_pmu_disable(struct pmu *pmu) { set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG); } static int xtensa_pmu_event_init(struct perf_event *event) { int ret; switch (event->attr.type) { case PERF_TYPE_HARDWARE: if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || xtensa_hw_ctl[event->attr.config] == 0) return -EINVAL; event->hw.config = xtensa_hw_ctl[event->attr.config]; return 0; case PERF_TYPE_HW_CACHE: ret = xtensa_pmu_cache_event(event->attr.config); if (ret < 0) return ret; event->hw.config = ret; return 0; case PERF_TYPE_RAW: /* Not 'previous counter' select */ if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) == (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT)) return -EINVAL; event->hw.config = (event->attr.config & (XTENSA_PMU_PMCTRL_KRNLCNT | XTENSA_PMU_PMCTRL_TRACELEVEL | XTENSA_PMU_PMCTRL_SELECT | XTENSA_PMU_PMCTRL_MASK)) | XTENSA_PMU_PMCTRL_INTEN; return 0; default: return -ENOENT; } } /* * Starts/Stops a counter present on the PMU. The PMI handler * should stop the counter when perf_event_overflow() returns * !0. ->start() will be used to continue. 
*/ static void xtensa_pmu_start(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (WARN_ON_ONCE(idx == -1)) return; if (flags & PERF_EF_RELOAD) { WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); xtensa_perf_event_set_period(event, hwc, idx); } hwc->state = 0; set_er(hwc->config, XTENSA_PMU_PMCTRL(idx)); } static void xtensa_pmu_stop(struct perf_event *event, int flags) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (!(hwc->state & PERF_HES_STOPPED)) { set_er(0, XTENSA_PMU_PMCTRL(idx)); set_er(get_er(XTENSA_PMU_PMSTAT(idx)), XTENSA_PMU_PMSTAT(idx)); hwc->state |= PERF_HES_STOPPED; } if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { xtensa_perf_event_update(event, &event->hw, idx); event->hw.state |= PERF_HES_UPTODATE; } } /* * Adds/Removes a counter to/from the PMU, can be done inside * a transaction, see the ->*_txn() methods. */ static int xtensa_pmu_add(struct perf_event *event, int flags) { struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; if (__test_and_set_bit(idx, ev->used_mask)) { idx = find_first_zero_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS); if (idx == XCHAL_NUM_PERF_COUNTERS) return -EAGAIN; __set_bit(idx, ev->used_mask); hwc->idx = idx; } ev->event[idx] = event; hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; if (flags & PERF_EF_START) xtensa_pmu_start(event, PERF_EF_RELOAD); perf_event_update_userpage(event); return 0; } static void xtensa_pmu_del(struct perf_event *event, int flags) { struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); xtensa_pmu_stop(event, PERF_EF_UPDATE); __clear_bit(event->hw.idx, ev->used_mask); perf_event_update_userpage(event); } static void xtensa_pmu_read(struct perf_event *event) { xtensa_perf_event_update(event, &event->hw, event->hw.idx); } static int callchain_trace(struct stackframe *frame, void *data) { struct perf_callchain_entry_ctx *entry = data; perf_callchain_store(entry, frame->pc); return 0; } void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { xtensa_backtrace_kernel(regs, entry->max_stack, callchain_trace, NULL, entry); } void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { xtensa_backtrace_user(regs, entry->max_stack, callchain_trace, entry); } void perf_event_print_debug(void) { unsigned long flags; unsigned i; local_irq_save(flags); pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(), get_er(XTENSA_PMU_PMG)); for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n", i, get_er(XTENSA_PMU_PM(i)), i, get_er(XTENSA_PMU_PMCTRL(i)), i, get_er(XTENSA_PMU_PMSTAT(i))); local_irq_restore(flags); } irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id) { irqreturn_t rc = IRQ_NONE; struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events); unsigned i; for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) { uint32_t v = get_er(XTENSA_PMU_PMSTAT(i)); struct perf_event *event = ev->event[i]; struct hw_perf_event *hwc = &event->hw; u64 last_period; if (!(v & XTENSA_PMU_PMSTAT_OVFL)) continue; set_er(v, XTENSA_PMU_PMSTAT(i)); xtensa_perf_event_update(event, hwc, i); last_period = hwc->last_period; if (xtensa_perf_event_set_period(event, hwc, i)) { struct perf_sample_data data; struct pt_regs *regs = get_irq_regs(); perf_sample_data_init(&data, 0, last_period); if (perf_event_overflow(event, &data, regs)) 
xtensa_pmu_stop(event, 0); } rc = IRQ_HANDLED; } return rc; } static struct pmu xtensa_pmu = { .pmu_enable = xtensa_pmu_enable, .pmu_disable = xtensa_pmu_disable, .event_init = xtensa_pmu_event_init, .add = xtensa_pmu_add, .del = xtensa_pmu_del, .start = xtensa_pmu_start, .stop = xtensa_pmu_stop, .read = xtensa_pmu_read, }; static int xtensa_pmu_setup(unsigned int cpu) { unsigned i; set_er(0, XTENSA_PMU_PMG); for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) { set_er(0, XTENSA_PMU_PMCTRL(i)); set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); } return 0; } static int __init xtensa_pmu_init(void) { int ret; int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING, "perf/xtensa:starting", xtensa_pmu_setup, NULL); if (ret) { pr_err("xtensa_pmu: failed to register CPU-hotplug.\n"); return ret; } #if XTENSA_FAKE_NMI enable_irq(irq); #else ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU, "pmu", NULL); if (ret < 0) return ret; #endif ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW); if (ret) free_irq(irq, NULL); return ret; } early_initcall(xtensa_pmu_init);
linux-master
arch/xtensa/kernel/perf_event.c
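xtensa_perf_event_update() in the perf_event.c record above folds a free-running 32-bit hardware counter into perf's 64-bit count: it snapshots the previous and new raw values (guarded by a cmpxchg loop) and accumulates (new - prev) & XTENSA_PMU_COUNTER_MASK, which stays correct across a single 32-bit wrap. A standalone sketch of just the delta step, without the per-CPU or cmpxchg machinery:

/* Sketch of the wrap-safe counter delta accumulation. */
#include <stdio.h>
#include <stdint.h>

#define COUNTER_MASK 0xffffffffULL

static uint64_t accumulate(uint64_t count, uint32_t prev_raw, uint32_t new_raw)
{
	/* The 32-bit subtraction wraps the same way the hardware counter
	 * does, so the masked difference is the true number of events even
	 * if the counter rolled over once between the two reads. */
	return count + ((uint64_t)(new_raw - prev_raw) & COUNTER_MASK);
}

int main(void)
{
	uint64_t count = 0;

	count = accumulate(count, 0x00000100, 0x00000180);  /* +0x80  */
	count = accumulate(count, 0xffffff00, 0x00000040);  /* +0x140 across wrap */
	printf("%#llx\n", (unsigned long long)count);       /* 0x1c0 */
	return 0;
}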
// SPDX-License-Identifier: GPL-2.0 // Copyright (C) 2018 Cadence Design Systems Inc. #include <linux/cpu.h> #include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/memory.h> #include <linux/stop_machine.h> #include <linux/types.h> #include <asm/cacheflush.h> #define J_OFFSET_MASK 0x0003ffff #define J_SIGN_MASK (~(J_OFFSET_MASK >> 1)) #if defined(__XTENSA_EL__) #define J_INSN 0x6 #define NOP_INSN 0x0020f0 #elif defined(__XTENSA_EB__) #define J_INSN 0x60000000 #define NOP_INSN 0x0f020000 #else #error Unsupported endianness. #endif struct patch { atomic_t cpu_count; unsigned long addr; size_t sz; const void *data; }; static void local_patch_text(unsigned long addr, const void *data, size_t sz) { memcpy((void *)addr, data, sz); local_flush_icache_range(addr, addr + sz); } static int patch_text_stop_machine(void *data) { struct patch *patch = data; if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { local_patch_text(patch->addr, patch->data, patch->sz); atomic_inc(&patch->cpu_count); } else { while (atomic_read(&patch->cpu_count) <= num_online_cpus()) cpu_relax(); __invalidate_icache_range(patch->addr, patch->sz); } return 0; } static void patch_text(unsigned long addr, const void *data, size_t sz) { if (IS_ENABLED(CONFIG_SMP)) { struct patch patch = { .cpu_count = ATOMIC_INIT(0), .addr = addr, .sz = sz, .data = data, }; stop_machine_cpuslocked(patch_text_stop_machine, &patch, cpu_online_mask); } else { unsigned long flags; local_irq_save(flags); local_patch_text(addr, data, sz); local_irq_restore(flags); } } void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { u32 d = (jump_entry_target(e) - (jump_entry_code(e) + 4)); u32 insn; /* Jump only works within 128K of the J instruction. */ BUG_ON(!((d & J_SIGN_MASK) == 0 || (d & J_SIGN_MASK) == J_SIGN_MASK)); if (type == JUMP_LABEL_JMP) { #if defined(__XTENSA_EL__) insn = ((d & J_OFFSET_MASK) << 6) | J_INSN; #elif defined(__XTENSA_EB__) insn = ((d & J_OFFSET_MASK) << 8) | J_INSN; #endif } else { insn = NOP_INSN; } patch_text(jump_entry_code(e), &insn, JUMP_LABEL_NOP_SIZE); }
linux-master
arch/xtensa/kernel/jump_label.c
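arch_jump_label_transform() in the jump_label.c record above patches a NOP into a J instruction whose signed 18-bit offset is relative to the instruction address plus 4; the BUG_ON requires every bit at or above the offset's sign position to agree, which is the same as saying the displacement fits in roughly plus or minus 128 KiB. A standalone sketch of that range test, with illustrative addresses:

/* Standalone sketch of the J-offset range check from jump_label.c. */
#include <stdio.h>
#include <stdint.h>

#define J_OFFSET_MASK 0x0003ffff                /* 18-bit offset field */
#define J_SIGN_MASK   (~(J_OFFSET_MASK >> 1))   /* sign bit and everything above */

/* Valid only if bits 17..31 of the displacement are all zero or all one,
 * i.e. it fits in a signed 18-bit field. */
static int j_in_range(uint32_t code, uint32_t target)
{
	uint32_t d = target - (code + 4);

	return (d & J_SIGN_MASK) == 0 || (d & J_SIGN_MASK) == J_SIGN_MASK;
}

int main(void)
{
	printf("%d\n", j_in_range(0x10000000, 0x10000800)); /* 1: short forward  */
	printf("%d\n", j_in_range(0x10000000, 0x0fff8000)); /* 1: short backward */
	printf("%d\n", j_in_range(0x10000000, 0x10080000)); /* 0: beyond +128 KiB */
	return 0;
}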