python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from "arch/powerpc/platforms/pseries/pci_dlpar.c"
*
* Copyright (C) 2003 Linda Xie <[email protected]>
* Copyright (C) 2005 International Business Machines
*
* Updates, 2005, John Rose <[email protected]>
* Updates, 2005, Linas Vepstas <[email protected]>
* Updates, 2013, Gavin Shan <[email protected]>
*/
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/of.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
static struct pci_bus *find_bus_among_children(struct pci_bus *bus,
struct device_node *dn)
{
struct pci_bus *child = NULL;
struct pci_bus *tmp;
if (pci_bus_to_OF_node(bus) == dn)
return bus;
list_for_each_entry(tmp, &bus->children, node) {
child = find_bus_among_children(tmp, dn);
if (child)
break;
}
return child;
}
struct pci_bus *pci_find_bus_by_node(struct device_node *dn)
{
struct pci_dn *pdn = PCI_DN(dn);
if (!pdn || !pdn->phb || !pdn->phb->bus)
return NULL;
return find_bus_among_children(pdn->phb->bus, dn);
}
EXPORT_SYMBOL_GPL(pci_find_bus_by_node);
/**
* pcibios_release_device - release PCI device
* @dev: PCI device
*
* The function is called before releasing the indicated PCI device.
*/
void pcibios_release_device(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
struct pci_dn *pdn = pci_get_pdn(dev);
if (phb->controller_ops.release_device)
phb->controller_ops.release_device(dev);
/* free()ing the pci_dn has been deferred to us, do it now */
if (pdn && (pdn->flags & PCI_DN_FLAG_DEAD)) {
pci_dbg(dev, "freeing dead pdn\n");
kfree(pdn);
}
}
/**
* pci_hp_remove_devices - remove all devices under this bus
* @bus: the indicated PCI bus
*
* Remove all of the PCI devices under this bus both from the
* linux pci device tree, and from the powerpc EEH address cache.
*/
void pci_hp_remove_devices(struct pci_bus *bus)
{
struct pci_dev *dev, *tmp;
struct pci_bus *child_bus;
/* First go down child busses */
list_for_each_entry(child_bus, &bus->children, node)
pci_hp_remove_devices(child_bus);
pr_debug("PCI: Removing devices on bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
pr_debug(" Removing %s...\n", pci_name(dev));
pci_stop_and_remove_bus_device(dev);
}
}
EXPORT_SYMBOL_GPL(pci_hp_remove_devices);
/**
* pci_hp_add_devices - adds new pci devices to bus
* @bus: the indicated PCI bus
*
* This routine will find and fixup new pci devices under
* the indicated bus. This routine presumes that there
* might already be some devices under this bridge, so
* it carefully tries to add only new devices. (And that
* is how this routine differs from other, similar pcibios
* routines.)
*/
void pci_hp_add_devices(struct pci_bus *bus)
{
int slotno, mode, max;
struct pci_dev *dev;
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
if (phb->controller_ops.probe_mode)
mode = phb->controller_ops.probe_mode(bus);
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
of_rescan_bus(dn, bus);
} else if (mode == PCI_PROBE_NORMAL &&
dn->child && PCI_DN(dn->child)) {
/*
* Use legacy probe. In the partial hotplug case, we
* probably have grandchildren devices unplugged. So
* we don't check the return value from pci_scan_slot() so
* that we rescan fully, all the way down, and pick them up.
* They may have been removed during the partial hotplug.
*/
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
max = bus->busn_res.start;
/*
* Scan bridges that are already configured. We don't touch
* them unless they are misconfigured (which will be done in
* the second scan below).
*/
for_each_pci_bridge(dev, bus)
max = pci_scan_bridge(bus, dev, max, 0);
/* Scan bridges that need to be reconfigured */
for_each_pci_bridge(dev, bus)
max = pci_scan_bridge(bus, dev, max, 1);
}
pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pci_hp_add_devices);
| linux-master | arch/powerpc/kernel/pci-hotplug.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common boot and setup code for both 32-bit and 64-bit.
* Extracted from arch/powerpc/kernel/setup_64.c.
*
* Copyright (C) 2001 PPC64 Team, IBM Corp
*/
#undef DEBUG
#include <linux/export.h>
#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/seq_buf.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>
#include <asm/archrandom.h>
#include <asm/fadump.h>
#include <asm/udbg.h>
#include <asm/hugetlb.h>
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
#include <asm/mce.h>
#include "setup.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
/* The main machine-dep calls structure
*/
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);
#ifdef CONFIG_PPC64
int boot_cpu_hwid = -1;
#endif
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.
*/
int dcache_bsize;
int icache_bsize;
/*
* This still seems to be needed... -- paulus
*/
struct screen_info screen_info = {
.orig_x = 0,
.orig_y = 25,
.orig_video_cols = 80,
.orig_video_lines = 25,
.orig_video_isVGA = 1,
.orig_video_points = 16
};
#if defined(CONFIG_FB_VGA16_MODULE)
EXPORT_SYMBOL(screen_info);
#endif
/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);
#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif
#ifdef CONFIG_CRASH_CORE
/* This keeps a track of which one is the crashing cpu. */
int crashing_cpu = -1;
#endif
/* also used by kexec */
void machine_shutdown(void)
{
/*
* if fadump is active, cleanup the fadump registration before we
* shutdown.
*/
fadump_cleanup();
if (ppc_md.machine_shutdown)
ppc_md.machine_shutdown();
}
static void machine_hang(void)
{
pr_emerg("System Halted, OK to turn off power\n");
local_irq_disable();
while (1)
;
}
void machine_restart(char *cmd)
{
machine_shutdown();
if (ppc_md.restart)
ppc_md.restart(cmd);
smp_send_stop();
do_kernel_restart(cmd);
mdelay(1000);
machine_hang();
}
void machine_power_off(void)
{
machine_shutdown();
do_kernel_power_off();
smp_send_stop();
machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
return 1;
return 0;
}
EXPORT_SYMBOL(arch_get_random_seed_longs);
void machine_halt(void)
{
machine_shutdown();
if (ppc_md.halt)
ppc_md.halt();
smp_send_stop();
machine_hang();
}
#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif
static void show_cpuinfo_summary(struct seq_file *m)
{
struct device_node *root;
const char *model = NULL;
unsigned long bogosum = 0;
int i;
if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
for_each_online_cpu(i)
bogosum += loops_per_jiffy;
seq_printf(m, "total bogomips\t: %lu.%02lu\n",
bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
}
seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
if (ppc_md.name)
seq_printf(m, "platform\t: %s\n", ppc_md.name);
root = of_find_node_by_path("/");
if (root)
model = of_get_property(root, "model", NULL);
if (model)
seq_printf(m, "model\t\t: %s\n", model);
of_node_put(root);
if (ppc_md.show_cpuinfo != NULL)
ppc_md.show_cpuinfo(m);
/* Display the amount of memory */
if (IS_ENABLED(CONFIG_PPC32))
seq_printf(m, "Memory\t\t: %d MB\n",
(unsigned int)(total_memory / (1024 * 1024)));
}
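/*
 * Illustrative sketch, not part of the original file: the BogoMIPS value
 * printed above is loops_per_jiffy scaled by HZ/500000, shown as an integer
 * part plus two decimal places. A hypothetical stand-alone helper doing the
 * same arithmetic:
 */
static inline void example_bogomips(unsigned long lpj, unsigned long hz,
				    unsigned long *whole, unsigned long *frac)
{
	*whole = lpj / (500000 / hz);		/* integer BogoMIPS */
	*frac = lpj / (5000 / hz) % 100;	/* two decimal digits */
}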
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long cpu_id = (unsigned long)v - 1;
unsigned int pvr;
unsigned long proc_freq;
unsigned short maj;
unsigned short min;
#ifdef CONFIG_SMP
pvr = per_cpu(cpu_pvr, cpu_id);
#else
pvr = mfspr(SPRN_PVR);
#endif
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF;
seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);
if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
seq_puts(m, cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
seq_puts(m, ", altivec supported");
seq_putc(m, '\n');
#ifdef CONFIG_TAU
if (cpu_has_feature(CPU_FTR_TAU)) {
if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
/* more straightforward, but potentially misleading */
seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
cpu_temp(cpu_id));
} else {
/* show the actual temp sensor range */
u32 temp;
temp = cpu_temp_both(cpu_id);
seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
temp & 0xff, temp >> 16);
}
}
#endif /* CONFIG_TAU */
/*
* Platforms that have variable clock rates should implement
* the method ppc_md.get_proc_freq() that reports the clock
* rate of a given cpu. The rest can use ppc_proc_freq to
* report the clock rate that is the same across all cpus.
*/
if (ppc_md.get_proc_freq)
proc_freq = ppc_md.get_proc_freq(cpu_id);
else
proc_freq = ppc_proc_freq;
if (proc_freq)
seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
proc_freq / 1000000, proc_freq % 1000000);
/* If we are a Freescale core do a simple check so
* we don't have to keep adding cases in the future */
if (PVR_VER(pvr) & 0x8000) {
switch (PVR_VER(pvr)) {
case 0x8000: /* 7441/7450/7451, Voyager */
case 0x8001: /* 7445/7455, Apollo 6 */
case 0x8002: /* 7447/7457, Apollo 7 */
case 0x8003: /* 7447A, Apollo 7 PM */
case 0x8004: /* 7448, Apollo 8 */
case 0x800c: /* 7410, Nitro */
maj = ((pvr >> 8) & 0xF);
min = PVR_MIN(pvr);
break;
default: /* e500/book-e */
maj = PVR_MAJ(pvr);
min = PVR_MIN(pvr);
break;
}
} else {
switch (PVR_VER(pvr)) {
case 0x1008: /* 740P/750P ?? */
maj = ((pvr >> 8) & 0xFF) - 1;
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;
default:
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF;
break;
}
}
seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
maj, min, PVR_VER(pvr), PVR_REV(pvr));
if (IS_ENABLED(CONFIG_PPC32))
seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
seq_putc(m, '\n');
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
show_cpuinfo_summary(m);
return 0;
}
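/*
 * Illustrative sketch, not part of the original file: the default case above
 * splits the PVR into a 16-bit version field and an 8-bit major/minor
 * revision pair. A hypothetical stand-alone helper using the same shifts:
 */
static inline void example_pvr_decode(unsigned int pvr, unsigned int *ver,
				      unsigned short *maj, unsigned short *min)
{
	*ver = (pvr >> 16) & 0xFFFF;	/* processor version, as PVR_VER() */
	*maj = (pvr >> 8) & 0xFF;	/* major revision (default case)   */
	*min = pvr & 0xFF;		/* minor revision (default case)   */
}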
static void *c_start(struct seq_file *m, loff_t *pos)
{
if (*pos == 0) /* just in case, cpu 0 is not the first */
*pos = cpumask_first(cpu_online_mask);
else
*pos = cpumask_next(*pos - 1, cpu_online_mask);
if ((*pos) < nr_cpu_ids)
return (void *)(unsigned long)(*pos + 1);
return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n",
initrd_start, initrd_end);
/* If we were passed an initrd, set the ROOT_DEV properly if the values
* look sensible. If not, clear initrd reference.
*/
if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
initrd_end > initrd_start)
ROOT_DEV = Root_RAM0;
else
initrd_start = initrd_end = 0;
if (initrd_start)
pr_info("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}
#ifdef CONFIG_SMP
int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
EXPORT_SYMBOL_GPL(threads_core_mask);
static void __init cpu_init_thread_core_maps(int tpc)
{
int i;
threads_per_core = tpc;
threads_per_subcore = tpc;
cpumask_clear(&threads_core_mask);
/* This implementation only supports power of 2 number of threads
* for simplicity and performance
*/
threads_shift = ilog2(tpc);
BUG_ON(tpc != (1 << threads_shift));
for (i = 0; i < tpc; i++)
cpumask_set_cpu(i, &threads_core_mask);
printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
tpc, tpc > 1 ? "s" : "");
printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}
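/*
 * Illustrative sketch, not part of the original file: because tpc is required
 * to be a power of two above, threads_shift is just log2(tpc) and the mask of
 * the threads belonging to core 0 is the low tpc bits. Hypothetical names,
 * assuming 1 <= tpc < BITS_PER_LONG:
 */
static inline void example_thread_maps(unsigned int tpc, unsigned int *shift,
				       unsigned long *mask)
{
	unsigned int s = 0;

	while ((1u << s) < tpc)		/* ilog2() of a power-of-two tpc */
		s++;
	*shift = s;
	*mask = (1ul << tpc) - 1;	/* bits 0..tpc-1 set */
}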
u32 *cpu_to_phys_id = NULL;
/**
* setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_mask
* cpu_present_mask
*
* Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to nr_cpu_ids rather than NR_CPUS.
*
* We do not initialize the online map here; cpus set their own bits in
* cpu_online_mask as they come up.
*
* This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this.
*
* While we're here, we may as well set the "physical" cpu ids in the paca.
*
* NOTE: This must match the parsing done in early_init_dt_scan_cpus.
*/
void __init smp_setup_cpu_maps(void)
{
struct device_node *dn;
int cpu = 0;
int nthreads = 1;
DBG("smp_setup_cpu_maps()\n");
cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
if (!cpu_to_phys_id)
panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
__func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
__be32 cpu_be;
int j, len;
DBG(" * %pOF...\n", dn);
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
if (intserv) {
DBG(" ibm,ppc-interrupt-server#s -> %lu threads\n",
(len / sizeof(int)));
} else {
DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
intserv = of_get_property(dn, "reg", &len);
if (!intserv) {
cpu_be = cpu_to_be32(cpu);
/* XXX: what is this? uninitialized?? */
intserv = &cpu_be; /* assume logical == phys */
len = 4;
}
}
nthreads = len / sizeof(int);
for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
bool avail;
DBG(" thread %d -> cpu %d (hard id %d)\n",
j, cpu, be32_to_cpu(intserv[j]));
avail = of_device_is_available(dn);
if (!avail)
avail = !of_property_match_string(dn,
"enable-method", "spin-table");
set_cpu_present(cpu, avail);
set_cpu_possible(cpu, true);
cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
cpu++;
}
if (cpu >= nr_cpu_ids) {
of_node_put(dn);
break;
}
}
/* If no SMT supported, nthreads is forced to 1 */
if (!cpu_has_feature(CPU_FTR_SMT)) {
DBG(" SMT disabled ! nthreads forced to 1\n");
nthreads = 1;
}
#ifdef CONFIG_PPC64
/*
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
if (firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
const __be32 *ireg;
num_addr_cell = of_n_addr_cells(dn);
num_size_cell = of_n_size_cells(dn);
ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);
if (!ireg)
goto out;
maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
maxcpus *= nthreads;
if (maxcpus > nr_cpu_ids) {
printk(KERN_WARNING
"Partition configured for %d cpus, "
"operating system maximum is %u.\n",
maxcpus, nr_cpu_ids);
maxcpus = nr_cpu_ids;
} else
printk(KERN_INFO "Partition configured for %d cpus.\n",
maxcpus);
for (cpu = 0; cpu < maxcpus; cpu++)
set_cpu_possible(cpu, true);
out:
of_node_put(dn);
}
vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
/* Initialize CPU <=> thread mapping
*
* WARNING: We assume that the number of threads is the same for
* every CPU in the system. If that is not the case, then some code
* here will have to be reworked
*/
cpu_init_thread_core_maps(nthreads);
/* Now that possible cpus are set, set nr_cpu_ids for later use */
setup_nr_cpu_ids();
free_unused_pacas();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PCSPKR_PLATFORM
static __init int add_pcspkr(void)
{
struct device_node *np;
struct platform_device *pd;
int ret;
np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
of_node_put(np);
if (!np)
return -ENODEV;
pd = platform_device_alloc("pcspkr", -1);
if (!pd)
return -ENOMEM;
ret = platform_device_add(pd);
if (ret)
platform_device_put(pd);
return ret;
}
device_initcall(add_pcspkr);
#endif /* CONFIG_PCSPKR_PLATFORM */
static char ppc_hw_desc_buf[128] __initdata;
struct seq_buf ppc_hw_desc __initdata = {
.buffer = ppc_hw_desc_buf,
.size = sizeof(ppc_hw_desc_buf),
.len = 0,
.readpos = 0,
};
static __init void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
unsigned int i;
/*
* Iterate all ppc_md structures until we find the proper
* one for the current machine type
*/
DBG("Probing machine type ...\n");
/*
* Check ppc_md is empty, if not we have a bug, ie, we setup an
* entry before probe_machine() which will be overwritten
*/
for (i = 0; i < (sizeof(ppc_md) / sizeof(void *)); i++) {
if (((void **)&ppc_md)[i]) {
printk(KERN_ERR "Entry %d in ppc_md non empty before"
" machine probe !\n", i);
}
}
for (machine_id = &__machine_desc_start;
machine_id < &__machine_desc_end;
machine_id++) {
DBG(" %s ...\n", machine_id->name);
if (machine_id->compatible && !of_machine_is_compatible(machine_id->compatible))
continue;
memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
if (ppc_md.probe && !ppc_md.probe())
continue;
DBG(" %s match !\n", machine_id->name);
break;
}
/* What can we do if we didn't find ? */
if (machine_id >= &__machine_desc_end) {
pr_err("No suitable machine description found !\n");
for (;;);
}
// Append the machine name to other info we've gathered
seq_buf_puts(&ppc_hw_desc, ppc_md.name);
// Set the generic hardware description shown in oopses
dump_stack_set_arch_desc(ppc_hw_desc.buffer);
pr_info("Hardware name: %s\n", ppc_hw_desc.buffer);
}
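/*
 * Illustrative sketch, not part of the original file: probe_machine() walks a
 * linker-built table of descriptors and keeps the first one whose probe()
 * hook succeeds. The same "first matching descriptor wins" pattern, with
 * hypothetical types and names:
 */
struct example_desc {
	const char *name;
	int (*probe)(void);		/* non-zero means this board matches */
};

static const struct example_desc *example_probe_machine(const struct example_desc *start,
							 const struct example_desc *end)
{
	const struct example_desc *d;

	for (d = start; d < end; d++) {
		if (d->probe && !d->probe())
			continue;	/* probe hook rejected this entry */
		return d;		/* first match wins */
	}
	return NULL;			/* no suitable descriptor found */
}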
/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
struct device_node *parent, *np = NULL;
int ret = -ENODEV;
switch(base_port) {
case I8042_DATA_REG:
if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
if (np) {
parent = of_get_parent(np);
of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
if (!of_i8042_kbd_irq)
of_i8042_kbd_irq = 1;
of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
if (!of_i8042_aux_irq)
of_i8042_aux_irq = 12;
of_node_put(np);
np = parent;
break;
}
np = of_find_node_by_type(NULL, "8042");
/* Pegasos has no device_type on its 8042 node, look for the
* name instead */
if (!np)
np = of_find_node_by_name(NULL, "8042");
if (np) {
of_i8042_kbd_irq = 1;
of_i8042_aux_irq = 12;
}
break;
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
break;
default:
/* ipmi is supposed to fail here */
break;
}
if (!np)
return ret;
parent = of_get_parent(np);
if (parent) {
if (of_node_is_type(parent, "isa"))
ret = 0;
of_node_put(parent);
}
of_node_put(np);
return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
/*
* Panic notifiers setup
*
* We have 3 notifiers for powerpc, each one from a different "nature":
*
* - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
* IRQs and deal with the Firmware-Assisted dump, when it is configured;
* should run early in the panic path.
*
* - dump_kernel_offset() is an informative notifier, just showing the KASLR
* offset if we have RANDOMIZE_BASE set.
*
* - ppc_panic_platform_handler() is a low-level handler that's registered
* only if the platform wishes to perform final actions in the panic path,
* hence it should run late and might not even return. Currently, only
* pseries and ps3 platforms register callbacks.
*/
static int ppc_panic_fadump_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
/*
* panic does a local_irq_disable, but we really
* want interrupts to be hard disabled.
*/
hard_irq_disable();
/*
* If firmware-assisted dump has been registered then trigger
* its callback and let the firmware handle everything else.
*/
crash_fadump(NULL, ptr);
return NOTIFY_DONE;
}
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kaslr_offset(), KERNELBASE);
return NOTIFY_DONE;
}
static int ppc_panic_platform_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
/*
* This handler is only registered if we have a panic callback
* on ppc_md, hence NULL check is not needed.
* Also, it may not return, so it runs really late on panic path.
*/
ppc_md.panic(ptr);
return NOTIFY_DONE;
}
static struct notifier_block ppc_fadump_block = {
.notifier_call = ppc_panic_fadump_handler,
.priority = INT_MAX, /* run early, to notify the firmware ASAP */
};
static struct notifier_block kernel_offset_notifier = {
.notifier_call = dump_kernel_offset,
};
static struct notifier_block ppc_panic_block = {
.notifier_call = ppc_panic_platform_handler,
.priority = INT_MIN, /* may not return; must be done last */
};
void __init setup_panic(void)
{
/* Hard-disables IRQs + deal with FW-assisted dump (fadump) */
atomic_notifier_chain_register(&panic_notifier_list,
&ppc_fadump_block);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
/* Low-level platform-specific routines that should run on panic */
if (ppc_md.panic)
atomic_notifier_chain_register(&panic_notifier_list,
&ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
* For platforms that have configurable cache-coherency. This function
* checks that the cache coherency setting of the kernel matches the setting
* left by the firmware, as indicated in the device tree. Since a mismatch
* will eventually result in DMA failures, we print an error and call
* BUG() in that case.
*/
#define KERNEL_COHERENCY (!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
static int __init check_cache_coherency(void)
{
struct device_node *np;
const void *prop;
bool devtree_coherency;
np = of_find_node_by_path("/");
prop = of_get_property(np, "coherency-off", NULL);
of_node_put(np);
devtree_coherency = prop ? false : true;
if (devtree_coherency != KERNEL_COHERENCY) {
printk(KERN_ERR
"kernel coherency:%s != device tree_coherency:%s\n",
KERNEL_COHERENCY ? "on" : "off",
devtree_coherency ? "on" : "off");
BUG();
}
return 0;
}
late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */
void ppc_printk_progress(char *s, unsigned short hex)
{
pr_info("%s\n", s);
}
static __init void print_system_info(void)
{
pr_info("-----------------------------------------------------\n");
pr_info("phys_mem_size = 0x%llx\n",
(unsigned long long)memblock_phys_mem_size());
pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
pr_info("icache_bsize = 0x%x\n", icache_bsize);
pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
pr_info(" possible = 0x%016lx\n",
(unsigned long)CPU_FTRS_POSSIBLE);
pr_info(" always = 0x%016lx\n",
(unsigned long)CPU_FTRS_ALWAYS);
pr_info("cpu_user_features = 0x%08x 0x%08x\n",
cur_cpu_spec->cpu_user_features,
cur_cpu_spec->cpu_user_features2);
pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
#ifdef CONFIG_PPC64
pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#ifdef CONFIG_PPC_BOOK3S
pr_info("vmalloc start = 0x%lx\n", KERN_VIRT_START);
pr_info("IO start = 0x%lx\n", KERN_IO_START);
pr_info("vmemmap start = 0x%lx\n", (unsigned long)vmemmap);
#endif
#endif
if (!early_radix_enabled())
print_system_hash_info();
if (PHYSICAL_START > 0)
pr_info("physical_start = 0x%llx\n",
(unsigned long long)PHYSICAL_START);
pr_info("-----------------------------------------------------\n");
}
#ifdef CONFIG_SMP
static void __init smp_setup_pacas(void)
{
int cpu;
for_each_possible_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
allocate_paca(cpu);
set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
}
memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));
cpu_to_phys_id = NULL;
}
#endif
/*
* Called from start_kernel; this initializes memblock, which is used
* to manage page allocation until mem_init is called.
*/
void __init setup_arch(char **cmdline_p)
{
kasan_init();
*cmdline_p = boot_command_line;
/* Set a half-reasonable default so udelay does something sensible */
loops_per_jiffy = 500000000 / HZ;
/* Unflatten the device-tree passed by prom_init or kexec */
unflatten_device_tree();
/*
* Initialize cache line/block info from device-tree (on ppc64) or
* just cputable (on ppc32).
*/
initialize_cache_info();
/* Initialize RTAS if available. */
rtas_initialize();
/* Check if we have an initrd provided via the device-tree. */
check_for_initrd();
/* Probe the machine type, establish ppc_md. */
probe_machine();
/* Setup panic notifier if requested by the platform. */
setup_panic();
/*
* Configure ppc_md.power_save (ppc32 only, 64-bit machines do
* it from their respective probe() function).
*/
setup_power_save();
/* Discover standard serial ports. */
find_legacy_serial_ports();
/* Register early console with the printk subsystem. */
register_early_udbg_console();
/* Setup the various CPU maps based on the device-tree. */
smp_setup_cpu_maps();
/* Initialize xmon. */
xmon_setup();
/* Check the SMT related command line arguments (ppc64). */
check_smt_enabled();
/* Parse memory topology */
mem_topology_setup();
/*
* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids.
*
* Freescale Book3e parts spin in a loop provided by firmware,
* so smp_release_cpus() does nothing for them.
*/
#ifdef CONFIG_SMP
smp_setup_pacas();
/* On BookE, setup per-core TLB data structures. */
setup_tlb_core_data();
#endif
/* Print various info about the machine that has been gathered so far. */
print_system_info();
klp_init_thread_info(&init_task);
setup_initial_init_mm(_stext, _etext, _edata, _end);
/* sched_init() does the mmgrab(&init_mm) for the primary CPU */
VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
cpumask_set_cpu(smp_processor_id(), mm_cpumask(&init_mm));
inc_mm_active_cpus(&init_mm);
mm_iommu_init(&init_mm);
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
mce_init();
smp_release_cpus();
initmem_init();
/*
* Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
* be called after initmem_init(), so that pageblock_order is initialised.
*/
kvm_cma_reserve();
gigantic_hugetlb_cma_reserve();
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
if (ppc_md.setup_arch)
ppc_md.setup_arch();
setup_barrier_nospec();
setup_spectre_v2();
paging_init();
/* Initialize the MMU context management stuff. */
mmu_context_init();
/* Interrupt code needs to be 64K-aligned. */
if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
panic("Kernelbase not 64K-aligned (0x%lx)!\n",
(unsigned long)_stext);
}
| linux-master | arch/powerpc/kernel/setup-common.c |
/*
* Common signal handling code for both 32 and 64 bits
*
* Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
* Extracted from signal_32.c and signal_64.c
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/resume_user_mode.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
#include <linux/context_tracking.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <asm/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>
#include "signal.h"
#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NFPREG];
int i;
/* copy the FPRs from the thread_struct into a local buffer, then out to user space */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_FPR(i);
buf[i] = task->thread.fp_state.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_fpr_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_FPR(i) = buf[i];
task->thread.fp_state.fpscr = buf[i];
return 0;
}
unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NVSRHALFREG];
int i;
/* copy the low doublewords of the VSRs from the thread_struct into a local buffer, then out to user space */
for (i = 0; i < ELF_NVSRHALFREG; i++)
buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NFPREG];
int i;
/* copy the checkpointed FPRs from the thread_struct into a local buffer, then out to user space */
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
buf[i] = task->thread.TS_CKFPR(i);
buf[i] = task->thread.ckfp_state.fpscr;
return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
unsigned long copy_ckfpr_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NFPREG];
int i;
if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
return 1;
for (i = 0; i < (ELF_NFPREG - 1) ; i++)
task->thread.TS_CKFPR(i) = buf[i];
task->thread.ckfp_state.fpscr = buf[i];
return 0;
}
unsigned long copy_ckvsx_to_user(void __user *to,
struct task_struct *task)
{
u64 buf[ELF_NVSRHALFREG];
int i;
/* copy the checkpointed low doublewords of the VSRs from the thread_struct into a local buffer, then out to user space */
for (i = 0; i < ELF_NVSRHALFREG; i++)
buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
unsigned long copy_ckvsx_from_user(struct task_struct *task,
void __user *from)
{
u64 buf[ELF_NVSRHALFREG];
int i;
if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
return 1;
for (i = 0; i < ELF_NVSRHALFREG ; i++)
task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif
/* Log an error when sending an unhandled signal to a process. Controlled
* through debug.exception-trace sysctl.
*/
int show_unhandled_signals = 1;
unsigned long get_min_sigframe_size(void)
{
if (IS_ENABLED(CONFIG_PPC64))
return get_min_sigframe_size_64();
else
return get_min_sigframe_size_32();
}
#ifdef CONFIG_COMPAT
unsigned long get_min_sigframe_size_compat(void)
{
return get_min_sigframe_size_32();
}
#endif
/*
* Allocate space for the signal frame
*/
static unsigned long get_tm_stackpointer(struct task_struct *tsk);
void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
unsigned long sp = get_tm_stackpointer(tsk);
/* Default to using normal stack */
if (is_32)
oldsp = sp & 0x0ffffffffUL;
else
oldsp = sp;
oldsp = sigsp(oldsp, ksig);
newsp = (oldsp - frame_size) & ~0xFUL;
return (void __user *)newsp;
}
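/*
 * Illustrative sketch, not part of the original file: the frame address
 * computed above is simply "stack pointer minus frame size, rounded down to a
 * 16-byte boundary", which preserves the ABI stack alignment. Hypothetical
 * plain-integer version:
 */
static inline unsigned long example_sigframe_addr(unsigned long sp, unsigned long frame_size)
{
	return (sp - frame_size) & ~0xFUL;	/* round down to 16 bytes */
}
/* e.g. example_sigframe_addr(0x7fffffffd0f8, 0x340) == 0x7fffffffcdb0 */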
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
int has_handler)
{
unsigned long ret = regs->gpr[3];
int restart = 1;
/* syscall ? */
if (!trap_is_syscall(regs))
return;
if (trap_norestart(regs))
return;
/* error signalled ? */
if (trap_is_scv(regs)) {
/* 32-bit compat mode sign extend? */
if (!IS_ERR_VALUE(ret))
return;
ret = -ret;
} else if (!(regs->ccr & 0x10000000)) {
return;
}
switch (ret) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
/* ERESTARTNOHAND means that the syscall should only be
* restarted if there was no handler for the signal, and since
* we only get here if there is a handler, we don't restart.
*/
restart = !has_handler;
break;
case ERESTARTSYS:
/* ERESTARTSYS means to restart the syscall if there is no
* handler or the handler was registered with SA_RESTART
*/
restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
break;
case ERESTARTNOINTR:
/* ERESTARTNOINTR means that the syscall should be
* called again after the signal handler returns.
*/
break;
default:
return;
}
if (restart) {
if (ret == ERESTART_RESTARTBLOCK)
regs->gpr[0] = __NR_restart_syscall;
else
regs->gpr[3] = regs->orig_gpr3;
regs_add_return_ip(regs, -4);
regs->result = 0;
} else {
if (trap_is_scv(regs)) {
regs->result = -EINTR;
regs->gpr[3] = -EINTR;
} else {
regs->result = -EINTR;
regs->gpr[3] = EINTR;
regs->ccr |= 0x10000000;
}
}
}
static void do_signal(struct task_struct *tsk)
{
sigset_t *oldset = sigmask_to_save();
struct ksignal ksig = { .sig = 0 };
int ret;
BUG_ON(tsk != current);
get_signal(&ksig);
/* Is there any syscall restart business here ? */
check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
if (ksig.sig <= 0) {
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
set_trap_norestart(tsk->thread.regs);
return; /* no signals delivered */
}
/*
* Reenable the DABR before delivering the signal to
* user space. The DABR will have been cleared if it
* triggered inside the kernel.
*/
if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
int i;
for (i = 0; i < nr_wp_slots(); i++) {
if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
__set_breakpoint(i, &tsk->thread.hw_brk[i]);
}
}
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
rseq_signal_deliver(&ksig, tsk->thread.regs);
if (is_32bit_task()) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
ret = handle_rt_signal32(&ksig, oldset, tsk);
else
ret = handle_signal32(&ksig, oldset, tsk);
} else {
ret = handle_rt_signal64(&ksig, oldset, tsk);
}
set_trap_norestart(tsk->thread.regs);
signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (thread_info_flags & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
BUG_ON(regs != current->thread.regs);
do_signal(current);
}
if (thread_info_flags & _TIF_NOTIFY_RESUME)
resume_user_mode_work(regs);
}
static unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
/* When in an active transaction that takes a signal, we need to be
* careful with the stack. It's possible that the stack has moved back
* up after the tbegin. The obvious case here is when the tbegin is
* called inside a function that returns before a tend. In this case,
* the stack is part of the checkpointed transactional memory state.
* If we write over this non transactionally or in suspend, we are in
* trouble because if we get a tm abort, the program counter and stack
* pointer will be back at the tbegin but our in memory stack won't be
* valid anymore.
*
* To avoid this, when taking a signal in an active transaction, we
* need to use the stack pointer from the checkpointed state, rather
* than the speculated state. This ensures that the signal context
* (written tm suspended) will be written below the stack required for
* the rollback. The transaction is aborted because of the treclaim,
* so any memory written between the tbegin and the signal will be
* rolled back anyway.
*
* For signals taken in non-TM or suspended mode, we use the
* normal/non-checkpointed stack pointer.
*/
struct pt_regs *regs = tsk->thread.regs;
unsigned long ret = regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
if (MSR_TM_ACTIVE(regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
if (MSR_TM_TRANSACTIONAL(regs->msr))
ret = tsk->thread.ckpt_regs.gpr[1];
/*
* If we treclaim, we must clear the current thread's TM bits
* before re-enabling preemption. Otherwise we might be
* preempted and have the live MSR[TS] changed behind our back
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
preempt_enable();
}
#endif
return ret;
}
static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n";
static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n";
void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
const char *where, void __user *ptr)
{
if (show_unhandled_signals)
printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
}
| linux-master | arch/powerpc/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from arch/i386/kernel/irq.c
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
* Updated and modified by Cort Dougan <[email protected]>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras ([email protected])
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*/
#undef DEBUG
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
int distribute_irqs = 1;
static inline void next_interrupt(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
}
/*
* We are responding to the next interrupt, so interrupt-off
* latencies should be reset here.
*/
lockdep_hardirq_exit();
trace_hardirqs_on();
trace_hardirqs_off();
lockdep_hardirq_enter();
}
static inline bool irq_happened_test_and_clear(u8 irq)
{
if (local_paca->irq_happened & irq) {
local_paca->irq_happened &= ~irq;
return true;
}
return false;
}
static __no_kcsan void __replay_soft_interrupts(void)
{
struct pt_regs regs;
/*
* We use local_paca rather than get_paca() to avoid all the
* debug_smp_processor_id() business in this low level function.
*/
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON_ONCE(mfmsr() & MSR_EE);
WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING);
}
/*
* PACA_IRQ_REPLAYING prevents interrupt handlers from enabling
* MSR[EE] to get PMIs, which can result in more IRQs becoming
* pending.
*/
local_paca->irq_happened |= PACA_IRQ_REPLAYING;
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
regs.msr |= MSR_EE;
/*
* Force the delivery of pending soft-disabled interrupts on PS3.
* Any HV call will have this side effect.
*/
if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
u64 tmp, tmp2;
lv1_get_version_info(&tmp, &tmp2);
}
/*
* Check if an hypervisor Maintenance interrupt happened.
* This is a higher priority interrupt than the others, so
* replay it first.
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
irq_happened_test_and_clear(PACA_IRQ_HMI)) {
regs.trap = INTERRUPT_HMI;
handle_hmi_exception(&regs);
next_interrupt(&regs);
}
if (irq_happened_test_and_clear(PACA_IRQ_DEC)) {
regs.trap = INTERRUPT_DECREMENTER;
timer_interrupt(&regs);
next_interrupt(&regs);
}
if (irq_happened_test_and_clear(PACA_IRQ_EE)) {
regs.trap = INTERRUPT_EXTERNAL;
do_IRQ(&regs);
next_interrupt(&regs);
}
if (IS_ENABLED(CONFIG_PPC_DOORBELL) &&
irq_happened_test_and_clear(PACA_IRQ_DBELL)) {
regs.trap = INTERRUPT_DOORBELL;
doorbell_exception(&regs);
next_interrupt(&regs);
}
/* Book3E does not support soft-masking PMI interrupts */
if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
irq_happened_test_and_clear(PACA_IRQ_PMI)) {
regs.trap = INTERRUPT_PERFMON;
performance_monitor_exception(&regs);
next_interrupt(&regs);
}
local_paca->irq_happened &= ~PACA_IRQ_REPLAYING;
}
__no_kcsan void replay_soft_interrupts(void)
{
irq_enter(); /* See comment in arch_local_irq_restore */
__replay_soft_interrupts();
irq_exit();
}
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline __no_kcsan void replay_soft_interrupts_irqrestore(void)
{
unsigned long kuap_state = get_kuap();
/*
* Check if anything calls local_irq_enable/restore() when KUAP is
* disabled (user access enabled). We handle that case here by saving
* and re-locking AMR but we shouldn't get here in the first place,
* hence the warning.
*/
kuap_assert_locked();
if (kuap_state != AMR_KUAP_BLOCKED)
set_kuap(AMR_KUAP_BLOCKED);
__replay_soft_interrupts();
if (kuap_state != AMR_KUAP_BLOCKED)
set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts()
#endif
notrace __no_kcsan void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
/* Write the new soft-enabled value if it is a disable */
if (mask) {
irq_soft_mask_set(mask);
return;
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
WARN_ON_ONCE(in_nmi());
WARN_ON_ONCE(in_hardirq());
WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING);
}
again:
/*
* After the stb, interrupts are unmasked and there are no interrupts
* pending replay. The restart sequence makes this atomic with
* respect to soft-masked interrupts. If this was just a simple code
* sequence, a soft-masked interrupt could become pending right after
* the comparison and before the stb.
*
* This allows interrupts to be unmasked without hard disabling, and
* also without new hard interrupts coming in ahead of pending ones.
*/
asm_volatile_goto(
"1: \n"
" lbz 9,%0(13) \n"
" cmpwi 9,0 \n"
" bne %l[happened] \n"
" stb 9,%1(13) \n"
"2: \n"
RESTART_TABLE(1b, 2b, 1b)
: : "i" (offsetof(struct paca_struct, irq_happened)),
"i" (offsetof(struct paca_struct, irq_soft_mask))
: "cr0", "r9"
: happened);
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(!(mfmsr() & MSR_EE));
/*
* If we came here from the replay below, we might have a preempt
* pending (due to preempt_enable_no_resched()). Have to check now.
*/
preempt_check_resched();
return;
happened:
irq_happened = READ_ONCE(local_paca->irq_happened);
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(!irq_happened);
if (irq_happened == PACA_IRQ_HARD_DIS) {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON_ONCE(mfmsr() & MSR_EE);
irq_soft_mask_set(IRQS_ENABLED);
local_paca->irq_happened = 0;
__hard_irq_enable();
preempt_check_resched();
return;
}
/* Have interrupts to replay, need to hard disable first */
if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
if (!(mfmsr() & MSR_EE)) {
/*
* An interrupt could have come in and cleared
* MSR[EE] and set IRQ_HARD_DIS, so check
* IRQ_HARD_DIS again and warn if it is still
* clear.
*/
irq_happened = READ_ONCE(local_paca->irq_happened);
WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
}
}
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
} else {
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
if (WARN_ON_ONCE(mfmsr() & MSR_EE))
__hard_irq_disable();
}
}
/*
* Disable preempt here, so that the below preempt_enable will
* perform resched if required (a replayed interrupt may set
* need_resched).
*/
preempt_disable();
irq_soft_mask_set(IRQS_ALL_DISABLED);
trace_hardirqs_off();
/*
* Now enter interrupt context. The interrupt handlers themselves
* also call irq_enter/exit (which is okay, they can nest). But call
* it here now to hold off softirqs until the below irq_exit(). If
* we allowed replayed handlers to run softirqs, that enables irqs,
* which must replay interrupts, which recurses in here and makes
* things more complicated. The recursion is limited to 2, and it can
* be made to work, but it's complicated.
*
* local_bh_disable can not be used here because interrupts taken in
* idle are not in the right context (RCU, tick, etc) to run softirqs
* so irq_enter must be called.
*/
irq_enter();
replay_soft_interrupts_irqrestore();
irq_exit();
if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) {
/*
* The softirq processing in irq_exit() may enable interrupts
* temporarily, which can result in MSR[EE] being enabled and
* more irqs becoming pending. Go around again if that happens.
*/
trace_hardirqs_on();
preempt_enable_no_resched();
goto again;
}
trace_hardirqs_on();
irq_soft_mask_set(IRQS_ENABLED);
local_paca->irq_happened = 0;
__hard_irq_enable();
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
/*
* This is a helper to use when about to go into idle low-power
* when the latter has the side effect of re-enabling interrupts
* (such as calling H_CEDE under pHyp).
*
* You call this function with interrupts soft-disabled (this is
* already the case when ppc_md.power_save is called). The function
* will return whether to enter power save or just return.
*
* In the former case, it will have generally sanitized the lazy irq
* state, and in the latter case it will leave with interrupts hard
* disabled and marked as such, so the local_irq_enable() call
* in arch_cpu_idle() will properly re-enable everything.
*/
__cpuidle bool prep_irq_for_idle(void)
{
/*
* First we need to hard disable to ensure no interrupt
* occurs before we effectively enter the low power state
*/
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
/*
* If anything happened while we were soft-disabled,
* we return now and do not enter the low power state.
*/
if (lazy_irq_pending())
return false;
/*
* Mark interrupts as soft-enabled and clear the
* PACA_IRQ_HARD_DIS from the pending mask since we
* are about to hard enable as well as a side effect
* of entering the low power state.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
irq_soft_mask_set(IRQS_ENABLED);
/* Tell the caller to enter the low power state */
return true;
}
#ifdef CONFIG_PPC_BOOK3S
/*
* This is for idle sequences that return with IRQs off, but the
* idle state itself wakes on interrupt. Tell the irq tracer that
* IRQs are enabled for the duration of idle so it does not get long
* off times. Must be paired with fini_irq_for_idle_irqsoff.
*/
bool prep_irq_for_idle_irqsoff(void)
{
WARN_ON(!irqs_disabled());
/*
* First we need to hard disable to ensure no interrupt
* occurs before we effectively enter the low power state
*/
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
/*
* If anything happened while we were soft-disabled,
* we return now and do not enter the low power state.
*/
if (lazy_irq_pending())
return false;
/* Tell lockdep we are about to re-enable */
trace_hardirqs_on();
return true;
}
/*
* Take the SRR1 wakeup reason, index into this table to find the
* appropriate irq_happened bit.
*
* System reset exceptions taken in idle state also come through here,
* but they are NMI interrupts so do not need to wait for IRQs to be
* restored, and should be taken as early as practical. These are marked
* with 0xff in the table. The Power ISA specifies 0100b as the system
* reset interrupt reason.
*/
#define IRQ_SYSTEM_RESET 0xff
static const u8 srr1_to_lazyirq[0x10] = {
0, 0, 0,
PACA_IRQ_DBELL,
IRQ_SYSTEM_RESET,
PACA_IRQ_DBELL,
PACA_IRQ_DEC,
0,
PACA_IRQ_EE,
PACA_IRQ_EE,
PACA_IRQ_HMI,
0, 0, 0, 0, 0 };
void replay_system_reset(void)
{
struct pt_regs regs;
ppc_save_regs(&regs);
regs.trap = 0x100;
get_paca()->in_nmi = 1;
system_reset_exception(&regs);
get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);
void irq_set_pending_from_srr1(unsigned long srr1)
{
unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
u8 reason = srr1_to_lazyirq[idx];
/*
* Take the system reset now, which is immediately after registers
* are restored from idle. It's an NMI, so interrupts need not be
* re-enabled before it is taken.
*/
if (unlikely(reason == IRQ_SYSTEM_RESET)) {
replay_system_reset();
return;
}
if (reason == PACA_IRQ_DBELL) {
/*
* When doorbell triggers a system reset wakeup, the message
* is not cleared, so if the doorbell interrupt is replayed
* and the IPI handled, the doorbell interrupt would still
* fire when EE is enabled.
*
* To avoid taking the superfluous doorbell interrupt,
* execute a msgclr here before the interrupt is replayed.
*/
ppc_msgclr(PPC_DBELL_MSGTYPE);
}
/*
* The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
* so this can be called unconditionally with the SRR1 wake
* reason as returned by the idle code, which uses 0 to mean no
* interrupt.
*
* If a future CPU was to designate this as an interrupt reason,
* then a new index for no interrupt must be assigned.
*/
local_paca->irq_happened |= reason;
}
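/*
 * Illustrative sketch, not part of the original file: SRR1[42:45] is the wake
 * reason field used to index srr1_to_lazyirq[] above. Assuming the usual
 * 0x003c0000 definition of SRR1_WAKEMASK_P8, masking and shifting right by 18
 * extracts those four bits; a hypothetical stand-alone equivalent:
 */
static inline unsigned int example_srr1_wake_index(unsigned long srr1)
{
	return (srr1 >> 18) & 0xf;	/* same bits as (srr1 & SRR1_WAKEMASK_P8) >> 18 */
}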
#endif /* CONFIG_PPC_BOOK3S */
/*
* Force a replay of the external interrupt handler on this CPU.
*/
void force_external_irq_replay(void)
{
/*
* This must only be called with interrupts soft-disabled,
* the replay will happen when re-enabling.
*/
WARN_ON(!arch_irqs_disabled());
/*
* Interrupts must always be hard disabled before irq_happened is
* modified (to prevent lost update in case of interrupt between
* load and store).
*/
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
/* Indicate in the PACA that we have an interrupt to replay */
local_paca->irq_happened |= PACA_IRQ_EE;
}
static int __init setup_noirqdistrib(char *str)
{
distribute_irqs = 0;
return 1;
}
__setup("noirqdistrib", setup_noirqdistrib);
| linux-master | arch/powerpc/kernel/irq_64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PowerPC backend to the KGDB stub.
*
* 1998 (c) Michael AK Tesch ([email protected])
* Copyright (C) 2003 Timesys Corporation.
* Copyright (C) 2004-2006 MontaVista Software, Inc.
* PPC64 Mods (C) 2005 Frank Rowand ([email protected])
* PPC32 support restored by Vitaly Wool <[email protected]> and
* Sergei Shtylyov <[email protected]>
* Copyright (C) 2007-2008 Wind River Systems, Inc.
*/
#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
#include <asm/code-patching.h>
#include <linux/slab.h>
#include <asm/inst.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
* signals, which are primarily what GDB understands. GDB and the kernel
* don't always agree on values, so we use constants taken from gdb-6.2.
*/
static struct hard_trap_info
{
unsigned int tt; /* Trap type code for powerpc */
unsigned char signo; /* Signal that we map this trap into */
} hard_trap_info[] = {
{ 0x0100, 0x02 /* SIGINT */ }, /* system reset */
{ 0x0200, 0x0b /* SIGSEGV */ }, /* machine check */
{ 0x0300, 0x0b /* SIGSEGV */ }, /* data access */
{ 0x0400, 0x0b /* SIGSEGV */ }, /* instruction access */
{ 0x0500, 0x02 /* SIGINT */ }, /* external interrupt */
{ 0x0600, 0x0a /* SIGBUS */ }, /* alignment */
{ 0x0700, 0x05 /* SIGTRAP */ }, /* program check */
{ 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
{ 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
#ifdef CONFIG_BOOKE_OR_40x
{ 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
#if defined(CONFIG_PPC_85xx)
{ 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
{ 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */
{ 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
{ 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */
{ 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */
{ 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */
{ 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
{ 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
{ 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
#else /* ! CONFIG_PPC_85xx */
{ 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */
{ 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */
{ 0x1020, 0x02 /* SIGINT */ }, /* watchdog */
{ 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
#endif
#else /* !CONFIG_BOOKE_OR_40x */
{ 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
#if defined(CONFIG_PPC_8xx)
{ 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
#else /* ! CONFIG_PPC_8xx */
{ 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */
{ 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */
{ 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */
#if defined(CONFIG_PPC64)
{ 0x1200, 0x05 /* SIGILL */ }, /* system error */
{ 0x1500, 0x04 /* SIGILL */ }, /* soft patch */
{ 0x1600, 0x04 /* SIGILL */ }, /* maintenance */
{ 0x1700, 0x08 /* SIGFPE */ }, /* altivec assist */
{ 0x1800, 0x04 /* SIGILL */ }, /* thermal */
#else /* ! CONFIG_PPC64 */
{ 0x1400, 0x02 /* SIGINT */ }, /* SMI */
{ 0x1600, 0x08 /* SIGFPE */ }, /* altivec assist */
{ 0x1700, 0x04 /* SIGILL */ }, /* TAU */
{ 0x2000, 0x05 /* SIGTRAP */ }, /* run mode */
#endif
#endif
#endif
{ 0x0000, 0x00 } /* Must be last */
};
static int computeSignal(unsigned int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
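/*
 * Illustrative sketch, not part of the original file: computeSignal() is a
 * linear scan of hard_trap_info[] terminated by the all-zero sentinel entry.
 * The same lookup over a hypothetical table type:
 */
struct example_trap_map {
	unsigned int tt;
	unsigned char signo;
};

static unsigned char example_signal_for_trap(const struct example_trap_map *map,
					     unsigned int tt)
{
	for (; map->tt && map->signo; map++)
		if (map->tt == tt)
			return map->signo;
	return 1;	/* SIGHUP, the default for unknown trap types */
}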
/**
*
* kgdb_skipexception - Bail out of KGDB when we've been triggered.
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
*
* On some architectures we need to skip a breakpoint exception when
* it occurs after a breakpoint has been removed.
*
*/
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
return kgdb_isremovedbreak(regs->nip);
}
static int kgdb_debugger_ipi(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
return 0;
}
#ifdef CONFIG_SMP
void kgdb_roundup_cpus(void)
{
smp_send_debugger_break();
}
#endif
/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
DIE_OOPS, regs);
}
static int kgdb_handle_breakpoint(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
static int kgdb_singlestep(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
kgdb_handle_exception(0, SIGTRAP, 0, regs);
return 1;
}
static int kgdb_iabr_match(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
return 0;
return 1;
}
static int kgdb_break_match(struct pt_regs *regs)
{
if (user_mode(regs))
return 0;
if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
return 0;
return 1;
}
#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
#define PACK32(ptr, src) do { \
u32 *ptr32; \
ptr32 = (u32 *)ptr; \
*(ptr32++) = (src); \
ptr = (unsigned long *)ptr32; \
} while (0)
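/*
 * Editor's note: PACK64() advances the cursor by a full unsigned long,
 * while PACK32() advances it by only four bytes, so 32-bit registers such
 * as cr and xer stay packed back to back in the gdb register buffer.
 */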
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
STACK_INT_FRAME_REGS);
unsigned long *ptr = gdb_regs;
int reg;
memset(gdb_regs, 0, NUMREGBYTES);
/* Regs GPR0-2 */
for (reg = 0; reg < 3; reg++)
PACK64(ptr, regs->gpr[reg]);
/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
ptr += 11;
/* Regs GPR14-31 */
for (reg = 14; reg < 32; reg++)
PACK64(ptr, regs->gpr[reg]);
#ifdef CONFIG_PPC_85xx
#ifdef CONFIG_SPE
for (reg = 0; reg < 32; reg++)
PACK64(ptr, p->thread.evr[reg]);
#else
ptr += 32;
#endif
#else
/* fp registers not used by kernel, leave zero */
ptr += 32 * 8 / sizeof(long);
#endif
PACK64(ptr, regs->nip);
PACK64(ptr, regs->msr);
PACK32(ptr, regs->ccr);
PACK64(ptr, regs->link);
PACK64(ptr, regs->ctr);
PACK32(ptr, regs->xer);
BUG_ON((unsigned long)ptr >
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
#define GDB_SIZEOF_REG sizeof(unsigned long)
#define GDB_SIZEOF_REG_U32 sizeof(u32)
#ifdef CONFIG_PPC_85xx
#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
#else
#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
#endif
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },
{ "f0", GDB_SIZEOF_FLOAT_REG, 0 },
{ "f1", GDB_SIZEOF_FLOAT_REG, 1 },
{ "f2", GDB_SIZEOF_FLOAT_REG, 2 },
{ "f3", GDB_SIZEOF_FLOAT_REG, 3 },
{ "f4", GDB_SIZEOF_FLOAT_REG, 4 },
{ "f5", GDB_SIZEOF_FLOAT_REG, 5 },
{ "f6", GDB_SIZEOF_FLOAT_REG, 6 },
{ "f7", GDB_SIZEOF_FLOAT_REG, 7 },
{ "f8", GDB_SIZEOF_FLOAT_REG, 8 },
{ "f9", GDB_SIZEOF_FLOAT_REG, 9 },
{ "f10", GDB_SIZEOF_FLOAT_REG, 10 },
{ "f11", GDB_SIZEOF_FLOAT_REG, 11 },
{ "f12", GDB_SIZEOF_FLOAT_REG, 12 },
{ "f13", GDB_SIZEOF_FLOAT_REG, 13 },
{ "f14", GDB_SIZEOF_FLOAT_REG, 14 },
{ "f15", GDB_SIZEOF_FLOAT_REG, 15 },
{ "f16", GDB_SIZEOF_FLOAT_REG, 16 },
{ "f17", GDB_SIZEOF_FLOAT_REG, 17 },
{ "f18", GDB_SIZEOF_FLOAT_REG, 18 },
{ "f19", GDB_SIZEOF_FLOAT_REG, 19 },
{ "f20", GDB_SIZEOF_FLOAT_REG, 20 },
{ "f21", GDB_SIZEOF_FLOAT_REG, 21 },
{ "f22", GDB_SIZEOF_FLOAT_REG, 22 },
{ "f23", GDB_SIZEOF_FLOAT_REG, 23 },
{ "f24", GDB_SIZEOF_FLOAT_REG, 24 },
{ "f25", GDB_SIZEOF_FLOAT_REG, 25 },
{ "f26", GDB_SIZEOF_FLOAT_REG, 26 },
{ "f27", GDB_SIZEOF_FLOAT_REG, 27 },
{ "f28", GDB_SIZEOF_FLOAT_REG, 28 },
{ "f29", GDB_SIZEOF_FLOAT_REG, 29 },
{ "f30", GDB_SIZEOF_FLOAT_REG, 30 },
{ "f31", GDB_SIZEOF_FLOAT_REG, 31 },
{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
{ "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
{ "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
{ "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
{ "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
};
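/*
 * Editor's note: for the GPRs and the special-purpose registers the third
 * field is a byte offset into struct pt_regs, but for f0-f31 it is simply
 * the FP register number; dbg_get_reg()/dbg_set_reg() below special-case
 * registers 32-63 accordingly.
 */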
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, lr... registers 64 -> 69 */
memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
dbg_reg_def[regno].size);
if (regno >= 32 && regno < 64) {
/* FP registers 32 -> 63 */
#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
if (current)
			memcpy(mem, &current->thread.evr[regno-32],
dbg_reg_def[regno].size);
#else
/* fp registers not used by kernel, leave zero */
memset(mem, 0, dbg_reg_def[regno].size);
#endif
}
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;
if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers */
		/* pc, msr, lr... registers 64 -> 69 */
memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
dbg_reg_def[regno].size);
if (regno >= 32 && regno < 64) {
/* FP registers 32 -> 63 */
#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
		memcpy(&current->thread.evr[regno-32], mem,
dbg_reg_def[regno].size);
#else
/* fp registers not used by kernel, leave zero */
return 0;
#endif
}
return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
regs_set_return_ip(regs, pc);
}
/*
* This function does PowerPC specific processing for interfacing to gdb.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *linux_regs)
{
char *ptr = &remcom_in_buffer[1];
unsigned long addr;
switch (remcom_in_buffer[0]) {
/*
* sAA..AA Step one instruction from AA..AA
* This will return an error to gdb ..
*/
case 's':
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
if (remcom_in_buffer[0] == 's') {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
return 0;
}
return -1;
}
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
err = get_kernel_nofault(instr, addr);
if (err)
return err;
err = patch_instruction(addr, ppc_inst(BREAK_INSTR));
if (err)
return -EFAULT;
*(u32 *)bpt->saved_instr = instr;
return 0;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
u32 *addr = (u32 *)bpt->bpt_addr;
err = patch_instruction(addr, ppc_inst(instr));
if (err)
return -EFAULT;
return 0;
}
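/*
 * Editor's note: kgdb_arch_set_breakpoint() saves the original word in
 * bpt->saved_instr before patching in BREAK_INSTR, and
 * kgdb_arch_remove_breakpoint() simply patches the saved word back, so the
 * two routines are each other's undo.
 */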
/*
* Global data
*/
const struct kgdb_arch arch_kgdb_ops;
static int kgdb_not_implemented(struct pt_regs *regs)
{
return 0;
}
static void *old__debugger_ipi;
static void *old__debugger;
static void *old__debugger_bpt;
static void *old__debugger_sstep;
static void *old__debugger_iabr_match;
static void *old__debugger_break_match;
static void *old__debugger_fault_handler;
int kgdb_arch_init(void)
{
old__debugger_ipi = __debugger_ipi;
old__debugger = __debugger;
old__debugger_bpt = __debugger_bpt;
old__debugger_sstep = __debugger_sstep;
old__debugger_iabr_match = __debugger_iabr_match;
old__debugger_break_match = __debugger_break_match;
old__debugger_fault_handler = __debugger_fault_handler;
__debugger_ipi = kgdb_debugger_ipi;
__debugger = kgdb_debugger;
__debugger_bpt = kgdb_handle_breakpoint;
__debugger_sstep = kgdb_singlestep;
__debugger_iabr_match = kgdb_iabr_match;
__debugger_break_match = kgdb_break_match;
__debugger_fault_handler = kgdb_not_implemented;
return 0;
}
void kgdb_arch_exit(void)
{
__debugger_ipi = old__debugger_ipi;
__debugger = old__debugger;
__debugger_bpt = old__debugger_bpt;
__debugger_sstep = old__debugger_sstep;
__debugger_iabr_match = old__debugger_iabr_match;
__debugger_break_match = old__debugger_break_match;
__debugger_fault_handler = old__debugger_fault_handler;
}
| linux-master | arch/powerpc/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2000 Tilmann Bitterberg
* ([email protected])
*
* RTAS (Runtime Abstraction Services) stuff
 * The intention is to provide a clean user interface
 * for using RTAS.
*
* TODO:
* Split off a header file and maybe move it to a different
* location. Write Documentation on what the /proc/rtas/ entries
* actually do.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/rtc.h>
#include <linux/of.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/rtas.h>
#include <asm/machdep.h> /* for ppc_md */
#include <asm/time.h>
/* Token for Sensors */
#define KEY_SWITCH 0x0001
#define ENCLOSURE_SWITCH 0x0002
#define THERMAL_SENSOR 0x0003
#define LID_STATUS 0x0004
#define POWER_SOURCE 0x0005
#define BATTERY_VOLTAGE 0x0006
#define BATTERY_REMAINING 0x0007
#define BATTERY_PERCENTAGE 0x0008
#define EPOW_SENSOR 0x0009
#define BATTERY_CYCLESTATE 0x000a
#define BATTERY_CHARGING 0x000b
/* IBM specific sensors */
#define IBM_SURVEILLANCE 0x2328 /* 9000 */
#define IBM_FANRPM 0x2329 /* 9001 */
#define IBM_VOLTAGE 0x232a /* 9002 */
#define IBM_DRCONNECTOR 0x232b /* 9003 */
#define IBM_POWERSUPPLY 0x232c /* 9004 */
/* Status return values */
#define SENSOR_CRITICAL_HIGH 13
#define SENSOR_WARNING_HIGH 12
#define SENSOR_NORMAL 11
#define SENSOR_WARNING_LOW 10
#define SENSOR_CRITICAL_LOW 9
#define SENSOR_SUCCESS 0
#define SENSOR_HW_ERROR -1
#define SENSOR_BUSY -2
#define SENSOR_NOT_EXIST -3
#define SENSOR_DR_ENTITY -9000
/* Location Codes */
#define LOC_SCSI_DEV_ADDR 'A'
#define LOC_SCSI_DEV_LOC 'B'
#define LOC_CPU 'C'
#define LOC_DISKETTE 'D'
#define LOC_ETHERNET 'E'
#define LOC_FAN 'F'
#define LOC_GRAPHICS 'G'
/* reserved / not used 'H' */
#define LOC_IO_ADAPTER 'I'
/* reserved / not used 'J' */
#define LOC_KEYBOARD 'K'
#define LOC_LCD 'L'
#define LOC_MEMORY 'M'
#define LOC_NV_MEMORY 'N'
#define LOC_MOUSE 'O'
#define LOC_PLANAR 'P'
#define LOC_OTHER_IO 'Q'
#define LOC_PARALLEL 'R'
#define LOC_SERIAL 'S'
#define LOC_DEAD_RING 'T'
#define LOC_RACKMOUNTED 'U' /* for _u_nit is rack mounted */
#define LOC_VOLTAGE 'V'
#define LOC_SWITCH_ADAPTER 'W'
#define LOC_OTHER 'X'
#define LOC_FIRMWARE 'Y'
#define LOC_SCSI 'Z'
/* Tokens for indicators */
#define TONE_FREQUENCY 0x0001 /* 0 - 1000 (HZ)*/
#define TONE_VOLUME 0x0002 /* 0 - 100 (%) */
#define SYSTEM_POWER_STATE 0x0003
#define WARNING_LIGHT 0x0004
#define DISK_ACTIVITY_LIGHT 0x0005
#define HEX_DISPLAY_UNIT 0x0006
#define BATTERY_WARNING_TIME 0x0007
#define CONDITION_CYCLE_REQUEST 0x0008
#define SURVEILLANCE_INDICATOR 0x2328 /* 9000 */
#define DR_ACTION 0x2329 /* 9001 */
#define DR_INDICATOR 0x232a /* 9002 */
/* 9003 - 9004: Vendor specific */
/* 9006 - 9999: Vendor specific */
/* other */
#define MAX_SENSORS 17 /* I only know of 17 sensors */
#define MAX_LINELENGTH 256
#define SENSOR_PREFIX "ibm,sensor-"
#define cel_to_fahr(x) (((x)*9/5)+32)
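/* Editor's note: e.g. cel_to_fahr(35) == 95, matching the "Temp. (C/F)" row below. */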
struct individual_sensor {
unsigned int token;
unsigned int quant;
};
struct rtas_sensors {
struct individual_sensor sensor[MAX_SENSORS];
unsigned int quant;
};
/* Globals */
static struct rtas_sensors sensors;
static struct device_node *rtas_node = NULL;
static unsigned long power_on_time = 0; /* Save the time the user set */
static char progress_led[MAX_LINELENGTH];
static unsigned long rtas_tone_frequency = 1000;
static unsigned long rtas_tone_volume = 0;
/* ****************************************************************** */
/* Declarations */
static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
static int ppc_rtas_clock_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_clock_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_progress_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_progress_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_poweron_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_poweron_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static ssize_t ppc_rtas_tone_freq_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v);
static ssize_t ppc_rtas_tone_volume_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos);
static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v);
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v);
static int poweron_open(struct inode *inode, struct file *file)
{
return single_open(file, ppc_rtas_poweron_show, NULL);
}
static const struct proc_ops ppc_rtas_poweron_proc_ops = {
.proc_open = poweron_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = ppc_rtas_poweron_write,
.proc_release = single_release,
};
static int progress_open(struct inode *inode, struct file *file)
{
return single_open(file, ppc_rtas_progress_show, NULL);
}
static const struct proc_ops ppc_rtas_progress_proc_ops = {
.proc_open = progress_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = ppc_rtas_progress_write,
.proc_release = single_release,
};
static int clock_open(struct inode *inode, struct file *file)
{
return single_open(file, ppc_rtas_clock_show, NULL);
}
static const struct proc_ops ppc_rtas_clock_proc_ops = {
.proc_open = clock_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = ppc_rtas_clock_write,
.proc_release = single_release,
};
static int tone_freq_open(struct inode *inode, struct file *file)
{
return single_open(file, ppc_rtas_tone_freq_show, NULL);
}
static const struct proc_ops ppc_rtas_tone_freq_proc_ops = {
.proc_open = tone_freq_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = ppc_rtas_tone_freq_write,
.proc_release = single_release,
};
static int tone_volume_open(struct inode *inode, struct file *file)
{
return single_open(file, ppc_rtas_tone_volume_show, NULL);
}
static const struct proc_ops ppc_rtas_tone_volume_proc_ops = {
.proc_open = tone_volume_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_write = ppc_rtas_tone_volume_write,
.proc_release = single_release,
};
static int ppc_rtas_find_all_sensors(void);
static void ppc_rtas_process_sensor(struct seq_file *m,
struct individual_sensor *s, int state, int error, const char *loc);
static char *ppc_rtas_process_error(int error);
static void get_location_code(struct seq_file *m,
struct individual_sensor *s, const char *loc);
static void check_location_string(struct seq_file *m, const char *c);
static void check_location(struct seq_file *m, const char *c);
static int __init proc_rtas_init(void)
{
if (!machine_is(pseries))
return -ENODEV;
rtas_node = of_find_node_by_name(NULL, "rtas");
if (rtas_node == NULL)
return -ENODEV;
proc_create("powerpc/rtas/progress", 0644, NULL,
&ppc_rtas_progress_proc_ops);
proc_create("powerpc/rtas/clock", 0644, NULL,
&ppc_rtas_clock_proc_ops);
proc_create("powerpc/rtas/poweron", 0644, NULL,
&ppc_rtas_poweron_proc_ops);
proc_create_single("powerpc/rtas/sensors", 0444, NULL,
ppc_rtas_sensors_show);
proc_create("powerpc/rtas/frequency", 0644, NULL,
&ppc_rtas_tone_freq_proc_ops);
proc_create("powerpc/rtas/volume", 0644, NULL,
&ppc_rtas_tone_volume_proc_ops);
proc_create_single("powerpc/rtas/rmo_buffer", 0400, NULL,
ppc_rtas_rmo_buf_show);
return 0;
}
__initcall(proc_rtas_init);
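/*
 * Illustrative user-space sketch (editor's addition, not part of the
 * original file): one way to consume the proc files registered above.
 * The paths follow from the proc_create() calls ("powerpc/rtas/..."
 * lands under /proc/powerpc/rtas/). Kept inside #if 0 so it is never
 * built here; compile it separately as a normal user-space program.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/powerpc/rtas/sensors", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Ask for 50% tone volume; ppc_rtas_tone_volume_write() clamps values above 100. */
	f = fopen("/proc/powerpc/rtas/volume", "w");
	if (f) {
		fprintf(f, "50\n");
		fclose(f);
	}
	return 0;
}
#endif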
static int parse_number(const char __user *p, size_t count, u64 *val)
{
char buf[40];
if (count > 39)
return -EINVAL;
if (copy_from_user(buf, p, count))
return -EFAULT;
buf[count] = 0;
return kstrtoull(buf, 10, val);
}
/* ****************************************************************** */
/* POWER-ON-TIME */
/* ****************************************************************** */
static ssize_t ppc_rtas_poweron_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct rtc_time tm;
time64_t nowtime;
int error = parse_number(buf, count, &nowtime);
if (error)
return error;
power_on_time = nowtime; /* save the time */
rtc_time64_to_tm(nowtime, &tm);
error = rtas_call(rtas_function_token(RTAS_FN_SET_TIME_FOR_POWER_ON), 7, 1, NULL,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0 /* nano */);
if (error)
printk(KERN_WARNING "error: setting poweron time returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
static int ppc_rtas_poweron_show(struct seq_file *m, void *v)
{
if (power_on_time == 0)
seq_printf(m, "Power on time not set\n");
else
seq_printf(m, "%lu\n",power_on_time);
return 0;
}
/* ****************************************************************** */
/* PROGRESS */
/* ****************************************************************** */
static ssize_t ppc_rtas_progress_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
unsigned long hex;
if (count >= MAX_LINELENGTH)
count = MAX_LINELENGTH -1;
if (copy_from_user(progress_led, buf, count)) { /* save the string */
return -EFAULT;
}
progress_led[count] = 0;
	/* Let's see if the user passed a number (parsed as decimal) */
hex = simple_strtoul(progress_led, NULL, 10);
rtas_progress ((char *)progress_led, hex);
return count;
/* clear the line */
/* rtas_progress(" ", 0xffff);*/
}
/* ****************************************************************** */
static int ppc_rtas_progress_show(struct seq_file *m, void *v)
{
if (progress_led[0])
seq_printf(m, "%s\n", progress_led);
return 0;
}
/* ****************************************************************** */
/* CLOCK */
/* ****************************************************************** */
static ssize_t ppc_rtas_clock_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct rtc_time tm;
time64_t nowtime;
int error = parse_number(buf, count, &nowtime);
if (error)
return error;
rtc_time64_to_tm(nowtime, &tm);
error = rtas_call(rtas_function_token(RTAS_FN_SET_TIME_OF_DAY), 7, 1, NULL,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, 0);
if (error)
printk(KERN_WARNING "error: setting the clock returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
static int ppc_rtas_clock_show(struct seq_file *m, void *v)
{
int ret[8];
int error = rtas_call(rtas_function_token(RTAS_FN_GET_TIME_OF_DAY), 0, 8, ret);
if (error) {
printk(KERN_WARNING "error: reading the clock returned: %s\n",
ppc_rtas_process_error(error));
seq_printf(m, "0");
} else {
unsigned int year, mon, day, hour, min, sec;
year = ret[0]; mon = ret[1]; day = ret[2];
hour = ret[3]; min = ret[4]; sec = ret[5];
seq_printf(m, "%lld\n",
mktime64(year, mon, day, hour, min, sec));
}
return 0;
}
/* ****************************************************************** */
/* SENSOR STUFF */
/* ****************************************************************** */
static int ppc_rtas_sensors_show(struct seq_file *m, void *v)
{
int i,j;
int state, error;
int get_sensor_state = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
seq_printf(m, "RTAS (RunTime Abstraction Services) Sensor Information\n");
seq_printf(m, "Sensor\t\tValue\t\tCondition\tLocation\n");
seq_printf(m, "********************************************************\n");
if (ppc_rtas_find_all_sensors() != 0) {
seq_printf(m, "\nNo sensors are available\n");
return 0;
}
for (i=0; i<sensors.quant; i++) {
struct individual_sensor *p = &sensors.sensor[i];
char rstr[64];
const char *loc;
int llen, offs;
sprintf (rstr, SENSOR_PREFIX"%04d", p->token);
loc = of_get_property(rtas_node, rstr, &llen);
/* A sensor may have multiple instances */
for (j = 0, offs = 0; j <= p->quant; j++) {
error = rtas_call(get_sensor_state, 2, 2, &state,
p->token, j);
ppc_rtas_process_sensor(m, p, state, error, loc);
seq_putc(m, '\n');
if (loc) {
offs += strlen(loc) + 1;
loc += strlen(loc) + 1;
if (offs >= llen)
loc = NULL;
}
}
}
return 0;
}
/* ****************************************************************** */
static int ppc_rtas_find_all_sensors(void)
{
const unsigned int *utmp;
int len, i;
utmp = of_get_property(rtas_node, "rtas-sensors", &len);
if (utmp == NULL) {
printk (KERN_ERR "error: could not get rtas-sensors\n");
return 1;
}
sensors.quant = len / 8; /* int + int */
for (i=0; i<sensors.quant; i++) {
sensors.sensor[i].token = *utmp++;
sensors.sensor[i].quant = *utmp++;
}
return 0;
}
/* ****************************************************************** */
/*
* Builds a string of what rtas returned
*/
static char *ppc_rtas_process_error(int error)
{
switch (error) {
case SENSOR_CRITICAL_HIGH:
return "(critical high)";
case SENSOR_WARNING_HIGH:
return "(warning high)";
case SENSOR_NORMAL:
return "(normal)";
case SENSOR_WARNING_LOW:
return "(warning low)";
case SENSOR_CRITICAL_LOW:
return "(critical low)";
case SENSOR_SUCCESS:
return "(read ok)";
case SENSOR_HW_ERROR:
return "(hardware error)";
case SENSOR_BUSY:
return "(busy)";
case SENSOR_NOT_EXIST:
return "(non existent)";
case SENSOR_DR_ENTITY:
return "(dr entity removed)";
default:
return "(UNKNOWN)";
}
}
/* ****************************************************************** */
/*
* Builds a string out of what the sensor said
*/
static void ppc_rtas_process_sensor(struct seq_file *m,
struct individual_sensor *s, int state, int error, const char *loc)
{
	/* Defined return values */
const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t",
"Maintenance" };
const char * enclosure_switch[] = { "Closed", "Open" };
const char * lid_status[] = { " ", "Open", "Closed" };
const char * power_source[] = { "AC\t", "Battery",
"AC & Battery" };
const char * battery_remaining[] = { "Very Low", "Low", "Mid", "High" };
const char * epow_sensor[] = {
"EPOW Reset", "Cooling warning", "Power warning",
"System shutdown", "System halt", "EPOW main enclosure",
"EPOW power off" };
const char * battery_cyclestate[] = { "None", "In progress",
"Requested" };
const char * battery_charging[] = { "Charging", "Discharging",
"No current flow" };
const char * ibm_drconnector[] = { "Empty", "Present", "Unusable",
"Exchange" };
int have_strings = 0;
int num_states = 0;
int temperature = 0;
int unknown = 0;
/* What kind of sensor do we have here? */
switch (s->token) {
case KEY_SWITCH:
seq_printf(m, "Key switch:\t");
num_states = sizeof(key_switch) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t", key_switch[state]);
have_strings = 1;
}
break;
case ENCLOSURE_SWITCH:
seq_printf(m, "Enclosure switch:\t");
num_states = sizeof(enclosure_switch) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t",
enclosure_switch[state]);
have_strings = 1;
}
break;
case THERMAL_SENSOR:
seq_printf(m, "Temp. (C/F):\t");
temperature = 1;
break;
case LID_STATUS:
seq_printf(m, "Lid status:\t");
num_states = sizeof(lid_status) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t", lid_status[state]);
have_strings = 1;
}
break;
case POWER_SOURCE:
seq_printf(m, "Power source:\t");
num_states = sizeof(power_source) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t",
power_source[state]);
have_strings = 1;
}
break;
case BATTERY_VOLTAGE:
seq_printf(m, "Battery voltage:\t");
break;
case BATTERY_REMAINING:
seq_printf(m, "Battery remaining:\t");
num_states = sizeof(battery_remaining) / sizeof(char *);
if (state < num_states)
{
seq_printf(m, "%s\t",
battery_remaining[state]);
have_strings = 1;
}
break;
case BATTERY_PERCENTAGE:
seq_printf(m, "Battery percentage:\t");
break;
case EPOW_SENSOR:
seq_printf(m, "EPOW Sensor:\t");
num_states = sizeof(epow_sensor) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t", epow_sensor[state]);
have_strings = 1;
}
break;
case BATTERY_CYCLESTATE:
seq_printf(m, "Battery cyclestate:\t");
num_states = sizeof(battery_cyclestate) /
sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t",
battery_cyclestate[state]);
have_strings = 1;
}
break;
case BATTERY_CHARGING:
seq_printf(m, "Battery Charging:\t");
num_states = sizeof(battery_charging) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t",
battery_charging[state]);
have_strings = 1;
}
break;
case IBM_SURVEILLANCE:
seq_printf(m, "Surveillance:\t");
break;
case IBM_FANRPM:
seq_printf(m, "Fan (rpm):\t");
break;
case IBM_VOLTAGE:
seq_printf(m, "Voltage (mv):\t");
break;
case IBM_DRCONNECTOR:
seq_printf(m, "DR connector:\t");
num_states = sizeof(ibm_drconnector) / sizeof(char *);
if (state < num_states) {
seq_printf(m, "%s\t",
ibm_drconnector[state]);
have_strings = 1;
}
break;
case IBM_POWERSUPPLY:
seq_printf(m, "Powersupply:\t");
break;
default:
seq_printf(m, "Unknown sensor (type %d), ignoring it\n",
s->token);
unknown = 1;
have_strings = 1;
break;
}
if (have_strings == 0) {
if (temperature) {
seq_printf(m, "%4d /%4d\t", state, cel_to_fahr(state));
} else
seq_printf(m, "%10d\t", state);
}
if (unknown == 0) {
seq_printf(m, "%s\t", ppc_rtas_process_error(error));
get_location_code(m, s, loc);
}
}
/* ****************************************************************** */
static void check_location(struct seq_file *m, const char *c)
{
switch (c[0]) {
case LOC_PLANAR:
seq_printf(m, "Planar #%c", c[1]);
break;
case LOC_CPU:
seq_printf(m, "CPU #%c", c[1]);
break;
case LOC_FAN:
seq_printf(m, "Fan #%c", c[1]);
break;
case LOC_RACKMOUNTED:
seq_printf(m, "Rack #%c", c[1]);
break;
case LOC_VOLTAGE:
seq_printf(m, "Voltage #%c", c[1]);
break;
case LOC_LCD:
seq_printf(m, "LCD #%c", c[1]);
break;
case '.':
seq_printf(m, "- %c", c[1]);
break;
default:
seq_printf(m, "Unknown location");
break;
}
}
/* ****************************************************************** */
/*
* Format:
* ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ]
* the '.' may be an abbreviation
*/
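/*
 * Editor's example: with the parser below, the location string "P1-F2"
 * is rendered as "Planar #1 at Fan #2".
 */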
static void check_location_string(struct seq_file *m, const char *c)
{
while (*c) {
if (isalpha(*c) || *c == '.')
check_location(m, c);
else if (*c == '/' || *c == '-')
seq_printf(m, " at ");
c++;
}
}
/* ****************************************************************** */
static void get_location_code(struct seq_file *m, struct individual_sensor *s,
const char *loc)
{
if (!loc || !*loc) {
seq_printf(m, "---");/* does not have a location */
} else {
check_location_string(m, loc);
}
seq_putc(m, ' ');
}
/* ****************************************************************** */
/* INDICATORS - Tone Frequency */
/* ****************************************************************** */
static ssize_t ppc_rtas_tone_freq_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
u64 freq;
int error = parse_number(buf, count, &freq);
if (error)
return error;
rtas_tone_frequency = freq; /* save it for later */
error = rtas_call(rtas_function_token(RTAS_FN_SET_INDICATOR), 3, 1, NULL,
TONE_FREQUENCY, 0, freq);
if (error)
printk(KERN_WARNING "error: setting tone frequency returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
static int ppc_rtas_tone_freq_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu\n", rtas_tone_frequency);
return 0;
}
/* ****************************************************************** */
/* INDICATORS - Tone Volume */
/* ****************************************************************** */
static ssize_t ppc_rtas_tone_volume_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
u64 volume;
int error = parse_number(buf, count, &volume);
if (error)
return error;
if (volume > 100)
volume = 100;
rtas_tone_volume = volume; /* save it for later */
error = rtas_call(rtas_function_token(RTAS_FN_SET_INDICATOR), 3, 1, NULL,
TONE_VOLUME, 0, volume);
if (error)
printk(KERN_WARNING "error: setting tone volume returned: %s\n",
ppc_rtas_process_error(error));
return count;
}
/* ****************************************************************** */
static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
{
seq_printf(m, "%lu\n", rtas_tone_volume);
return 0;
}
/**
* ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
*
* Base + size description of a range of RTAS-addressable memory set
* aside for user space to use as work area(s) for certain RTAS
* functions. User space accesses this region via /dev/mem. Apart from
* security policies, the kernel does not arbitrate or serialize
* access to this region, and user space must ensure that concurrent
* users do not interfere with each other.
*/
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
{
seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_USER_REGION_SIZE);
return 0;
}
| linux-master | arch/powerpc/kernel/rtas-proc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Code for Kernel probes Jump optimization.
*
* Copyright 2017, Anju T, IBM Corp.
*/
#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
#define TMPL_CALL_HDLR_IDX (optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX (optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX (optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX (optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX (optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX (optprobe_template_end - optprobe_template_entry)
static bool insn_page_in_use;
void *alloc_optinsn_page(void)
{
if (insn_page_in_use)
return NULL;
insn_page_in_use = true;
return &optinsn_slot;
}
void free_optinsn_page(void *page)
{
insn_page_in_use = false;
}
/*
* Check if we can optimize this probe. Returns NIP post-emulation if this can
* be optimized and 0 otherwise.
*/
static unsigned long can_optimize(struct kprobe *p)
{
struct pt_regs regs;
struct instruction_op op;
unsigned long nip = 0;
unsigned long addr = (unsigned long)p->addr;
/*
* kprobe placed for kretprobe during boot time
* has a 'nop' instruction, which can be emulated.
* So further checks can be skipped.
*/
if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
return addr + sizeof(kprobe_opcode_t);
/*
* We only support optimizing kernel addresses, but not
* module addresses.
*
* FIXME: Optimize kprobes placed in module addresses.
*/
if (!is_kernel_addr(addr))
return 0;
	memset(&regs, 0, sizeof(struct pt_regs));
regs.nip = addr;
regs.trap = 0x0;
regs.msr = MSR_KERNEL;
/*
	 * Kprobes placed on conditional branch instructions are
	 * not optimized, as we can't predict the nip beforehand with
	 * a dummy pt_regs and cannot ensure that the return branch
	 * from the detour buffer falls within the branch range (i.e. 32MB).
* A branch back from trampoline is set up in the detour buffer
* to the nip returned by the analyse_instr() here.
*
* Ensure that the instruction is not a conditional branch,
* and that can be emulated.
*/
if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
nip = regs.nip;
}
return nip;
}
static void optimized_callback(struct optimized_kprobe *op,
struct pt_regs *regs)
{
/* This is possible if op is under delayed unoptimizing */
if (kprobe_disabled(&op->kp))
return;
preempt_disable();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
regs_set_return_ip(regs, (unsigned long)op->kp.addr);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
}
preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
if (op->optinsn.insn) {
free_optinsn_slot(op->optinsn.insn, 1);
op->optinsn.insn = NULL;
}
}
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
/*
* Generate instructions to load provided immediate 64-bit value
* to register 'reg' and patch these instructions at 'addr'.
*/
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
if (IS_ENABLED(CONFIG_PPC64))
patch_imm64_load_insns(val, reg, addr);
else
patch_imm32_load_insns(val, reg, addr);
}
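/*
 * Editor's worked example: for val = 0xc000000012345678 the 64-bit loader
 * above patches in, at consecutive slots:
 *	lis  rN, 0xc000		(PPC_HIGHEST)
 *	ori  rN, rN, 0x0000	(PPC_HIGHER)
 *	sldi rN, rN, 32
 *	oris rN, rN, 0x1234	(PPC_HI)
 *	ori  rN, rN, 0x5678	(PPC_LO)
 */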
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
ppc_inst_t branch_op_callback, branch_emulate_step, temp;
unsigned long op_callback_addr, emulate_step_addr;
kprobe_opcode_t *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
nip = can_optimize(p);
if (!nip)
return -EILSEQ;
/* Allocate instruction slot for detour buffer */
buff = get_optinsn_slot();
if (!buff)
return -ENOMEM;
/*
* OPTPROBE uses 'b' instruction to branch to optinsn.insn.
*
* The target address has to be relatively nearby, to permit use
* of branch instruction in powerpc, because the address is specified
* in an immediate field in the instruction opcode itself, ie 24 bits
* in the opcode specify the address. Therefore the address should
* be within 32MB on either side of the current instruction.
*/
b_offset = (unsigned long)buff - (unsigned long)p->addr;
if (!is_offset_in_branch_range(b_offset))
goto error;
/* Check if the return address is also within 32MB range */
b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
if (!is_offset_in_branch_range(b_offset))
goto error;
/* Setup template */
/* We can optimize this via patch_instruction_window later */
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
/*
* Fixup the template with instructions to:
* 1. load the address of the actual probepoint
*/
patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
/*
* 2. branch to optimized_callback() and emulate_step()
*/
op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
op_callback_addr, BRANCH_SET_LINK);
rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
emulate_step_addr, BRANCH_SET_LINK);
if (rc)
goto error;
patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
*/
temp = ppc_inst_read(p->ainsn.insn);
patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
/*
* 4. branch back from trampoline
*/
patch_branch(buff + TMPL_RET_IDX, nip, 0);
flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
op->optinsn.insn = buff;
return 0;
error:
free_optinsn_slot(buff, 0);
return -ERANGE;
}
int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
return optinsn->insn != NULL;
}
/*
* On powerpc, Optprobes always replaces one instruction (4 bytes
* aligned and 4 bytes long). It is impossible to encounter another
* kprobe in this address range. So always return 0.
*/
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
return 0;
}
void arch_optimize_kprobes(struct list_head *oplist)
{
ppc_inst_t instr;
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
list_for_each_entry_safe(op, tmp, oplist, list) {
/*
* Backup instructions which will be replaced
* by jump address
*/
memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
patch_instruction(op->kp.addr, instr);
list_del_init(&op->list);
}
}
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
arch_arm_kprobe(&op->kp);
}
void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
list_for_each_entry_safe(op, tmp, oplist, list) {
arch_unoptimize_kprobe(op);
list_move(&op->list, done_list);
}
}
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
return (op->kp.addr <= addr &&
op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}
| linux-master | arch/powerpc/kernel/optprobes.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/rtas.h>
#include <asm/time.h>
#define MAX_RTC_WAIT 5000 /* 5 sec */
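/*
 * Editor's note: MAX_RTC_WAIT is in milliseconds; the callers below turn it
 * into a timebase deadline via get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT.
 */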
time64_t __init rtas_get_boot_time(void)
{
int ret[8];
int error;
unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_function_token(RTAS_FN_GET_TIME_OF_DAY), 0, 8, ret);
wait_time = rtas_busy_delay_time(error);
if (wait_time) {
/* This is boot time so we spin. */
udelay(wait_time*1000);
}
} while (wait_time && (get_tb() < max_wait_tb));
if (error != 0) {
printk_ratelimited(KERN_WARNING
"error: reading the clock failed (%d)\n",
error);
return 0;
}
return mktime64(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
/* NOTE: rtas_get_rtc_time() can fail if it is executed in interrupt context
 * and a delay is needed to read the clock. In that case rtc_tm is zeroed,
 * a ratelimited warning is printed, and we return without retrying.
 */
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error;
unsigned int wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_function_token(RTAS_FN_GET_TIME_OF_DAY), 0, 8, ret);
wait_time = rtas_busy_delay_time(error);
if (wait_time) {
if (in_interrupt()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
printk_ratelimited(KERN_WARNING
"error: reading clock "
"would delay interrupt\n");
return; /* delay not allowed */
}
msleep(wait_time);
}
} while (wait_time && (get_tb() < max_wait_tb));
if (error != 0) {
printk_ratelimited(KERN_WARNING
"error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
u64 max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_function_token(RTAS_FN_SET_TIME_OF_DAY), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
wait_time = rtas_busy_delay_time(error);
if (wait_time) {
if (in_interrupt())
return 1; /* probably decrementer */
msleep(wait_time);
}
} while (wait_time && (get_tb() < max_wait_tb));
if (error != 0)
printk_ratelimited(KERN_WARNING
"error: setting the clock failed (%d)\n",
error);
return 0;
}
| linux-master | arch/powerpc/kernel/rtas-rtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
* <[email protected]>
* and Arnd Bergmann, IBM Corp.
*/
#undef DEBUG
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/atomic.h>
#include <asm/errno.h>
#include <asm/topology.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
#ifdef CONFIG_PPC_OF_PLATFORM_PCI
/* The probing of PCI controllers from of_platform is currently
* 64 bits only, mostly due to gratuitous differences between
* the 32 and 64 bits PCI code on PowerPC and the 32 bits one
* lacking some bits needed here.
*/
static int of_pci_phb_probe(struct platform_device *dev)
{
struct pci_controller *phb;
/* Check if we can do that ... */
if (ppc_md.pci_setup_phb == NULL)
return -ENODEV;
pr_info("Setting up PCI bus %pOF\n", dev->dev.of_node);
/* Alloc and setup PHB data structure */
phb = pcibios_alloc_controller(dev->dev.of_node);
if (!phb)
return -ENODEV;
/* Setup parent in sysfs */
phb->parent = &dev->dev;
/* Setup the PHB using arch provided callback */
if (ppc_md.pci_setup_phb(phb)) {
pcibios_free_controller(phb);
return -ENODEV;
}
/* Process "ranges" property */
pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0);
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
/* Create EEH PE for the PHB */
eeh_phb_pe_create(phb);
/* Scan the bus */
pcibios_scan_phb(phb);
if (phb->bus == NULL)
return -ENXIO;
/* Claim resources. This might need some rework as well depending
* whether we are doing probe-only or not, like assigning unassigned
* resources etc...
*/
pcibios_claim_one_bus(phb->bus);
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
return 0;
}
static const struct of_device_id of_pci_phb_ids[] = {
{ .type = "pci", },
{ .type = "pcix", },
{ .type = "pcie", },
{ .type = "pciex", },
{ .type = "ht", },
{}
};
static struct platform_driver of_pci_phb_driver = {
.probe = of_pci_phb_probe,
.driver = {
.name = "of-pci",
.of_match_table = of_pci_phb_ids,
},
};
builtin_platform_driver(of_pci_phb_driver);
#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
| linux-master | arch/powerpc/kernel/of_platform.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCI address cache; allows the lookup of PCI devices based on I/O address
*
* Copyright IBM Corporation 2004
* Copyright Linas Vepstas <[email protected]> 2004
*/
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
/**
* DOC: Overview
*
* The pci address cache subsystem. This subsystem places
* PCI device address resources into a red-black tree, sorted
* according to the address range, so that given only an i/o
* address, the corresponding PCI device can be **quickly**
* found. It is safe to perform an address lookup in an interrupt
* context; this ability is an important feature.
*
* Currently, the only customer of this code is the EEH subsystem;
* thus, this code has been somewhat tailored to suit EEH better.
* In particular, the cache does *not* hold the addresses of devices
* for which EEH is not enabled.
*
* (Implementation Note: The RB tree seems to be better/faster
* than any hash algo I could think of for this problem, even
* with the penalty of slow pointer chases for d-cache misses).
*/
struct pci_io_addr_range {
struct rb_node rb_node;
resource_size_t addr_lo;
resource_size_t addr_hi;
struct eeh_dev *edev;
struct pci_dev *pcidev;
unsigned long flags;
};
static struct pci_io_addr_cache {
struct rb_root rb_root;
spinlock_t piar_lock;
} pci_io_addr_cache_root;
static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
{
struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
if (addr < piar->addr_lo)
n = n->rb_left;
else if (addr > piar->addr_hi)
n = n->rb_right;
else
return piar->edev;
}
return NULL;
}
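/*
 * Editor's note: the tree holds non-overlapping [addr_lo, addr_hi] ranges
 * keyed by address, so the plain binary descent above is enough to find the
 * single range covering the address; it runs in O(log n) without sleeping,
 * which is what makes the interrupt-context lookups mentioned above safe.
 */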
/**
* eeh_addr_cache_get_dev - Get device, given only address
* @addr: mmio (PIO) phys address or i/o port number
*
* Given an mmio phys address, or a port number, find a pci device
* that implements this address. I/O port numbers are assumed to be offset
* from zero (that is, they do *not* have pci_io_addr added in).
* It is safe to call this function within an interrupt.
*/
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr)
{
struct eeh_dev *edev;
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
edev = __eeh_addr_cache_get_device(addr);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
return edev;
}
#ifdef DEBUG
/*
* Handy-dandy debug print routine, does nothing more
* than print out the contents of our addr cache.
*/
static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
{
struct rb_node *n;
int cnt = 0;
n = rb_first(&cache->rb_root);
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n",
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
cnt++;
n = rb_next(n);
}
}
#endif
/* Insert address range into the rb tree. */
static struct pci_io_addr_range *
eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo,
resource_size_t ahi, unsigned long flags)
{
struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
struct rb_node *parent = NULL;
struct pci_io_addr_range *piar;
/* Walk tree, find a place to insert into tree */
while (*p) {
parent = *p;
piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
if (ahi < piar->addr_lo) {
p = &parent->rb_left;
} else if (alo > piar->addr_hi) {
p = &parent->rb_right;
} else {
if (dev != piar->pcidev ||
alo != piar->addr_lo || ahi != piar->addr_hi) {
pr_warn("PIAR: overlapping address range\n");
}
return piar;
}
}
piar = kzalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
if (!piar)
return NULL;
piar->addr_lo = alo;
piar->addr_hi = ahi;
piar->edev = pci_dev_to_eeh_dev(dev);
piar->pcidev = dev;
piar->flags = flags;
eeh_edev_dbg(piar->edev, "PIAR: insert range=[%pap:%pap]\n",
&alo, &ahi);
rb_link_node(&piar->rb_node, parent, p);
rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
return piar;
}
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
struct eeh_dev *edev;
int i;
edev = pci_dev_to_eeh_dev(dev);
if (!edev) {
pr_warn("PCI: no EEH dev found for %s\n",
pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
if (!edev->pe) {
dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
return;
}
/*
 * Walk resources on this device, poke the first 7 (6 normal BARs and 1
 * ROM BAR) into the tree.
*/
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
resource_size_t start = pci_resource_start(dev,i);
resource_size_t end = pci_resource_end(dev,i);
unsigned long flags = pci_resource_flags(dev,i);
		/* We are only interested in bus addresses, not dma or other stuff */
if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
continue;
eeh_addr_cache_insert(dev, start, end, flags);
}
}
/**
* eeh_addr_cache_insert_dev - Add a device to the address cache
* @dev: PCI device whose I/O addresses we are interested in.
*
* In order to support the fast lookup of devices based on addresses,
* we maintain a cache of devices that can be quickly searched.
* This routine adds a device to that cache.
*/
void eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
__eeh_addr_cache_insert_dev(dev);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
static inline void __eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
struct rb_node *n;
restart:
n = rb_first(&pci_io_addr_cache_root.rb_root);
while (n) {
struct pci_io_addr_range *piar;
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
if (piar->pcidev == dev) {
eeh_edev_dbg(piar->edev, "PIAR: remove range=[%pap:%pap]\n",
&piar->addr_lo, &piar->addr_hi);
rb_erase(n, &pci_io_addr_cache_root.rb_root);
kfree(piar);
goto restart;
}
n = rb_next(n);
}
}
/**
* eeh_addr_cache_rmv_dev - remove pci device from addr cache
* @dev: device to remove
*
* Remove a device from the addr-cache tree.
* This is potentially expensive, since it will walk
* the tree multiple times (once per resource).
* But so what; device removal doesn't need to be that fast.
*/
void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
{
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
__eeh_addr_cache_rmv_dev(dev);
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
}
/**
* eeh_addr_cache_init - Initialize a cache of I/O addresses
*
* Initialize a cache of pci i/o addresses. This cache will be used to
* find the pci device that corresponds to a given address.
*/
void eeh_addr_cache_init(void)
{
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
}
static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
struct pci_io_addr_range *piar;
struct rb_node *n;
unsigned long flags;
spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
seq_printf(s, "%s addr range [%pap-%pap]: %s\n",
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
}
spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);
void __init eeh_cache_debugfs_init(void)
{
debugfs_create_file_unsafe("eeh_address_cache", 0400,
arch_debugfs_dir, NULL,
&eeh_addr_cache_fops);
}
| linux-master | arch/powerpc/kernel/eeh_cache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* c 2001 PPC 64 Team, IBM Corp
*
* /dev/nvram driver for PPC64
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/nvram.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kmsg_dump.h>
#include <linux/pagemap.h>
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
#include <asm/machdep.h>
#undef DEBUG_NVRAM
#define NVRAM_HEADER_LEN sizeof(struct nvram_header)
#define NVRAM_BLOCK_LEN NVRAM_HEADER_LEN
/* If this size changes, then change NVNAME_LEN to match */
struct nvram_header {
unsigned char signature;
unsigned char checksum;
unsigned short length;
/* Terminating null required only for names < 12 chars. */
char name[12];
};
struct nvram_partition {
struct list_head partition;
struct nvram_header header;
unsigned int index;
};
static LIST_HEAD(nvram_partitions);
#ifdef CONFIG_PPC_PSERIES
struct nvram_os_partition rtas_log_partition = {
.name = "ibm,rtas-log",
.req_size = 2079,
.min_size = 1055,
.index = -1,
.os_partition = true
};
#endif
struct nvram_os_partition oops_log_partition = {
.name = "lnx,oops-log",
.req_size = 4000,
.min_size = 2000,
.index = -1,
.os_partition = true
};
static const char *nvram_os_partitions[] = {
#ifdef CONFIG_PPC_PSERIES
"ibm,rtas-log",
#endif
"lnx,oops-log",
NULL
};
static void oops_to_nvram(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason);
static struct kmsg_dumper nvram_kmsg_dumper = {
.dump = oops_to_nvram
};
/*
* For capturing and compressing an oops or panic report...
* big_oops_buf[] holds the uncompressed text we're capturing.
*
 * oops_buf[] holds the compressed text, preceded by an oops header.
 * The oops header has a u16 holding the header version (to differentiate
 * between the old and new format headers), followed by a u16 holding the
 * length of the compressed text (or uncompressed, if compression fails)
 * and a u64 holding the timestamp. oops_buf[] gets written to NVRAM.
*
* oops_log_info points to the header. oops_data points to the compressed text.
*
* +- oops_buf
* | +- oops_data
* v v
* +-----------+-----------+-----------+------------------------+
* | version | length | timestamp | text |
* | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
* +-----------+-----------+-----------+------------------------+
* ^
* +- oops_log_info
*
* We preallocate these buffers during init to avoid kmalloc during oops/panic.
*/
static size_t big_oops_buf_sz;
static char *big_oops_buf, *oops_buf;
static char *oops_data;
static size_t oops_data_sz;
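/*
 * Editor's sketch (guarded out, never compiled): the layout diagram above
 * restated as pointer arithmetic. The real setup code lives later in this
 * file; the exact sizes here are an assumption based on the diagram, not a
 * copy of that code.
 */
#if 0
static void oops_buf_layout_sketch(void)
{
	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;

	/* The compressed (or raw) text starts right after the header... */
	oops_data = (char *)(oops_hdr + 1);
	/* ...and may use whatever room the oops-log partition has left. */
	oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
}
#endif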
/* Compression parameters */
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;
#ifdef CONFIG_PSTORE
#ifdef CONFIG_PPC_POWERNV
static struct nvram_os_partition skiboot_partition = {
.name = "ibm,skiboot",
.index = -1,
.os_partition = false
};
#endif
#ifdef CONFIG_PPC_PSERIES
static struct nvram_os_partition of_config_partition = {
.name = "of-config",
.index = -1,
.os_partition = false
};
#endif
static struct nvram_os_partition common_partition = {
.name = "common",
.index = -1,
.os_partition = false
};
static enum pstore_type_id nvram_type_ids[] = {
PSTORE_TYPE_DMESG,
PSTORE_TYPE_PPC_COMMON,
-1,
-1,
-1
};
static int read_type;
#endif
/* nvram_write_os_partition
*
* We need to buffer the error logs into nvram to ensure that we have
* the failure information to decode. If we have a severe error there
* is no way to guarantee that the OS or the machine is in a state to
* get back to user land and write the error to disk. For example if
* the SCSI device driver causes a Machine Check by writing to a bad
* IO address, there is no way of guaranteeing that the device driver
 * is in any state that it would also be able to write the error data
* captured to disk, thus we buffer it in NVRAM for analysis on the
* next boot.
*
 * In NVRAM the partition containing the error log buffer will look like:
* Header (in bytes):
* +-----------+----------+--------+------------+------------------+
* | signature | checksum | length | name | data |
* |0 |1 |2 3|4 15|16 length-1|
* +-----------+----------+--------+------------+------------------+
*
* The 'data' section would look like (in bytes):
* +--------------+------------+-----------------------------------+
* | event_logged | sequence # | error log |
* |0 3|4 7|8 error_log_size-1|
* +--------------+------------+-----------------------------------+
*
* event_logged: 0 if event has not been logged to syslog, 1 if it has
* sequence #: The unique sequence # for each event. (until it wraps)
* error log: The error log from event_scan
*/
int nvram_write_os_partition(struct nvram_os_partition *part,
char *buff, int length,
unsigned int err_type,
unsigned int error_log_cnt)
{
int rc;
loff_t tmp_index;
struct err_log_info info;
if (part->index == -1)
return -ESPIPE;
if (length > part->size)
length = part->size;
info.error_type = cpu_to_be32(err_type);
info.seq_num = cpu_to_be32(error_log_cnt);
tmp_index = part->index;
rc = ppc_md.nvram_write((char *)&info, sizeof(info), &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
rc = ppc_md.nvram_write(buff, length, &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
return rc;
}
return 0;
}
/* nvram_read_partition
*
* Reads nvram partition for at most 'length'
*/
int nvram_read_partition(struct nvram_os_partition *part, char *buff,
int length, unsigned int *err_type,
unsigned int *error_log_cnt)
{
int rc;
loff_t tmp_index;
struct err_log_info info;
if (part->index == -1)
return -1;
if (length > part->size)
length = part->size;
tmp_index = part->index;
if (part->os_partition) {
rc = ppc_md.nvram_read((char *)&info, sizeof(info), &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
return rc;
}
}
rc = ppc_md.nvram_read(buff, length, &tmp_index);
if (rc <= 0) {
pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
return rc;
}
if (part->os_partition) {
*error_log_cnt = be32_to_cpu(info.seq_num);
*err_type = be32_to_cpu(info.error_type);
}
return 0;
}
/* nvram_init_os_partition
*
* This sets up a partition with an "OS" signature.
*
* The general strategy is the following:
* 1.) If a partition with the indicated name already exists...
* - If it's large enough, use it.
* - Otherwise, recycle it and keep going.
* 2.) Search for a free partition that is large enough.
* 3.) If there's not a free partition large enough, recycle any obsolete
* OS partitions and try again.
* 4.) First try to get a chunk that satisfies the requested size.
* 5.) If a chunk of the requested size cannot be allocated, then try to find
* a chunk that satisfies the minimum needed.
*
* Returns 0 on success, else -1.
*/
int __init nvram_init_os_partition(struct nvram_os_partition *part)
{
loff_t p;
int size;
/* Look for ours */
p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
/* Found one but too small, remove it */
if (p && size < part->min_size) {
pr_info("nvram: Found too small %s partition,"
" removing it...\n", part->name);
nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
p = 0;
}
/* Create one if we didn't find */
if (!p) {
p = nvram_create_partition(part->name, NVRAM_SIG_OS,
part->req_size, part->min_size);
if (p == -ENOSPC) {
pr_info("nvram: No room to create %s partition, "
"deleting any obsolete OS partitions...\n",
part->name);
nvram_remove_partition(NULL, NVRAM_SIG_OS,
nvram_os_partitions);
p = nvram_create_partition(part->name, NVRAM_SIG_OS,
part->req_size, part->min_size);
}
}
if (p <= 0) {
pr_err("nvram: Failed to find or create %s"
" partition, err %d\n", part->name, (int)p);
return -1;
}
part->index = p;
part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
return 0;
}
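/*
 * Sizing note (illustrative number): if nvram_create_partition() hands back
 * a data area that nvram_get_partition_size() reports as 4080 bytes,
 * part->size ends up as 4080 - sizeof(struct err_log_info), i.e. the space
 * left for the payload once the error_type/seq_num header is accounted for.
 */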
/* Derived from logfs_compress() */
static int nvram_compress(const void *in, void *out, size_t inlen,
size_t outlen)
{
int err, ret;
ret = -EIO;
err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
MEM_LEVEL, Z_DEFAULT_STRATEGY);
if (err != Z_OK)
goto error;
stream.next_in = in;
stream.avail_in = inlen;
stream.total_in = 0;
stream.next_out = out;
stream.avail_out = outlen;
stream.total_out = 0;
err = zlib_deflate(&stream, Z_FINISH);
if (err != Z_STREAM_END)
goto error;
err = zlib_deflateEnd(&stream);
if (err != Z_OK)
goto error;
if (stream.total_out >= stream.total_in)
goto error;
ret = stream.total_out;
error:
return ret;
}
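/*
 * The small WINDOW_BITS/MEM_LEVEL values keep the deflate workspace modest,
 * which matters because it is allocated once at init and then used at
 * oops/panic time. A sketch of the workspace allocation (this mirrors what
 * nvram_init_oops_partition() does later in this file):
 *
 * stream.workspace = kmalloc(zlib_deflate_workspacesize(WINDOW_BITS,
 * MEM_LEVEL), GFP_KERNEL);
 */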
/* Compress the text from big_oops_buf into oops_buf. */
static int zip_oops(size_t text_len)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
oops_data_sz);
if (zipped_len < 0) {
pr_err("nvram: compression failed; returned %d\n", zipped_len);
pr_err("nvram: logging uncompressed oops/panic report\n");
return -1;
}
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
oops_hdr->report_length = cpu_to_be16(zipped_len);
oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
return 0;
}
#ifdef CONFIG_PSTORE
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
read_type = -1;
return 0;
}
/**
* nvram_pstore_write - pstore write callback for nvram
* @record: pstore record to write, with @id to be set
*
* Called by pstore_dump() when an oops or panic report is logged in the
* printk buffer.
* Returns 0 on successful write.
*/
static int nvram_pstore_write(struct pstore_record *record)
{
int rc;
unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
/* part 1 has the recent messages from printk buffer */
if (record->part > 1 || (record->type != PSTORE_TYPE_DMESG))
return -1;
if (clobbering_unread_rtas_event())
return -1;
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
oops_hdr->report_length = cpu_to_be16(record->size);
oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
if (record->compressed)
err_type = ERR_TYPE_KERNEL_PANIC_GZ;
rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
(int) (sizeof(*oops_hdr) + record->size), err_type,
record->count);
if (rc != 0)
return rc;
record->id = record->part;
return 0;
}
/*
* Reads the oops/panic report, rtas, of-config and common partition.
* Returns the length of the data we read from each partition.
* Returns 0 if we've been called before.
*/
static ssize_t nvram_pstore_read(struct pstore_record *record)
{
struct oops_log_info *oops_hdr;
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
char *buff = NULL;
int sig = 0;
loff_t p;
read_type++;
switch (nvram_type_ids[read_type]) {
case PSTORE_TYPE_DMESG:
part = &oops_log_partition;
record->type = PSTORE_TYPE_DMESG;
break;
case PSTORE_TYPE_PPC_COMMON:
sig = NVRAM_SIG_SYS;
part = &common_partition;
record->type = PSTORE_TYPE_PPC_COMMON;
record->id = PSTORE_TYPE_PPC_COMMON;
record->time.tv_sec = 0;
record->time.tv_nsec = 0;
break;
#ifdef CONFIG_PPC_PSERIES
case PSTORE_TYPE_PPC_RTAS:
part = &rtas_log_partition;
record->type = PSTORE_TYPE_PPC_RTAS;
record->time.tv_sec = last_rtas_event;
record->time.tv_nsec = 0;
break;
case PSTORE_TYPE_PPC_OF:
sig = NVRAM_SIG_OF;
part = &of_config_partition;
record->type = PSTORE_TYPE_PPC_OF;
record->id = PSTORE_TYPE_PPC_OF;
record->time.tv_sec = 0;
record->time.tv_nsec = 0;
break;
#endif
#ifdef CONFIG_PPC_POWERNV
case PSTORE_TYPE_PPC_OPAL:
sig = NVRAM_SIG_FW;
part = &skiboot_partition;
record->type = PSTORE_TYPE_PPC_OPAL;
record->id = PSTORE_TYPE_PPC_OPAL;
record->time.tv_sec = 0;
record->time.tv_nsec = 0;
break;
#endif
default:
return 0;
}
if (!part->os_partition) {
p = nvram_find_partition(part->name, sig, &size);
if (p <= 0) {
pr_err("nvram: Failed to find partition %s, "
"err %d\n", part->name, (int)p);
return 0;
}
part->index = p;
part->size = size;
}
buff = kmalloc(part->size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
kfree(buff);
return 0;
}
record->count = 0;
if (part->os_partition)
record->id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
size_t length, hdr_size;
oops_hdr = (struct oops_log_info *)buff;
if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
/* Old format oops header had 2-byte record size */
hdr_size = sizeof(u16);
length = be16_to_cpu(oops_hdr->version);
record->time.tv_sec = 0;
record->time.tv_nsec = 0;
} else {
hdr_size = sizeof(*oops_hdr);
length = be16_to_cpu(oops_hdr->report_length);
record->time.tv_sec = be64_to_cpu(oops_hdr->timestamp);
record->time.tv_nsec = 0;
}
record->buf = kmemdup(buff + hdr_size, length, GFP_KERNEL);
kfree(buff);
if (record->buf == NULL)
return -ENOMEM;
record->ecc_notice_size = 0;
if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
record->compressed = true;
else
record->compressed = false;
return length;
}
record->buf = buff;
return part->size;
}
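/*
 * Two dmesg header layouts are handled above (layout shown informally):
 *
 * old format: [ u16 length ][ report ... ]
 * new format: [ struct oops_log_info: version, report_length, timestamp ]
 * [ report ... ]
 *
 * A header whose leading 16 bits are below OOPS_HDR_VERSION is taken to be
 * the old 2-byte record-size format, so hdr_size is sizeof(u16) and that
 * same field doubles as the report length.
 */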
static struct pstore_info nvram_pstore_info = {
.owner = THIS_MODULE,
.name = "nvram",
.flags = PSTORE_FLAGS_DMESG,
.open = nvram_pstore_open,
.read = nvram_pstore_read,
.write = nvram_pstore_write,
};
static int __init nvram_pstore_init(void)
{
int rc = 0;
if (machine_is(pseries)) {
nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
} else
nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;
nvram_pstore_info.buf = oops_data;
nvram_pstore_info.bufsize = oops_data_sz;
rc = pstore_register(&nvram_pstore_info);
if (rc && (rc != -EPERM))
/* Print error only when pstore.backend == nvram */
pr_err("nvram: pstore_register() failed, returned %d. "
"Defaults to kmsg_dump\n", rc);
return rc;
}
#else
static int __init nvram_pstore_init(void)
{
return -1;
}
#endif
void __init nvram_init_oops_partition(int rtas_partition_exists)
{
int rc;
rc = nvram_init_os_partition(&oops_log_partition);
if (rc != 0) {
#ifdef CONFIG_PPC_PSERIES
if (!rtas_partition_exists) {
pr_err("nvram: Failed to initialize oops partition!");
return;
}
pr_notice("nvram: Using %s partition to log both"
" RTAS errors and oops/panic reports\n",
rtas_log_partition.name);
memcpy(&oops_log_partition, &rtas_log_partition,
sizeof(rtas_log_partition));
#else
pr_err("nvram: Failed to initialize oops partition!");
return;
#endif
}
oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
if (!oops_buf) {
pr_err("nvram: No memory for %s partition\n",
oops_log_partition.name);
return;
}
oops_data = oops_buf + sizeof(struct oops_log_info);
oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
rc = nvram_pstore_init();
if (!rc)
return;
/*
* Figure compression (preceded by elimination of each line's <n>
* severity prefix) will reduce the oops/panic report to at most
* 45% of its original size.
*/
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
stream.workspace = kmalloc(zlib_deflate_workspacesize(
WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
oops_log_partition.name);
kfree(big_oops_buf);
big_oops_buf = NULL;
}
} else {
pr_err("No memory for uncompressed %s data; "
"skipping compression\n", oops_log_partition.name);
stream.workspace = NULL;
}
rc = kmsg_dump_register(&nvram_kmsg_dumper);
if (rc != 0) {
pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
kfree(oops_buf);
kfree(big_oops_buf);
kfree(stream.workspace);
}
}
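/*
 * Worked example of the 45% sizing above (hypothetical partition size):
 * with oops_data_sz = 4088 bytes, big_oops_buf_sz = (4088 * 100) / 45,
 * i.e. roughly 9084 bytes of uncompressed text are captured in the
 * expectation that they compress back down to fit the nvram partition.
 */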
/*
* This is our kmsg_dump callback, called after an oops or panic report
* has been written to the printk buffer. We want to capture as much
* of the printk buffer as possible. First, capture as much as we can
* that we think will compress sufficiently to fit in the lnx,oops-log
* partition. If that's too much, go back and capture uncompressed text.
*/
static void oops_to_nvram(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
static struct kmsg_dump_iter iter;
static bool panicking = false;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
size_t text_len;
unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
int rc = -1;
switch (reason) {
case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
case KMSG_DUMP_OOPS:
break;
case KMSG_DUMP_PANIC:
panicking = true;
break;
case KMSG_DUMP_EMERG:
if (panicking)
/* Panic report already captured. */
return;
break;
default:
pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
__func__, (int) reason);
return;
}
if (clobbering_unread_rtas_event())
return;
if (!spin_trylock_irqsave(&lock, flags))
return;
if (big_oops_buf) {
kmsg_dump_rewind(&iter);
kmsg_dump_get_buffer(&iter, false,
big_oops_buf, big_oops_buf_sz, &text_len);
rc = zip_oops(text_len);
}
if (rc != 0) {
kmsg_dump_rewind(&iter);
kmsg_dump_get_buffer(&iter, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
oops_hdr->report_length = cpu_to_be16(text_len);
oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
}
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
(int) (sizeof(*oops_hdr) + text_len), err_type,
++oops_count);
spin_unlock_irqrestore(&lock, flags);
}
#ifdef DEBUG_NVRAM
static void __init nvram_print_partitions(char * label)
{
struct nvram_partition * tmp_part;
printk(KERN_WARNING "--------%s---------\n", label);
printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
list_for_each_entry(tmp_part, &nvram_partitions, partition) {
printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12.12s\n",
tmp_part->index, tmp_part->header.signature,
tmp_part->header.checksum, tmp_part->header.length,
tmp_part->header.name);
}
}
#endif
static int __init nvram_write_header(struct nvram_partition * part)
{
loff_t tmp_index;
int rc;
struct nvram_header phead;
memcpy(&phead, &part->header, NVRAM_HEADER_LEN);
phead.length = cpu_to_be16(phead.length);
tmp_index = part->index;
rc = ppc_md.nvram_write((char *)&phead, NVRAM_HEADER_LEN, &tmp_index);
return rc;
}
static unsigned char __init nvram_checksum(struct nvram_header *p)
{
unsigned int c_sum, c_sum2;
unsigned short *sp = (unsigned short *)p->name; /* assume 6 shorts */
c_sum = p->signature + p->length + sp[0] + sp[1] + sp[2] + sp[3] + sp[4] + sp[5];
/* The sum may have spilled into the 3rd byte. Fold it back. */
c_sum = ((c_sum & 0xffff) + (c_sum >> 16)) & 0xffff;
/* The sum cannot exceed 2 bytes. Fold it into a checksum */
c_sum2 = (c_sum >> 8) + (c_sum << 8);
c_sum = ((c_sum + c_sum2) >> 8) & 0xff;
return c_sum;
}
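/*
 * Checksum sketch (hypothetical header contents): if signature + length +
 * the six name halfwords sum to 0x12c5, the 16-bit fold leaves 0x12c5 and
 * the stored checksum byte is 0x12 + 0xc5 = 0xd7. A carry out of that final
 * byte addition is wrapped back in (end-around carry), so e.g. a folded sum
 * of 0xff01 yields 0x01.
 */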
/*
* Per the criteria passed via nvram_remove_partition(), should this
* partition be removed? 1=remove, 0=keep
*/
static int __init nvram_can_remove_partition(struct nvram_partition *part,
const char *name, int sig, const char *exceptions[])
{
if (part->header.signature != sig)
return 0;
if (name) {
if (strncmp(name, part->header.name, 12))
return 0;
} else if (exceptions) {
const char **except;
for (except = exceptions; *except; except++) {
if (!strncmp(*except, part->header.name, 12))
return 0;
}
}
return 1;
}
/**
* nvram_remove_partition - Remove one or more partitions in nvram
* @name: name of the partition to remove, or NULL for a
* signature only match
* @sig: signature of the partition(s) to remove
* @exceptions: When removing all partitions with a matching signature,
* leave these alone.
*/
int __init nvram_remove_partition(const char *name, int sig,
const char *exceptions[])
{
struct nvram_partition *part, *prev, *tmp;
int rc;
list_for_each_entry(part, &nvram_partitions, partition) {
if (!nvram_can_remove_partition(part, name, sig, exceptions))
continue;
/* Make partition a free partition */
part->header.signature = NVRAM_SIG_FREE;
memset(part->header.name, 'w', 12);
part->header.checksum = nvram_checksum(&part->header);
rc = nvram_write_header(part);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
}
}
/* Merge contiguous ones */
prev = NULL;
list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
if (part->header.signature != NVRAM_SIG_FREE) {
prev = NULL;
continue;
}
if (prev) {
prev->header.length += part->header.length;
prev->header.checksum = nvram_checksum(&prev->header);
rc = nvram_write_header(prev);
if (rc <= 0) {
printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
return rc;
}
list_del(&part->partition);
kfree(part);
} else
prev = part;
}
return 0;
}
/**
* nvram_create_partition - Create a partition in nvram
* @name: name of the partition to create
* @sig: signature of the partition to create
* @req_size: size of data to allocate in bytes
* @min_size: minimum acceptable size (0 means req_size)
*
* Returns a negative error code or a positive nvram index
* of the beginning of the data area of the newly created
* partition. If you provided a min_size smaller than req_size
* you need to query for the actual size yourself after the
* call using nvram_partition_get_size().
*/
loff_t __init nvram_create_partition(const char *name, int sig,
int req_size, int min_size)
{
struct nvram_partition *part;
struct nvram_partition *new_part;
struct nvram_partition *free_part = NULL;
static char nv_init_vals[16];
loff_t tmp_index;
long size = 0;
int rc;
BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
/* Convert sizes from bytes to blocks */
req_size = ALIGN(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
min_size = ALIGN(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
/* If no minimum size specified, make it the same as the
* requested size
*/
if (min_size == 0)
min_size = req_size;
if (min_size > req_size)
return -EINVAL;
/* Now add one block to each for the header */
req_size += 1;
min_size += 1;
/* Find a free partition that will give us the maximum needed size
If can't find one that will give us the minimum size needed */
list_for_each_entry(part, &nvram_partitions, partition) {
if (part->header.signature != NVRAM_SIG_FREE)
continue;
if (part->header.length >= req_size) {
size = req_size;
free_part = part;
break;
}
if (part->header.length > size &&
part->header.length >= min_size) {
size = part->header.length;
free_part = part;
}
}
if (!size)
return -ENOSPC;
/* Create our OS partition */
new_part = kzalloc(sizeof(*new_part), GFP_KERNEL);
if (!new_part) {
pr_err("%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
new_part->index = free_part->index;
new_part->header.signature = sig;
new_part->header.length = size;
memcpy(new_part->header.name, name, strnlen(name, sizeof(new_part->header.name)));
new_part->header.checksum = nvram_checksum(&new_part->header);
rc = nvram_write_header(new_part);
if (rc <= 0) {
pr_err("%s: nvram_write_header failed (%d)\n", __func__, rc);
kfree(new_part);
return rc;
}
list_add_tail(&new_part->partition, &free_part->partition);
/* Adjust or remove the partition we stole the space from */
if (free_part->header.length > size) {
free_part->index += size * NVRAM_BLOCK_LEN;
free_part->header.length -= size;
free_part->header.checksum = nvram_checksum(&free_part->header);
rc = nvram_write_header(free_part);
if (rc <= 0) {
pr_err("%s: nvram_write_header failed (%d)\n",
__func__, rc);
return rc;
}
} else {
list_del(&free_part->partition);
kfree(free_part);
}
/* Clear the new partition */
for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
tmp_index < ((size - 1) * NVRAM_BLOCK_LEN);
tmp_index += NVRAM_BLOCK_LEN) {
rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
if (rc <= 0) {
pr_err("%s: nvram_write failed (%d)\n",
__func__, rc);
return rc;
}
}
return new_part->index + NVRAM_HEADER_LEN;
}
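/*
 * Block-size sketch (hypothetical request): req_size = 1024 bytes converts
 * to 1024 / 16 = 64 data blocks, plus one block for the header, so a
 * 65-block free chunk is needed. The value returned is the nvram offset of
 * the data area, i.e. new_part->index + NVRAM_HEADER_LEN, and calling
 * nvram_get_partition_size() on that offset reports (65 - 1) * 16 = 1024
 * bytes of data space again.
 */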
/**
* nvram_get_partition_size - Get the data size of an nvram partition
* @data_index: This is the offset of the start of the data of
* the partition. The same value that is returned by
* nvram_create_partition().
*/
int nvram_get_partition_size(loff_t data_index)
{
struct nvram_partition *part;
list_for_each_entry(part, &nvram_partitions, partition) {
if (part->index + NVRAM_HEADER_LEN == data_index)
return (part->header.length - 1) * NVRAM_BLOCK_LEN;
}
return -1;
}
/**
* nvram_find_partition - Find an nvram partition by signature and name
* @name: Name of the partition or NULL for any name
* @sig: Signature to test against
* @out_size: if non-NULL, returns the size of the data part of the partition
*/
loff_t nvram_find_partition(const char *name, int sig, int *out_size)
{
struct nvram_partition *p;
list_for_each_entry(p, &nvram_partitions, partition) {
if (p->header.signature == sig &&
(!name || !strncmp(p->header.name, name, 12))) {
if (out_size)
*out_size = (p->header.length - 1) *
NVRAM_BLOCK_LEN;
return p->index + NVRAM_HEADER_LEN;
}
}
return 0;
}
int __init nvram_scan_partitions(void)
{
loff_t cur_index = 0;
struct nvram_header phead;
struct nvram_partition * tmp_part;
unsigned char c_sum;
char * header;
int total_size;
int err;
if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
return -ENODEV;
total_size = ppc_md.nvram_size();
header = kmalloc(NVRAM_HEADER_LEN, GFP_KERNEL);
if (!header) {
printk(KERN_ERR "nvram_scan_partitions: Failed kmalloc\n");
return -ENOMEM;
}
while (cur_index < total_size) {
err = ppc_md.nvram_read(header, NVRAM_HEADER_LEN, &cur_index);
if (err != NVRAM_HEADER_LEN) {
printk(KERN_ERR "nvram_scan_partitions: Error parsing "
"nvram partitions\n");
goto out;
}
cur_index -= NVRAM_HEADER_LEN; /* nvram_read will advance us */
memcpy(&phead, header, NVRAM_HEADER_LEN);
phead.length = be16_to_cpu(phead.length);
err = 0;
c_sum = nvram_checksum(&phead);
if (c_sum != phead.checksum) {
printk(KERN_WARNING "WARNING: nvram partition checksum"
" was %02x, should be %02x!\n",
phead.checksum, c_sum);
printk(KERN_WARNING "Terminating nvram partition scan\n");
goto out;
}
if (!phead.length) {
printk(KERN_WARNING "WARNING: nvram corruption "
"detected: 0-length partition\n");
goto out;
}
tmp_part = kmalloc(sizeof(*tmp_part), GFP_KERNEL);
err = -ENOMEM;
if (!tmp_part) {
printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
goto out;
}
memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
tmp_part->index = cur_index;
list_add_tail(&tmp_part->partition, &nvram_partitions);
cur_index += phead.length * NVRAM_BLOCK_LEN;
}
err = 0;
#ifdef DEBUG_NVRAM
nvram_print_partitions("NVRAM Partitions");
#endif
out:
kfree(header);
return err;
}
| linux-master | arch/powerpc/kernel/nvram_64.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DAWR infrastructure
*
* Copyright 2019, Michael Neuling, IBM Corporation.
*/
#include <linux/types.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
int set_dawr(int nr, struct arch_hw_breakpoint *brk)
{
unsigned long dawr, dawrx, mrd;
dawr = brk->address;
dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
<< (63 - 58);
dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) << (63 - 59);
dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
/*
* DAWR length is stored in field MDR bits 48:53. Matches range in
* doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
* 0b111111=64DW.
* brk->hw_len is in bytes.
* This aligns up to double word size, shifts and does the bias.
*/
mrd = ((brk->hw_len + 7) >> 3) - 1;
dawrx |= (mrd & 0x3f) << (63 - 53);
if (ppc_md.set_dawr)
return ppc_md.set_dawr(nr, dawr, dawrx);
if (nr == 0) {
mtspr(SPRN_DAWR0, dawr);
mtspr(SPRN_DAWRX0, dawrx);
} else {
mtspr(SPRN_DAWR1, dawr);
mtspr(SPRN_DAWRX1, dawrx);
}
return 0;
}
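/*
 * Length-encoding sketch (hypothetical breakpoint): brk->hw_len = 8 gives
 * mrd = ((8 + 7) >> 3) - 1 = 0, i.e. one doubleword; hw_len = 16 gives
 * mrd = 1 (two doublewords), and so on up to the 0b111111 = 64-doubleword
 * maximum described above.
 */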
static void disable_dawrs_cb(void *info)
{
struct arch_hw_breakpoint null_brk = {0};
int i;
for (i = 0; i < nr_wp_slots(); i++)
set_dawr(i, &null_brk);
}
static ssize_t dawr_write_file_bool(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct arch_hw_breakpoint null_brk = {0};
size_t rc;
/* Send error to user if the hypervisor won't allow us to write DAWR */
if (!dawr_force_enable &&
firmware_has_feature(FW_FEATURE_LPAR) &&
set_dawr(0, &null_brk) != H_SUCCESS)
return -ENODEV;
rc = debugfs_write_file_bool(file, user_buf, count, ppos);
if (rc)
return rc;
/* If we are clearing, make sure all CPUs have the DAWR cleared */
if (!dawr_force_enable)
smp_call_function(disable_dawrs_cb, NULL, 0);
return rc;
}
static const struct file_operations dawr_enable_fops = {
.read = debugfs_read_file_bool,
.write = dawr_write_file_bool,
.open = simple_open,
.llseek = default_llseek,
};
static int __init dawr_force_setup(void)
{
if (cpu_has_feature(CPU_FTR_DAWR)) {
/* Don't setup sysfs file for user control on P8 */
dawr_force_enable = true;
return 0;
}
if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
/* Turn DAWR off by default, but allow admin to turn it on */
debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
arch_debugfs_dir,
&dawr_force_enable,
&dawr_enable_fops);
}
return 0;
}
arch_initcall(dawr_force_setup);
| linux-master | arch/powerpc/kernel/dawr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Machine check exception handling CPU-side for power7 and power8
*
* Copyright 2013 IBM Corporation
* Author: Mahesh Salgaonkar <[email protected]>
*/
#undef DEBUG
#define pr_fmt(fmt) "mce_power: " fmt
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/extable.h>
#include <linux/pgtable.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
#include <asm/extable.h>
#include <asm/inst.h>
/*
* Convert an address related to an mm to a PFN. NOTE: we are in real
* mode, we could potentially race with page table updates.
*/
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
pte_t *ptep, pte;
unsigned int shift;
unsigned long pfn, flags;
struct mm_struct *mm;
if (user_mode(regs))
mm = current->mm;
else
mm = &init_mm;
local_irq_save(flags);
ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
if (!ptep) {
pfn = ULONG_MAX;
goto out;
}
pte = READ_ONCE(*ptep);
if (!pte_present(pte) || pte_special(pte)) {
pfn = ULONG_MAX;
goto out;
}
if (shift <= PAGE_SHIFT)
pfn = pte_pfn(pte);
else {
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
pfn = pte_pfn(__pte(pte_val(pte) | (addr & rpnmask)));
}
out:
local_irq_restore(flags);
return pfn;
}
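/*
 * Huge-page sketch (hypothetical 16MB mapping): when the PTE shift is
 * larger than PAGE_SHIFT, the address bits below the mapping size but above
 * the base-page offset are folded into the PTE value before pte_pfn(), so
 * the returned pfn identifies the base page containing 'addr' rather than
 * the first page of the huge mapping.
 */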
static bool mce_in_guest(void)
{
#ifdef CONFIG_KVM_BOOK3S_HANDLER
/*
* If machine check is hit when in guest context or low level KVM
* code, avoid looking up any translations or making any attempts
* to recover, just record the event and pass to KVM.
*/
if (get_paca()->kvm_hstate.in_guest)
return true;
#endif
return false;
}
/* flush SLBs and reload */
#ifdef CONFIG_PPC_64S_HASH_MMU
void flush_and_reload_slb(void)
{
if (early_radix_enabled())
return;
/* Invalidate all SLBs */
slb_flush_all_realmode();
/*
* This probably shouldn't happen, but it may be possible it's
* called in early boot before SLB shadows are allocated.
*/
if (!get_slb_shadow())
return;
slb_restore_bolted_realmode();
}
#endif
void flush_erat(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
flush_and_reload_slb();
return;
}
#endif
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
#define MCE_FLUSH_SLB 1
#define MCE_FLUSH_TLB 2
#define MCE_FLUSH_ERAT 3
static int mce_flush(int what)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
if (what == MCE_FLUSH_SLB) {
flush_and_reload_slb();
return 1;
}
#endif
if (what == MCE_FLUSH_ERAT) {
flush_erat();
return 1;
}
if (what == MCE_FLUSH_TLB) {
tlbiel_all();
return 1;
}
return 0;
}
#define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
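/*
 * Note: PPC_BIT() follows the big-endian (IBM) bit numbering used in the
 * architecture documents, so PPC_BIT(42) is 1UL << (63 - 42) in
 * conventional shift notation.
 */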
struct mce_ierror_table {
unsigned long srr1_mask;
unsigned long srr1_value;
bool nip_valid; /* nip is a valid indicator of faulting address */
unsigned int error_type;
unsigned int error_subtype;
unsigned int error_class;
unsigned int initiator;
unsigned int severity;
bool sync_error;
};
static const struct mce_ierror_table mce_p7_ierror_table[] = {
{ 0x00000000001c0000, 0x0000000000040000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x0000000000080000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x00000000000c0000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000100000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000140000, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x00000000001c0000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, 0, 0, 0, 0, 0, 0 } };
static const struct mce_ierror_table mce_p8_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000000080000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000000c0000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000100000, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000140000, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000001c0000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008000000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_IFETCH_TIMEOUT, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008040000, true,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, 0, 0, 0, 0, 0, 0 } };
static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000000080000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000000c0000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000100000, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000140000, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000001c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008000000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_IFETCH_TIMEOUT, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008040000, true,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000080c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008100000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008140000, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x0000000008180000, false,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_STORE_TIMEOUT,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x00000000081c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, 0, 0, 0, 0, 0, 0 } };
static const struct mce_ierror_table mce_p10_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000000080000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000000c0000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000100000, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000140000, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000001c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008080000, true,
MCE_ERROR_TYPE_USER,MCE_USER_ERROR_SCV, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x00000000080c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008100000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008140000, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x00000000081c0000, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, 0, 0, 0, 0, 0, 0 } };
struct mce_derror_table {
unsigned long dsisr_value;
bool dar_valid; /* dar is a valid indicator of faulting address */
unsigned int error_type;
unsigned int error_subtype;
unsigned int error_class;
unsigned int initiator;
unsigned int severity;
bool sync_error;
};
static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00008000, false,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000040, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0, false, 0, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00008000, false,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00002000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00001000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000200, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, false, 0, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00008000, false,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00002000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00001000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000200, false,
MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000040, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000020, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000010, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000008, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, false, 0, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p10_derror_table[] = {
{ 0x00008000, false,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000200, false,
MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE, MCE_ECLASS_SOFTWARE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
MCE_ECLASS_SOFT_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000040, true,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000020, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000010, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000008, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN, MCE_ECLASS_HARDWARE,
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, false, 0, 0, 0, 0, 0 } };
static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
uint64_t *phys_addr)
{
/*
* Carefully look at the NIP to determine
* the instruction to analyse. Reading the NIP
* in real-mode is tricky and can lead to recursive
* faults
*/
ppc_inst_t instr;
unsigned long pfn, instr_addr;
struct instruction_op op;
struct pt_regs tmp = *regs;
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
instr = ppc_inst_read((u32 *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
*phys_addr = (pfn << PAGE_SHIFT);
return 0;
}
/*
* analyse_instr() might fail if the instruction
* is not a load/store, although this is unexpected
* for load/store errors or if we got the NIP
* wrong
*/
}
*addr = 0;
return -1;
}
static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
const struct mce_ierror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
int handled = 0;
int i;
*addr = 0;
for (i = 0; table[i].srr1_mask; i++) {
if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
continue;
if (!mce_in_guest()) {
/* attempt to correct the error */
switch (table[i].error_type) {
case MCE_ERROR_TYPE_SLB:
#ifdef CONFIG_PPC_64S_HASH_MMU
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
#endif
handled = mce_flush(MCE_FLUSH_SLB);
break;
case MCE_ERROR_TYPE_ERAT:
handled = mce_flush(MCE_FLUSH_ERAT);
break;
case MCE_ERROR_TYPE_TLB:
handled = mce_flush(MCE_FLUSH_TLB);
break;
}
}
/* now fill in mce_error_info */
mce_err->error_type = table[i].error_type;
mce_err->error_class = table[i].error_class;
switch (table[i].error_type) {
case MCE_ERROR_TYPE_UE:
mce_err->u.ue_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_SLB:
mce_err->u.slb_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_ERAT:
mce_err->u.erat_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_TLB:
mce_err->u.tlb_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_USER:
mce_err->u.user_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_RA:
mce_err->u.ra_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_LINK:
mce_err->u.link_error_type = table[i].error_subtype;
break;
}
mce_err->sync_error = table[i].sync_error;
mce_err->severity = table[i].severity;
mce_err->initiator = table[i].initiator;
if (table[i].nip_valid && !mce_in_guest()) {
*addr = regs->nip;
if (mce_err->sync_error &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
unsigned long pfn;
if (get_paca()->in_mce < MAX_MCE_DEPTH) {
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
*phys_addr =
(pfn << PAGE_SHIFT);
}
}
}
}
return handled;
}
mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
mce_err->error_class = MCE_ECLASS_UNKNOWN;
mce_err->severity = MCE_SEV_SEVERE;
mce_err->initiator = MCE_INITIATOR_CPU;
mce_err->sync_error = true;
return 0;
}
static int mce_handle_derror(struct pt_regs *regs,
const struct mce_derror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
uint64_t dsisr = regs->dsisr;
int handled = 0;
int found = 0;
int i;
*addr = 0;
for (i = 0; table[i].dsisr_value; i++) {
if (!(dsisr & table[i].dsisr_value))
continue;
if (!mce_in_guest()) {
/* attempt to correct the error */
switch (table[i].error_type) {
case MCE_ERROR_TYPE_SLB:
#ifdef CONFIG_PPC_64S_HASH_MMU
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
#endif
if (mce_flush(MCE_FLUSH_SLB))
handled = 1;
break;
case MCE_ERROR_TYPE_ERAT:
if (mce_flush(MCE_FLUSH_ERAT))
handled = 1;
break;
case MCE_ERROR_TYPE_TLB:
if (mce_flush(MCE_FLUSH_TLB))
handled = 1;
break;
}
}
/*
* Attempt to handle multiple conditions, but only return
* one. Ensure uncorrectable errors are first in the table
* to match.
*/
if (found)
continue;
/* now fill in mce_error_info */
mce_err->error_type = table[i].error_type;
mce_err->error_class = table[i].error_class;
switch (table[i].error_type) {
case MCE_ERROR_TYPE_UE:
mce_err->u.ue_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_SLB:
mce_err->u.slb_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_ERAT:
mce_err->u.erat_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_TLB:
mce_err->u.tlb_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_USER:
mce_err->u.user_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_RA:
mce_err->u.ra_error_type = table[i].error_subtype;
break;
case MCE_ERROR_TYPE_LINK:
mce_err->u.link_error_type = table[i].error_subtype;
break;
}
mce_err->sync_error = table[i].sync_error;
mce_err->severity = table[i].severity;
mce_err->initiator = table[i].initiator;
if (table[i].dar_valid)
*addr = regs->dar;
else if (mce_err->sync_error && !mce_in_guest() &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
/*
* We do a maximum of 4 nested MCE calls, see
* kernel/exception-64s.h
*/
if (get_paca()->in_mce < MAX_MCE_DEPTH)
mce_find_instr_ea_and_phys(regs, addr,
phys_addr);
}
found = 1;
}
if (found)
return handled;
mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
mce_err->error_class = MCE_ECLASS_UNKNOWN;
mce_err->severity = MCE_SEV_SEVERE;
mce_err->initiator = MCE_INITIATOR_CPU;
mce_err->sync_error = true;
return 0;
}
static long mce_handle_ue_error(struct pt_regs *regs,
struct mce_error_info *mce_err)
{
if (mce_in_guest())
return 0;
mce_common_process_ue(regs, mce_err);
if (mce_err->ignore_event)
return 1;
/*
* On specific SCOM read via MMIO we may get a machine check
* exception with SRR0 pointing inside opal. If that is the
* case OPAL may have recovery address to re-read SCOM data in
* different way and hence we can recover from this MC.
*/
if (ppc_md.mce_check_early_recovery) {
if (ppc_md.mce_check_early_recovery(regs))
return 1;
}
return 0;
}
static long mce_handle_error(struct pt_regs *regs,
unsigned long srr1,
const struct mce_derror_table dtable[],
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr = ULONG_MAX;
long handled;
if (SRR1_MC_LOADSTORE(srr1))
handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
&phys_addr);
else
handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
&phys_addr);
if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
handled = mce_handle_ue_error(regs, &mce_err);
save_mce_event(regs, handled, &mce_err, regs->nip, addr, phys_addr);
return handled;
}
long __machine_check_early_realmode_p7(struct pt_regs *regs)
{
/* P7 DD1 leaves top bits of DSISR undefined */
regs->dsisr &= 0x0000ffff;
return mce_handle_error(regs, regs->msr,
mce_p7_derror_table, mce_p7_ierror_table);
}
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
return mce_handle_error(regs, regs->msr,
mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
unsigned long srr1 = regs->msr;
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
* will result in the MCE handler seeing an unknown event and the kernel
* crashing. An MCE that occurs like this is spurious, so we don't need
* to do anything in terms of servicing it. If there is something that
* needs to be serviced, the CPU will raise the MCE again with the
* correct DSISR so that it can be serviced properly. So detect this
* case and mark it as handled.
*/
if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
return 1;
/*
* Async machine check due to bad real address from store or foreign
* link time out comes with the load/store bit (PPC bit 42) set in
* SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
* directed to the ierror table so it will find the cause (which
* describes it correctly as a store error).
*/
if (SRR1_MC_LOADSTORE(srr1) &&
((srr1 & 0x081c0000) == 0x08140000 ||
(srr1 & 0x081c0000) == 0x08180000)) {
srr1 &= ~PPC_BIT(42);
}
return mce_handle_error(regs, srr1,
mce_p9_derror_table, mce_p9_ierror_table);
}
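/*
 * Bit sketch for the redirect above (values from the p9 tables): an async
 * store error arrives with SRR1 bit 42 set and (srr1 & 0x081c0000) equal to
 * 0x08140000 (bad real address on store) or 0x08180000 (store link
 * timeout). Clearing PPC_BIT(42) makes mce_handle_error() consult
 * mce_p9_ierror_table, whose 0x08140000/0x08180000 rows carry the matching
 * MCE_ERROR_TYPE_RA / MCE_ERROR_TYPE_LINK descriptions.
 */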
long __machine_check_early_realmode_p10(struct pt_regs *regs)
{
unsigned long srr1 = regs->msr;
/*
* Async machine check due to bad real address from store comes with
* the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
* SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
* so it will find the cause (which describes it correctly as a store
* error).
*/
if (SRR1_MC_LOADSTORE(srr1) &&
(srr1 & 0x081c0000) == 0x08140000) {
srr1 &= ~PPC_BIT(42);
}
return mce_handle_error(regs, srr1,
mce_p10_derror_table, mce_p10_ierror_table);
}
| linux-master | arch/powerpc/kernel/mce_power.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Idle daemon for PowerPC. Idle daemon will handle any action
* that needs to be taken when the system becomes idle.
*
* Originally written by Cort Dougan ([email protected]).
* Subsequent 32-bit hacking by Tom Rini, Armin Kuster,
* Paul Mackerras and others.
*
* iSeries supported added by Mike Corrigan <[email protected]>
*
* Additional shared processor, SMT, and firmware support
* Copyright (c) 2003 Dave Engebretsen <[email protected]>
*
* 32-bit and 64-bit versions merged by Paul Mackerras <[email protected]>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/smp.h>
unsigned long cpuidle_disable = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(cpuidle_disable);
static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
cpuidle_disable = IDLE_POWERSAVE_OFF;
return 1;
}
__setup("powersave=off", powersave_off);
void arch_cpu_idle(void)
{
ppc64_runlatch_off();
if (ppc_md.power_save) {
ppc_md.power_save();
/*
* Some power_save functions return with
* interrupts enabled, some don't.
*/
if (!irqs_disabled())
raw_local_irq_disable();
} else {
/*
* Go into low thread priority and possibly
* low power mode.
*/
HMT_low();
HMT_very_low();
}
HMT_medium();
ppc64_runlatch_on();
}
int powersave_nap;
#ifdef CONFIG_PPC_970_NAP
void power4_idle(void)
{
if (!cpu_has_feature(CPU_FTR_CAN_NAP))
return;
if (!powersave_nap)
return;
if (!prep_irq_for_idle())
return;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile(PPC_DSSALL " ; sync" ::: "memory");
power4_idle_nap();
/*
* power4_idle_nap returns to our caller with interrupts enabled
* (soft and hard). Our caller
* can cope with either interrupts disabled or enabled upon return.
*/
}
#endif
#ifdef CONFIG_SYSCTL
/*
* Register the sysctl to set/clear powersave_nap.
*/
static struct ctl_table powersave_nap_ctl_table[] = {
{
.procname = "powersave-nap",
.data = &powersave_nap,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{}
};
static int __init
register_powersave_nap_sysctl(void)
{
register_sysctl("kernel", powersave_nap_ctl_table);
return 0;
}
__initcall(register_powersave_nap_sysctl);
#endif
| linux-master | arch/powerpc/kernel/idle.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Kernel Probes (KProbes)
*
* Copyright (C) IBM Corporation, 2002, 2004
*
* 2002-Oct Created by Vamsi Krishna S <[email protected]> Kernel
* Probes initial implementation ( includes contributions from
* Rusty Russell).
* 2004-July Suparna Bhattacharya <[email protected]> added jumper probes
* interface to access function arguments.
* 2004-Nov Ananth N Mavinakayanahalli <[email protected]> kprobes port
* for PPC64
*/
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/moduleloader.h>
#include <linux/set_memory.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)_stext &&
addr < (unsigned long)__head_end);
}
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
kprobe_opcode_t *addr = NULL;
#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 needs local entry point */
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
unsigned long faddr;
/*
* Per livepatch.h, ftrace location is always within the first
* 16 bytes of a function on powerpc with -mprofile-kernel.
*/
faddr = ftrace_location_range((unsigned long)addr,
(unsigned long)addr + 16);
if (faddr)
addr = (kprobe_opcode_t *)faddr;
else
#endif
addr = (kprobe_opcode_t *)ppc_function_entry(addr);
}
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
* - If that fails, try looking up the symbol provided.
*
* This ensures we always get to the actual symbol and not
* the descriptor.
*
* Also handle <module:symbol> format.
*/
char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
bool dot_appended = false;
const char *c;
ssize_t ret = 0;
int len = 0;
if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
c++;
len = c - name;
memcpy(dot_name, name, len);
} else
c = name;
if (*c != '\0' && *c != '.') {
dot_name[len++] = '.';
dot_appended = true;
}
ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
if (ret > 0)
addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
/* Fallback to the original non-dot symbol lookup */
if (!addr && dot_appended)
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif
return addr;
}
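/*
 * Name-mangling sketch for the ELF ABIv1 path above (hypothetical symbol):
 * a request for "ext4:ext4_file_open" is first looked up as
 * "ext4:.ext4_file_open"; only if that fails (and a dot was appended) does
 * the code fall back to the name exactly as given.
 */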
static bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
#ifdef CONFIG_KPROBES_ON_FTRACE
return offset <= 16;
#else
return offset <= 8;
#endif
#else
return !offset;
#endif
}
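/*
 * The 8/16 byte cut-offs above reflect the ELF ABIv2 entry layout: the
 * local entry point normally sits 8 bytes past the global entry point, and
 * with KPROBES_ON_FTRACE the ftrace location can be a few bytes further in,
 * hence the more relaxed 16-byte window (matching the
 * ftrace_location_range() probe in kprobe_lookup_name() above).
 */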
/* XXX try and fold the magic of kprobe_lookup_name() in this */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
bool *on_func_entry)
{
*on_func_entry = arch_kprobe_on_func_entry(offset);
return (kprobe_opcode_t *)(addr + offset);
}
void *alloc_insn_page(void)
{
void *page;
page = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
if (strict_module_rwx_enabled())
set_memory_rox((unsigned long)page, 1);
return page;
}
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
ppc_inst_t insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
} else if (!can_single_step(ppc_inst_val(insn))) {
printk("Cannot register a kprobe on instructions that can't be single stepped\n");
ret = -EINVAL;
} else if ((unsigned long)p->addr & ~PAGE_MASK &&
ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
prev = get_kprobe(p->addr - 1);
/*
* When prev is a ftrace-based kprobe, we don't have an insn, and it
* doesn't probe for prefixed instruction.
*/
if (prev && !kprobe_ftrace(prev) &&
ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
/* insn must be on a special executable page on ppc64. This is
* not explicitly required on ppc32 (right now), but it doesn't hurt */
if (!ret) {
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn)
ret = -ENOMEM;
}
if (!ret) {
patch_instruction(p->ainsn.insn, insn);
p->opcode = ppc_inst_val(insn);
}
p->ainsn.boostable = 0;
return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
void arch_remove_kprobe(struct kprobe *p)
{
if (p->ainsn.insn) {
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);
static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
enable_single_step(regs);
/*
* On powerpc we should single step on the original
* instruction even if the probed insn is a trap
* variant as values in regs could play a part in
* if the trap is taken or not
*/
regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}
static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_msr = regs->msr;
}
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->link;
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->link = (unsigned long)__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
ppc_inst_t insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
if (ret > 0) {
/*
* Once this instruction has been boosted
* successfully, set the boostable flag
*/
if (unlikely(p->ainsn.boostable == 0))
p->ainsn.boostable = 1;
} else if (ret < 0) {
/*
* We don't allow kprobes on mtmsr(d)/rfi(d), etc.
* So, we should never get here... but it's still
* good to catch them, just in case...
*/
printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
BUG();
} else {
/*
* If we haven't previously emulated this instruction, then it
* can't be boosted. Note it down so we don't try to do so again.
*
* If, however, we had emulated this instruction in the past,
* then this is just an error with the current run (for
* instance, exceptions due to a load/store). We return 0 so
* that this is now single-stepped, but continue to try
* emulating it in subsequent probe hits.
*/
if (unlikely(p->ainsn.boostable != 1))
p->ainsn.boostable = -1;
}
return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
int kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
unsigned int *addr = (unsigned int *)regs->nip;
struct kprobe_ctlblk *kcb;
if (user_mode(regs))
return 0;
if (!IS_ENABLED(CONFIG_BOOKE) &&
(!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
return 0;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
p = get_kprobe(addr);
if (!p) {
unsigned int instr;
if (get_kernel_nofault(instr, addr))
goto no_kprobe;
if (instr != BREAKPOINT_INSTRUCTION) {
/*
* PowerPC has multiple variants of the "trap"
* instruction. If the current instruction is a
* trap variant, it could belong to someone else
*/
if (is_trap(instr))
goto no_kprobe;
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
/* Check we're not actually recursing */
if (kprobe_running()) {
kprobe_opcode_t insn = *p->ainsn.insn;
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
/* Turn off 'trace' bits */
regs_set_return_msr(regs,
(regs->msr & ~MSR_SINGLESTEP) |
kcb->kprobe_saved_msr);
goto no_kprobe;
}
/*
* We have reentered the kprobe_handler(), since another probe
* was hit while within the handler. We here save the original
* kprobes variables and just single step on the instruction of
* the new probe without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
kcb->kprobe_status = KPROBE_REENTER;
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
restore_previous_kprobe(kcb);
preempt_enable();
return 1;
}
}
prepare_singlestep(p, regs);
return 1;
}
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
if (p->pre_handler && p->pre_handler(p, regs)) {
/* handler changed execution path, so skip ss setup */
reset_current_kprobe();
preempt_enable();
return 1;
}
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
if (p->post_handler)
p->post_handler(p, regs, 0);
kcb->kprobe_status = KPROBE_HIT_SSDONE;
reset_current_kprobe();
preempt_enable();
return 1;
}
}
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable();
return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
/*
* Function return probe trampoline:
* - init_kprobes() establishes a probepoint here
* - When the probed function returns, this probe
* causes the handlers to fire
*/
asm(".global __kretprobe_trampoline\n"
".type __kretprobe_trampoline, @function\n"
"__kretprobe_trampoline:\n"
"nop\n"
"blr\n"
".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
/*
* Called when the probe at kretprobe trampoline is hit
*/
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
unsigned long orig_ret_address;
orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
/*
* We get here through one of two paths:
* 1. by taking a trap -> kprobe_handler() -> here
* 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
*
* When going back through (1), we need regs->nip to be setup properly
* as it is used to determine the return address from the trap.
* For (2), since nip is not honoured with optprobes, we instead setup
* the link register properly so that the subsequent 'blr' in
* __kretprobe_trampoline jumps back to the right instruction.
*
* For nip, we should set the address to the previous instruction since
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
/*
* Called after single-stepping. p->addr is the address of the
* instruction that has been replaced by the "breakpoint" (trap)
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
int kprobe_post_handler(struct pt_regs *regs)
{
int len;
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur || user_mode(regs))
return 0;
len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
/* Adjust nip to after the single-stepped instruction */
regs_set_return_ip(regs, (unsigned long)cur->addr + len);
regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/* Restore the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable();
/*
* if somebody else is singlestepping across a probe point, msr
* will have DE/SE set, in which case, continue the remaining processing
* of do_debug, as if this is not a probe hit.
*/
if (regs->msr & MSR_SINGLESTEP)
return 0;
return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the nip points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs_set_return_ip(regs, (unsigned long)cur->addr);
/* Turn off 'trace' bits */
regs_set_return_msr(regs,
(regs->msr & ~MSR_SINGLESTEP) |
kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
int arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
return 1;
return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
| linux-master | arch/powerpc/kernel/kprobes.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/init.h>
struct dentry *arch_debugfs_dir;
EXPORT_SYMBOL(arch_debugfs_dir);
static int __init arch_kdebugfs_init(void)
{
arch_debugfs_dir = debugfs_create_dir("powerpc", NULL);
return 0;
}
arch_initcall(arch_kdebugfs_init);
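/*
 * Usage sketch (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug location): the directory created here appears as
 * /sys/kernel/debug/powerpc/ and holds the arch-specific knobs created
 * below, for example "echo 0 > /sys/kernel/debug/powerpc/rfi_flush".
 */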
| linux-master | arch/powerpc/kernel/kdebugfs.c |
// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>
#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/inst.h>
#include "setup.h"
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
enum branch_cache_flush_type {
BRANCH_CACHE_FLUSH_NONE = 0x1,
BRANCH_CACHE_FLUSH_SW = 0x2,
BRANCH_CACHE_FLUSH_HW = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;
bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
static void enable_barrier_nospec(bool enable)
{
barrier_nospec_enabled = enable;
do_barrier_nospec_fixups(enable);
}
void __init setup_barrier_nospec(void)
{
bool enable;
/*
* It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
* But there's a good reason not to. The two flags we check below are
* both enabled by default in the kernel, so if the hcall is not
* functional they will be enabled.
* On a system where the host firmware has been updated (so the ori
* functions as a barrier), but on which the hypervisor (KVM/Qemu) has
* not been updated, we would like to enable the barrier. Dropping the
* check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
* we potentially enable the barrier on systems where the host firmware
* is not updated, but that's harmless as it's a no-op.
*/
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
if (!no_nospec && !cpu_mitigations_off())
enable_barrier_nospec(enable);
}
static int __init handle_nospectre_v1(char *p)
{
no_nospec = true;
return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
switch (val) {
case 0:
case 1:
break;
default:
return -EINVAL;
}
if (!!val == !!barrier_nospec_enabled)
return 0;
enable_barrier_nospec(!!val);
return 0;
}
static int barrier_nospec_get(void *data, u64 *val)
{
*val = barrier_nospec_enabled ? 1 : 0;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
barrier_nospec_set, "%llu\n");
static __init int barrier_nospec_debugfs_init(void)
{
debugfs_create_file_unsafe("barrier_nospec", 0600,
arch_debugfs_dir, NULL,
&fops_barrier_nospec);
return 0;
}
device_initcall(barrier_nospec_debugfs_init);
static __init int security_feature_debugfs_init(void)
{
debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
&powerpc_security_features);
return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
no_spectrev2 = true;
return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_E500
void __init setup_spectre_v2(void)
{
if (no_spectrev2 || cpu_mitigations_off())
do_btb_flush_fixups();
else
btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_E500 */
#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
bool thread_priv;
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
seq_buf_printf(&s, ", L1D private per thread");
seq_buf_printf(&s, "\n");
return s.len;
}
if (thread_priv)
return sprintf(buf, "Vulnerable: L1D private per thread\n");
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_meltdown(dev, attr, buf);
}
#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
if (barrier_nospec_enabled)
seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
else
seq_buf_printf(&s, "Vulnerable");
if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
seq_buf_printf(&s, ", ori31 speculation barrier enabled");
seq_buf_printf(&s, "\n");
} else
seq_buf_printf(&s, "Not affected\n");
return s.len;
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
struct seq_buf s;
bool bcs, ccd;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
if (bcs || ccd) {
seq_buf_printf(&s, "Mitigation: ");
if (bcs)
seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
if (bcs && ccd)
seq_buf_printf(&s, ", ");
if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled");
} else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
seq_buf_printf(&s, "Mitigation: Software count cache flush");
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)");
} else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else {
seq_buf_printf(&s, "Vulnerable");
}
if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
seq_buf_printf(&s, ", Software link stack flush");
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)");
}
seq_buf_printf(&s, "\n");
return s.len;
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Store-forwarding barrier support.
*/
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;
static int __init handle_no_stf_barrier(char *p)
{
pr_info("stf-barrier: disabled on command line.");
no_stf_barrier = true;
return 0;
}
early_param("no_stf_barrier", handle_no_stf_barrier);
enum stf_barrier_type stf_barrier_type_get(void)
{
return stf_enabled_flush_types;
}
/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
/* Until firmware tells us, we have the barrier with auto */
return 0;
} else if (strncmp(p, "off", 3) == 0) {
handle_no_stf_barrier(NULL);
return 0;
} else
return 1;
return 0;
}
early_param("spec_store_bypass_disable", handle_ssbd);
/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
handle_no_stf_barrier(NULL);
return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
static void stf_barrier_enable(bool enable)
{
if (enable)
do_stf_barrier_fixups(stf_enabled_flush_types);
else
do_stf_barrier_fixups(STF_BARRIER_NONE);
stf_barrier = enable;
}
void setup_stf_barrier(void)
{
enum stf_barrier_type type;
bool enable;
/* Default to fallback in case fw-features are not available */
if (cpu_has_feature(CPU_FTR_ARCH_300))
type = STF_BARRIER_EIEIO;
else if (cpu_has_feature(CPU_FTR_ARCH_207S))
type = STF_BARRIER_SYNC_ORI;
else if (cpu_has_feature(CPU_FTR_ARCH_206))
type = STF_BARRIER_FALLBACK;
else
type = STF_BARRIER_NONE;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_STF_BARRIER);
if (type == STF_BARRIER_FALLBACK) {
pr_info("stf-barrier: fallback barrier available\n");
} else if (type == STF_BARRIER_SYNC_ORI) {
pr_info("stf-barrier: hwsync barrier available\n");
} else if (type == STF_BARRIER_EIEIO) {
pr_info("stf-barrier: eieio barrier available\n");
}
stf_enabled_flush_types = type;
if (!no_stf_barrier && !cpu_mitigations_off())
stf_barrier_enable(enable);
}
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
const char *type;
switch (stf_enabled_flush_types) {
case STF_BARRIER_EIEIO:
type = "eieio";
break;
case STF_BARRIER_SYNC_ORI:
type = "hwsync";
break;
case STF_BARRIER_FALLBACK:
type = "fallback";
break;
default:
type = "unknown";
}
return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
}
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Vulnerable\n");
}
static int ssb_prctl_get(struct task_struct *task)
{
/*
* The STF_BARRIER feature is on by default, so if it's off that means
* firmware has explicitly said the CPU is not vulnerable via either
* the hypercall or device tree.
*/
if (!security_ftr_enabled(SEC_FTR_STF_BARRIER))
return PR_SPEC_NOT_AFFECTED;
/*
* If the system's CPU has no known barrier (see setup_stf_barrier())
* then assume that the CPU is not vulnerable.
*/
if (stf_enabled_flush_types == STF_BARRIER_NONE)
return PR_SPEC_NOT_AFFECTED;
/*
* Otherwise the CPU is vulnerable. The barrier is not a global or
* per-process mitigation, so the only value that can be reported here
* is PR_SPEC_ENABLE, which appears as "vulnerable" in /proc.
*/
return PR_SPEC_ENABLE;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
switch (which) {
case PR_SPEC_STORE_BYPASS:
return ssb_prctl_get(task);
default:
return -ENODEV;
}
}
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
bool enable;
if (val == 1)
enable = true;
else if (val == 0)
enable = false;
else
return -EINVAL;
/* Only do anything if we're changing state */
if (enable != stf_barrier)
stf_barrier_enable(enable);
return 0;
}
static int stf_barrier_get(void *data, u64 *val)
{
*val = stf_barrier ? 1 : 0;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
"%llu\n");
static __init int stf_barrier_debugfs_init(void)
{
debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
NULL, &fops_stf_barrier);
return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
static void update_branch_cache_flush(void)
{
u32 *site, __maybe_unused *site2;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
} else {
// Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
}
#endif
// Patch out the bcctr first, then nop the rest
site = &patch__call_flush_branch_caches3;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches2;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches1;
patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
// This controls the branch from _switch to flush_branch_caches
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
// Nothing to be done
} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
// Patch in the bcctr last
site = &patch__call_flush_branch_caches1;
patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
site = &patch__call_flush_branch_caches2;
patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
site = &patch__call_flush_branch_caches3;
patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));
} else {
patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);
// If we just need to flush the link stack, early return
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(&patch__flush_link_stack_return,
ppc_inst(PPC_RAW_BLR()));
// If we have flush instruction, early return
} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
patch_instruction_site(&patch__flush_count_cache_return,
ppc_inst(PPC_RAW_BLR()));
}
}
}
static void toggle_branch_cache_flush(bool enable)
{
if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
pr_info("count-cache-flush: flush disabled.\n");
} else {
if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
pr_info("count-cache-flush: hardware flush enabled.\n");
} else {
count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
pr_info("count-cache-flush: software flush enabled.\n");
}
}
if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;
pr_info("link-stack-flush: flush disabled.\n");
} else {
if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
pr_info("link-stack-flush: hardware flush enabled.\n");
} else {
link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
pr_info("link-stack-flush: software flush enabled.\n");
}
}
update_branch_cache_flush();
}
void setup_count_cache_flush(void)
{
bool enable = true;
if (no_spectrev2 || cpu_mitigations_off()) {
if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
enable = false;
}
/*
* There's no firmware feature flag/hypervisor bit to tell us we need to
* flush the link stack on context switch. So we set it here if we see
* either of the Spectre v2 mitigations that aim to protect userspace.
*/
if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
toggle_branch_cache_flush(enable);
}
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);
static int __init handle_no_rfi_flush(char *p)
{
pr_info("rfi-flush: disabled on command line.");
no_rfi_flush = true;
return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);
static int __init handle_no_entry_flush(char *p)
{
pr_info("entry-flush: disabled on command line.");
no_entry_flush = true;
return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);
static int __init handle_no_uaccess_flush(char *p)
{
pr_info("uaccess-flush: disabled on command line.");
no_uaccess_flush = true;
return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
/*
* The RFI flush is not KPTI, but because users will see documentation that
* says to use nopti, we hijack that option here to also disable the RFI flush.
*/
static int __init handle_no_pti(char *p)
{
pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
handle_no_rfi_flush(NULL);
return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
/*
* We don't need to do the flush explicitly, just enter+exit kernel is
* sufficient, the RFI exit handlers will do the right thing.
*/
}
void rfi_flush_enable(bool enable)
{
if (enable) {
do_rfi_flush_fixups(enabled_flush_types);
on_each_cpu(do_nothing, NULL, 1);
} else
do_rfi_flush_fixups(L1D_FLUSH_NONE);
rfi_flush = enable;
}
static void entry_flush_enable(bool enable)
{
if (enable) {
do_entry_flush_fixups(enabled_flush_types);
on_each_cpu(do_nothing, NULL, 1);
} else {
do_entry_flush_fixups(L1D_FLUSH_NONE);
}
entry_flush = enable;
}
static void uaccess_flush_enable(bool enable)
{
if (enable) {
do_uaccess_flush_fixups(enabled_flush_types);
static_branch_enable(&uaccess_flush_key);
on_each_cpu(do_nothing, NULL, 1);
} else {
static_branch_disable(&uaccess_flush_key);
do_uaccess_flush_fixups(L1D_FLUSH_NONE);
}
uaccess_flush = enable;
}
static void __ref init_fallback_flush(void)
{
u64 l1d_size, limit;
int cpu;
/* Only allocate the fallback flush area once (at boot time). */
if (l1d_flush_fallback_area)
return;
l1d_size = ppc64_caches.l1d.size;
/*
* If there is no d-cache-size property in the device tree, l1d_size
* could be zero. That leads to the loop in the asm wrapping around to
* 2^64-1, and then walking off the end of the fallback area and
* eventually causing a page fault which is fatal. Just default to
* something vaguely sane.
*/
if (!l1d_size)
l1d_size = (64 * 1024);
limit = min(ppc64_bolted_size(), ppc64_rma_size);
/*
* Align to L1d size, and size it at 2x L1d size, to catch possible
* hardware prefetch runoff. We don't have a recipe for load patterns to
* reliably avoid the prefetcher.
*/
l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
l1d_size, MEMBLOCK_LOW_LIMIT,
limit, NUMA_NO_NODE);
if (!l1d_flush_fallback_area)
panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
__func__, l1d_size * 2, l1d_size, &limit);
for_each_possible_cpu(cpu) {
struct paca_struct *paca = paca_ptrs[cpu];
paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
paca->l1d_flush_size = l1d_size;
}
}
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
if (types & L1D_FLUSH_FALLBACK) {
pr_info("rfi-flush: fallback displacement flush available\n");
init_fallback_flush();
}
if (types & L1D_FLUSH_ORI)
pr_info("rfi-flush: ori type flush available\n");
if (types & L1D_FLUSH_MTTRIG)
pr_info("rfi-flush: mttrig type flush available\n");
enabled_flush_types = types;
if (!cpu_mitigations_off() && !no_rfi_flush)
rfi_flush_enable(enable);
}
void setup_entry_flush(bool enable)
{
if (cpu_mitigations_off())
return;
if (!no_entry_flush)
entry_flush_enable(enable);
}
void setup_uaccess_flush(bool enable)
{
if (cpu_mitigations_off())
return;
if (!no_uaccess_flush)
uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
bool enable;
if (val == 1)
enable = true;
else if (val == 0)
enable = false;
else
return -EINVAL;
toggle_branch_cache_flush(enable);
return 0;
}
static int count_cache_flush_get(void *data, u64 *val)
{
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
*val = 0;
else
*val = 1;
return 0;
}
static int link_stack_flush_get(void *data, u64 *val)
{
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
*val = 0;
else
*val = 1;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
count_cache_flush_set, "%llu\n");
static __init int count_cache_flush_debugfs_init(void)
{
debugfs_create_file_unsafe("count_cache_flush", 0600,
arch_debugfs_dir, NULL,
&fops_count_cache_flush);
debugfs_create_file_unsafe("link_stack_flush", 0600,
arch_debugfs_dir, NULL,
&fops_link_stack_flush);
return 0;
}
device_initcall(count_cache_flush_debugfs_init);
static int rfi_flush_set(void *data, u64 val)
{
bool enable;
if (val == 1)
enable = true;
else if (val == 0)
enable = false;
else
return -EINVAL;
/* Only do anything if we're changing state */
if (enable != rfi_flush)
rfi_flush_enable(enable);
return 0;
}
static int rfi_flush_get(void *data, u64 *val)
{
*val = rfi_flush ? 1 : 0;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
static int entry_flush_set(void *data, u64 val)
{
bool enable;
if (val == 1)
enable = true;
else if (val == 0)
enable = false;
else
return -EINVAL;
/* Only do anything if we're changing state */
if (enable != entry_flush)
entry_flush_enable(enable);
return 0;
}
static int entry_flush_get(void *data, u64 *val)
{
*val = entry_flush ? 1 : 0;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
static int uaccess_flush_set(void *data, u64 val)
{
bool enable;
if (val == 1)
enable = true;
else if (val == 0)
enable = false;
else
return -EINVAL;
/* Only do anything if we're changing state */
if (enable != uaccess_flush)
uaccess_flush_enable(enable);
return 0;
}
static int uaccess_flush_get(void *data, u64 *val)
{
*val = uaccess_flush ? 1 : 0;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
static __init int rfi_flush_debugfs_init(void)
{
debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */
| linux-master | arch/powerpc/kernel/security.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Routines to emulate some Altivec/VMX instructions, specifically
* those that can trap when given denormalized operands in Java mode.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
/* Functions in vector.S */
extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
extern void vrefp(vector128 *dst, vector128 *src);
extern void vrsqrtefp(vector128 *dst, vector128 *src);
extern void vexptep(vector128 *dst, vector128 *src);
static unsigned int exp2s[8] = {
0x800000,
0x8b95c2,
0x9837f0,
0xa5fed7,
0xb504f3,
0xc5672a,
0xd744fd,
0xeac0c7
};
/*
* Computes an estimate of 2^x. The `s' argument is the 32-bit
* single-precision floating-point representation of x.
*/
static unsigned int eexp2(unsigned int s)
{
int exp, pwr;
unsigned int mant, frac;
/* extract exponent field from input */
exp = ((s >> 23) & 0xff) - 127;
if (exp > 7) {
/* check for NaN input */
if (exp == 128 && (s & 0x7fffff) != 0)
return s | 0x400000; /* return QNaN */
/* 2^-big = 0, 2^+big = +Inf */
return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
}
if (exp < -23)
return 0x3f800000; /* 1.0 */
/* convert to fixed point integer in 9.23 representation */
pwr = (s & 0x7fffff) | 0x800000;
if (exp > 0)
pwr <<= exp;
else
pwr >>= -exp;
if (s & 0x80000000)
pwr = -pwr;
/* extract integer part, which becomes exponent part of result */
exp = (pwr >> 23) + 126;
if (exp >= 254)
return 0x7f800000;
if (exp < -23)
return 0;
/* table lookup on top 3 bits of fraction to get mantissa */
mant = exp2s[(pwr >> 20) & 7];
/* linear interpolation using remaining 20 bits of fraction */
asm("mulhwu %0,%1,%2" : "=r" (frac)
: "r" (pwr << 12), "r" (0x172b83ff));
asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
mant += frac;
if (exp >= 0)
return mant + (exp << 23);
/* denormalized result */
exp = -exp;
mant += 1 << (exp - 1);
return mant >> exp;
}
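/*
 * Worked example (a sanity check of the arithmetic above): for
 * s = 0x40000000 (2.0f), exp = 1 and pwr = 0x800000 << 1 = 0x1000000 in
 * 9.23 fixed point. The integer part (2) gives exp = 128, the top three
 * fraction bits select exp2s[0] = 0x800000, and the remaining fraction
 * bits are zero so the interpolation adds nothing. The result is
 * 0x800000 + (128 << 23) = 0x40800000, i.e. 4.0f, as expected for 2^2.
 */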
/*
* Computes an estimate of log_2(x). The `s' argument is the 32-bit
* single-precision floating-point representation of x.
*/
static unsigned int elog2(unsigned int s)
{
int exp, mant, lz, frac;
exp = s & 0x7f800000;
mant = s & 0x7fffff;
if (exp == 0x7f800000) { /* Inf or NaN */
if (mant != 0)
s |= 0x400000; /* turn NaN into QNaN */
return s;
}
if ((exp | mant) == 0) /* +0 or -0 */
return 0xff800000; /* return -Inf */
if (exp == 0) {
/* denormalized */
asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
mant <<= lz - 8;
exp = (-118 - lz) << 23;
} else {
mant |= 0x800000;
exp -= 127 << 23;
}
if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
exp |= 0x400000; /* 0.5 * 2^23 */
asm("mulhwu %0,%1,%2" : "=r" (mant)
: "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
}
if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
exp |= 0x200000; /* 0.25 * 2^23 */
asm("mulhwu %0,%1,%2" : "=r" (mant)
: "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
}
if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
exp |= 0x100000; /* 0.125 * 2^23 */
asm("mulhwu %0,%1,%2" : "=r" (mant)
: "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
}
if (mant > 0x800000) { /* 1.0 * 2^23 */
/* calculate (mant - 1) * 1.381097463 */
/* 1.381097463 == 0.125 / (2^0.125 - 1) */
asm("mulhwu %0,%1,%2" : "=r" (frac)
: "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
exp += frac;
}
s = exp & 0x80000000;
if (exp != 0) {
if (s)
exp = -exp;
asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
lz = 8 - lz;
if (lz > 0)
exp >>= lz;
else if (lz < 0)
exp <<= -lz;
s += ((lz + 126) << 23) + exp;
}
return s;
}
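/*
 * Worked example (a sanity check of the arithmetic above): for
 * s = 0x40800000 (4.0f) none of the mantissa correction steps fire, the
 * 9.23 exponent is 0x01000000, which normalises with lz = 1 to 0x800000,
 * giving 0x3f800000 + 0x800000 = 0x40000000, i.e. 2.0f, as expected for
 * log_2(4).
 */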
#define VSCR_SAT 1
static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
{
int exp, mant;
exp = (x >> 23) & 0xff;
mant = x & 0x7fffff;
if (exp == 255 && mant != 0)
return 0; /* NaN -> 0 */
exp = exp - 127 + scale;
if (exp < 0)
return 0; /* round towards zero */
if (exp >= 31) {
/* saturate, unless the result would be -2^31 */
if (x + (scale << 23) != 0xcf000000)
*vscrp |= VSCR_SAT;
return (x & 0x80000000)? 0x80000000: 0x7fffffff;
}
mant |= 0x800000;
mant = (mant << 7) >> (30 - exp);
return (x & 0x80000000)? -mant: mant;
}
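/*
 * Worked example (a sanity check of the conversion above): for
 * x = 0x3fc00000 (1.5f) with scale = 1, exp becomes 1, the mantissa with
 * its hidden bit is 0xc00000, and (0xc00000 << 7) >> 29 = 3, so the
 * function returns 3 (1.5 * 2^1) without setting VSCR_SAT.
 */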
static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
{
int exp;
unsigned int mant;
exp = (x >> 23) & 0xff;
mant = x & 0x7fffff;
if (exp == 255 && mant != 0)
return 0; /* NaN -> 0 */
exp = exp - 127 + scale;
if (exp < 0)
return 0; /* round towards zero */
if (x & 0x80000000) {
/* negative => saturate to 0 */
*vscrp |= VSCR_SAT;
return 0;
}
if (exp >= 32) {
/* saturate */
*vscrp |= VSCR_SAT;
return 0xffffffff;
}
mant |= 0x800000;
mant = (mant << 8) >> (31 - exp);
return mant;
}
/* Round to floating integer, towards 0 */
static unsigned int rfiz(unsigned int x)
{
int exp;
exp = ((x >> 23) & 0xff) - 127;
if (exp == 128 && (x & 0x7fffff) != 0)
return x | 0x400000; /* NaN -> make it a QNaN */
if (exp >= 23)
return x; /* it's an integer already (or Inf) */
if (exp < 0)
return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
return x & ~(0x7fffff >> exp);
}
/* Round to floating integer, towards +/- Inf */
static unsigned int rfii(unsigned int x)
{
int exp, mask;
exp = ((x >> 23) & 0xff) - 127;
if (exp == 128 && (x & 0x7fffff) != 0)
return x | 0x400000; /* NaN -> make it a QNaN */
if (exp >= 23)
return x; /* it's an integer already (or Inf) */
if ((x & 0x7fffffff) == 0)
return x; /* +/-0 -> +/-0 */
if (exp < 0)
/* 0 < |x| < 1.0 rounds to +/- 1.0 */
return (x & 0x80000000) | 0x3f800000;
mask = 0x7fffff >> exp;
/* mantissa overflows into exponent - that's OK,
it can't overflow into the sign bit */
return (x + mask) & ~mask;
}
/* Round to floating integer, to nearest */
static unsigned int rfin(unsigned int x)
{
int exp, half;
exp = ((x >> 23) & 0xff) - 127;
if (exp == 128 && (x & 0x7fffff) != 0)
return x | 0x400000; /* NaN -> make it a QNaN */
if (exp >= 23)
return x; /* it's an integer already (or Inf) */
if (exp < -1)
return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
if (exp == -1)
/* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
return (x & 0x80000000) | 0x3f800000;
half = 0x400000 >> exp;
/* add 0.5 to the magnitude and chop off the fraction bits */
return (x + half) & ~(0x7fffff >> exp);
}
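/*
 * Worked example (a sanity check of the rounding above): for
 * x = 0x40200000 (2.5f), exp = 1 and half = 0x200000, so the result is
 * (0x40200000 + 0x200000) & ~0x3fffff = 0x40400000, i.e. 3.0f; ties are
 * rounded away from zero by the add-half-then-truncate scheme.
 */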
int emulate_altivec(struct pt_regs *regs)
{
ppc_inst_t instr;
unsigned int i, word;
unsigned int va, vb, vc, vd;
vector128 *vrs;
if (get_user_instr(instr, (void __user *)regs->nip))
return -EFAULT;
word = ppc_inst_val(instr);
if (ppc_inst_primary_opcode(instr) != 4)
return -EINVAL; /* not an altivec instruction */
vd = (word >> 21) & 0x1f;
va = (word >> 16) & 0x1f;
vb = (word >> 11) & 0x1f;
vc = (word >> 6) & 0x1f;
vrs = current->thread.vr_state.vr;
switch (word & 0x3f) {
case 10:
switch (vc) {
case 0: /* vaddfp */
vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
break;
case 1: /* vsubfp */
vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
break;
case 4: /* vrefp */
vrefp(&vrs[vd], &vrs[vb]);
break;
case 5: /* vrsqrtefp */
vrsqrtefp(&vrs[vd], &vrs[vb]);
break;
case 6: /* vexptefp */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
break;
case 7: /* vlogefp */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = elog2(vrs[vb].u[i]);
break;
case 8: /* vrfin */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = rfin(vrs[vb].u[i]);
break;
case 9: /* vrfiz */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
break;
case 10: /* vrfip */
for (i = 0; i < 4; ++i) {
u32 x = vrs[vb].u[i];
x = (x & 0x80000000)? rfiz(x): rfii(x);
vrs[vd].u[i] = x;
}
break;
case 11: /* vrfim */
for (i = 0; i < 4; ++i) {
u32 x = vrs[vb].u[i];
x = (x & 0x80000000)? rfii(x): rfiz(x);
vrs[vd].u[i] = x;
}
break;
case 14: /* vctuxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
&current->thread.vr_state.vscr.u[3]);
break;
case 15: /* vctsxs */
for (i = 0; i < 4; ++i)
vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
&current->thread.vr_state.vscr.u[3]);
break;
default:
return -EINVAL;
}
break;
case 46: /* vmaddfp */
vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
break;
case 47: /* vnmsubfp */
vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
break;
default:
return -EINVAL;
}
return 0;
}
| linux-master | arch/powerpc/kernel/vecemu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Dave Engebretsen, IBM Corporation
* Copyright (C) 2003 Anton Blanchard <[email protected]>, IBM
*
* RTAS specific routines for PCI.
*
* Based on code from pci.c, chrp_pci.c and pSeries_pci.c
*/
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/rtas.h>
#include <asm/mpic.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
/* RTAS tokens */
static int read_pci_config;
static int write_pci_config;
static int ibm_read_pci_config;
static int ibm_write_pci_config;
static inline int config_access_valid(struct pci_dn *dn, int where)
{
if (where < 256)
return 1;
if (where < 4096 && dn->pci_ext_config_space)
return 1;
return 0;
}
int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
int returnval = -1;
unsigned long buid, addr;
int ret;
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
#ifdef CONFIG_EEH
if (pdn->edev && pdn->edev->pe &&
(pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
return PCIBIOS_SET_FAILED;
#endif
addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
buid = pdn->phb->buid;
if (buid) {
ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
addr, BUID_HI(buid), BUID_LO(buid), size);
} else {
ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
}
*val = returnval;
if (ret)
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
}
static int rtas_pci_read_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 *val)
{
struct pci_dn *pdn;
int ret;
*val = 0xFFFFFFFF;
pdn = pci_get_pdn_by_devfn(bus, devfn);
/* Validity of pdn is checked in here */
ret = rtas_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return ret;
}
int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
unsigned long buid, addr;
int ret;
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
#ifdef CONFIG_EEH
if (pdn->edev && pdn->edev->pe &&
(pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
return PCIBIOS_SET_FAILED;
#endif
addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
buid = pdn->phb->buid;
if (buid) {
ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr,
BUID_HI(buid), BUID_LO(buid), size, (ulong) val);
} else {
ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
}
if (ret)
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
}
static int rtas_pci_write_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 val)
{
struct pci_dn *pdn;
pdn = pci_get_pdn_by_devfn(bus, devfn);
/* Validity of pdn is checked in here. */
return rtas_write_config(pdn, where, size, val);
}
static struct pci_ops rtas_pci_ops = {
.read = rtas_pci_read_config,
.write = rtas_pci_write_config,
};
static int is_python(struct device_node *dev)
{
const char *model = of_get_property(dev, "model", NULL);
if (model && strstr(model, "Python"))
return 1;
return 0;
}
static void python_countermeasures(struct device_node *dev)
{
struct resource registers;
void __iomem *chip_regs;
volatile u32 val;
if (of_address_to_resource(dev, 0, &registers)) {
printk(KERN_ERR "Can't get address for Python workarounds !\n");
return;
}
/* Python's register file is 1 MB in size. */
chip_regs = ioremap(registers.start & ~(0xfffffUL), 0x100000);
/*
* Firmware doesn't always clear this bit which is critical
* for good performance - Anton
*/
#define PRG_CL_RESET_VALID 0x00010000
val = in_be32(chip_regs + 0xf6030);
if (val & PRG_CL_RESET_VALID) {
printk(KERN_INFO "Python workaround: ");
val &= ~PRG_CL_RESET_VALID;
out_be32(chip_regs + 0xf6030, val);
/*
* We must read it back for changes to
* take effect
*/
val = in_be32(chip_regs + 0xf6030);
printk("reg0: %x\n", val);
}
iounmap(chip_regs);
}
void __init init_pci_config_tokens(void)
{
read_pci_config = rtas_function_token(RTAS_FN_READ_PCI_CONFIG);
write_pci_config = rtas_function_token(RTAS_FN_WRITE_PCI_CONFIG);
ibm_read_pci_config = rtas_function_token(RTAS_FN_IBM_READ_PCI_CONFIG);
ibm_write_pci_config = rtas_function_token(RTAS_FN_IBM_WRITE_PCI_CONFIG);
}
unsigned long get_phb_buid(struct device_node *phb)
{
struct resource r;
if (ibm_read_pci_config == -1)
return 0;
if (of_address_to_resource(phb, 0, &r))
return 0;
return r.start;
}
static int phb_set_bus_ranges(struct device_node *dev,
struct pci_controller *phb)
{
const __be32 *bus_range;
unsigned int len;
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
return 1;
}
phb->first_busno = be32_to_cpu(bus_range[0]);
phb->last_busno = be32_to_cpu(bus_range[1]);
return 0;
}
int rtas_setup_phb(struct pci_controller *phb)
{
struct device_node *dev = phb->dn;
if (is_python(dev))
python_countermeasures(dev);
if (phb_set_bus_ranges(dev, phb))
return 1;
phb->ops = &rtas_pci_ops;
phb->buid = get_phb_buid(dev);
return 0;
}
| linux-master | arch/powerpc/kernel/rtas_pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SMP support for ppc.
*
* Written by Cort Dougan ([email protected]) borrowing a great
* deal of code from the sparc and intel versions.
*
* Copyright (C) 1999 Cort Dougan <[email protected]>
*
* PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>
#include <linux/clockchips.h>
#include <linux/kexec.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
#include <asm/fadump.h>
#include <trace/events/ipi.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
struct task_struct *secondary_current;
bool has_big_cores;
bool coregroup_enabled;
bool thread_group_shares_l2;
bool thread_group_shares_l3;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);
enum {
#ifdef CONFIG_SCHED_SMT
smt_idx,
#endif
cache_idx,
mc_idx,
die_idx,
};
#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
unsigned int property;
unsigned int nr_groups;
unsigned int threads_per_group;
unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};
/* Maximum number of properties that groups of threads within a core can share */
#define MAX_THREAD_GROUP_PROPERTIES 2
struct thread_groups_list {
unsigned int nr_properties;
struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
};
static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
* On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
* the set of its siblings that share the L1-cache.
*/
DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
/*
* On some big-core systems, thread_group_l2_cache_map for each CPU
* corresponds to the set of its siblings within the core that share the
* L2-cache.
*/
DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
/*
* On P10, thread_group_l3_cache_map for each CPU is equal to the
* thread_group_l2_cache_map
*/
DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
/*
* Returns 1 if the specified cpu should be brought up during boot.
* Used to inhibit booting threads if they've been disabled or
* limited on the command line
*/
int smp_generic_cpu_bootable(unsigned int nr)
{
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it.
*/
if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
return 0;
if (smt_enabled_at_boot
&& cpu_thread_in_core(nr) >= smt_enabled_at_boot)
return 0;
}
return 1;
}
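/*
 * For example (a sketch of the policy above): if secondary threads were
 * limited at boot so that smt_enabled_at_boot is 2, threads 0 and 1 of
 * each core are brought up while higher-numbered threads are left for
 * later onlining; with SMT disabled at boot (smt_enabled_at_boot == 0)
 * only thread 0 of each core boots.
 */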
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
if (nr < 0 || nr >= nr_cpu_ids)
return -EINVAL;
/*
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start
*/
if (!paca_ptrs[nr]->cpu_start) {
paca_ptrs[nr]->cpu_start = 1;
smp_mb();
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
* Ok it's not there, so it might be soft-unplugged, let's
* try to bring it back
*/
generic_set_cpu_up(nr);
smp_wmb();
smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
return 0;
}
#endif /* CONFIG_PPC64 */
static irqreturn_t call_function_action(int irq, void *data)
{
generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
static irqreturn_t reschedule_action(int irq, void *data)
{
scheduler_ipi();
return IRQ_HANDLED;
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
timer_broadcast_interrupt();
return IRQ_HANDLED;
}
#endif
#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
smp_handle_nmi_ipi(get_irq_regs());
return IRQ_HANDLED;
}
#endif
static irq_handler_t smp_ipi_action[] = {
[PPC_MSG_CALL_FUNCTION] = call_function_action,
[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};
/*
* The NMI IPI is a fallback and not truly non-maskable. It is simpler
* than going through the call function infrastructure, and strongly
* serialized, so it is more appropriate for debugging.
*/
const char *smp_ipi_name[] = {
[PPC_MSG_CALL_FUNCTION] = "ipi call function",
[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
int err;
if (msg < 0 || msg > PPC_MSG_NMI_IPI)
return -EINVAL;
#ifndef CONFIG_NMI_IPI
if (msg == PPC_MSG_NMI_IPI)
return 1;
#endif
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], NULL);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
return err;
}
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
long messages; /* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
void smp_muxed_ipi_set_message(int cpu, int msg)
{
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
/*
* Order previous accesses before accesses in the IPI handler.
*/
smp_mb();
WRITE_ONCE(message[msg], 1);
}
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
smp_muxed_ipi_set_message(cpu, msg);
/*
* cause_ipi functions are required to include a full barrier
* before doing whatever causes the IPI.
*/
smp_ops->cause_ipi(cpu);
}
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
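/*
 * A worked example of the byte/bit correspondence (assuming
 * BITS_PER_LONG == 64): smp_muxed_ipi_set_message() stores the value 1
 * into byte 'msg' of info->messages, so on big-endian IPI_MESSAGE(0) must
 * be 1UL << 56 (lowest bit of the most significant byte) while on
 * little-endian it is 1UL << 0; message 1 maps to bits 48 and 8
 * respectively. The macro above encodes exactly that, letting
 * smp_ipi_demux_relaxed() test each message with a single-bit mask.
 */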
irqreturn_t smp_ipi_demux(void)
{
mb(); /* order any irq clear */
return smp_ipi_demux_relaxed();
}
/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
struct cpu_messages *info;
unsigned long all;
info = this_cpu_ptr(&ipi_message);
do {
all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
/*
* Must check for PPC_MSG_RM_HOST_ACTION messages
* before PPC_MSG_CALL_FUNCTION messages because when
* a VM is destroyed, we call kick_all_cpus_sync()
* to ensure that any pending PPC_MSG_RM_HOST_ACTION
* messages have completed before we free any VCPUs.
*/
if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
kvmppc_xics_ipi_action();
#endif
if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
generic_smp_call_function_interrupt();
if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
nmi_ipi_action(0, NULL);
#endif
} while (READ_ONCE(info->messages));
return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
static inline void do_message_pass(int cpu, int msg)
{
if (smp_ops->message_pass)
smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
else
smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
void arch_smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
unsigned int cpu;
for_each_cpu(cpu, mask)
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#ifdef CONFIG_NMI_IPI
/*
* "NMI IPI" system.
*
* NMI IPIs may not be recoverable, so should not be used as an ongoing part of
* a running system. They can be used for crash, debug, halt/reboot, etc.
*
* The IPI call waits with interrupts disabled until all targets enter the
* NMI handler, then returns. Subsequent IPIs can be issued before targets
* have returned from their handlers, so there is no guarantee about
* concurrency or re-entrancy.
*
* A new NMI can be issued before all targets exit the handler.
*
* The IPI call may time out without all targets entering the NMI handler.
* In that case, there is some logic to recover (and ignore subsequent
* NMI interrupts that may eventually be raised), but the platform interrupt
* handler may not be able to distinguish this from other exception causes,
* which may cause a crash.
*/
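/*
 * A minimal usage sketch (mirroring smp_send_debugger_break() further
 * down): the caller supplies an NMI-safe callback and a timeout in
 * microseconds, e.g.
 *
 *	static void my_nmi_cb(struct pt_regs *regs)
 *	{
 *		// inspect regs; must not sleep
 *	}
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_cb, 1000000);
 *
 * where my_nmi_cb is a hypothetical name used only for illustration.
 */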
static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
raw_local_irq_save(*flags);
hard_irq_disable();
while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
}
noinstr static void nmi_ipi_lock(void)
{
while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
}
noinstr static void nmi_ipi_unlock(void)
{
smp_mb();
WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
raw_atomic_set(&__nmi_ipi_lock, 0);
}
noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
nmi_ipi_unlock();
raw_local_irq_restore(*flags);
}
/*
* Platform NMI handler calls this to ack
*/
noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
void (*fn)(struct pt_regs *) = NULL;
unsigned long flags;
int me = raw_smp_processor_id();
int ret = 0;
/*
* Unexpected NMIs are possible here because the interrupt may not
* be able to distinguish NMI IPIs from other types of NMIs, or
* because the caller may have timed out.
*/
nmi_ipi_lock_start(&flags);
if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
fn = READ_ONCE(nmi_ipi_function);
WARN_ON_ONCE(!fn);
ret = 1;
}
nmi_ipi_unlock_end(&flags);
if (fn)
fn(regs);
return ret;
}
static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
return;
if (cpu >= 0) {
do_message_pass(cpu, PPC_MSG_NMI_IPI);
} else {
int c;
for_each_online_cpu(c) {
if (c == raw_smp_processor_id())
continue;
do_message_pass(c, PPC_MSG_NMI_IPI);
}
}
}
/*
* - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
* - fn is the target callback function.
* - delay_us > 0 is the delay before giving up waiting for targets to
* begin executing the handler, == 0 specifies indefinite delay.
*/
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
u64 delay_us, bool safe)
{
unsigned long flags;
int me = raw_smp_processor_id();
int ret = 1;
BUG_ON(cpu == me);
BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
if (unlikely(!smp_ops))
return 0;
nmi_ipi_lock_start(&flags);
while (nmi_ipi_busy) {
nmi_ipi_unlock_end(&flags);
spin_until_cond(!nmi_ipi_busy);
nmi_ipi_lock_start(&flags);
}
nmi_ipi_busy = true;
nmi_ipi_function = fn;
WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
if (cpu < 0) {
/* ALL_OTHERS */
cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
} else {
cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
}
nmi_ipi_unlock();
/* Interrupts remain hard disabled */
do_smp_send_nmi_ipi(cpu, safe);
nmi_ipi_lock();
/* nmi_ipi_busy is set here, so unlock/lock is okay */
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
nmi_ipi_unlock();
udelay(1);
nmi_ipi_lock();
if (delay_us) {
delay_us--;
if (!delay_us)
break;
}
}
if (!cpumask_empty(&nmi_ipi_pending_mask)) {
/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
ret = 0;
cpumask_clear(&nmi_ipi_pending_mask);
}
nmi_ipi_function = NULL;
nmi_ipi_busy = false;
nmi_ipi_unlock_end(&flags);
return ret;
}
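/*
 * smp_send_nmi_ipi() lets do_smp_send_nmi_ipi() try the platform's
 * cause_nmi_ipi() hook first; smp_send_safe_nmi_ipi() always falls back
 * to the regular PPC_MSG_NMI_IPI message pass.
 */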
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}
int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
unsigned int cpu;
for_each_cpu(cpu, mask)
do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#ifdef CONFIG_DEBUGGER
static void debugger_ipi_callback(struct pt_regs *regs)
{
debugger_ipi(regs);
}
void smp_send_debugger_break(void)
{
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif
#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
int cpu;
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
if (kdump_in_progress() && crash_wake_offline) {
for_each_present_cpu(cpu) {
if (cpu_online(cpu))
continue;
/*
* crash_ipi_callback will wait for
* all cpus, including offline CPUs.
* We don't care about nmi_ipi_function.
* Offline cpus will jump straight into
* crash_ipi_callback, we can skip the
* entire NMI dance and waiting for
* cpus to clear pending mask, etc.
*/
do_smp_send_nmi_ipi(cpu, false);
}
}
}
#endif
void crash_smp_send_stop(void)
{
static bool stopped = false;
/*
* In case of fadump, register data for all CPUs is captured by f/w
* on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
* this rtas call to avoid tricky post processing of those CPUs'
* backtraces.
*/
if (should_fadump_crash())
return;
if (stopped)
return;
stopped = true;
#ifdef CONFIG_KEXEC_CORE
if (kexec_crash_image) {
crash_kexec_prepare();
return;
}
#endif
smp_send_stop();
}
#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
/*
 * IRQs are already hard disabled by smp_handle_nmi_ipi().
*/
set_cpu_online(smp_processor_id(), false);
spin_begin();
while (1)
spin_cpu_relax();
}
void smp_send_stop(void)
{
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}
#else /* CONFIG_NMI_IPI */
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
/*
	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
	 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
* to know other CPUs are offline before it breaks locks to flush
* printk buffers, in case we panic()ed while holding the lock.
*/
set_cpu_online(smp_processor_id(), false);
spin_begin();
while (1)
spin_cpu_relax();
}
void smp_send_stop(void)
{
static bool stopped = false;
/*
* Prevent waiting on csd lock from a previous smp_send_stop.
* This is racy, but in general callers try to do the right
* thing and only fire off one smp_send_stop (e.g., see
* kernel/panic.c)
*/
if (stopped)
return;
stopped = true;
smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */
static struct task_struct *current_set[NR_CPUS];
static void smp_store_cpu_info(int id)
{
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_E500
per_cpu(next_tlbcam_idx, id)
= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
*/
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
cpumask_set_cpu(i, get_cpumask(j));
cpumask_set_cpu(j, get_cpumask(i));
}
#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
struct cpumask *(*get_cpumask)(int))
{
cpumask_clear_cpu(i, get_cpumask(j));
cpumask_clear_cpu(j, get_cpumask(i));
}
#endif
/*
 * Extends set_cpus_related(). Instead of setting one CPU at a time in
 * dstmask, OR in the whole srcmask in one shot. dstmask should be a
 * superset of srcmask.
*/
static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
struct cpumask *(*dstmask)(int))
{
struct cpumask *mask;
int k;
mask = srcmask(j);
for_each_cpu(k, srcmask(i))
cpumask_or(dstmask(k), dstmask(k), mask);
if (i == j)
return;
mask = srcmask(i);
for_each_cpu(k, srcmask(j))
cpumask_or(dstmask(k), dstmask(k), mask);
}
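/*
 * For example, or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask)
 * ORs submask_fn(i) into the L2-cache mask of every CPU in submask_fn(cpu)
 * and, when cpu != i, submask_fn(cpu) into the L2-cache mask of every CPU
 * in submask_fn(i), so whole sibling groups are linked at once.
 */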
/*
* parse_thread_groups: Parses the "ibm,thread-groups" device tree
* property for the CPU device node @dn and stores
* the parsed output in the thread_groups_list
* structure @tglp.
*
* @dn: The device node of the CPU device.
* @tglp: Pointer to a thread group list structure into which the parsed
* output of "ibm,thread-groups" is stored.
*
* ibm,thread-groups[0..N-1] array defines which group of threads in
* the CPU-device node can be grouped together based on the property.
*
* This array can represent thread groupings for multiple properties.
*
 * ibm,thread-groups[i + 0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * cache. If the value is 2, it implies that the threads in the same
 * group share the same L2 cache.
*
* ibm,thread-groups[i+1] tells us how many such thread groups exist for the
* property ibm,thread-groups[i]
*
* ibm,thread-groups[i+2] tells us the number of threads in each such
* group.
* Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
*
 * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
* "ibm,ppc-interrupt-server#s" arranged as per their membership in
* the grouping.
*
* Example:
* If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
 * This can be decomposed into two consecutive arrays:
* a) [1,2,4,8,10,12,14,9,11,13,15]
* b) [2,2,4,8,10,12,14,9,11,13,15]
*
 * where:
 *
 * a) provides information for Property "1" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of the
 *    first group is {8,10,12,14} and of the second group is
 *    {9,11,13,15}. Property "1" indicates that the threads in each
 *    group share the L1 cache, translation cache and instruction data
 *    flow.
 *
 * b) provides information for Property "2" being shared by "2" groups,
 *    each with "4" threads. The "ibm,ppc-interrupt-server#s" of the
 *    first group is {8,10,12,14} and of the second group is
 *    {9,11,13,15}. Property "2" indicates that the threads in each
 *    group share the L2 cache.
*
* Returns 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*/
static int parse_thread_groups(struct device_node *dn,
struct thread_groups_list *tglp)
{
unsigned int property_idx = 0;
u32 *thread_group_array;
size_t total_threads;
int ret = 0, count;
u32 *thread_list;
int i = 0;
count = of_property_count_u32_elems(dn, "ibm,thread-groups");
thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
ret = of_property_read_u32_array(dn, "ibm,thread-groups",
thread_group_array, count);
if (ret)
goto out_free;
while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
int j;
struct thread_groups *tg = &tglp->property_tgs[property_idx++];
tg->property = thread_group_array[i];
tg->nr_groups = thread_group_array[i + 1];
tg->threads_per_group = thread_group_array[i + 2];
total_threads = tg->nr_groups * tg->threads_per_group;
thread_list = &thread_group_array[i + 3];
for (j = 0; j < total_threads; j++)
tg->thread_list[j] = thread_list[j];
i = i + 3 + total_threads;
}
tglp->nr_properties = property_idx;
out_free:
kfree(thread_group_array);
return ret;
}
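/*
 * With the example "ibm,thread-groups" value documented above, the first
 * loop iteration consumes thread_group_array[0..10] (property 1: 3 header
 * words plus 2 groups x 4 thread ids) and the second iteration consumes
 * thread_group_array[11..21] for property 2, after which i == count.
 */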
/*
* get_cpu_thread_group_start : Searches the thread group in tg->thread_list
* that @cpu belongs to.
*
* @cpu : The logical CPU whose thread group is being searched.
* @tg : The thread-group structure of the CPU node which @cpu belongs
* to.
*
* Returns the index to tg->thread_list that points to the start
* of the thread_group that @cpu belongs to.
*
* Returns -1 if cpu doesn't belong to any of the groups pointed to by
* tg->thread_list.
*/
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
int hw_cpu_id = get_hard_smp_processor_id(cpu);
int i, j;
for (i = 0; i < tg->nr_groups; i++) {
int group_start = i * tg->threads_per_group;
for (j = 0; j < tg->threads_per_group; j++) {
int idx = group_start + j;
if (tg->thread_list[idx] == hw_cpu_id)
return group_start;
}
}
return -1;
}
static struct thread_groups *__init get_thread_groups(int cpu,
int group_property,
int *err)
{
struct device_node *dn = of_get_cpu_node(cpu, NULL);
struct thread_groups_list *cpu_tgl = &tgl[cpu];
struct thread_groups *tg = NULL;
int i;
*err = 0;
if (!dn) {
*err = -ENODATA;
return NULL;
}
if (!cpu_tgl->nr_properties) {
*err = parse_thread_groups(dn, cpu_tgl);
if (*err)
goto out;
}
for (i = 0; i < cpu_tgl->nr_properties; i++) {
if (cpu_tgl->property_tgs[i].property == group_property) {
tg = &cpu_tgl->property_tgs[i];
break;
}
}
if (!tg)
*err = -EINVAL;
out:
of_node_put(dn);
return tg;
}
static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
int cpu, int cpu_group_start)
{
int first_thread = cpu_first_thread_sibling(cpu);
int i;
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++) {
int i_group_start = get_cpu_thread_group_start(i, tg);
if (unlikely(i_group_start == -1)) {
WARN_ON_ONCE(1);
return -ENODATA;
}
if (i_group_start == cpu_group_start)
cpumask_set_cpu(i, *mask);
}
return 0;
}
static int __init init_thread_group_cache_map(int cpu, int cache_property)
{
int cpu_group_start = -1, err = 0;
struct thread_groups *tg = NULL;
cpumask_var_t *mask = NULL;
if (cache_property != THREAD_GROUP_SHARE_L1 &&
cache_property != THREAD_GROUP_SHARE_L2_L3)
return -EINVAL;
tg = get_thread_groups(cpu, cache_property, &err);
if (!tg)
return err;
cpu_group_start = get_cpu_thread_group_start(cpu, tg);
if (unlikely(cpu_group_start == -1)) {
WARN_ON_ONCE(1);
return -ENODATA;
}
if (cache_property == THREAD_GROUP_SHARE_L1) {
mask = &per_cpu(thread_group_l1_cache_map, cpu);
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
}
else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
mask = &per_cpu(thread_group_l2_cache_map, cpu);
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
mask = &per_cpu(thread_group_l3_cache_map, cpu);
update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
}
return 0;
}
static bool shared_caches;
#ifdef CONFIG_SCHED_SMT
/* Flags for the SMT sched-domain level */
static int powerpc_smt_flags(void)
{
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
flags |= SD_ASYM_PACKING;
}
return flags;
}
#endif
/*
* P9 has a slightly odd architecture where pairs of cores share an L2 cache.
* This topology makes it *much* cheaper to migrate tasks between adjacent cores
* since the migrated task remains cache hot. We want to take advantage of this
* at the scheduler level so an extra topology level is required.
*/
static int powerpc_shared_cache_flags(void)
{
return SD_SHARE_PKG_RESOURCES;
}
/*
 * We can't just pass cpu_l2_cache_mask() directly because it
 * returns a non-const pointer and the compiler barfs on that.
*/
static const struct cpumask *shared_cache_mask(int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);
}
#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
return cpu_smallcore_mask(cpu);
}
#endif
static struct cpumask *cpu_coregroup_mask(int cpu)
{
return per_cpu(cpu_coregroup_map, cpu);
}
static bool has_coregroup_support(void)
{
return coregroup_enabled;
}
static const struct cpumask *cpu_mc_mask(int cpu)
{
return cpu_coregroup_mask(cpu);
}
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
{ cpu_mc_mask, SD_INIT_NAME(MC) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
};
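/*
 * Note that this table is not necessarily final: fixup_topology() may
 * switch the SMT level to smallcore_smt_mask() when big cores are
 * present, point the MC level at the CACHE mask when coregroups are not
 * supported, and consolidate duplicate levels before smp_cpus_done()
 * hands the table to set_sched_topology().
 */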
static int __init init_big_cores(void)
{
int cpu;
for_each_possible_cpu(cpu) {
int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
if (err)
return err;
zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
GFP_KERNEL,
cpu_to_node(cpu));
}
has_big_cores = true;
for_each_possible_cpu(cpu) {
int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
if (err)
return err;
}
thread_group_shares_l2 = true;
thread_group_shares_l3 = true;
pr_debug("L2/L3 cache only shared by the threads in the small core\n");
return 0;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu, num_threads;
DBG("smp_prepare_cpus\n");
/*
* setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
*/
BUG_ON(boot_cpuid != smp_processor_id());
/* Fixup boot cpu */
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (has_coregroup_support())
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
if (cpu_present(cpu)) {
set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
set_cpu_numa_mem(cpu,
local_memory_node(numa_cpu_lookup_table[cpu]));
}
#endif
}
/* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (has_coregroup_support())
cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
init_big_cores();
if (has_big_cores) {
cpumask_set_cpu(boot_cpuid,
cpu_smallcore_mask(boot_cpuid));
}
if (cpu_to_chip_id(boot_cpuid) != -1) {
int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
/*
		 * All threads of a core belong to the same core, so
		 * chip_id_lookup_table will have one entry per core.
		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
		 * other CPU will have one either.
*/
chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
if (chip_id_lookup_table)
memset(chip_id_lookup_table, -1, sizeof(int) * idx);
}
if (smp_ops && smp_ops->probe)
smp_ops->probe();
	// Initialise the generic SMT topology support
num_threads = 1;
if (smt_enabled_at_boot)
num_threads = smt_enabled_at_boot;
cpu_smt_set_num_threads(num_threads, threads_per_core);
}
void smp_prepare_boot_cpu(void)
{
BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
paca_ptrs[boot_cpuid]->__current = current;
#endif
set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
current_set[boot_cpuid] = current;
}
#ifdef CONFIG_HOTPLUG_CPU
int generic_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
if (cpu == boot_cpuid)
return -EBUSY;
set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
vdso_data->processorCount--;
#endif
/* Update affinity of all IRQs previously aimed at this CPU */
irq_migrate_all_off_this_cpu();
/*
* Depending on the details of the interrupt controller, it's possible
* that one of the interrupts we just migrated away from this CPU is
* actually already pending on this CPU. If we leave it in that state
* the interrupt will never be EOI'ed, and will never fire again. So
* temporarily enable interrupts here, to allow any pending interrupt to
* be received (and EOI'ed), before we take this CPU offline.
*/
local_irq_enable();
mdelay(1);
local_irq_disable();
return 0;
}
void generic_cpu_die(unsigned int cpu)
{
int i;
for (i = 0; i < 100; i++) {
smp_rmb();
if (is_cpu_dead(cpu))
return;
msleep(100);
}
printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
void generic_set_cpu_dead(unsigned int cpu)
{
per_cpu(cpu_state, cpu) = CPU_DEAD;
}
/*
* The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
* the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which would cause generic_cpu_die() to see the CPU as already dead and
 * skip its wait.
*/
void generic_set_cpu_up(unsigned int cpu)
{
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}
int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
int is_cpu_dead(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_DEAD;
}
static bool secondaries_inhibited(void)
{
return kvm_hv_mode_active();
}
#else /* HOTPLUG_CPU */
#define secondaries_inhibited() 0
#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
paca_ptrs[cpu]->__current = idle;
paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
THREAD_SIZE - STACK_FRAME_MIN_SIZE;
#endif
task_thread_info(idle)->cpu = cpu;
secondary_current = current_set[cpu] = idle;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
const bool booting = system_state < SYSTEM_RUNNING;
const unsigned long hp_spin_ms = 1;
unsigned long deadline;
int rc;
const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
/*
* Don't allow secondary threads to come online if inhibited
*/
if (threads_per_core > 1 && secondaries_inhibited() &&
cpu_thread_in_subcore(cpu))
return -EBUSY;
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
cpu_idle_thread_init(cpu, tidle);
/*
* The platform might need to allocate resources prior to bringing
* up the CPU
*/
if (smp_ops->prepare_cpu) {
rc = smp_ops->prepare_cpu(cpu);
if (rc)
return rc;
}
	/* Make sure the callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
cpu_callin_map[cpu] = 0;
/* The information for processor bringup must
* be written out to main store before we release
* the processor.
*/
smp_mb();
/* wake up cpus */
DBG("smp: kicking cpu %d\n", cpu);
rc = smp_ops->kick_cpu(cpu);
if (rc) {
pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
return rc;
}
/*
* At boot time, simply spin on the callin word until the
* deadline passes.
*
* At run time, spin for an optimistic amount of time to avoid
* sleeping in the common case.
*/
deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
fsleep(sleep_interval_us);
}
if (!cpu_callin_map[cpu]) {
printk(KERN_ERR "Processor %u is stuck.\n", cpu);
return -ENOENT;
}
DBG("Processor %u found.\n", cpu);
if (smp_ops->give_timebase)
smp_ops->give_timebase();
/* Wait until cpu puts itself in the online & active maps */
spin_until_cond(cpu_online(cpu));
return 0;
}
/* Return the value of the reg property corresponding to the given
* logical cpu.
*/
int cpu_to_core_id(int cpu)
{
struct device_node *np;
int id = -1;
np = of_get_cpu_node(cpu, NULL);
if (!np)
goto out;
id = of_get_cpu_hwid(np, 0);
out:
of_node_put(np);
return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
int cpu_first_thread_of_core(int core)
{
return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
/* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline.
*/
static struct device_node *cpu_to_l2cache(int cpu)
{
struct device_node *np;
struct device_node *cache;
if (!cpu_present(cpu))
return NULL;
np = of_get_cpu_node(cpu, NULL);
if (np == NULL)
return NULL;
cache = of_find_next_cache_node(np);
of_node_put(np);
return cache;
}
static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
struct device_node *l2_cache, *np;
int i;
if (has_big_cores)
submask_fn = cpu_smallcore_mask;
/*
* If the threads in a thread-group share L2 cache, then the
* L2-mask can be obtained from thread_group_l2_cache_map.
*/
if (thread_group_shares_l2) {
cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_l2_cache_mask);
}
/* Verify that L1-cache siblings are a subset of L2 cache-siblings */
if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
!cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
cpu);
}
return true;
}
l2_cache = cpu_to_l2cache(cpu);
if (!l2_cache || !*mask) {
/* Assume only core siblings share cache with this CPU */
for_each_cpu(i, cpu_sibling_mask(cpu))
set_cpus_related(cpu, i, cpu_l2_cache_mask);
return false;
}
cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
/* Update l2-cache mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
/* Skip all CPUs already part of current CPU l2-cache mask */
cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
for_each_cpu(i, *mask) {
/*
		 * When updating the masks, the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
*/
np = cpu_to_l2cache(i);
/* Skip all CPUs already part of current CPU l2-cache */
if (np == l2_cache) {
or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
cpumask_andnot(*mask, *mask, submask_fn(i));
} else {
cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
}
of_node_put(np);
}
of_node_put(l2_cache);
return true;
}
#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
int i;
unmap_cpu_from_node(cpu);
if (shared_caches)
mask_fn = cpu_l2_cache_mask;
for_each_cpu(i, mask_fn(cpu)) {
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
if (has_big_cores)
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
}
for_each_cpu(i, cpu_core_mask(cpu))
set_cpus_unrelated(cpu, i, cpu_core_mask);
if (has_coregroup_support()) {
for_each_cpu(i, cpu_coregroup_mask(cpu))
set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
}
}
#endif
static inline void add_cpu_to_smallcore_masks(int cpu)
{
int i;
if (!has_big_cores)
return;
cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_smallcore_mask);
}
}
static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
{
struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
int coregroup_id = cpu_to_coregroup_id(cpu);
int i;
if (shared_caches)
submask_fn = cpu_l2_cache_mask;
if (!*mask) {
/* Assume only siblings are part of this CPU's coregroup */
for_each_cpu(i, submask_fn(cpu))
set_cpus_related(cpu, i, cpu_coregroup_mask);
return;
}
cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
/* Update coregroup mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
/* Skip all CPUs already part of coregroup mask */
cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
for_each_cpu(i, *mask) {
/* Skip all CPUs not part of this coregroup */
if (coregroup_id == cpu_to_coregroup_id(i)) {
or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
cpumask_andnot(*mask, *mask, submask_fn(i));
} else {
cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
}
}
}
static void add_cpu_to_masks(int cpu)
{
struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
int first_thread = cpu_first_thread_sibling(cpu);
cpumask_var_t mask;
int chip_id = -1;
bool ret;
int i;
/*
* This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
*/
map_cpu_to_node(cpu, cpu_to_node(cpu));
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++)
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_sibling_mask);
add_cpu_to_smallcore_masks(cpu);
/* In CPU-hotplug path, hence use GFP_ATOMIC */
ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
update_mask_by_l2(cpu, &mask);
if (has_coregroup_support())
update_coregroup_mask(cpu, &mask);
if (chip_id_lookup_table && ret)
chip_id = cpu_to_chip_id(cpu);
if (shared_caches)
submask_fn = cpu_l2_cache_mask;
/* Update core_mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
/* Skip all CPUs already part of current CPU core mask */
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
	/* If chip_id is -1, limit the cpu_core_mask to within the DIE */
if (chip_id == -1)
cpumask_and(mask, mask, cpu_cpu_mask(cpu));
for_each_cpu(i, mask) {
if (chip_id == cpu_to_chip_id(i)) {
or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
cpumask_andnot(mask, mask, submask_fn(i));
} else {
cpumask_andnot(mask, mask, cpu_core_mask(i));
}
}
free_cpumask_var(mask);
}
/* Activate a secondary processor. */
__no_stack_protector
void start_secondary(void *unused)
{
unsigned int cpu = raw_smp_processor_id();
/* PPC64 calls setup_kup() in early_setup_secondary() */
if (IS_ENABLED(CONFIG_PPC32))
setup_kup();
mmgrab_lazy_tlb(&init_mm);
current->active_mm = &init_mm;
VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
inc_mm_active_cpus(&init_mm);
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
rcu_cpu_starting(cpu);
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
if (smp_ops->take_timebase)
smp_ops->take_timebase();
secondary_cpu_time_init();
#ifdef CONFIG_PPC64
if (system_state == SYSTEM_RUNNING)
vdso_data->processorCount++;
vdso_getcpu_init();
#endif
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
/*
* Check for any shared caches. Note that this must be done on a
* per-core basis because one core in the pair might be disabled.
*/
if (!shared_caches) {
struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
struct cpumask *mask = cpu_l2_cache_mask(cpu);
if (has_big_cores)
sibling_mask = cpu_smallcore_mask;
if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
shared_caches = true;
}
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
boot_init_stack_canary();
local_irq_enable();
/* We can enable ftrace for secondary cpus now */
this_cpu_enable_ftrace();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
BUG();
}
static void __init fixup_topology(void)
{
int i;
#ifdef CONFIG_SCHED_SMT
if (has_big_cores) {
pr_info("Big cores detected but using small core scheduling\n");
powerpc_topology[smt_idx].mask = smallcore_smt_mask;
}
#endif
if (!has_coregroup_support())
powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
/*
	 * Try to consolidate topology levels here instead of
	 * allowing the scheduler to degenerate them.
	 * - Don't consolidate if the masks are different.
	 * - Don't consolidate if sd_flags exist and are different.
*/
for (i = 1; i <= die_idx; i++) {
if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
continue;
if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
continue;
if (!powerpc_topology[i - 1].sd_flags)
powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
#ifdef CONFIG_SCHED_DEBUG
powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
}
}
void __init smp_cpus_done(unsigned int max_cpus)
{
/*
* We are running pinned to the boot CPU, see rest_init().
*/
if (smp_ops && smp_ops->setup_cpu)
smp_ops->setup_cpu(boot_cpuid);
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
dump_numa_cpu_topology();
fixup_topology();
set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
int cpu = smp_processor_id();
int err;
if (!smp_ops->cpu_disable)
return -ENOSYS;
this_cpu_disable_ftrace();
err = smp_ops->cpu_disable();
if (err)
return err;
/* Update sibling maps */
remove_cpu_from_masks(cpu);
return 0;
}
void __cpu_die(unsigned int cpu)
{
/*
	 * This could perhaps be a generic call in idle_task_dead(), but
	 * that requires testing from all archs, so keep it here for now.
*/
VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm)));
dec_mm_active_cpus(&init_mm);
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
if (smp_ops->cpu_die)
smp_ops->cpu_die(cpu);
}
void __noreturn arch_cpu_idle_dead(void)
{
/*
	 * Disable ftrace on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
*/
this_cpu_disable_ftrace();
if (smp_ops->cpu_offline_self)
smp_ops->cpu_offline_self();
/* If we return, we re-enter start_secondary */
start_secondary_resume();
}
#endif
| linux-master | arch/powerpc/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 IBM Corporation
* Author: Nayna Jain
*
* This file initializes secvar operations for PowerPC Secureboot
*/
#include <linux/cache.h>
#include <asm/secvar.h>
#include <asm/bug.h>
const struct secvar_operations *secvar_ops __ro_after_init = NULL;
int set_secvar_ops(const struct secvar_operations *ops)
{
if (WARN_ON_ONCE(secvar_ops))
return -EBUSY;
secvar_ops = ops;
return 0;
}
| linux-master | arch/powerpc/kernel/secvar-ops.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Ben. Herrenschmidt ([email protected])
*
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <[email protected]>
*/
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/of.h>
#include <asm/cputable.h>
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <asm/cpu_setup.h>
static struct cpu_spec the_cpu_spec __read_mostly;
struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
const char *powerpc_base_platform;
#include "cpu_specs.h"
void __init set_cur_cpu_spec(struct cpu_spec *s)
{
struct cpu_spec *t = &the_cpu_spec;
t = PTRRELOC(t);
/*
* use memcpy() instead of *t = *s so that GCC replaces it
* by __memcpy() when KASAN is active
*/
memcpy(t, s, sizeof(*t));
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
}
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
{
struct cpu_spec *t = &the_cpu_spec;
struct cpu_spec old;
t = PTRRELOC(t);
old = *t;
/*
* Copy everything, then do fixups. Use memcpy() instead of *t = *s
* so that GCC replaces it by __memcpy() when KASAN is active
*/
memcpy(t, s, sizeof(*t));
/*
* If we are overriding a previous value derived from the real
* PVR with a new value obtained using a logical PVR value,
* don't modify the performance monitor fields.
*/
if (old.num_pmcs && !s->num_pmcs) {
t->num_pmcs = old.num_pmcs;
t->pmc_type = old.pmc_type;
/*
		 * Let's ensure that the
		 * fix for the PMAO bug is enabled in compatibility mode.
*/
t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
/* Set kuap ON at startup, will be disabled later if cmdline has 'nosmap' */
if (IS_ENABLED(CONFIG_PPC_KUAP) && IS_ENABLED(CONFIG_PPC32))
t->mmu_features |= MMU_FTR_KUAP;
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
/*
* Set the base platform string once; assumes
* we're called with real pvr first.
*/
if (*PTRRELOC(&powerpc_base_platform) == NULL)
*PTRRELOC(&powerpc_base_platform) = t->platform;
#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
/* ppc64 and booke expect identify_cpu to also call setup_cpu for
* that processor. I will consolidate that at a later time, for now,
* just use #ifdef. We also don't need to PTRRELOC the function
* pointer on ppc64 and booke as we are running at 0 in real mode
* on ppc64 and reloc_offset is always 0 on booke.
*/
if (t->cpu_setup) {
t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
return t;
}
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
{
struct cpu_spec *s = cpu_specs;
int i;
BUILD_BUG_ON(!ARRAY_SIZE(cpu_specs));
s = PTRRELOC(s);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
if ((pvr & s->pvr_mask) == s->pvr_value)
return setup_cpu_spec(offset, s);
}
BUG();
return NULL;
}
/*
* Used by cpufeatures to get the name for CPUs with a PVR table.
 * If they don't have a PVR table, cpufeatures gets the name from the
 * CPU device-tree node.
*/
void __init identify_cpu_name(unsigned int pvr)
{
struct cpu_spec *s = cpu_specs;
struct cpu_spec *t = &the_cpu_spec;
int i;
s = PTRRELOC(s);
t = PTRRELOC(t);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
if ((pvr & s->pvr_mask) == s->pvr_value) {
t->cpu_name = s->cpu_name;
return;
}
}
}
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
EXPORT_SYMBOL_GPL(cpu_feature_keys);
void __init cpu_feature_keys_init(void)
{
int i;
for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
unsigned long f = 1ul << i;
if (!(cur_cpu_spec->cpu_features & f))
static_branch_disable(&cpu_feature_keys[i]);
}
}
struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
EXPORT_SYMBOL(mmu_feature_keys);
void __init mmu_feature_keys_init(void)
{
int i;
for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
unsigned long f = 1ul << i;
if (!(cur_cpu_spec->mmu_features & f))
static_branch_disable(&mmu_feature_keys[i]);
}
}
#endif
| linux-master | arch/powerpc/kernel/cputable.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Smp timebase synchronization for ppc.
*
* Copyright (C) 2003 Samuel Rydh ([email protected])
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/time.h>
#define NUM_ITER 300
enum {
kExit=0, kSetAndTest, kTest
};
static struct {
volatile u64 tb;
volatile u64 mark;
volatile int cmd;
volatile int handshake;
int filler[2];
volatile int ack;
int filler2[7];
volatile int race_result;
} *tbsync;
static volatile int running;
static void enter_contest(u64 mark, long add)
{
while (get_tb() < mark)
tbsync->race_result = add;
}
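/*
 * Rough protocol: the giver publishes a candidate timebase and a "mark"
 * in the shared tbsync area and raises handshake; the taker acks,
 * optionally sets its timebase, and then both sides spin in
 * enter_contest() writing race_result until their own timebase reaches
 * the mark. Whichever CPU reaches the mark last writes last, so the sign
 * of the accumulated score tells the giver whether the tested offset left
 * the taker's timebase ahead of or behind its own, which drives the
 * binary search in smp_generic_give_timebase().
 */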
void smp_generic_take_timebase(void)
{
int cmd;
u64 tb;
unsigned long flags;
local_irq_save(flags);
while (!running)
barrier();
rmb();
for (;;) {
tbsync->ack = 1;
while (!tbsync->handshake)
barrier();
rmb();
cmd = tbsync->cmd;
tb = tbsync->tb;
mb();
tbsync->ack = 0;
if (cmd == kExit)
break;
while (tbsync->handshake)
barrier();
if (cmd == kSetAndTest)
set_tb(tb >> 32, tb & 0xfffffffful);
enter_contest(tbsync->mark, -1);
}
local_irq_restore(flags);
}
static int start_contest(int cmd, long offset, int num)
{
int i, score=0;
u64 tb;
u64 mark;
tbsync->cmd = cmd;
local_irq_disable();
for (i = -3; i < num; ) {
tb = get_tb() + 400;
tbsync->tb = tb + offset;
tbsync->mark = mark = tb + 400;
wmb();
tbsync->handshake = 1;
while (tbsync->ack)
barrier();
while (get_tb() <= tb)
barrier();
tbsync->handshake = 0;
enter_contest(mark, 1);
while (!tbsync->ack)
barrier();
if (i++ > 0)
score += tbsync->race_result;
}
local_irq_enable();
return score;
}
void smp_generic_give_timebase(void)
{
int i, score, score2, old, min=0, max=5000, offset=1000;
pr_debug("Software timebase sync\n");
/* if this fails then this kernel won't work anyway... */
tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
mb();
running = 1;
while (!tbsync->ack)
barrier();
pr_debug("Got ack\n");
/* binary search */
for (old = -1; old != offset ; offset = (min+max) / 2) {
score = start_contest(kSetAndTest, offset, NUM_ITER);
pr_debug("score %d, offset %d\n", score, offset );
if( score > 0 )
max = offset;
else
min = offset;
old = offset;
}
score = start_contest(kSetAndTest, min, NUM_ITER);
score2 = start_contest(kSetAndTest, max, NUM_ITER);
pr_debug("Min %d (score %d), Max %d (score %d)\n",
min, score, max, score2);
score = abs(score);
score2 = abs(score2);
offset = (score < score2) ? min : max;
/* guard against inaccurate mttb */
for (i = 0; i < 10; i++) {
start_contest(kSetAndTest, offset, NUM_ITER/10);
if ((score2 = start_contest(kTest, offset, NUM_ITER)) < 0)
score2 = -score2;
if (score2 <= score || score2 < 20)
break;
}
pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
/* exiting */
tbsync->cmd = kExit;
wmb();
tbsync->handshake = 1;
while (tbsync->ack)
barrier();
tbsync->handshake = 0;
kfree(tbsync);
tbsync = NULL;
running = 0;
}
| linux-master | arch/powerpc/kernel/smp-tbsync.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ppc64 "iomap" interface implementation.
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/isa-bridge.h>
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
return (void __iomem *) (port + _IO_BASE);
}
EXPORT_SYMBOL(ioport_map);
#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
if (isa_vaddr_is_ioport(addr))
return;
if (pcibios_vaddr_is_ioport(addr))
return;
iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
#endif /* CONFIG_PCI */
| linux-master | arch/powerpc/kernel/iomap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2020, Jordan Niethe, IBM Corporation.
*
* This file contains low level CPU setup functions.
* Originally written in assembly by Benjamin Herrenschmidt & various other
* authors.
*/
#include <asm/reg.h>
#include <asm/synch.h>
#include <linux/bitops.h>
#include <asm/cputable.h>
#include <asm/cpu_setup.h>
/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
static bool init_hvmode_206(struct cpu_spec *t)
{
u64 msr;
msr = mfmsr();
if (msr & MSR_HV)
return true;
t->cpu_features &= ~(CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST);
return false;
}
static void init_LPCR_ISA300(u64 lpcr, u64 lpes)
{
/* POWER9 has no VRMASD */
lpcr |= (lpes << LPCR_LPES_SH) & LPCR_LPES;
lpcr |= LPCR_PECE0|LPCR_PECE1|LPCR_PECE2;
lpcr |= (4ull << LPCR_DPFD_SH) & LPCR_DPFD;
lpcr &= ~LPCR_HDICE; /* clear HDICE */
lpcr |= (4ull << LPCR_VC_SH);
mtspr(SPRN_LPCR, lpcr);
isync();
}
/*
* Setup a sane LPCR:
* Called with initial LPCR and desired LPES 2-bit value
*
* LPES = 0b01 (HSRR0/1 used for 0x500)
* PECE = 0b111
* DPFD = 4
* HDICE = 0
* VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
* VRMASD = 0b10000 (L=1, LP=00)
*
* Other bits untouched for now
*/
static void init_LPCR_ISA206(u64 lpcr, u64 lpes)
{
lpcr |= (0x10ull << LPCR_VRMASD_SH) & LPCR_VRMASD;
init_LPCR_ISA300(lpcr, lpes);
}
static void init_FSCR(void)
{
u64 fscr;
fscr = mfspr(SPRN_FSCR);
fscr |= FSCR_TAR|FSCR_EBB;
mtspr(SPRN_FSCR, fscr);
}
static void init_FSCR_power9(void)
{
u64 fscr;
fscr = mfspr(SPRN_FSCR);
fscr |= FSCR_SCV;
mtspr(SPRN_FSCR, fscr);
init_FSCR();
}
static void init_FSCR_power10(void)
{
u64 fscr;
fscr = mfspr(SPRN_FSCR);
fscr |= FSCR_PREFIX;
mtspr(SPRN_FSCR, fscr);
init_FSCR_power9();
}
static void init_HFSCR(void)
{
u64 hfscr;
hfscr = mfspr(SPRN_HFSCR);
hfscr |= HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|HFSCR_DSCR|\
HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP;
mtspr(SPRN_HFSCR, hfscr);
}
static void init_PMU_HV(void)
{
mtspr(SPRN_MMCRC, 0);
}
static void init_PMU_HV_ISA207(void)
{
mtspr(SPRN_MMCRH, 0);
}
static void init_PMU(void)
{
mtspr(SPRN_MMCRA, 0);
mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
static void init_PMU_ISA207(void)
{
mtspr(SPRN_MMCRS, 0);
}
static void init_PMU_ISA31(void)
{
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
static void init_DEXCR(void)
{
mtspr(SPRN_DEXCR, DEXCR_INIT);
mtspr(SPRN_HASHKEYR, 0);
}
/*
 * Note that we can be called twice for pseudo-PVRs.
* The parameter offset is not used.
*/
void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t)
{
if (!init_hvmode_206(t))
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
void __restore_cpu_power7(void)
{
u64 msr;
msr = mfmsr();
if (!(msr & MSR_HV))
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t)
{
init_FSCR();
init_PMU();
init_PMU_ISA207();
if (!init_hvmode_206(t))
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
init_PMU_HV();
init_PMU_HV_ISA207();
}
void __restore_cpu_power8(void)
{
u64 msr;
init_FSCR();
init_PMU();
init_PMU_ISA207();
msr = mfmsr();
if (!(msr & MSR_HV))
return;
mtspr(SPRN_LPID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
init_PMU_HV();
init_PMU_HV_ISA207();
}
void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t)
{
init_FSCR_power9();
init_PMU();
if (!init_hvmode_206(t))
return;
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
init_HFSCR();
init_PMU_HV();
}
void __restore_cpu_power9(void)
{
u64 msr;
init_FSCR_power9();
init_PMU();
msr = mfmsr();
if (!(msr & MSR_HV))
return;
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
init_HFSCR();
init_PMU_HV();
}
void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t)
{
init_FSCR_power10();
init_PMU();
init_PMU_ISA31();
init_DEXCR();
if (!init_hvmode_206(t))
return;
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
init_HFSCR();
init_PMU_HV();
}
void __restore_cpu_power10(void)
{
u64 msr;
init_FSCR_power10();
init_PMU();
init_PMU_ISA31();
init_DEXCR();
msr = mfmsr();
if (!(msr & MSR_HV))
return;
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
init_HFSCR();
init_PMU_HV();
}
| linux-master | arch/powerpc/kernel/cpu_setup_power.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
* using the CPU's debug registers. Derived from
* "arch/x86/kernel/hw_breakpoint.c"
*
* Copyright 2010 IBM Corporation
* Author: K.Prasad <[email protected]>
*/
#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for every cpu
*/
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
/*
* Returns total number of data or instruction breakpoints available.
*/
int hw_breakpoint_slots(int type)
{
if (type == TYPE_DATA)
return nr_wp_slots();
return 0; /* no instruction breakpoints available */
}
/*
* Install a perf counter breakpoint.
*
* We seek a free debug address register and use it for this
* breakpoint.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
struct perf_event **slot;
int i;
for (i = 0; i < nr_wp_slots(); i++) {
slot = this_cpu_ptr(&bp_per_reg[i]);
if (!*slot) {
*slot = bp;
break;
}
}
if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
return -EBUSY;
/*
* Do not install DABR values if the instruction must be single-stepped.
* If so, DABR will be populated in single_step_dabr_instruction().
*/
if (!info->perf_single_step)
__set_breakpoint(i, info);
return 0;
}
/*
* Uninstall the breakpoint contained in the given counter.
*
* First we search the debug address register it uses and then we disable
* it.
*
* Atomic: we hold the counter->ctx->lock and we only handle variables
* and registers local to this cpu.
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint null_brk = {0};
struct perf_event **slot;
int i;
for (i = 0; i < nr_wp_slots(); i++) {
slot = this_cpu_ptr(&bp_per_reg[i]);
if (*slot == bp) {
*slot = NULL;
break;
}
}
if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
return;
__set_breakpoint(i, &null_brk);
}
static bool is_ptrace_bp(struct perf_event *bp)
{
return bp->overflow_handler == ptrace_triggered;
}
/*
* Check for virtual address in kernel space.
*/
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
return is_kernel_addr(hw->address);
}
int arch_bp_generic_fields(int type, int *gen_bp_type)
{
*gen_bp_type = 0;
if (type & HW_BRK_TYPE_READ)
*gen_bp_type |= HW_BREAKPOINT_R;
if (type & HW_BRK_TYPE_WRITE)
*gen_bp_type |= HW_BREAKPOINT_W;
if (*gen_bp_type == 0)
return -EINVAL;
return 0;
}
/*
 * The watchpoint match range is always doubleword (8 byte) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. Ex,
*
* address len = 6 bytes
* |=========.
* |------------v--|------v--------|
* | | | | | | | | | | | | | | | | |
* |---------------|---------------|
* <---8 bytes--->
*
* In this case, we should configure hw as:
* start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
* len = 16 bytes
*
* @start_addr is inclusive but @end_addr is exclusive.
*/
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
u16 max_len = DABR_MAX_LEN;
u16 hw_len;
unsigned long start_addr, end_addr;
start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
hw_len = end_addr - start_addr;
if (dawr_enabled()) {
max_len = DAWR_MAX_LEN;
/* DAWR region can't cross 512 bytes boundary on p10 predecessors */
if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
(ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
return -EINVAL;
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
/* 8xx can setup a range without limitation */
max_len = U16_MAX;
}
if (hw_len > max_len)
return -EINVAL;
hw->hw_len = hw_len;
return 0;
}
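/*
 * For example, with the DAWR on pre-P10 CPUs, address = 0x1f8 and len = 16
 * gives start_addr = 0x1f8 and end_addr = 0x208: hw_len is only 16, but
 * the range spans two different 512-byte blocks, so the request is
 * rejected with -EINVAL.
 */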
/*
* Validate the arch-specific HW Breakpoint register settings
*/
int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
int ret = -EINVAL;
if (!bp || !attr->bp_len)
return ret;
hw->type = HW_BRK_TYPE_TRANSLATE;
if (attr->bp_type & HW_BREAKPOINT_R)
hw->type |= HW_BRK_TYPE_READ;
if (attr->bp_type & HW_BREAKPOINT_W)
hw->type |= HW_BRK_TYPE_WRITE;
if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
return ret;
if (!attr->exclude_user)
hw->type |= HW_BRK_TYPE_USER;
if (!attr->exclude_kernel)
hw->type |= HW_BRK_TYPE_KERNEL;
if (!attr->exclude_hv)
hw->type |= HW_BRK_TYPE_HYP;
hw->address = attr->bp_addr;
hw->len = attr->bp_len;
if (!ppc_breakpoint_available())
return -ENODEV;
return hw_breakpoint_validate_len(hw);
}
/*
* Restores the breakpoint on the debug registers.
* Invoke this function if it is known that the execution context is
* about to change to cause loss of MSR_SE settings.
*
* The perf watchpoint will simply re-trigger once the thread is started again,
* and the watchpoint handler will set up MSR_SE and perf_single_step as
* needed.
*/
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
struct arch_hw_breakpoint *info;
int i;
preempt_disable();
for (i = 0; i < nr_wp_slots(); i++) {
struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);
if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
goto reset;
}
goto out;
reset:
regs_set_return_msr(regs, regs->msr & ~MSR_SE);
for (i = 0; i < nr_wp_slots(); i++) {
info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
__set_breakpoint(i, info);
info->perf_single_step = false;
}
out:
preempt_enable();
}
static bool is_larx_stcx_instr(int type)
{
return type == LARX || type == STCX;
}
static bool is_octword_vsx_instr(int type, int size)
{
return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}
/*
* We've failed in reliably handling the hw-breakpoint. Unregister
* it and throw a warning message to let the user know about it.
*/
static void handler_error(struct perf_event *bp)
{
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
counter_arch_bp(bp)->address);
perf_event_disable_inatomic(bp);
}
static void larx_stcx_err(struct perf_event *bp)
{
printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
counter_arch_bp(bp)->address);
perf_event_disable_inatomic(bp);
}
static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
int *hit, ppc_inst_t instr)
{
int i;
int stepped;
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
for (i = 0; i < nr_wp_slots(); i++) {
if (!hit[i])
continue;
counter_arch_bp(bp[i])->perf_single_step = true;
bp[i] = NULL;
}
regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
stepped = emulate_step(regs, instr);
if (!stepped) {
for (i = 0; i < nr_wp_slots(); i++) {
if (!hit[i])
continue;
handler_error(bp[i]);
bp[i] = NULL;
}
return false;
}
return true;
}
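/*
 * To summarize: user-space accesses are never emulated here; the task is
 * single-stepped with MSR_SE and the perf callback fires later from
 * single_step_dabr_instruction(). Kernel accesses are emulated with
 * emulate_step(), and if emulation fails the breakpoint is disabled via
 * handler_error().
 */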
static void handle_p10dd1_spurious_exception(struct perf_event **bp,
int *hit, unsigned long ea)
{
int i;
unsigned long hw_end_addr;
/*
* Handle spurious exception only when any bp_per_reg is set.
* Otherwise this might be created by xmon and not actually a
* spurious exception.
*/
for (i = 0; i < nr_wp_slots(); i++) {
struct arch_hw_breakpoint *info;
if (!bp[i])
continue;
info = counter_arch_bp(bp[i]);
hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
/*
* Ending address of DAWR range is less than starting
* address of op.
*/
if ((hw_end_addr - 1) >= ea)
continue;
/*
* Those addresses need to be in the same or in two
* consecutive 512B blocks;
*/
if (((hw_end_addr - 1) >> 10) != (ea >> 10))
continue;
/*
* 'op address + 64B' generates an address that has a
* carry into bit 52 (crosses 2K boundary).
*/
if ((ea & 0x800) == ((ea + 64) & 0x800))
continue;
break;
}
if (i == nr_wp_slots())
return;
for (i = 0; i < nr_wp_slots(); i++) {
if (bp[i]) {
hit[i] = 1;
counter_arch_bp(bp[i])->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
}
}
}
/*
* Handle a DABR or DAWR exception.
*
* Called in atomic context.
*/
int hw_breakpoint_handler(struct die_args *args)
{
bool err = false;
int rc = NOTIFY_STOP;
struct perf_event *bp[HBP_NUM_MAX] = { NULL };
struct pt_regs *regs = args->regs;
int i;
int hit[HBP_NUM_MAX] = {0};
int nr_hit = 0;
bool ptrace_bp = false;
ppc_inst_t instr = ppc_inst(0);
int type = 0;
int size = 0;
unsigned long ea = 0;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
/*
* The counter may be concurrently released but that can only
* occur from a call_rcu() path. We can then safely fetch
* the breakpoint, use its callback, touch its counter
* while we are in an rcu_read_lock() path.
*/
rcu_read_lock();
if (!IS_ENABLED(CONFIG_PPC_8xx))
wp_get_instr_detail(regs, &instr, &type, &size, &ea);
for (i = 0; i < nr_wp_slots(); i++) {
struct arch_hw_breakpoint *info;
bp[i] = __this_cpu_read(bp_per_reg[i]);
if (!bp[i])
continue;
info = counter_arch_bp(bp[i]);
info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
if (wp_check_constraints(regs, instr, ea, type, size, info)) {
if (!IS_ENABLED(CONFIG_PPC_8xx) &&
ppc_inst_equal(instr, ppc_inst(0))) {
handler_error(bp[i]);
bp[i] = NULL;
err = 1;
continue;
}
if (is_ptrace_bp(bp[i]))
ptrace_bp = true;
hit[i] = 1;
nr_hit++;
}
}
if (err)
goto reset;
if (!nr_hit) {
/* Workaround for Power10 DD1 */
if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
is_octword_vsx_instr(type, size)) {
handle_p10dd1_spurious_exception(bp, hit, ea);
} else {
rc = NOTIFY_DONE;
goto out;
}
}
/*
* Return early after invoking user-callback function without restoring
* DABR if the breakpoint is from ptrace which always operates in
* one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
* generated in do_dabr().
*/
if (ptrace_bp) {
for (i = 0; i < nr_wp_slots(); i++) {
if (!hit[i] || !is_ptrace_bp(bp[i]))
continue;
perf_bp_event(bp[i], regs);
bp[i] = NULL;
}
rc = NOTIFY_DONE;
goto reset;
}
if (!IS_ENABLED(CONFIG_PPC_8xx)) {
if (is_larx_stcx_instr(type)) {
for (i = 0; i < nr_wp_slots(); i++) {
if (!hit[i])
continue;
larx_stcx_err(bp[i]);
bp[i] = NULL;
}
goto reset;
}
if (!stepping_handler(regs, bp, hit, instr))
goto reset;
}
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion
*/
for (i = 0; i < nr_wp_slots(); i++) {
if (!hit[i])
continue;
if (!(counter_arch_bp(bp[i])->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp[i], regs);
}
reset:
for (i = 0; i < nr_wp_slots(); i++) {
if (!bp[i])
continue;
__set_breakpoint(i, counter_arch_bp(bp[i]));
}
out:
rcu_read_unlock();
return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);
/*
* Handle single-step exceptions following a DABR hit.
*
* Called in atomic context.
*/
static int single_step_dabr_instruction(struct die_args *args)
{
struct pt_regs *regs = args->regs;
bool found = false;
/*
* Check if we are single-stepping as a result of a
* previous HW Breakpoint exception
*/
for (int i = 0; i < nr_wp_slots(); i++) {
struct perf_event *bp;
struct arch_hw_breakpoint *info;
bp = __this_cpu_read(bp_per_reg[i]);
if (!bp)
continue;
info = counter_arch_bp(bp);
if (!info->perf_single_step)
continue;
found = true;
/*
* We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics.
*/
if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
perf_bp_event(bp, regs);
info->perf_single_step = false;
__set_breakpoint(i, counter_arch_bp(bp));
}
/*
* If the process was being single-stepped by ptrace, let the
* other single-step actions occur (e.g. generate SIGTRAP).
*/
if (!found || test_thread_flag(TIF_SINGLESTEP))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);
/*
* Handle debug exception notifications.
*
* Called in atomic context.
*/
int hw_breakpoint_exceptions_notify(
struct notifier_block *unused, unsigned long val, void *data)
{
int ret = NOTIFY_DONE;
switch (val) {
case DIE_DABR_MATCH:
ret = hw_breakpoint_handler(data);
break;
case DIE_SSTEP:
ret = single_step_dabr_instruction(data);
break;
}
return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
/*
* Release the user breakpoints used by ptrace
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < nr_wp_slots(); i++) {
unregister_hw_breakpoint(t->ptrace_bps[i]);
t->ptrace_bps[i] = NULL;
}
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
| linux-master | arch/powerpc/kernel/hw_breakpoint.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
*
* Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
*
* Added function graph tracer code, taken from x86 that was written
* by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
*
*/
#define pr_fmt(fmt) "ftrace-powerpc: " fmt
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
#define NUM_FTRACE_TRAMPS 2
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
static ppc_inst_t ftrace_create_branch_inst(unsigned long ip, unsigned long addr, int link)
{
ppc_inst_t op;
WARN_ON(!is_offset_in_branch_range(addr - ip));
create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
return op;
}
static inline int ftrace_read_inst(unsigned long ip, ppc_inst_t *op)
{
if (copy_inst_from_kernel_nofault(op, (void *)ip)) {
pr_err("0x%lx: fetching instruction failed\n", ip);
return -EFAULT;
}
return 0;
}
static inline int ftrace_validate_inst(unsigned long ip, ppc_inst_t inst)
{
ppc_inst_t op;
int ret;
ret = ftrace_read_inst(ip, &op);
if (!ret && !ppc_inst_equal(op, inst)) {
pr_err("0x%lx: expected (%08lx) != found (%08lx)\n",
ip, ppc_inst_as_ulong(inst), ppc_inst_as_ulong(op));
ret = -EINVAL;
}
return ret;
}
static inline int ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
int ret = ftrace_validate_inst(ip, old);
if (!ret)
ret = patch_instruction((u32 *)ip, new);
return ret;
}
static int is_bl_op(ppc_inst_t op)
{
return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
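/*
 * Return the first registered ftrace trampoline that is reachable from
 * @ip with a relative branch, or 0 if none is in range.
 */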
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
if (!ftrace_tramps[i])
continue;
else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
return ftrace_tramps[i];
return 0;
}
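/*
 * Work out where the call at rec->ip should branch to: the target itself
 * when in range, otherwise a module stub or one of our kernel trampolines.
 * The result is encoded as a 'bl' in *call_inst.
 */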
static int ftrace_get_call_inst(struct dyn_ftrace *rec, unsigned long addr, ppc_inst_t *call_inst)
{
unsigned long ip = rec->ip;
unsigned long stub;
if (is_offset_in_branch_range(addr - ip)) {
/* Within range */
stub = addr;
#ifdef CONFIG_MODULES
} else if (rec->arch.mod) {
/* Module code would be going to one of the module stubs */
stub = (addr == (unsigned long)ftrace_caller ? rec->arch.mod->arch.tramp :
rec->arch.mod->arch.tramp_regs);
#endif
} else if (core_kernel_text(ip)) {
/* We would be branching to one of our ftrace stubs */
stub = find_ftrace_tramp(ip);
if (!stub) {
pr_err("0x%lx: No ftrace stubs reachable\n", ip);
return -EINVAL;
}
} else {
return -EINVAL;
}
*call_inst = ftrace_create_branch_inst(ip, stub, 1);
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
/* This should never be called since we override ftrace_replace_code() */
WARN_ON(1);
return -EINVAL;
}
#endif
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t old, new;
int ret;
/* This can only ever be called during module load */
if (WARN_ON(!IS_ENABLED(CONFIG_MODULES) || core_kernel_text(rec->ip)))
return -EINVAL;
old = ppc_inst(PPC_RAW_NOP());
ret = ftrace_get_call_inst(rec, addr, &new);
if (ret)
return ret;
return ftrace_modify_code(rec->ip, old, new);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
/*
* This should never be called since we override ftrace_replace_code(),
* as well as ftrace_init_nop()
*/
WARN_ON(1);
return -EINVAL;
}
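/*
 * Batch conversion of all ftrace call sites: ask ftrace core what each
 * record should become (call, nop, or a different call) and patch the
 * site after validating the instruction currently there.
 */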
void ftrace_replace_code(int enable)
{
ppc_inst_t old, new, call_inst, new_call_inst;
ppc_inst_t nop_inst = ppc_inst(PPC_RAW_NOP());
unsigned long ip, new_addr, addr;
struct ftrace_rec_iter *iter;
struct dyn_ftrace *rec;
int ret = 0, update;
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
ip = rec->ip;
if (rec->flags & FTRACE_FL_DISABLED && !(rec->flags & FTRACE_FL_ENABLED))
continue;
addr = ftrace_get_addr_curr(rec);
new_addr = ftrace_get_addr_new(rec);
update = ftrace_update_record(rec, enable);
switch (update) {
case FTRACE_UPDATE_IGNORE:
default:
continue;
case FTRACE_UPDATE_MODIFY_CALL:
ret = ftrace_get_call_inst(rec, new_addr, &new_call_inst);
ret |= ftrace_get_call_inst(rec, addr, &call_inst);
old = call_inst;
new = new_call_inst;
break;
case FTRACE_UPDATE_MAKE_NOP:
ret = ftrace_get_call_inst(rec, addr, &call_inst);
old = call_inst;
new = nop_inst;
break;
case FTRACE_UPDATE_MAKE_CALL:
ret = ftrace_get_call_inst(rec, new_addr, &call_inst);
old = nop_inst;
new = call_inst;
break;
}
if (!ret)
ret = ftrace_modify_code(ip, old, new);
if (ret)
goto out;
}
out:
if (ret)
ftrace_bug(ret, rec);
return;
}
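/*
 * One-time preparation of an ftrace location: verify the expected
 * compiler-generated sequence around rec->ip, remember the owning module
 * for non-core-kernel text, and patch the site so tracing starts out
 * disabled.
 */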
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
unsigned long addr, ip = rec->ip;
ppc_inst_t old, new;
int ret = 0;
/* Verify instructions surrounding the ftrace location */
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
/* Expect nops */
ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_NOP()));
if (!ret)
ret = ftrace_validate_inst(ip, ppc_inst(PPC_RAW_NOP()));
} else if (IS_ENABLED(CONFIG_PPC32)) {
/* Expected sequence: 'mflr r0', 'stw r0,4(r1)', 'bl _mcount' */
ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
if (!ret)
ret = ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STW(_R0, _R1, 4)));
} else if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
/* Expected sequence: 'mflr r0', ['std r0,16(r1)'], 'bl _mcount' */
ret = ftrace_read_inst(ip - 4, &old);
if (!ret && !ppc_inst_equal(old, ppc_inst(PPC_RAW_MFLR(_R0)))) {
ret = ftrace_validate_inst(ip - 8, ppc_inst(PPC_RAW_MFLR(_R0)));
ret |= ftrace_validate_inst(ip - 4, ppc_inst(PPC_RAW_STD(_R0, _R1, 16)));
}
} else {
return -EINVAL;
}
if (ret)
return ret;
if (!core_kernel_text(ip)) {
if (!mod) {
pr_err("0x%lx: No module provided for non-kernel address\n", ip);
return -EFAULT;
}
rec->arch.mod = mod;
}
/* Nop-out the ftrace location */
new = ppc_inst(PPC_RAW_NOP());
addr = MCOUNT_ADDR;
if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)) {
/* we instead patch-in the 'mflr r0' */
old = ppc_inst(PPC_RAW_NOP());
new = ppc_inst(PPC_RAW_MFLR(_R0));
ret = ftrace_modify_code(ip - 4, old, new);
} else if (is_offset_in_branch_range(addr - ip)) {
/* Within range */
old = ftrace_create_branch_inst(ip, addr, 1);
ret = ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip) || (IS_ENABLED(CONFIG_MODULES) && mod)) {
/*
* We would be branching to a linker-generated stub, or to the module _mcount
* stub. Let's just confirm we have a 'bl' here.
*/
ret = ftrace_read_inst(ip, &old);
if (ret)
return ret;
if (!is_bl_op(old)) {
pr_err("0x%lx: expected (bl) != found (%08lx)\n", ip, ppc_inst_as_ulong(old));
return -EINVAL;
}
ret = patch_instruction((u32 *)ip, new);
} else {
return -EINVAL;
}
return ret;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
ppc_inst_t old, new;
int ret;
old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
ret = ftrace_modify_code(ip, old, new);
/* Also update the regs callback function */
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
ip = (unsigned long)(&ftrace_regs_call);
old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_create_branch_inst(ip, ppc_function_entry(func), 1);
ret = ftrace_modify_code(ip, old, new);
}
return ret;
}
/*
* Use the default ftrace_modify_all_code, but without
* stop_machine().
*/
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
void ftrace_free_init_tramp(void)
{
int i;
for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
ftrace_tramps[i] = 0;
return;
}
}
static void __init add_ftrace_tramp(unsigned long tramp)
{
int i;
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
if (!ftrace_tramps[i]) {
ftrace_tramps[i] = tramp;
return;
}
}
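/*
 * Set up the two kernel ftrace trampolines (ftrace_tramp_text and
 * ftrace_tramp_init): each stub loads FTRACE_REGS_ADDR into r12 and
 * branches to it via the count register.
 */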
int __init ftrace_dyn_arch_init(void)
{
unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
unsigned long addr = FTRACE_REGS_ADDR;
long reladdr;
int i;
u32 stub_insns[] = {
#ifdef CONFIG_PPC_KERNEL_PCREL
/* pla r12,addr */
PPC_PREFIX_MLS | __PPC_PRFX_R(1),
PPC_INST_PADDI | ___PPC_RT(_R12),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR()
#elif defined(CONFIG_PPC64)
PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
PPC_RAW_ADDIS(_R12, _R12, 0),
PPC_RAW_ADDI(_R12, _R12, 0),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR()
#else
PPC_RAW_LIS(_R12, 0),
PPC_RAW_ADDI(_R12, _R12, 0),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR()
#endif
};
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
for (i = 0; i < 2; i++) {
reladdr = addr - (unsigned long)tramp[i];
if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
pr_err("Address of %ps out of range of pcrel address.\n",
(void *)addr);
return -1;
}
memcpy(tramp[i], stub_insns, sizeof(stub_insns));
tramp[i][0] |= IMM_H18(reladdr);
tramp[i][1] |= IMM_L(reladdr);
add_ftrace_tramp((unsigned long)tramp[i]);
}
} else if (IS_ENABLED(CONFIG_PPC64)) {
reladdr = addr - kernel_toc_addr();
if (reladdr >= (long)SZ_2G || reladdr < -(long long)SZ_2G) {
pr_err("Address of %ps out of range of kernel_toc.\n",
(void *)addr);
return -1;
}
for (i = 0; i < 2; i++) {
memcpy(tramp[i], stub_insns, sizeof(stub_insns));
tramp[i][1] |= PPC_HA(reladdr);
tramp[i][2] |= PPC_LO(reladdr);
add_ftrace_tramp((unsigned long)tramp[i]);
}
} else {
for (i = 0; i < 2; i++) {
memcpy(tramp[i], stub_insns, sizeof(stub_insns));
tramp[i][0] |= PPC_HA(addr);
tramp[i][1] |= PPC_LO(addr);
add_ftrace_tramp((unsigned long)tramp[i]);
}
}
return 0;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
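/*
 * Divert the traced function's return address (LR in fregs) to
 * return_to_handler so the graph tracer sees function exit, unless graph
 * tracing is dead, paused, or we are recursing.
 */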
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long sp = fregs->regs.gpr[1];
int bit;
if (unlikely(ftrace_graph_is_dead()))
goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
goto out;
if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
parent_ip = ppc_function_entry(return_to_handler);
ftrace_test_recursion_unlock(bit);
out:
fregs->regs.link = parent_ip;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
| linux-master | arch/powerpc/kernel/trace/ftrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2015 Naveen N. Rao, IBM Corporation
*/
#include <asm/trace_clock.h>
#include <asm/time.h>
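/* Raw timebase ticks: a cheap, notrace-safe trace clock. */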
u64 notrace trace_clock_ppc_tb(void)
{
return get_tb();
}
| linux-master | arch/powerpc/kernel/trace/trace_clock.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
*
* Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
*
* Added function graph tracer code, taken from x86 that was written
* by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
*
*/
#define pr_fmt(fmt) "ftrace-powerpc: " fmt
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>
/*
* We generally only have a single long_branch tramp and at most 2 or 3 plt
* tramps generated. But, we don't use the plt tramps currently. We also allot
* 2 tramps after .text and .init.text. So, we only end up with around 3 usable
* tramps in total. Set aside 8 just to be sure.
*/
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
return op;
}
static inline int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
ppc_inst_t replaced;
/*
* Note:
	 * We are paranoid about modifying text: if a bug were to happen, it
	 * could cause us to read or write somewhere that could cause harm.
	 * Carefully read and modify the code with copy_inst_from_kernel_nofault(),
	 * and make sure what we read is what we expected before modifying it.
*/
/* read the text we want to modify */
if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
return -EFAULT;
/* Make sure it is what we expect it to be */
if (!ppc_inst_equal(replaced, old)) {
pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
return -EINVAL;
}
/* replace the text with the new text */
return patch_instruction((u32 *)ip, new);
}
/*
* Helper functions that are the same for both PPC64 and PPC32.
*/
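/* Can @addr be reached from @ip with a single relative branch? */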
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
addr = ppc_function_entry((void *)addr);
return is_offset_in_branch_range(addr - ip);
}
static int is_bl_op(ppc_inst_t op)
{
return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
static int is_b_op(ppc_inst_t op)
{
return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}
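/*
 * Decode the target of a relative branch: extract the 26-bit LI field and
 * sign-extend it, so e.g. an encoded offset of 0x3fffffc yields -4 and a
 * target of ip - 4.
 */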
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
int offset;
offset = PPC_LI(ppc_inst_val(op));
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
return ip + (long)offset;
}
#ifdef CONFIG_MODULES
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long entry, ptr, tramp;
unsigned long ip = rec->ip;
ppc_inst_t op, pop;
/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr);
/* This should match what was called */
if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry);
return -EINVAL;
}
if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
return -EFAULT;
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
pr_err("Unexpected instruction %08lx around bl _mcount\n",
ppc_inst_as_ulong(op));
return -EINVAL;
}
} else if (IS_ENABLED(CONFIG_PPC64)) {
/*
* Check what is in the next instruction. We can see ld r2,40(r1), but
* on first pass after boot we will see mflr r0.
*/
if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
pr_err("Fetching op failed.\n");
return -EFAULT;
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
ppc_inst_as_ulong(op));
return -EINVAL;
}
}
/*
* When using -mprofile-kernel or PPC32 there is no load to jump over.
*
* Otherwise our original call site looks like:
*
* bl <tramp>
* ld r2,XX(r1)
*
* Milton Miller pointed out that we can not simply nop the branch.
* If a task was preempted when calling a trace function, the nops
* will remove the way to restore the TOC in r2 and the r2 TOC will
* get corrupted.
*
* Use a b +8 to jump over the load.
*/
if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
pop = ppc_inst(PPC_RAW_NOP());
else
pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */
if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
return 0;
}
#else
static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
return 0;
}
#endif /* CONFIG_MODULES */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
/*
* We have the compiler generated long_branch tramps at the end
* and we prefer those
*/
for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
if (!ftrace_tramps[i])
continue;
else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
return ftrace_tramps[i];
return 0;
}
static int add_ftrace_tramp(unsigned long tramp)
{
int i;
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
if (!ftrace_tramps[i]) {
ftrace_tramps[i] = tramp;
return 0;
}
return -1;
}
/*
* If this is a compiler generated long_branch trampoline (essentially, a
* trampoline that has a branch to _mcount()), we re-write the branch to
* instead go to ftrace_[regs_]caller() and note down the location of this
* trampoline.
*/
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
int i;
ppc_inst_t op;
unsigned long ptr;
/* Is this a known long jump tramp? */
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
if (ftrace_tramps[i] == tramp)
return 0;
/* New trampoline -- read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
return -1;
}
/* Is this a 24 bit branch? */
if (!is_b_op(op)) {
pr_debug("Trampoline is not a long branch tramp.\n");
return -1;
}
/* lets find where the pointer goes */
ptr = find_bl_target(tramp, op);
if (ptr != ppc_global_function_entry((void *)_mcount)) {
pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
return -1;
}
/* Let's re-write the tramp to go to ftrace_[regs_]caller */
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
else
ptr = ppc_global_function_entry((void *)ftrace_caller);
if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
if (add_ftrace_tramp(tramp)) {
pr_debug("No tramp locations left\n");
return -1;
}
return 0;
}
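/*
 * Nop out an mcount call in core kernel text. The call currently goes via
 * a compiler-generated long_branch trampoline; re-purpose it as an ftrace
 * trampoline (or check another one is reachable) before patching the call
 * site itself.
 */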
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long tramp, ip = rec->ip;
ppc_inst_t op;
/* Read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
/* Let's find where the pointer goes */
tramp = find_bl_target(ip, op);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (setup_mcount_compiler_tramp(tramp)) {
/* Are other trampolines reachable? */
if (!find_ftrace_tramp(ip)) {
pr_err("No ftrace trampolines reachable from %ps\n",
(void *)ip);
return -EINVAL;
}
}
if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
return 0;
}
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
ppc_inst_t old, new;
/*
	 * If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip)) {
return __ftrace_make_nop_kernel(rec, addr);
} else if (!IS_ENABLED(CONFIG_MODULES)) {
return -EINVAL;
}
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
* or it has been passed in.
*/
if (!rec->arch.mod) {
if (!mod) {
pr_err("No module loaded addr=%lx\n", addr);
return -EFAULT;
}
rec->arch.mod = mod;
} else if (mod) {
if (mod != rec->arch.mod) {
pr_err("Record mod %p not equal to passed in mod %p\n",
rec->arch.mod, mod);
return -EINVAL;
}
/* nothing to do if mod == rec->arch.mod */
} else
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
}
#ifdef CONFIG_MODULES
/*
* Examine the existing instructions for __ftrace_make_call.
* They should effectively be a NOP, and follow formal constraints,
* depending on the ABI. Return false if they don't.
*/
static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
else
return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t op[2];
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
/* read where this goes */
if (copy_inst_from_kernel_nofault(op, ip))
return -EFAULT;
if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
return -EINVAL;
}
/* If we never set up ftrace trampoline(s), then bail */
if (!mod->arch.tramp ||
(IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
tramp = mod->arch.tramp;
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr);
/* This should match what was called */
if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry);
return -EINVAL;
}
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
return 0;
}
#else
static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
return 0;
}
#endif /* CONFIG_MODULES */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
ppc_inst_t op;
void *ip = (void *)rec->ip;
unsigned long tramp, entry, ptr;
/* Make sure we're being asked to patch branch to a known ftrace addr */
entry = ppc_global_function_entry((void *)ftrace_caller);
ptr = ppc_global_function_entry((void *)addr);
if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
entry = ppc_global_function_entry((void *)ftrace_regs_caller);
if (ptr != entry) {
pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
return -EINVAL;
}
/* Make sure we have a nop */
if (copy_inst_from_kernel_nofault(&op, ip)) {
pr_err("Unable to read ftrace location %p\n", ip);
return -EFAULT;
}
if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Unexpected call sequence at %p: %08lx\n",
ip, ppc_inst_as_ulong(op));
return -EINVAL;
}
tramp = find_ftrace_tramp((unsigned long)ip);
if (!tramp) {
pr_err("No ftrace trampolines reachable from %ps\n", ip);
return -EINVAL;
}
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("Error patching branch to ftrace tramp!\n");
return -EINVAL;
}
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
ppc_inst_t old, new;
/*
	 * If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip)) {
return __ftrace_make_call_kernel(rec, addr);
} else if (!IS_ENABLED(CONFIG_MODULES)) {
/* We should not get here without modules */
return -EINVAL;
}
/*
* Out of range jumps are called from modules.
	 * Since we are converting from a nop, the record had better
	 * already have a module defined.
*/
if (!rec->arch.mod) {
pr_err("No module loaded\n");
return -EINVAL;
}
return __ftrace_make_call(rec, addr);
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
ppc_inst_t op;
unsigned long ip = rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
/* If we never set up ftrace trampolines, then bail */
if (!mod->arch.tramp || !mod->arch.tramp_regs) {
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
/* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
entry = ppc_global_function_entry((void *)old_addr);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
if (tramp != entry) {
/* old_addr is not within range, so we must have used a trampoline */
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
/* This should match what was called */
if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry);
return -EINVAL;
}
}
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
return 0;
}
if (rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
tramp = mod->arch.tramp;
if (module_trampoline_target(mod, tramp, &ptr)) {
pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
pr_devel("trampoline target %lx", ptr);
entry = ppc_global_function_entry((void *)addr);
/* This should match what was called */
if (ptr != entry) {
pr_err("addr %lx does not match expected %lx\n", ptr, entry);
return -EINVAL;
}
if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
return 0;
}
#else
static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
return 0;
}
#endif
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned long ip = rec->ip;
ppc_inst_t old, new;
/*
	 * If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
/* within range */
old = ftrace_call_replace(ip, old_addr, 1);
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip)) {
/*
* We always patch out of range locations to go to the regs
* variant, so there is nothing to do here
*/
return 0;
} else if (!IS_ENABLED(CONFIG_MODULES)) {
/* We should not get here without modules */
return -EINVAL;
}
/*
* Out of range jumps are called from modules.
*/
if (!rec->arch.mod) {
pr_err("No module loaded\n");
return -EINVAL;
}
return __ftrace_modify_call(rec, old_addr, addr);
}
#endif
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
ppc_inst_t old, new;
int ret;
old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
/* Also update the regs callback function */
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
ip = (unsigned long)(&ftrace_regs_call);
old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
return ret;
}
/*
* Use the default ftrace_modify_all_code, but without
* stop_machine().
*/
void arch_ftrace_update_code(int command)
{
ftrace_modify_all_code(command);
}
#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
void ftrace_free_init_tramp(void)
{
int i;
for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
ftrace_tramps[i] = 0;
return;
}
}
int __init ftrace_dyn_arch_init(void)
{
int i;
unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
u32 stub_insns[] = {
PPC_RAW_LD(_R12, _R13, PACATOC),
PPC_RAW_ADDIS(_R12, _R12, 0),
PPC_RAW_ADDI(_R12, _R12, 0),
PPC_RAW_MTCTR(_R12),
PPC_RAW_BCTR()
};
unsigned long addr;
long reladdr;
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
addr = ppc_global_function_entry((void *)ftrace_regs_caller);
else
addr = ppc_global_function_entry((void *)ftrace_caller);
reladdr = addr - kernel_toc_addr();
if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
pr_err("Address of %ps out of range of kernel_toc.\n",
(void *)addr);
return -1;
}
for (i = 0; i < 2; i++) {
memcpy(tramp[i], stub_insns, sizeof(stub_insns));
tramp[i][1] |= PPC_HA(reladdr);
tramp[i][2] |= PPC_LO(reladdr);
add_ftrace_tramp((unsigned long)tramp[i]);
}
return 0;
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
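/*
 * Flip the branch at ftrace_graph_call between ftrace_graph_caller and
 * ftrace_graph_stub to enable or disable graph tracing. A no-op with
 * DYNAMIC_FTRACE_WITH_ARGS, where ftrace_graph_func() is used instead.
 */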
static int ftrace_modify_ftrace_graph_caller(bool enable)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
ppc_inst_t old, new;
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
return 0;
old = ftrace_call_replace(ip, enable ? stub : addr, 0);
new = ftrace_call_replace(ip, enable ? addr : stub, 0);
return ftrace_modify_code(ip, old, new);
}
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_ftrace_graph_caller(true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_ftrace_graph_caller(false);
}
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info. Return the address we want to divert to.
*/
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
unsigned long return_hooker;
int bit;
if (unlikely(ftrace_graph_is_dead()))
goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
bit = ftrace_test_recursion_trylock(ip, parent);
if (bit < 0)
goto out;
return_hooker = ppc_function_entry(return_to_handler);
if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
parent = return_hooker;
ftrace_test_recursion_unlock(bit);
out:
return parent;
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
#else
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp)
{
return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_PPC64_ELF_ABI_V1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
return str + 1;
else
return str;
}
#endif /* CONFIG_PPC64_ELF_ABI_V1 */
| linux-master | arch/powerpc/kernel/trace/ftrace_64_pg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Powerpc userspace implementations of gettimeofday() and similar.
*/
#include <linux/time.h>
#include <linux/types.h>
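/*
 * Thin C wrappers for the powerpc VDSO entry points; each simply forwards
 * to the generic vDSO implementation using the vdso_data pointer supplied
 * by the caller.
 */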
#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
const struct vdso_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
const struct vdso_data *vd)
{
return __cvdso_clock_getres_data(vd, clock_id, res);
}
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
const struct vdso_data *vd)
{
return __cvdso_clock_gettime32_data(vd, clock, ts);
}
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
const struct vdso_data *vd)
{
return __cvdso_clock_gettime_data(vd, clock, ts);
}
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
const struct vdso_data *vd)
{
return __cvdso_clock_getres_time32_data(vd, clock_id, res);
}
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
const struct vdso_data *vd)
{
return __cvdso_gettimeofday_data(vd, tv, tz);
}
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd)
{
return __cvdso_time_data(vd, time);
}
| linux-master | arch/powerpc/kernel/vdso/vgettimeofday.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
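/* The layout above is 32 * sizeof(u32) + sizeof(u64) + sizeof(u32), i.e.
 * 35 * sizeof(u32), which matches the size that PTRACE_GETEVRREGS and
 * PTRACE_SETEVRREGS transfer.
 */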
int evr_active(struct task_struct *target, const struct user_regset *regset)
{
flush_spe_to_thread(target);
return target->thread.used_spe ? regset->n : 0;
}
int evr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
flush_spe_to_thread(target);
membuf_write(&to, &target->thread.evr, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
return membuf_write(&to, &target->thread.acc,
sizeof(u64) + sizeof(u32));
}
int evr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-spe.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
* value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*/
int fpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
u64 buf[33];
int i;
flush_fp_to_thread(target);
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
return membuf_write(&to, buf, 33 * sizeof(u64));
}
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
* value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*
*/
int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[33];
int i;
flush_fp_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
buf[32] = target->thread.fp_state.fpscr;
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
target->thread.fp_state.fpscr = buf[32];
return 0;
}
/*
* Currently to set and get all the vsx state, you need to call
* the fp and VMX calls as well. This only get/sets the lower 32
* 128bit VSX registers.
*/
int vsr_active(struct task_struct *target, const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last
* checkpointed value of all FPR registers for the current
* transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 vsx[32];
* };
*/
int vsr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
u64 buf[32];
int i;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
return membuf_write(&to, buf, 32 * sizeof(double));
}
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last
* checkpointed value of all FPR registers for the current
* transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 vsx[32];
* };
*/
int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[32];
int ret, i;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
for (i = 0; i < 32 ; i++)
target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-vsx.c |
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan ([email protected])
* and Paul Mackerras ([email protected]).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/regset.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/context_tracking.h>
#include <linux/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#include "ptrace-decl.h"
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
}
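/*
 * Arch-specific ptrace dispatcher. Requests not handled here fall through
 * to the generic ptrace_request().
 */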
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EPERM;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
ret = -EIO;
/* convert to index and check */
index = addr / sizeof(long);
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
break;
if (index < PT_FPR0)
ret = ptrace_get_reg(child, (int) index, &tmp);
else
ret = ptrace_get_fpr(child, index, &tmp);
if (ret)
break;
ret = put_user(tmp, datalp);
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = addr / sizeof(long);
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
break;
if (index < PT_FPR0)
ret = ptrace_put_reg(child, index, data);
else
ret = ptrace_put_fpr(child, index, data);
break;
}
case PPC_PTRACE_GETHWDBGINFO: {
struct ppc_debug_info dbginfo;
ppc_gethwdinfo(&dbginfo);
if (copy_to_user(datavp, &dbginfo,
sizeof(struct ppc_debug_info)))
return -EFAULT;
return 0;
}
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
if (copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
return ppc_set_hwdebug(child, &bp_info);
}
case PPC_PTRACE_DELHWDEBUG: {
ret = ppc_del_hwdebug(child, data);
break;
}
case PTRACE_GET_DEBUGREG:
ret = ptrace_get_debugreg(child, addr, datalp);
break;
case PTRACE_SET_DEBUGREG:
ret = ptrace_set_debugreg(child, addr, data);
break;
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct user_pt_regs),
datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct user_pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
#ifdef CONFIG_SECCOMP
static int do_seccomp(struct pt_regs *regs)
{
if (!test_thread_flag(TIF_SECCOMP))
return 0;
/*
* The ABI we present to seccomp tracers is that r3 contains
* the syscall return value and orig_gpr3 contains the first
* syscall parameter. This is different to the ptrace ABI where
* both r3 and orig_gpr3 contain the first syscall parameter.
*/
regs->gpr[3] = -ENOSYS;
/*
* We use the __ version here because we have already checked
* TIF_SECCOMP. If this fails, there is nothing left to do, we
* have already loaded -ENOSYS into r3, or seccomp has put
* something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
*/
if (__secure_computing(NULL))
return -1;
/*
* The syscall was allowed by seccomp, restore the register
* state to what audit expects.
* Note that we use orig_gpr3, which means a seccomp tracer can
* modify the first syscall parameter (in orig_gpr3) and also
* allow the syscall to proceed.
*/
regs->gpr[3] = regs->orig_gpr3;
return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
/**
* do_syscall_trace_enter() - Do syscall tracing on kernel entry.
* @regs: the pt_regs of the task to trace (current)
*
* Performs various types of tracing on syscall entry. This includes seccomp,
* ptrace, syscall tracepoints and audit.
*
* The pt_regs are potentially visible to userspace via ptrace, so their
 * contents are ABI.
*
* One or more of the tracers may modify the contents of pt_regs, in particular
* to modify arguments or even the syscall number itself.
*
* It's also possible that a tracer can choose to reject the system call. In
* that case this function will return an illegal syscall number, and will put
* an appropriate return value in regs->r3.
*
* Return: the (possibly changed) syscall number.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
u32 flags;
flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
if (flags) {
int rc = ptrace_report_syscall_entry(regs);
if (unlikely(flags & _TIF_SYSCALL_EMU)) {
/*
* A nonzero return code from
* ptrace_report_syscall_entry() tells us to prevent
* the syscall execution, but we are not going to
* execute it anyway.
*
* Returning -1 will skip the syscall execution. We want
* to avoid clobbering any registers, so we don't goto
* the skip label below.
*/
return -1;
}
if (rc) {
/*
* The tracer decided to abort the syscall. Note that
* the tracer may also just change regs->gpr[0] to an
* invalid syscall number, that is handled below on the
* exit path.
*/
goto skip;
}
}
/* Run seccomp after ptrace; allow it to set gpr[3]. */
if (do_seccomp(regs))
return -1;
/* Avoid trace and audit when syscall is invalid. */
if (regs->gpr[0] >= NR_syscalls)
goto skip;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
if (!is_32bit_task())
audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
else
audit_syscall_entry(regs->gpr[0],
regs->gpr[3] & 0xffffffff,
regs->gpr[4] & 0xffffffff,
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
/* Return the possibly modified but valid syscall number */
return regs->gpr[0];
skip:
/*
* If we are aborting explicitly, or if the syscall number is
* now invalid, set the return value to -ENOSYS.
*/
regs->gpr[3] = -ENOSYS;
return -1;
}
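/*
 * Exit-side counterpart of do_syscall_trace_enter(): audit, the syscall
 * exit tracepoint, then ptrace/single-step reporting.
 */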
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, step);
}
void __init pt_regs_check(void);
/*
* Dummy function, its purpose is to break the build if struct pt_regs and
* struct user_pt_regs don't match.
*/
void __init pt_regs_check(void)
{
BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
offsetof(struct user_pt_regs, gpr));
BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
offsetof(struct user_pt_regs, nip));
BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
offsetof(struct user_pt_regs, msr));
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct user_pt_regs, orig_gpr3));
BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
offsetof(struct user_pt_regs, ctr));
BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
offsetof(struct user_pt_regs, link));
BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
offsetof(struct user_pt_regs, xer));
BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
offsetof(struct user_pt_regs, ccr));
#ifdef __powerpc64__
BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
offsetof(struct user_pt_regs, softe));
#else
BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
offsetof(struct user_pt_regs, mq));
#endif
BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
offsetof(struct user_pt_regs, trap));
BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
offsetof(struct user_pt_regs, dar));
BUILD_BUG_ON(offsetof(struct pt_regs, dear) !=
offsetof(struct user_pt_regs, dar));
BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
offsetof(struct user_pt_regs, dsisr));
BUILD_BUG_ON(offsetof(struct pt_regs, esr) !=
offsetof(struct user_pt_regs, dsisr));
BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
offsetof(struct user_pt_regs, result));
BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
// Now check that the pt_regs offsets match the uapi #defines
#define CHECK_REG(_pt, _reg) \
BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
sizeof(unsigned long)));
CHECK_REG(PT_R0, gpr[0]);
CHECK_REG(PT_R1, gpr[1]);
CHECK_REG(PT_R2, gpr[2]);
CHECK_REG(PT_R3, gpr[3]);
CHECK_REG(PT_R4, gpr[4]);
CHECK_REG(PT_R5, gpr[5]);
CHECK_REG(PT_R6, gpr[6]);
CHECK_REG(PT_R7, gpr[7]);
CHECK_REG(PT_R8, gpr[8]);
CHECK_REG(PT_R9, gpr[9]);
CHECK_REG(PT_R10, gpr[10]);
CHECK_REG(PT_R11, gpr[11]);
CHECK_REG(PT_R12, gpr[12]);
CHECK_REG(PT_R13, gpr[13]);
CHECK_REG(PT_R14, gpr[14]);
CHECK_REG(PT_R15, gpr[15]);
CHECK_REG(PT_R16, gpr[16]);
CHECK_REG(PT_R17, gpr[17]);
CHECK_REG(PT_R18, gpr[18]);
CHECK_REG(PT_R19, gpr[19]);
CHECK_REG(PT_R20, gpr[20]);
CHECK_REG(PT_R21, gpr[21]);
CHECK_REG(PT_R22, gpr[22]);
CHECK_REG(PT_R23, gpr[23]);
CHECK_REG(PT_R24, gpr[24]);
CHECK_REG(PT_R25, gpr[25]);
CHECK_REG(PT_R26, gpr[26]);
CHECK_REG(PT_R27, gpr[27]);
CHECK_REG(PT_R28, gpr[28]);
CHECK_REG(PT_R29, gpr[29]);
CHECK_REG(PT_R30, gpr[30]);
CHECK_REG(PT_R31, gpr[31]);
CHECK_REG(PT_NIP, nip);
CHECK_REG(PT_MSR, msr);
CHECK_REG(PT_ORIG_R3, orig_gpr3);
CHECK_REG(PT_CTR, ctr);
CHECK_REG(PT_LNK, link);
CHECK_REG(PT_XER, xer);
CHECK_REG(PT_CCR, ccr);
#ifdef CONFIG_PPC64
CHECK_REG(PT_SOFTE, softe);
#else
CHECK_REG(PT_MQ, mq);
#endif
CHECK_REG(PT_TRAP, trap);
CHECK_REG(PT_DAR, dar);
CHECK_REG(PT_DSISR, dsisr);
CHECK_REG(PT_RESULT, result);
#undef CHECK_REG
BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
/*
* PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include "ptrace-decl.h"
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
/*
* The logic to disable single stepping should be as
* simple as turning off the Instruction Complete flag.
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
/*
* All debug events were off.....
*/
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
}
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
{
dbginfo->version = 1;
dbginfo->num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo->num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo->num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo->data_bp_alignment = 4;
dbginfo->sizeof_condition = 4;
dbginfo->features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_DAC_RANGE))
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
}
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
unsigned long __user *datalp)
{
/* We only support one DABR and no IABRS at the moment */
if (addr > 0)
return -EINVAL;
return put_user(child->thread.debug.dac1, datalp);
}
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
struct pt_regs *regs = task->thread.regs;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &task->thread;
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IAC's at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
	/* As described above, the DABR interface assumes the bottom 3 bits of
	 * the data address are flags, but here we treat only the mode bits as
	 * flags so as not to impose alignment restrictions on DAC-based
	 * processors.
	 */
/* DAC's hold the whole address without any mode flags */
task->thread.debug.dac1 = data & ~0x3UL;
if (task->thread.debug.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
/* Read or Write bits must be set */
if (!(data & 0x3UL))
return -EINVAL;
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
task->thread.debug.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0 accordingly */
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (data & 0x1UL)
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
regs_set_return_msr(regs, regs->msr | MSR_DE);
return 0;
}
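/*
 * Claim IAC slot(s) for a PPC_PTRACE_SETHWDEBUG instruction breakpoint.
 * Ranges need a pair of IACs (1/2 or 3/4); exact matches take a single
 * free slot. Returns the 1-based slot number reported to userspace.
 */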
static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
slot4_in_use = 1;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
/* Make sure range is valid. */
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
		/* We need a pair of IAC registers */
if (!slot1_in_use && !slot2_in_use) {
slot = 1;
child->thread.debug.iac1 = bp_info->addr;
child->thread.debug.iac2 = bp_info->addr2;
child->thread.debug.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
else
dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
child->thread.debug.iac3 = bp_info->addr;
child->thread.debug.iac4 = bp_info->addr2;
child->thread.debug.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
else
dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
} else {
return -ENOSPC;
}
} else {
/* We only need one. If possible leave a pair free in
* case a range is needed later
*/
if (!slot1_in_use) {
/*
* Don't use iac1 if iac1-iac2 are free and either
* iac3 or iac4 (but not both) are free
*/
if (slot2_in_use || slot3_in_use == slot4_in_use) {
slot = 1;
child->thread.debug.iac1 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
child->thread.debug.iac2 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
child->thread.debug.iac3 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
child->thread.debug.iac4 = bp_info->addr;
child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
} else {
return -ENOSPC;
}
}
out:
child->thread.debug.dbcr0 |= DBCR0_IDM;
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
child->thread.debug.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
child->thread.debug.iac1 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
child->thread.debug.iac2 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
child->thread.debug.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
child->thread.debug.iac3 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
child->thread.debug.iac4 = 0;
child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
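/*
 * Program a data address compare (DAC) watchpoint, optionally with a data
 * value compare condition. Returns slot + 4, keeping DAC handles distinct
 * from the IAC slot numbers.
 */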
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int byte_enable =
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
& 0xf;
int condition_mode =
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
int slot;
if (byte_enable && condition_mode == 0)
return -EINVAL;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
slot = 1;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.debug.dvc1 =
(unsigned long)bp_info->condition_value;
child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
slot = 2;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.debug.dvc2 =
(unsigned long)bp_info->condition_value;
child->thread.debug.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else {
return -ENOSPC;
}
child->thread.debug.dbcr0 |= DBCR0_IDM;
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.debug.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
child->thread.debug.dac2 = 0;
child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.debug.dvc2 = 0;
#endif
child->thread.debug.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else {
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
/* We don't allow range watchpoints to be used with DVC */
if (bp_info->condition_mode)
return -EINVAL;
/*
* Best effort to verify the address range. The user/supervisor bits
* prevent trapping in kernel space, but let's fail on an obvious bad
* range. The simple test on the mask is not fool-proof, and any
* exclusive range will spill over into kernel space.
*/
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (mode == PPC_BREAKPOINT_MODE_MASK) {
/*
* dac2 is a bitmask. Don't allow a mask that makes a
* kernel space address from a valid dac1 value
*/
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
return -EIO;
} else {
/*
* For range breakpoints, addr2 must also be a valid address
*/
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
}
if (child->thread.debug.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
child->thread.debug.dac1 = bp_info->addr;
child->thread.debug.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
child->thread.debug.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
if (bp_info->version != 1)
return -ENOTSUPP;
/*
* Check for invalid flags and combinations
*/
if (bp_info->trigger_type == 0 ||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
PPC_BREAKPOINT_TRIGGER_RW)) ||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
(bp_info->condition_mode &
~(PPC_BREAKPOINT_CONDITION_MODE |
PPC_BREAKPOINT_CONDITION_BE_ALL)))
return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
#endif
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
if (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
return set_dac_range(child, bp_info);
#else
return -EINVAL;
#endif
}
long ppc_del_hwdebug(struct task_struct *child, long data)
{
int rc;
if (data <= 4)
rc = del_instruction_bp(child, (int)data);
else
rc = del_dac(child, (int)data - 4);
if (!rc) {
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
child->thread.debug.dbcr1)) {
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
regs_set_return_msr(child->thread.regs,
child->thread.regs->msr & ~MSR_DE);
}
}
return rc;
}
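/*
 * Minimal userspace sketch of driving the interface above on an
 * advanced-debug (BookE) CPU; pid and func_addr are assumed to be set up
 * by the tracer.  The handle returned by PPC_PTRACE_SETHWDEBUG is the
 * slot number: 1-4 for instruction (IAC) slots, 5-6 for data (DAC)
 * slots, and 5 for a DAC range.  The same handle is what
 * PPC_PTRACE_DELHWDEBUG expects in its data argument.
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_EXECUTE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)func_addr,
 *	};
 *	long handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
 */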
| linux-master | arch/powerpc/kernel/ptrace/ptrace-adv.c |
/*
* ptrace for 32-bit processes running on a 64-bit kernel.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas ([email protected])
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan ([email protected])
* and Paul Mackerras ([email protected]).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/compat.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
/*
* This does not yet catch signals sent when the child dies; that would
* need handling in exit.c or in signal.c.
*/
/* Macros to work out the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
#define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
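/*
 * Worked example for the macros above (a sketch, assuming TS_FPRWIDTH is
 * 2 with CONFIG_VSX and 1 without): a 32-bit tracer asking for index
 * PT_FPR0 + 5 wants word 1 (the second 32-bit half) of FPR 2, since
 * FPRNUMBER = 5 >> 1 = 2 and FPRHALF = 5 & 1 = 1.  Without VSX that maps
 * to u32 slot 1 * 2 * 2 + 1 = 5 of fp_state.fpr; with VSX each FPR entry
 * is two doublewords wide, so the slot becomes 2 * 2 * 2 + 1 = 9,
 * skipping over the VSX halves.
 */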
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
unsigned long addr = caddr;
unsigned long data = cdata;
int ret;
switch (request) {
/*
* Read 4 bytes of the other process' storage
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is a pointer in the user's storage that contains an 8-byte
* address in the other process of the 4 bytes that are to be read
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_PEEKTEXT_3264:
case PPC_PTRACE_PEEKDATA_3264: {
u32 tmp;
int copied;
u32 __user * addrOthers;
ret = -EIO;
/* Get the addr in the other process that we want to read */
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
break;
}
/* Read a register (specified by ADDR) out of the "user area" */
case PTRACE_PEEKUSR: {
int index;
unsigned long tmp;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
if (index < PT_FPR0) {
ret = ptrace_get_reg(child, index, &tmp);
if (ret)
break;
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
tmp = ((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)];
}
ret = put_user((unsigned int)tmp, (u32 __user *)data);
break;
}
/*
* Read 4 bytes out of the other process' pt_regs area
* data is a pointer specifying where the user wants the
* 4 bytes copied into
* addr is the offset into the other process' pt_regs structure
* that is to be read
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_PEEKUSR_3264: {
u32 index;
u32 reg32bits;
u64 tmp;
u32 numReg;
u32 part;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/* Determine which part of the register the user wants */
if (index % 2)
part = 1; /* want the 2nd half of the register (right-most). */
else
part = 0; /* want the 1st half of the register (left-most). */
/* Validate the input - check to see if address is on the wrong boundary
* or beyond the end of the user area
*/
if ((addr & 3) || numReg > PT_FPSCR)
break;
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
} else { /* register within PT_REGS struct */
unsigned long tmp2;
ret = ptrace_get_reg(child, numReg, &tmp2);
if (ret)
break;
tmp = tmp2;
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
break;
}
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
* addr is a pointer in the user's storage that contains an
* 8-byte address in the other process where the 4 bytes
* are to be written
* (this is run in a 32-bit process looking at a 64-bit process)
* when I and D space are separate, these will need to be fixed.
*/
case PPC_PTRACE_POKETEXT_3264:
case PPC_PTRACE_POKEDATA_3264: {
u32 tmp = data;
u32 __user * addrOthers;
/* Get the addr in the other process that we want to write into */
ret = -EIO;
if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
break;
ret = 0;
if (ptrace_access_vm(child, (u64)addrOthers, &tmp,
sizeof(tmp),
FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
break;
ret = -EIO;
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
index = (unsigned long) addr >> 2;
if ((addr & 3) || (index > PT_FPSCR32))
break;
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
flush_fp_to_thread(child);
/*
* the user space code considers the floating point
* to be an array of unsigned int (32 bits) - the
* index passed in is based on this assumption.
*/
((unsigned int *)child->thread.fp_state.fpr)
[FPRINDEX(index)] = data;
ret = 0;
}
break;
}
/*
* Write 4 bytes into the other process' pt_regs area
* data is the 4 bytes that the user wants written
* addr is the offset into the other process' pt_regs structure
* that is to be written into
* (this is run in a 32-bit process looking at a 64-bit process)
*/
case PPC_PTRACE_POKEUSR_3264: {
u32 index;
u32 numReg;
ret = -EIO;
/* Determine which register the user wants */
index = (u64)addr >> 2;
numReg = index / 2;
/*
* Validate the input - check to see if address is on the
* wrong boundary or beyond the end of the user area
*/
if ((addr & 3) || (numReg > PT_FPSCR))
break;
if (numReg < PT_FPR0) {
unsigned long freg;
ret = ptrace_get_reg(child, numReg, &freg);
if (ret)
break;
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
freg = (freg & 0xfffffffful) | (data << 32);
ret = ptrace_put_reg(child, numReg, freg);
} else {
u64 *tmp;
flush_fp_to_thread(child);
/* get 64 bit FPR ... */
tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
/* ... write the 32 bit part we want */
((u32 *)tmp)[index % 2] = data;
ret = 0;
}
break;
}
case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr_fake;
#endif
ret = -EINVAL;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
#else
dabr_fake = (
(child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
(child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
ret = put_user(dabr_fake, (u32 __user *)data);
#endif
break;
}
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(
child, task_user_regset_view(current), 0,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
case PTRACE_SETVRREGS:
case PTRACE_GETVSRREGS:
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
case PTRACE_SET_DEBUGREG:
case PTRACE_SYSCALL:
case PTRACE_CONT:
case PPC_PTRACE_GETHWDBGINFO:
case PPC_PTRACE_SETHWDEBUG:
case PPC_PTRACE_DELHWDEBUG:
ret = arch_ptrace(child, request, addr, data);
break;
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
}
return ret;
}
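/*
 * Minimal sketch of the 32-on-64 PEEKUSR path above from the tracer's
 * side (pid is assumed to be an attached 64-bit tracee).  The addr
 * argument selects a 32-bit word: addr = (regno * 2 + part) * 4, with
 * part 0 the upper (left-most) and part 1 the lower (right-most) word of
 * the 64-bit register.  Reading the low half of GPR 3:
 *
 *	__u32 low;
 *	ptrace(PPC_PTRACE_PEEKUSR_3264, pid, (PT_R3 * 2 + 1) * 4, &low);
 */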
| linux-master | arch/powerpc/kernel/ptrace/ptrace32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include "ptrace-decl.h"
void flush_tmregs_to_thread(struct task_struct *tsk)
{
/*
* If task is not current, it will have been flushed already to
* its thread_struct during __switch_to().
*
* A reclaim flushes ALL the state, or, if not in a suspended
* transaction, only the TM SPRs are saved from the live registers
* into the appropriate thread structure.
*/
if (!cpu_has_feature(CPU_FTR_TM) || tsk != current)
return;
if (MSR_TM_SUSPENDED(mfmsr())) {
tm_reclaim_current(TM_CAUSE_SIGNAL);
} else {
tm_enable();
tm_save_sprs(&tsk->thread);
}
}
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
set_trap(&task->thread.ckpt_regs, trap);
return 0;
}
/**
* tm_cgpr_active - get active number of registers in CGPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed GPR category.
*/
int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cgpr_get - get CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @to: Destination of copy.
*
* This function gets transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds all the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function gets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
struct membuf to_msr = membuf_at(&to, offsetof(struct pt_regs, msr));
#ifdef CONFIG_PPC64
struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe));
#endif
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
membuf_write(&to, &target->thread.ckpt_regs, sizeof(struct user_pt_regs));
membuf_store(&to_msr, get_user_ckpt_msr(target));
#ifdef CONFIG_PPC64
membuf_store(&to_softe, 0x1ul);
#endif
return membuf_zero(&to, ELF_NGREG * sizeof(unsigned long) -
sizeof(struct user_pt_regs));
}
/*
* tm_cgpr_set - set the CGPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed GPR registers.
*
* When the transaction is active, 'ckpt_regs' holds the checkpointed
* GPR register values for the current transaction to fall back on if it
* aborts in between. This function sets those checkpointed GPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* struct pt_regs ckpt_regs;
* };
*/
int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_ckpt_trap(target, reg);
}
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
/**
* tm_cfpr_active - get active number of registers in CFPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed FPR category.
*/
int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cfpr_get - get CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @to: Destination of copy.
*
* This function gets in transaction checkpointed FPR registers.
*
* When the transaction is active 'ckfp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed FPR registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
*};
*/
int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_CKFPR(i);
buf[32] = target->thread.ckfp_state.fpscr;
return membuf_write(&to, buf, sizeof(buf));
}
/**
* tm_cfpr_set - set CFPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed FPR registers.
*
* When the transaction is active 'ckfp_state' holds the checkpointed
* FPR register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* FPR registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
*};
*/
int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[33];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
for (i = 0; i < 32; i++)
buf[i] = target->thread.TS_CKFPR(i);
buf[32] = target->thread.ckfp_state.fpscr;
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_CKFPR(i) = buf[i];
target->thread.ckfp_state.fpscr = buf[32];
return 0;
}
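/*
 * Minimal userspace sketch for the CFPR regset above (pid is assumed to
 * be a traced task with an active transaction; otherwise the kernel
 * returns -ENODATA):
 *
 *	struct { __u64 fpr[32]; __u64 fpscr; } ckfp;
 *	struct iovec iov = { .iov_base = &ckfp, .iov_len = sizeof(ckfp) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_CFPR, &iov);
 */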
/**
* tm_cvmx_active - get active number of registers in CVMX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the checkpointed VMX category.
*/
int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
return regset->n;
}
/**
* tm_cvmx_get - get CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @to: Destination of copy.
*
* This function gets in transaction checkpointed VMX registers.
*
* When the transaction is active 'ckvr_state' and 'ckvrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
*};
*/
int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
membuf_write(&to, &target->thread.ckvr_state, 33 * sizeof(vector128));
/*
* Copy out only the low-order word of vrsave.
*/
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.ckvrsave;
return membuf_write(&to, &vrsave, sizeof(vrsave));
}
/**
* tm_cvmx_set - set CVMX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VMX registers.
*
* When the transaction is active 'ckvr_state' and 'ckvrsave' hold
* the checkpointed values for the current transaction to fall
* back on if it aborts in between. The userspace interface buffer
* layout is as follows.
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
*};
*/
int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
0, 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.ckvrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.ckvrsave = vrsave.word;
}
return ret;
}
/**
* tm_cvsx_active - get active number of registers in CVSX
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks for the active number of available
* registers in the transaction checkpointed VSX category.
*/
int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return 0;
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
/**
* tm_cvsx_get - get CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @to: Destination of copy.
*
* This function gets in transaction checkpointed VSX registers.
*
* When the transaction is active 'ckfp_state' holds the checkpointed
* values for the current transaction to fall back on if it aborts
* in between. This function gets those checkpointed VSX registers.
* The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
*};
*/
int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
u64 buf[32];
int i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
return membuf_write(&to, buf, 32 * sizeof(double));
}
/**
* tm_cvsx_set - set CVSX registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets in transaction checkpointed VSX registers.
*
* When the transaction is active 'ckfp_state' holds the checkpointed
* VSX register values for the current transaction to fall back on
* if it aborts in between. This function sets these checkpointed
* VSX registers. The userspace interface buffer layout is as follows.
*
* struct data {
* u64 vsx[32];
*};
*/
int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
u64 buf[32];
int ret, i;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
/* Flush the state */
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
for (i = 0; i < 32 ; i++)
target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
/**
* tm_spr_active - get active number of registers in TM SPR
* @target: The target task.
* @regset: The user regset structure.
*
* This function checks the active number of available
* registers in the transactional memory SPR category.
*/
int tm_spr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
return regset->n;
}
/**
* tm_spr_get - get the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @to: Destination of copy.
*
* This function gets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
/* TFHAR register */
membuf_write(&to, &target->thread.tm_tfhar, sizeof(u64));
/* TEXASR register */
membuf_write(&to, &target->thread.tm_texasr, sizeof(u64));
/* TFIAR register */
return membuf_write(&to, &target->thread.tm_tfiar, sizeof(u64));
}
/**
* tm_spr_set - set the TM related SPR registers
* @target: The target task.
* @regset: The user regset structure.
* @pos: The buffer position.
* @count: Number of bytes to copy.
* @kbuf: Kernel buffer to copy into.
* @ubuf: User buffer to copy from.
*
* This function sets transactional memory related SPR registers.
* The userspace interface buffer layout is as follows.
*
* struct {
* u64 tm_tfhar;
* u64 tm_texasr;
* u64 tm_tfiar;
* };
*/
int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
/* Build tests */
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
/* Flush the states */
flush_tmregs_to_thread(target);
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
/* TFHAR register */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfhar, 0, sizeof(u64));
/* TEXASR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_texasr, sizeof(u64),
2 * sizeof(u64));
/* TFIAR register */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tfiar,
2 * sizeof(u64), 3 * sizeof(u64));
return ret;
}
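/*
 * Minimal userspace sketch for the TM SPR regset above.  Unlike the
 * checkpointed-state regsets, it is available whenever the CPU has the
 * TM feature, with or without an active transaction (pid is assumed to
 * be a traced task):
 *
 *	struct { __u64 tfhar, texasr, tfiar; } tmspr;
 *	struct iovec iov = { .iov_base = &tmspr, .iov_len = sizeof(tmspr) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PPC_TM_SPR, &iov);
 */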
int tm_tar_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
return membuf_write(&to, &target->thread.tm_tar, sizeof(u64));
}
int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_tar, 0, sizeof(u64));
return ret;
}
int tm_ppr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
return membuf_write(&to, &target->thread.tm_ppr, sizeof(u64));
}
int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_ppr, 0, sizeof(u64));
return ret;
}
int tm_dscr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (MSR_TM_ACTIVE(target->thread.regs->msr))
return regset->n;
return 0;
}
int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
return membuf_write(&to, &target->thread.tm_dscr, sizeof(u64));
}
int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
if (!cpu_has_feature(CPU_FTR_TM))
return -ENODEV;
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tm_dscr, 0, sizeof(u64));
return ret;
}
int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
gpr32_get_common(target, regset, to,
&target->thread.ckpt_regs.gpr[0]);
return membuf_zero(&to, ELF_NGREG * sizeof(u32));
}
int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
&target->thread.ckpt_regs.gpr[0]);
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-tm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/debug.h>
#include "ptrace-decl.h"
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
{
dbginfo->version = 1;
dbginfo->num_instruction_bps = 0;
if (ppc_breakpoint_available())
dbginfo->num_data_bps = nr_wp_slots();
else
dbginfo->num_data_bps = 0;
dbginfo->num_condition_regs = 0;
dbginfo->data_bp_alignment = sizeof(long);
dbginfo->sizeof_condition = 0;
if (IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) {
dbginfo->features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
if (dawr_enabled())
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
} else {
dbginfo->features = 0;
}
if (cpu_has_feature(CPU_FTR_ARCH_31))
dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_ARCH_31;
}
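/*
 * Minimal userspace sketch for the info filled in above (pid is assumed
 * to be a traced task, use_ranged_watchpoints a hypothetical tracer
 * flag); tracers query this before deciding which watchpoint features
 * to request:
 *
 *	struct ppc_debug_info info;
 *	ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info);
 *	if (info.features & PPC_DEBUG_FEATURE_DATA_BP_RANGE)
 *		use_ranged_watchpoints = true;
 */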
int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
unsigned long __user *datalp)
{
unsigned long dabr_fake;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
return -EINVAL;
dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
(child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
return put_user(dabr_fake, datalp);
}
/*
* ptrace_set_debugreg() fakes the DABR, and there is only one DABR. So
* even if the internal hw supports more than one watchpoint, we support
* only one watchpoint with this interface.
*/
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &task->thread;
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
bool set_bp = true;
struct arch_hw_breakpoint hw_brk;
/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IACs at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
* passed together with the data address, fitting the design of the
* DABR register, as follows:
*
* bit 0: Read flag
* bit 1: Write flag
* bit 2: Breakpoint translation
*
* Thus, we use them here as such.
*/
/* Ensure breakpoint translation bit is set */
if (data && !(data & HW_BRK_TYPE_TRANSLATE))
return -EIO;
hw_brk.address = data & (~HW_BRK_TYPE_DABR);
hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
hw_brk.len = DABR_MAX_LEN;
hw_brk.hw_len = DABR_MAX_LEN;
set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
bp = thread->ptrace_bps[0];
if (!set_bp) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
return 0;
}
if (bp) {
attr = bp->attr;
attr.bp_addr = hw_brk.address;
attr.bp_len = DABR_MAX_LEN;
arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
/* Enable breakpoint */
attr.disabled = false;
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret)
return ret;
thread->ptrace_bps[0] = bp;
thread->hw_brk[0] = hw_brk;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
attr.bp_len = DABR_MAX_LEN;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
return PTR_ERR(bp);
}
#else /* !CONFIG_HAVE_HW_BREAKPOINT */
if (set_bp && (!ppc_breakpoint_available()))
return -ENODEV;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
task->thread.hw_brk[0] = hw_brk;
return 0;
}
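/*
 * Worked example of the encoding handled above: to watch both reads and
 * writes of an address, a tracer passes the address with the low three
 * bits used as flags (read 0x1, write 0x2, translate 0x4, matching the
 * HW_BRK_TYPE_* values); the translate bit must be set or the request is
 * rejected.  A sketch, with pid and addr assumed:
 *
 *	unsigned long data = (addr & ~7UL) | 0x4 | 0x2 | 0x1;
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0, data);
 */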
#ifdef CONFIG_HAVE_HW_BREAKPOINT
static int find_empty_ptrace_bp(struct thread_struct *thread)
{
int i;
for (i = 0; i < nr_wp_slots(); i++) {
if (!thread->ptrace_bps[i])
return i;
}
return -1;
}
#endif
static int find_empty_hw_brk(struct thread_struct *thread)
{
int i;
for (i = 0; i < nr_wp_slots(); i++) {
if (!thread->hw_brk[i].address)
return i;
}
return -1;
}
long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int i;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int len = 0;
struct thread_struct *thread = &child->thread;
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
struct arch_hw_breakpoint brk;
if (bp_info->version != 1)
return -ENOTSUPP;
/*
* We only support one data breakpoint
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL;
brk.len = DABR_MAX_LEN;
brk.hw_len = DABR_MAX_LEN;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
len = bp_info->addr2 - bp_info->addr;
else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
len = 1;
else
return -EINVAL;
i = find_empty_ptrace_bp(thread);
if (i < 0)
return -ENOSPC;
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)bp_info->addr;
attr.bp_len = len;
arch_bp_generic_fields(brk.type, &attr.bp_type);
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
thread->ptrace_bps[i] = bp;
if (IS_ERR(bp)) {
thread->ptrace_bps[i] = NULL;
return PTR_ERR(bp);
}
return i + 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
return -EINVAL;
i = find_empty_hw_brk(&child->thread);
if (i < 0)
return -ENOSPC;
if (!ppc_breakpoint_available())
return -ENODEV;
child->thread.hw_brk[i] = brk;
return i + 1;
}
long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret = 0;
struct thread_struct *thread = &child->thread;
struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
if (data < 1 || data > nr_wp_slots())
return -EINVAL;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
bp = thread->ptrace_bps[data - 1];
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[data - 1] = NULL;
} else {
ret = -ENOENT;
}
return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
if (!(child->thread.hw_brk[data - 1].flags & HW_BRK_FLAG_DISABLED) &&
child->thread.hw_brk[data - 1].address == 0)
return -ENOENT;
child->thread.hw_brk[data - 1].address = 0;
child->thread.hw_brk[data - 1].type = 0;
child->thread.hw_brk[data - 1].flags = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
return 0;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-noadv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
{
#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
#endif
if (index > PT_FPSCR)
return -EIO;
#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
*data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
else
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
} else
*data = child->thread.fp_state.fpscr;
#else
*data = 0;
#endif
return 0;
}
int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
{
#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
#endif
if (index > PT_FPSCR)
return -EIO;
#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0)) {
if (IS_ENABLED(CONFIG_PPC32))
// On 32-bit the index we are passed refers to 32-bit words
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
else
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
} else
child->thread.fp_state.fpscr = data;
#endif
return 0;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-fpu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
* value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*/
int fpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
flush_fp_to_thread(target);
return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
#else
return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
#endif
}
/*
* Regardless of transactions, 'fp_state' holds the current running
* value of all FPR registers and 'ckfp_state' holds the last checkpointed
* value of all FPR registers for the current transaction.
*
* Userspace interface buffer layout:
*
* struct data {
* u64 fpr[32];
* u64 fpscr;
* };
*
*/
int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
flush_fp_to_thread(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
#else
return 0;
#endif
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-novsx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/nospec.h>
#include <linux/pkeys.h>
#include "ptrace-decl.h"
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
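/*
 * Usage sketch: callers such as the kprobe event parser resolve a
 * register name to an offset once, then read the live value out of a
 * pt_regs with regs_get_register() (regs is assumed to be a valid
 * pt_regs pointer):
 *
 *	int off = regs_query_register_offset("nip");
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */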
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/*
* This does not yet catch signals sent when the child dies; that would
* need handling in exit.c or in signal.c.
*/
static unsigned long get_user_msr(struct task_struct *task)
{
return task->thread.regs->msr | task->thread.fpexc_mode;
}
static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
{
unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
(msr & MSR_DEBUGCHANGE);
regs_set_return_msr(task->thread.regs, newmsr);
return 0;
}
#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
*data = task->thread.dscr;
return 0;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
task->thread.dscr = dscr;
task->thread.dscr_inherit = 1;
return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
return -EIO;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
return -EIO;
}
#endif
/*
* We prevent mucking around with the reserved area of trap
* which is used internally by the kernel.
*/
static __always_inline int set_user_trap(struct task_struct *task, unsigned long trap)
{
set_trap(task->thread.regs, trap);
return 0;
}
/*
* Get contents of register REGNO in task TASK.
*/
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
unsigned int regs_max;
if (task->thread.regs == NULL || !data)
return -EIO;
if (regno == PT_MSR) {
*data = get_user_msr(task);
return 0;
}
if (regno == PT_DSCR)
return get_user_dscr(task, data);
/*
* softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
* is no longer used as a flag, let's force userspace to always see the
* softe value as 1, which means interrupts are not soft disabled.
*/
if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
*data = 1;
return 0;
}
regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
if (regno < regs_max) {
regno = array_index_nospec(regno, regs_max);
*data = ((unsigned long *)task->thread.regs)[regno];
return 0;
}
return -EIO;
}
/*
* Write contents of register REGNO in task TASK.
*/
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
if (regno == PT_DSCR)
return set_user_dscr(task, data);
if (regno <= PT_MAX_PUT_REG) {
regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
struct membuf to_msr = membuf_at(&to, offsetof(struct pt_regs, msr));
#ifdef CONFIG_PPC64
struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe));
#endif
if (target->thread.regs == NULL)
return -EIO;
membuf_write(&to, target->thread.regs, sizeof(struct user_pt_regs));
membuf_store(&to_msr, get_user_msr(target));
#ifdef CONFIG_PPC64
membuf_store(&to_softe, 0x1ul);
#endif
return membuf_zero(&to, ELF_NGREG * sizeof(unsigned long) -
sizeof(struct user_pt_regs));
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
unsigned long reg;
int ret;
if (target->thread.regs == NULL)
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, ®,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_trap(target, reg);
}
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
#ifdef CONFIG_PPC64
static int ppr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!target->thread.regs)
return -EINVAL;
return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
}
static int ppr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
if (!target->thread.regs)
return -EINVAL;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->ppr, 0, sizeof(u64));
}
static int dscr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
return membuf_write(&to, &target->thread.dscr, sizeof(u64));
}
static int dscr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.dscr, 0, sizeof(u64));
}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
static int tar_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
return membuf_write(&to, &target->thread.tar, sizeof(u64));
}
static int tar_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.tar, 0, sizeof(u64));
}
static int ebb_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return regset->n;
return 0;
}
static int ebb_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (!target->thread.used_ebb)
return -ENODATA;
return membuf_write(&to, &target->thread.ebbrr, 3 * sizeof(unsigned long));
}
static int ebb_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
if (target->thread.used_ebb)
return -ENODATA;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
0, sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.ebbhr, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.bescr, 2 * sizeof(unsigned long),
3 * sizeof(unsigned long));
return ret;
}
static int pmu_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return regset->n;
}
static int pmu_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
return membuf_write(&to, &target->thread.siar, 5 * sizeof(unsigned long));
}
static int pmu_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
int ret = 0;
/* Build tests */
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return -ENODEV;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
0, sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sdar, sizeof(unsigned long),
2 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.sier, 2 * sizeof(unsigned long),
3 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr2, 3 * sizeof(unsigned long),
4 * sizeof(unsigned long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.mmcr0, 4 * sizeof(unsigned long),
5 * sizeof(unsigned long));
return ret;
}
static int dexcr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
return regset->n;
}
static int dexcr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
/*
* The DEXCR is currently static across all CPUs, so we don't
* store the target's value anywhere, but the static value
* will also be correct.
*/
membuf_store(&to, (u64)lower_32_bits(DEXCR_INIT));
/*
* Technically the HDEXCR is per-cpu, but a hypervisor can't reasonably
* change it between CPUs of the same guest.
*/
return membuf_store(&to, (u64)lower_32_bits(mfspr(SPRN_HDEXCR_RO)));
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int hashkeyr_active(struct task_struct *target, const struct user_regset *regset)
{
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
return regset->n;
}
static int hashkeyr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
return membuf_store(&to, target->thread.hashkeyr);
}
static int hashkeyr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
if (!cpu_has_feature(CPU_FTR_ARCH_31))
return -ENODEV;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.hashkeyr,
0, sizeof(unsigned long));
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_MEM_KEYS
static int pkey_active(struct task_struct *target, const struct user_regset *regset)
{
if (!arch_pkeys_enabled())
return -ENODEV;
return regset->n;
}
static int pkey_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
if (!arch_pkeys_enabled())
return -ENODEV;
membuf_store(&to, target->thread.regs->amr);
membuf_store(&to, target->thread.regs->iamr);
return membuf_store(&to, default_uamor);
}
static int pkey_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count, const void *kbuf,
const void __user *ubuf)
{
u64 new_amr;
int ret;
if (!arch_pkeys_enabled())
return -ENODEV;
/* Only the AMR can be set from userspace */
if (pos != 0 || count != sizeof(new_amr))
return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_amr, 0, sizeof(new_amr));
if (ret)
return ret;
/*
* UAMOR determines which bits of the AMR can be set from userspace.
* UAMOR value 0b11 indicates that the AMR value can be modified
* from userspace. If the kernel is using a specific key, we avoid
* userspace modifying the AMR value for that key by masking them
* via UAMOR 0b00.
*
	 * Pick the AMR values for the keys that the kernel is using. This
* will be indicated by the ~default_uamor bits.
*/
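	/*
	 * In other words: bits where default_uamor is 1 are taken from the
	 * user-supplied new_amr, while bits where it is 0 (keys reserved by
	 * the kernel) keep their current AMR value.
	 */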
target->thread.regs->amr = (new_amr & default_uamor) |
(target->thread.regs->amr & ~default_uamor);
return 0;
}
#endif /* CONFIG_PPC_MEM_KEYS */
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.regset_get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.regset_get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .regset_get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .regset_get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .regset_get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active, .regset_get = tm_cgpr_get, .set = tm_cgpr_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .regset_get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .regset_get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .regset_get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .regset_get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .regset_get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .regset_get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .regset_get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .regset_get = ebb_get, .set = ebb_set
},
[REGSET_PMR] = {
.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
.size = sizeof(u64), .align = sizeof(u64),
.active = pmu_active, .regset_get = pmu_get, .set = pmu_set
},
[REGSET_DEXCR] = {
.core_note_type = NT_PPC_DEXCR, .n = ELF_NDEXCR,
.size = sizeof(u64), .align = sizeof(u64),
.active = dexcr_active, .regset_get = dexcr_get
},
#ifdef CONFIG_CHECKPOINT_RESTORE
[REGSET_HASHKEYR] = {
.core_note_type = NT_PPC_HASHKEYR, .n = ELF_NHASHKEYR,
.size = sizeof(u64), .align = sizeof(u64),
.active = hashkeyr_active, .regset_get = hashkeyr_get, .set = hashkeyr_set
},
#endif
#endif
#ifdef CONFIG_PPC_MEM_KEYS
[REGSET_PKEY] = {
.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
.size = sizeof(u64), .align = sizeof(u64),
.active = pkey_active, .regset_get = pkey_get, .set = pkey_set
},
#endif
};
const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#include <linux/compat.h>
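/*
 * Compat (32-bit) view of the GPR regset: each of the PT_REGS_COUNT
 * registers is exported as the low 32 bits of the 64-bit value, the MSR
 * slot is filled via get_user_msr(), and the remainder of the ELF_NGREG
 * note is zero-padded.
 */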
int gpr32_get_common(struct task_struct *target,
const struct user_regset *regset,
struct membuf to, unsigned long *regs)
{
int i;
for (i = 0; i < PT_MSR; i++)
membuf_store(&to, (u32)regs[i]);
membuf_store(&to, (u32)get_user_msr(target));
	for (i++; i < PT_REGS_COUNT; i++)
membuf_store(&to, (u32)regs[i]);
return membuf_zero(&to, (ELF_NGREG - PT_REGS_COUNT) * sizeof(u32));
}
static int gpr32_set_common_kernel(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, unsigned long *regs)
{
const compat_ulong_t *k = kbuf;
pos /= sizeof(compat_ulong_t);
count /= sizeof(compat_ulong_t);
for (; count > 0 && pos < PT_MSR; --count)
regs[pos++] = *k++;
if (count > 0 && pos == PT_MSR) {
set_user_msr(target, *k++);
++pos;
--count;
}
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
regs[pos++] = *k++;
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
++k;
if (count > 0 && pos == PT_TRAP) {
set_user_trap(target, *k++);
++pos;
--count;
}
kbuf = k;
pos *= sizeof(compat_ulong_t);
count *= sizeof(compat_ulong_t);
user_regset_copyin_ignore(&pos, &count, &kbuf, NULL,
(PT_TRAP + 1) * sizeof(compat_ulong_t), -1);
return 0;
}
static int gpr32_set_common_user(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void __user *ubuf, unsigned long *regs)
{
const compat_ulong_t __user *u = ubuf;
const void *kbuf = NULL;
compat_ulong_t reg;
if (!user_read_access_begin(u, count))
return -EFAULT;
pos /= sizeof(reg);
count /= sizeof(reg);
for (; count > 0 && pos < PT_MSR; --count) {
unsafe_get_user(reg, u++, Efault);
regs[pos++] = reg;
}
if (count > 0 && pos == PT_MSR) {
unsafe_get_user(reg, u++, Efault);
set_user_msr(target, reg);
++pos;
--count;
}
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
unsafe_get_user(reg, u++, Efault);
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
unsafe_get_user(reg, u++, Efault);
if (count > 0 && pos == PT_TRAP) {
unsafe_get_user(reg, u++, Efault);
set_user_trap(target, reg);
++pos;
--count;
}
user_read_access_end();
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return 0;
Efault:
user_read_access_end();
return -EFAULT;
}
int gpr32_set_common(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf,
unsigned long *regs)
{
if (kbuf)
return gpr32_set_common_kernel(target, regset, pos, count, kbuf, regs);
else
return gpr32_set_common_user(target, regset, pos, count, ubuf, regs);
}
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
if (target->thread.regs == NULL)
return -EIO;
return gpr32_get_common(target, regset, to,
&target->thread.regs->gpr[0]);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
if (target->thread.regs == NULL)
return -EIO;
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
&target->thread.regs->gpr[0]);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.regset_get = gpr32_get, .set = gpr32_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.regset_get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .regset_get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .regset_get = evr_get, .set = evr_set
},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
[REGSET_TM_CGPR] = {
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.active = tm_cgpr_active,
.regset_get = tm_cgpr32_get, .set = tm_cgpr32_set
},
[REGSET_TM_CFPR] = {
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cfpr_active, .regset_get = tm_cfpr_get, .set = tm_cfpr_set
},
[REGSET_TM_CVMX] = {
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = tm_cvmx_active, .regset_get = tm_cvmx_get, .set = tm_cvmx_set
},
[REGSET_TM_CVSX] = {
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
.size = sizeof(double), .align = sizeof(double),
.active = tm_cvsx_active, .regset_get = tm_cvsx_get, .set = tm_cvsx_set
},
[REGSET_TM_SPR] = {
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_spr_active, .regset_get = tm_spr_get, .set = tm_spr_set
},
[REGSET_TM_CTAR] = {
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_tar_active, .regset_get = tm_tar_get, .set = tm_tar_set
},
[REGSET_TM_CPPR] = {
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_ppr_active, .regset_get = tm_ppr_get, .set = tm_ppr_set
},
[REGSET_TM_CDSCR] = {
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.active = tm_dscr_active, .regset_get = tm_dscr_get, .set = tm_dscr_set
},
#endif
#ifdef CONFIG_PPC64
[REGSET_PPR] = {
.core_note_type = NT_PPC_PPR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = ppr_get, .set = ppr_set
},
[REGSET_DSCR] = {
.core_note_type = NT_PPC_DSCR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = dscr_get, .set = dscr_set
},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
[REGSET_TAR] = {
.core_note_type = NT_PPC_TAR, .n = 1,
.size = sizeof(u64), .align = sizeof(u64),
.regset_get = tar_get, .set = tar_set
},
[REGSET_EBB] = {
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
.size = sizeof(u64), .align = sizeof(u64),
.active = ebb_active, .regset_get = ebb_get, .set = ebb_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
if (IS_ENABLED(CONFIG_COMPAT) && is_tsk_32bit_task(task))
return &user_ppc_compat_view;
return &user_ppc_native_view;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-view.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/regset.h>
#include <linux/elf.h>
#include <asm/switch_to.h>
#include "ptrace-decl.h"
/*
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
* This definition of the VMX state is compatible with the current PPC32
* ptrace interface. This allows signal handling and ptrace to use the
* same structures. This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
*/
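/*
 * In byte offsets that is (with 16-byte quadwords): vr0..vr31 at 0..511,
 * the vscr value word at 524 (last word of quadword 32), vrsave at 528
 * (first word of quadword 33), for a 544-byte regset in total.
 */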
int vr_active(struct task_struct *target, const struct user_regset *regset)
{
flush_altivec_to_thread(target);
return target->thread.used_vr ? regset->n : 0;
}
/*
* Regardless of transactions, 'vr_state' holds the current running
* value of all the VMX registers and 'ckvr_state' holds the last
* checkpointed value of all the VMX registers for the current
* transaction to fall back on in case it aborts.
*
* Userspace interface buffer layout:
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
int vr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
membuf_write(&to, &target->thread.vr_state, 33 * sizeof(vector128));
/*
* Copy out only the low-order word of vrsave.
*/
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
return membuf_write(&to, &vrsave, sizeof(vrsave));
}
/*
* Regardless of transactions, 'vr_state' holds the current running
* value of all the VMX registers and 'ckvr_state' holds the last
* checkpointed value of all the VMX registers for the current
* transaction to fall back on in case it aborts.
*
* Userspace interface buffer layout:
*
* struct data {
* vector128 vr[32];
* vector128 vscr;
* vector128 vrsave;
* };
*/
int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
offsetof(struct thread_vr_state, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr_state, 0,
33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
*/
int start, end;
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
start = 33 * sizeof(vector128);
end = start + sizeof(vrsave);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
start, end);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
| linux-master | arch/powerpc/kernel/ptrace/ptrace-altivec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* arch/openrisc/lib/memcpy.c
*
* Optimized memory copy routines for openrisc. These are mostly copied
 * from other sources but slightly extended based on ideas discussed in
* #openrisc.
*
* The word unroll implementation is an extension to the arm byte
* unrolled implementation, but using word copies (if things are
* properly aligned)
*
* The great arm loop unroll algorithm can be found at:
* arch/arm/boot/compressed/string.c
*/
#include <linux/export.h>
#include <linux/string.h>
#ifdef CONFIG_OR1K_1200
/*
* Do memcpy with word copies and loop unrolling. This gives the
* best performance on the OR1200 and MOR1KX archirectures
*/
void *memcpy(void *dest, __const void *src, __kernel_size_t n)
{
int i = 0;
unsigned char *d, *s;
uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src;
/* If both source and dest are word aligned copy words */
if (!((unsigned int)dest_w & 3) && !((unsigned int)src_w & 3)) {
/* Copy 32 bytes per loop */
for (i = n >> 5; i > 0; i--) {
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
}
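		/*
		 * Tail handling: the tests on bits 4, 3 and 2 of n below copy
		 * any remaining 16, 8 or 4 bytes as whole words; the byte
		 * copies at the end of the function mop up bits 1 and 0.
		 */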
if (n & 1 << 4) {
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
}
if (n & 1 << 3) {
*dest_w++ = *src_w++;
*dest_w++ = *src_w++;
}
if (n & 1 << 2)
*dest_w++ = *src_w++;
d = (unsigned char *)dest_w;
s = (unsigned char *)src_w;
} else {
d = (unsigned char *)dest_w;
s = (unsigned char *)src_w;
for (i = n >> 3; i > 0; i--) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
if (n & 1 << 2) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
}
}
if (n & 1 << 1) {
*d++ = *s++;
*d++ = *s++;
}
if (n & 1)
*d++ = *s++;
return dest;
}
#else
/*
* Use word copies but no loop unrolling as we cannot assume there
 * will be benefits on the architecture
*/
void *memcpy(void *dest, __const void *src, __kernel_size_t n)
{
unsigned char *d, *s;
uint32_t *dest_w = (uint32_t *)dest, *src_w = (uint32_t *)src;
/* If both source and dest are word aligned copy words */
if (!((unsigned int)dest_w & 3) && !((unsigned int)src_w & 3)) {
for (; n >= 4; n -= 4)
*dest_w++ = *src_w++;
}
d = (unsigned char *)dest_w;
s = (unsigned char *)src_w;
/* For remaining or if not aligned, copy bytes */
for (; n >= 1; n -= 1)
*d++ = *s++;
return dest;
}
#endif
EXPORT_SYMBOL(memcpy);
| linux-master | arch/openrisc/lib/memcpy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* OpenRISC Linux
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* Precise Delay Loops
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <asm/param.h>
#include <asm/delay.h>
#include <asm/timex.h>
#include <asm/processor.h>
int read_current_timer(unsigned long *timer_value)
{
*timer_value = get_cycles();
return 0;
}
void __delay(unsigned long cycles)
{
cycles_t start = get_cycles();
while ((get_cycles() - start) < cycles)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
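/*
 * A sketch of the fixed-point arithmetic used below: __udelay() passes
 * xloops = usecs * 0x10C7, where 0x10C7 = 4295 ~= 2^32 / 10^6.
 * __const_udelay() then computes (xloops * loops_per_jiffy * HZ) >> 32,
 * which is approximately usecs * loops_per_second / 10^6 delay loops.
 */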
inline void __const_udelay(unsigned long xloops)
{
unsigned long long loops;
loops = (unsigned long long)xloops * loops_per_jiffy * HZ;
__delay(loops >> 32);
}
EXPORT_SYMBOL(__const_udelay);
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
| linux-master | arch/openrisc/lib/delay.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC idle.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
int mem_init_done;
static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
/*
* We use only ZONE_NORMAL
*/
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfn);
}
extern const char _s_kernel_ro[], _e_kernel_ro[];
/*
* Map all physical memory into kernel's address space.
*
* This is explicitly coded for two-level page tables, so if you need
* something else then this needs to change.
*/
static void __init map_ram(void)
{
phys_addr_t start, end;
unsigned long v, p, e;
pgprot_t prot;
pgd_t *pge;
p4d_t *p4e;
pud_t *pue;
pmd_t *pme;
pte_t *pte;
u64 i;
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
v = PAGE_OFFSET;
for_each_mem_range(i, &start, &end) {
p = (u32) start & PAGE_MASK;
e = (u32) end;
v = (u32) __va(p);
pge = pgd_offset_k(v);
while (p < e) {
int j;
p4e = p4d_offset(pge, v);
pue = pud_offset(p4e, v);
pme = pmd_offset(pue, v);
if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
panic("%s: OR1K kernel hardcoded for "
"two-level page tables",
__func__);
}
/* Alloc one page for holding PTE's... */
pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate page for PTEs\n",
__func__);
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
for (j = 0; p < e && j < PTRS_PER_PTE;
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
if (v >= (u32) _e_kernel_ro ||
v < (u32) _s_kernel_ro)
prot = PAGE_KERNEL;
else
prot = PAGE_KERNEL_RO;
set_pte(pte, mk_pte_phys(p, prot));
}
pge++;
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
start, end);
}
}
void __init paging_init(void)
{
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
/* clear out the init_mm.pgd that will contain the kernel's mappings */
for (i = 0; i < PTRS_PER_PGD; i++)
swapper_pg_dir[i] = __pgd(0);
/* make sure the current pgd table points to something sane
* (even if it is most probably not used until the next
* switch_mm)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
map_ram();
zone_sizes_init();
/* self modifying code ;) */
/* Since the old TLB miss handler has been running up until now,
* the kernel pages are still all RW, so we can still modify the
* text directly... after this change and a TLB flush, the kernel
* pages will become RO.
*/
{
extern unsigned long dtlb_miss_handler;
extern unsigned long itlb_miss_handler;
unsigned long *dtlb_vector = __va(0x900);
unsigned long *itlb_vector = __va(0xa00);
printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
*itlb_vector = ((unsigned long)&itlb_miss_handler -
(unsigned long)itlb_vector) >> 2;
/* Soft ordering constraint to ensure that dtlb_vector is
* the last thing updated
*/
barrier();
printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
(unsigned long)dtlb_vector) >> 2;
}
/* Soft ordering constraint to ensure that cache invalidation and
* TLB flush really happen _after_ code has been modified.
*/
barrier();
/* Invalidate instruction caches after code modification */
mtspr(SPR_ICBIR, 0x900);
mtspr(SPR_ICBIR, 0xa00);
	/* New TLB miss handlers and kernel page tables are now in place.
* Make sure that page flags get updated for all pages in TLB by
* flushing the TLB and forcing all TLB entries to be recreated
* from their page table flags.
*/
flush_tlb_all();
}
/* References to section boundaries */
void __init mem_init(void)
{
BUG_ON(!mem_map);
max_mapnr = max_low_pfn;
high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
/* clear the zero-page */
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/* this will put all low memory onto the freelists */
memblock_free_all();
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
}
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY_X,
[VM_WRITE] = PAGE_COPY,
[VM_WRITE | VM_READ] = PAGE_COPY_X,
[VM_EXEC] = PAGE_READONLY,
[VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_EXEC | VM_WRITE] = PAGE_COPY,
[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
[VM_SHARED] = PAGE_NONE,
[VM_SHARED | VM_READ] = PAGE_READONLY_X,
[VM_SHARED | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED_X,
[VM_SHARED | VM_EXEC] = PAGE_READONLY,
[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/openrisc/mm/init.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC ioremap.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
extern int mem_init_done;
/*
* OK, this one's a bit tricky... ioremap can get called before memory is
* initialized (early serial console does this) and will want to alloc a page
* for its mapping. No userspace pages will ever get allocated before memory
* is initialized so this applies only to kernel pages. In the event that
* this is called before memory is initialized we allocate the page using
* the memblock infrastructure.
*/
pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
if (likely(mem_init_done)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
} else {
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
}
return pte;
}
| linux-master | arch/openrisc/mm/ioremap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC cache.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2015 Jan Henrik Weinstock <[email protected]>
*/
#include <asm/spr.h>
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
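/*
 * Walk every L1 cache line covering the given page and write the line's
 * physical address to the supplied cache control SPR (SPR_DCBFR for the
 * dcache flush below, SPR_ICBIR for the icache invalidate).
 */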
static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
while (line < paddr + PAGE_SIZE) {
mtspr(reg, line);
line += L1_CACHE_BYTES;
}
}
void local_dcache_page_flush(struct page *page)
{
cache_loop(page, SPR_DCBFR);
}
EXPORT_SYMBOL(local_dcache_page_flush);
void local_icache_page_inv(struct page *page)
{
cache_loop(page, SPR_ICBIR);
}
EXPORT_SYMBOL(local_icache_page_inv);
void update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
struct folio *folio = page_folio(pfn_to_page(pfn));
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
/*
* Since icaches do not snoop for updated data on OpenRISC, we
* must write back and invalidate any dirty pages manually. We
* can skip data pages, since they will not end up in icaches.
*/
if ((vma->vm_flags & VM_EXEC) && dirty) {
unsigned int nr = folio_nr_pages(folio);
while (nr--)
sync_icache_dcache(folio_page(folio, nr));
}
}
| linux-master | arch/openrisc/mm/cache.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC fault.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/bug.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
* - also look into include/asm/mmu_context.h
*/
volatile pgd_t *current_pgd[NR_CPUS];
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long vector, int write_acc);
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*
* If this routine detects a bad access, it returns 1, otherwise it
* returns 0.
*/
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long vector, int write_acc)
{
struct task_struct *tsk;
struct mm_struct *mm;
struct vm_area_struct *vma;
int si_code;
vm_fault_t fault;
unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes' pgdirs and
* add the high mappings all at once. Instead we do it as they
* are used. However vmalloc'ed page entries have the PAGE_GLOBAL
* bit set so sometimes the TLB can use a lingering entry.
*
* This verifies that the fault happens in kernel space
* and that the fault was not a protection error.
*/
if (address >= VMALLOC_START &&
(vector != 0x300 && vector != 0x400) &&
!user_mode(regs))
goto vmalloc_fault;
/* If exceptions were enabled, we can reenable them here */
if (user_mode(regs)) {
/* Exception was in userspace: reenable interrupts */
local_irq_enable();
flags |= FAULT_FLAG_USER;
} else {
/* If exception was in a syscall, then IRQ's may have
* been enabled or disabled. If they were enabled,
* reenable them.
*/
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
local_irq_enable();
}
mm = tsk->mm;
si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_interrupt() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (user_mode(regs)) {
/*
* accessing the stack below usp is always a bug.
* we get page-aligned addresses so we can only check
* if we're within a page from usp, but that might be
* enough to catch brutal errors at least.
*/
if (address + PAGE_SIZE < regs->sp)
goto bad_area;
}
vma = expand_stack(mm, address);
if (!vma)
goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
si_code = SEGV_ACCERR;
/* first do some preliminary protection checks */
if (write_acc) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
} else {
/* not present */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
/* are we trying to execute nonexecutable area */
if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
goto bad_area;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
/*RGD modeled on Cris */
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
/* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
mmap_read_unlock(mm);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
/* Are we prepared to handle this kernel fault?
*
* (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
* of those points, we find it in a table and do a jump
* to some fixup code that loads an appropriate error
* code)
*/
{
const struct exception_table_entry *entry;
if ((entry = search_exception_tables(regs->pc)) != NULL) {
/* Adjust the instruction pointer in the stackframe */
regs->pc = entry->fixup;
return;
}
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
if ((unsigned long)(address) < PAGE_SIZE)
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference");
else
printk(KERN_ALERT "Unable to handle kernel access");
printk(" at virtual address 0x%08lx\n", address);
die("Oops", regs, write_acc);
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
return;
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Use current_pgd instead of tsk->active_mm->pgd
* since the latter might be unavailable if this
* code is executed in a misfortunately run irq
* (like inside schedule() between switch_mm and
* switch_to...).
*/
int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
/*
phx_warn("do_page_fault(): vmalloc_fault will not work, "
"since current_pgd assign a proper value somewhere\n"
"anyhow we don't need this at the moment\n");
phx_mmu("vmalloc_fault");
*/
pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
/* Since we're two-level, we don't need to do both
* set_pgd and set_pmd (they do the same thing). If
* we go three-level at some point, do the right thing
* with pgd_present and set_pgd here.
*
* Also, since the vmalloc area is global, we don't
* need to copy individual PTE's, it is enough to
* copy the pgd pointer into the pte page of the
* root task. If that is there, we'll find our pte if
* it exists.
*/
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
if (!p4d_present(*p4d_k))
goto no_context;
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto bad_area_nosemaphore;
set_pmd(pmd, *pmd_k);
/* Make sure the actual PTE exists as well to
* catch kernel vmalloc-area accesses to non-mapped
* addresses. If we don't do this, this will just
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
}
| linux-master | arch/openrisc/mm/fault.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC tlb.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Julius Baxter <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>
#define NO_CONTEXT -1
#define NUM_DTLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
SPR_DMMUCFGR_NTS_OFF))
#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
SPR_IMMUCFGR_NTS_OFF))
#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))
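/*
 * The xTLB_OFFSET() macros index the TLB by set only: the set is the
 * virtual page number modulo the number of sets. This is what the
 * *_no_eir flush variants below rely on when invalidating a single
 * entry by hand on MMUs without the xTLBEIR register.
 */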
/*
* Invalidate all TLB entries.
*
* This comes down to setting the 'valid' bit for all xTLBMR registers to 0.
* Easiest way to accomplish this is to just zero out the xTLBMR register
* completely.
*
*/
void local_flush_tlb_all(void)
{
int i;
unsigned long num_tlb_sets;
/* Determine number of sets for IMMU. */
/* FIXME: Assumption is I & D nsets equal. */
num_tlb_sets = NUM_ITLB_SETS;
for (i = 0; i < num_tlb_sets; i++) {
mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
}
}
#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)
/*
* Invalidate a single page. This is what the xTLBEIR register is for.
*
* There's no point in checking the vma for PAGE_EXEC to determine whether it's
* the data or instruction TLB that should be flushed... that would take more
* than the few instructions that the following compiles down to!
*
* The case where we don't have the xTLBEIR register really only works for
 * MMUs with a single way and is hard-coded that way.
*/
#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
#define flush_dtlb_page_no_eir(addr) \
mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0);
#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
#define flush_itlb_page_no_eir(addr) \
mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
if (have_dtlbeir)
flush_dtlb_page_eir(addr);
else
flush_dtlb_page_no_eir(addr);
if (have_itlbeir)
flush_itlb_page_eir(addr);
else
flush_itlb_page_no_eir(addr);
}
void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int addr;
bool dtlbeir;
bool itlbeir;
dtlbeir = have_dtlbeir;
itlbeir = have_itlbeir;
for (addr = start; addr < end; addr += PAGE_SIZE) {
if (dtlbeir)
flush_dtlb_page_eir(addr);
else
flush_dtlb_page_no_eir(addr);
if (itlbeir)
flush_itlb_page_eir(addr);
else
flush_itlb_page_no_eir(addr);
}
}
/*
* Invalidate the selected mm context only.
*
* FIXME: Due to some bug here, we're flushing everything for now.
 * This should be changed to loop over the mm and call flush_tlb_range.
*/
void local_flush_tlb_mm(struct mm_struct *mm)
{
/* Was seeing bugs with the mm struct passed to us. Scrapped most of
this function. */
/* Several architectures do this */
local_flush_tlb_all();
}
/* called in schedule() just before actually doing the switch_to */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *next_tsk)
{
unsigned int cpu;
if (unlikely(prev == next))
return;
cpu = smp_processor_id();
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
/* remember the pgd for the fault handlers
* this is similar to the pgd register in some other CPU's.
* we need our own copy of it because current and active_mm
	 * might be invalid at points where we still need to dereference
* the pgd.
*/
current_pgd[cpu] = next->pgd;
/* We don't have context support implemented, so flush all
* entries belonging to previous map
*/
local_flush_tlb_mm(prev);
}
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = NO_CONTEXT;
return 0;
}
/* called by __exit_mm to destroy the used MMU context if any before
* destroying the mm itself. this is only called when the last user of the mm
* drops it.
*/
void destroy_context(struct mm_struct *mm)
{
flush_tlb_mm(mm);
}
| linux-master | arch/openrisc/mm/tlb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC process.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* This file handles the architecture-dependent parts of process handling...
*/
#define __KERNEL_SYSCALLS__
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/elfcore.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/spr_defs.h>
#include <asm/switch_to.h>
#include <linux/smp.h>
/*
* Pointer to Current thread info structure.
*
* Used at user space -> kernel transitions.
*/
struct thread_info *current_thread_info_set[NR_CPUS] = { &init_thread_info, };
void machine_restart(char *cmd)
{
do_kernel_restart(cmd);
__asm__("l.nop 13");
	/* Give the restart a 1s grace period before declaring failure */
mdelay(1000);
/* Whoops - the platform was unable to reboot. Tell the user! */
pr_emerg("Reboot failed -- System halted\n");
while (1);
}
/*
* This is used if pm_power_off has not been set by a power management
* driver, in this case we can assume we are on a simulator. On
* OpenRISC simulators l.nop 1 will trigger the simulator exit.
*/
static void default_power_off(void)
{
__asm__("l.nop 1");
}
/*
* Similar to machine_power_off, but don't shut off power. Add code
 * here to freeze the system for e.g. post-mortem debugging when
* possible. This halt has nothing to do with the idle halt.
*/
void machine_halt(void)
{
printk(KERN_INFO "*** MACHINE HALT ***\n");
__asm__("l.nop 1");
}
/* If or when software power-off is implemented, add code here. */
void machine_power_off(void)
{
printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
if (pm_power_off != NULL)
pm_power_off();
else
default_power_off();
}
/*
* Send the doze signal to the cpu if available.
 * Make sure that all interrupts are enabled
*/
void arch_cpu_idle(void)
{
raw_local_irq_enable();
if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
raw_local_irq_disable();
}
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
* When a process does an "exec", machine state like FPU and debug
* registers need to be reset. This is a hook function for that.
* Currently we don't have any such state to reset, so this is empty.
*/
void flush_thread(void)
{
}
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
/* __PHX__ cleanup this mess */
show_registers(regs);
}
/*
* Copy the thread-specific (arch specific) info from the current
* process to the new one p
*/
extern asmlinkage void ret_from_fork(void);
/*
* copy_thread
* @clone_flags: flags
* @usp: user stack pointer or fn for kernel thread
* @arg: arg to fn for kernel thread; always NULL for userspace thread
* @p: the newly created task
* @tls: the Thread Local Storage pointer for the new process
*
 * At the top of a newly initialized kernel stack are two stacked pt_regs
* structures. The first (topmost) is the userspace context of the thread.
* The second is the kernelspace context of the thread.
*
* A kernel thread will not be returning to userspace, so the topmost pt_regs
* struct can be uninitialized; it _does_ need to exist, though, because
* a kernel thread can become a userspace thread by doing a kernel_execve, in
* which case the topmost context will be initialized and used for 'returning'
* to userspace.
*
 * The second pt_regs struct needs to be initialized to 'return' to
* ret_from_fork. A kernel thread will need to set r20 to the address of
* a function to call into (with arg in r22); userspace threads need to set
* r20 to NULL in which case ret_from_fork will just continue a return to
* userspace.
*
* A kernel thread 'fn' may return; this is effectively what happens when
* kernel_execve is called. In that case, the userspace pt_regs must have
* been initialized (which kernel_execve takes care of, see start_thread
* below); ret_from_fork will then continue its execution causing the
* 'kernel thread' to return to userspace as a userspace thread.
*/
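/*
 * Resulting kernel stack layout (sketch), growing down from
 * task_stack_page(p) + THREAD_SIZE:
 *
 *   top_of_kernel_stack
 *   [ STACK_FRAME_OVERHEAD redzone        ]
 *   [ struct pt_regs (userspace context)  ]  <- userregs
 *   [ STACK_FRAME_OVERHEAD redzone        ]
 *   [ struct pt_regs (kernel context)     ]  <- kregs
 */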
int
copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct pt_regs *userregs;
struct pt_regs *kregs;
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
unsigned long top_of_kernel_stack;
top_of_kernel_stack = sp;
/* Locate userspace context on stack... */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);
userregs = (struct pt_regs *) sp;
/* ...and kernel context */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);
kregs = (struct pt_regs *)sp;
if (unlikely(args->fn)) {
memset(kregs, 0, sizeof(struct pt_regs));
kregs->gpr[20] = (unsigned long)args->fn;
kregs->gpr[22] = (unsigned long)args->fn_arg;
} else {
*userregs = *current_pt_regs();
if (usp)
userregs->sp = usp;
/*
* For CLONE_SETTLS set "tp" (r10) to the TLS pointer.
*/
if (clone_flags & CLONE_SETTLS)
userregs->gpr[10] = tls;
userregs->gpr[11] = 0; /* Result from fork() */
kregs->gpr[20] = 0; /* Userspace thread */
}
/*
* _switch wants the kernel stack page in pt_regs->sp so that it
* can restore it to thread_info->ksp... see _switch for details.
*/
kregs->sp = top_of_kernel_stack;
kregs->gpr[9] = (unsigned long)ret_from_fork;
task_thread_info(p)->ksp = (unsigned long)kregs;
return 0;
}
/*
* Set up a thread for executing a new program
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM;
memset(regs, 0, sizeof(struct pt_regs));
regs->pc = pc;
regs->sr = sr;
regs->sp = sp;
}
extern struct thread_info *_switch(struct thread_info *old_ti,
struct thread_info *new_ti);
extern int lwa_flag;
struct task_struct *__switch_to(struct task_struct *old,
struct task_struct *new)
{
struct task_struct *last;
struct thread_info *new_ti, *old_ti;
unsigned long flags;
local_irq_save(flags);
/* current_set is an array of saved current pointers
* (one for each cpu). we need them at user->kernel transition,
* while we save them at kernel->user transition
*/
new_ti = new->stack;
old_ti = old->stack;
lwa_flag = 0;
current_thread_info_set[smp_processor_id()] = new_ti;
last = (_switch(old_ti, new_ti))->task;
local_irq_restore(flags);
return last;
}
/*
* Write out registers in core dump format, as defined by the
* struct user_regs_struct
*/
void dump_elf_thread(elf_greg_t *dest, struct pt_regs* regs)
{
dest[0] = 0; /* r0 */
memcpy(dest+1, regs->gpr+1, 31*sizeof(unsigned long));
dest[32] = regs->pc;
dest[33] = regs->sr;
dest[34] = 0;
dest[35] = 0;
}
unsigned long __get_wchan(struct task_struct *p)
{
/* TODO */
return 0;
}
| linux-master | arch/openrisc/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC irq.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/export.h>
#include <linux/irqflags.h>
/* read interrupt enabled status */
unsigned long arch_local_save_flags(void)
{
return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
}
EXPORT_SYMBOL(arch_local_save_flags);
/* set interrupt enabled status */
void arch_local_irq_restore(unsigned long flags)
{
mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
}
EXPORT_SYMBOL(arch_local_irq_restore);
void __init init_IRQ(void)
{
irqchip_init();
}
| linux-master | arch/openrisc/kernel/irq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC ptrace.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2005 Gyorgy Jeney <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <asm/thread_info.h>
#include <asm/page.h>
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
/*
* Copy the thread state to a regset that can be interpreted by userspace.
*
* It doesn't matter what our internal pt_regs structure looks like. The
* important thing is that we export a consistent view of the thread state
* to userspace. As such, we need to make sure that the regset remains
* ABI compatible as defined by the struct user_regs_struct:
*
* (Each item is a 32-bit word)
* r0 = 0 (exported for clarity)
* 31 GPRS r1-r31
* PC (Program counter)
* SR (Supervision register)
*/
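/*
 * With 4-byte words that makes a 140-byte regset: r0 at offset 0, r1-r31
 * at offsets 4-124, PC at 128 and SR at 132, matching the 4*32 and 4*33
 * offsets used by genregs_set() below.
 */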
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
/* r0 */
membuf_zero(&to, 4);
membuf_write(&to, regs->gpr + 1, 31 * 4);
membuf_store(&to, regs->pc);
return membuf_store(&to, regs->sr);
}
/*
* Set the thread state from a regset passed in via ptrace
*/
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user * ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
/* ignore r0 */
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
/* r1 - r31 */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->gpr+1, 4, 4*32);
/* PC */
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->pc, 4*32, 4*33);
/*
	 * Skip SR and padding... userspace isn't allowed to change bits in
* the Supervision register
*/
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 4*33, -1);
return ret;
}
/*
* As OpenRISC shares GPRs and floating point registers we don't need to export
* the floating point registers again. So here we only export the fpcsr special
* purpose register.
*/
static int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
const struct pt_regs *regs = task_pt_regs(target);
return membuf_store(&to, regs->fpcsr);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
/* FPCSR */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->fpcsr, 0, 4);
return ret;
}
/*
* Define the register sets available on OpenRISC under Linux
*/
enum or1k_regset {
REGSET_GENERAL,
REGSET_FPU,
};
static const struct user_regset or1k_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.regset_get = genregs_get,
.set = genregs_set,
},
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct __or1k_fpu_state) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.regset_get = fpregs_get,
.set = fpregs_set,
},
};
static const struct user_regset_view user_or1k_native_view = {
.name = "or1k",
.e_machine = EM_OPENRISC,
.regsets = or1k_regsets,
.n = ARRAY_SIZE(or1k_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_or1k_native_view;
}
/*
 * This does not yet catch signals sent when the child dies; catching those
 * would belong in exit.c or in signal.c.
*/
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure the single step bit is not set.
*/
void ptrace_disable(struct task_struct *child)
{
pr_debug("ptrace_disable(): TODO\n");
user_disable_single_step(child);
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
unsigned long data)
{
int ret;
switch (request) {
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
ptrace_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in <something>.
*/
ret = -1L;
audit_syscall_entry(regs->gpr[11], regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
return ret ? : regs->gpr[11];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
ptrace_report_syscall_exit(regs, step);
}
| linux-master | arch/openrisc/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC traps.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* Here we handle the break vectors not used by the system call
* mechanism, as well as some general stack/register dumping
* things.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/extable.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
int lwa_flag;
static unsigned long __user *lwa_addr;
asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector);
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address);
asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address);
asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address);
asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address);
asmlinkage void do_illegal_instruction(struct pt_regs *regs,
unsigned long address);
static void print_trace(void *data, unsigned long addr, int reliable)
{
const char *loglvl = data;
printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
(void *) addr);
}
static void print_data(unsigned long base_addr, unsigned long word, int i)
{
if (i == 0)
printk("(%08lx:)\t%08lx", base_addr + (i * 4), word);
else
printk(" %08lx:\t%08lx", base_addr + (i * 4), word);
}
/* displays a short stack trace */
void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
if (esp == NULL)
esp = (unsigned long *)&esp;
printk("%sCall trace:\n", loglvl);
unwind_stack((void *)loglvl, esp, print_trace);
}
void show_registers(struct pt_regs *regs)
{
int i;
int in_kernel = 1;
unsigned long esp;
esp = (unsigned long)(regs->sp);
if (user_mode(regs))
in_kernel = 0;
printk("CPU #: %d\n"
" PC: %08lx SR: %08lx SP: %08lx FPCSR: %08lx\n",
smp_processor_id(), regs->pc, regs->sr, regs->sp,
regs->fpcsr);
printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
printk(" RES: %08lx oGPR11: %08lx\n",
regs->gpr[11], regs->orig_gpr11);
printk("Process %s (pid: %d, stackpage=%08lx)\n",
current->comm, current->pid, (unsigned long)current);
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
*/
if (in_kernel) {
printk("\nStack: ");
show_stack(NULL, (unsigned long *)esp, KERN_EMERG);
if (esp < PAGE_OFFSET)
goto bad_stack;
printk("\n");
for (i = -8; i < 24; i += 1) {
unsigned long word;
if (__get_user(word, &((unsigned long *)esp)[i])) {
bad_stack:
printk(" Bad Stack value.");
break;
}
print_data(esp, word, i);
}
printk("\nCode: ");
if (regs->pc < PAGE_OFFSET)
goto bad;
for (i = -6; i < 6; i += 1) {
unsigned long word;
if (__get_user(word, &((unsigned long *)regs->pc)[i])) {
bad:
printk(" Bad PC value.");
break;
}
print_data(regs->pc, word, i);
}
}
printk("\n");
}
/* This is normally the 'Oops' routine */
void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
console_verbose();
printk("\n%s#: %04lx\n", str, err & 0xffff);
show_registers(regs);
#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
printk("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");
/* shut down interrupts */
local_irq_disable();
__asm__ __volatile__("l.nop 1");
do {} while (1);
#endif
make_task_dead(SIGSEGV);
}
asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
{
printk("Unable to handle exception at EA =0x%x, vector 0x%x",
ea, vector);
die("Oops", regs, 9);
}
asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
{
int code = FPE_FLTUNK;
unsigned long fpcsr = regs->fpcsr;
if (fpcsr & SPR_FPCSR_IVF)
code = FPE_FLTINV;
else if (fpcsr & SPR_FPCSR_OVF)
code = FPE_FLTOVF;
else if (fpcsr & SPR_FPCSR_UNF)
code = FPE_FLTUND;
else if (fpcsr & SPR_FPCSR_DZF)
code = FPE_FLTDIV;
else if (fpcsr & SPR_FPCSR_IXF)
code = FPE_FLTRES;
/* Clear all flags */
regs->fpcsr &= ~SPR_FPCSR_ALLF;
force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
}
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
{
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
}
asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
{
if (user_mode(regs)) {
/* Send a SIGBUS */
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address);
} else {
printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
show_registers(regs);
die("Die:", regs, address);
}
}
asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
{
if (user_mode(regs)) {
/* Send a SIGBUS */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
} else { /* Kernel mode */
printk("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
show_registers(regs);
die("Die:", regs, address);
}
}
static inline int in_delay_slot(struct pt_regs *regs)
{
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
/* No delay slot flag, do the old way */
unsigned int op, insn;
insn = *((unsigned int *)regs->pc);
op = insn >> 26;
switch (op) {
case 0x00: /* l.j */
case 0x01: /* l.jal */
case 0x03: /* l.bnf */
case 0x04: /* l.bf */
case 0x11: /* l.jr */
case 0x12: /* l.jalr */
return 1;
default:
return 0;
}
#else
return mfspr(SPR_SR) & SPR_SR_DSX;
#endif
}
static inline void adjust_pc(struct pt_regs *regs, unsigned long address)
{
int displacement;
unsigned int rb, op, jmp;
if (unlikely(in_delay_slot(regs))) {
/* In delay slot, instruction at pc is a branch, simulate it */
jmp = *((unsigned int *)regs->pc);
displacement = sign_extend32(((jmp) & 0x3ffffff) << 2, 27);
rb = (jmp & 0x0000ffff) >> 11;
op = jmp >> 26;
switch (op) {
case 0x00: /* l.j */
regs->pc += displacement;
return;
case 0x01: /* l.jal */
regs->pc += displacement;
regs->gpr[9] = regs->pc + 8;
return;
case 0x03: /* l.bnf */
if (regs->sr & SPR_SR_F)
regs->pc += 8;
else
regs->pc += displacement;
return;
case 0x04: /* l.bf */
if (regs->sr & SPR_SR_F)
regs->pc += displacement;
else
regs->pc += 8;
return;
case 0x11: /* l.jr */
regs->pc = regs->gpr[rb];
return;
case 0x12: /* l.jalr */
regs->pc = regs->gpr[rb];
regs->gpr[9] = regs->pc + 8;
return;
default:
break;
}
} else {
regs->pc += 4;
}
}
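/*
 * Worked example of the displacement decode above (illustrative values):
 * for an l.j whose 26-bit immediate is 2 (jmp == 0x00000002), the
 * displacement is sign_extend32(2 << 2, 27) == 8, so execution resumes
 * two instructions past the branch; an all-ones immediate (0x3ffffff)
 * decodes to -4, i.e. a branch back to the preceding instruction.
 */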
static inline void simulate_lwa(struct pt_regs *regs, unsigned long address,
unsigned int insn)
{
unsigned int ra, rd;
unsigned long value;
unsigned long orig_pc;
long imm;
const struct exception_table_entry *entry;
orig_pc = regs->pc;
adjust_pc(regs, address);
ra = (insn >> 16) & 0x1f;
rd = (insn >> 21) & 0x1f;
imm = (short)insn;
lwa_addr = (unsigned long __user *)(regs->gpr[ra] + imm);
if ((unsigned long)lwa_addr & 0x3) {
do_unaligned_access(regs, address);
return;
}
if (get_user(value, lwa_addr)) {
if (user_mode(regs)) {
force_sig(SIGSEGV);
return;
}
if ((entry = search_exception_tables(orig_pc))) {
regs->pc = entry->fixup;
return;
}
/* kernel access in kernel space, load it directly */
value = *((unsigned long *)lwa_addr);
}
lwa_flag = 1;
regs->gpr[rd] = value;
}
static inline void simulate_swa(struct pt_regs *regs, unsigned long address,
unsigned int insn)
{
unsigned long __user *vaddr;
unsigned long orig_pc;
unsigned int ra, rb;
long imm;
const struct exception_table_entry *entry;
orig_pc = regs->pc;
adjust_pc(regs, address);
ra = (insn >> 16) & 0x1f;
rb = (insn >> 11) & 0x1f;
imm = (short)(((insn & 0x2200000) >> 10) | (insn & 0x7ff));
vaddr = (unsigned long __user *)(regs->gpr[ra] + imm);
if (!lwa_flag || vaddr != lwa_addr) {
regs->sr &= ~SPR_SR_F;
return;
}
if ((unsigned long)vaddr & 0x3) {
do_unaligned_access(regs, address);
return;
}
if (put_user(regs->gpr[rb], vaddr)) {
if (user_mode(regs)) {
force_sig(SIGSEGV);
return;
}
if ((entry = search_exception_tables(orig_pc))) {
regs->pc = entry->fixup;
return;
}
/* kernel access in kernel space, store it directly */
*((unsigned long *)vaddr) = regs->gpr[rb];
}
lwa_flag = 0;
regs->sr |= SPR_SR_F;
}
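/*
 * The two helpers above back the l.lwa/l.swa pair, which userspace uses
 * as a load-linked/store-conditional primitive.  A minimal compare-and-swap
 * sketch in OpenRISC assembly (illustrative only; the register roles are
 * assumptions: r4 = address, r5 = expected, r6 = new, r3 = scratch):
 *
 *	1:	l.lwa	r3,0(r4)	// load word, set reservation
 *		l.sfeq	r3,r5		// matches the expected value?
 *		l.bnf	2f		// no - give up
 *		 l.nop
 *		l.swa	0(r4),r6	// store iff reservation still held
 *		l.bnf	1b		// SR[F] clear - lost it, retry
 *		 l.nop
 *	2:
 */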
#define INSN_LWA 0x1b
#define INSN_SWA 0x33
asmlinkage void do_illegal_instruction(struct pt_regs *regs,
unsigned long address)
{
unsigned int op;
unsigned int insn = *((unsigned int *)address);
op = insn >> 26;
switch (op) {
case INSN_LWA:
simulate_lwa(regs, address, insn);
return;
case INSN_SWA:
simulate_swa(regs, address, insn);
return;
default:
break;
}
if (user_mode(regs)) {
/* Send a SIGILL */
force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address);
} else { /* Kernel mode */
printk("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
address);
show_registers(regs);
die("Die:", regs, address);
}
}
| linux-master | arch/openrisc/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC asm-offsets.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/page.h>
#include <asm/processor.h>
int main(void)
{
/* offsets into the task_struct */
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
/* offsets into thread_info */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_KSP, offsetof(struct thread_info, ksp));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE >> 28);
return 0;
}
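/*
 * Each DEFINE() above emits an .ascii marker roughly of the form
 * "->TI_KSP <value> offsetof(struct thread_info, ksp)" into the generated
 * assembly; kbuild post-processing turns those markers into #defines in
 * asm-offsets.h so assembly code can use the constants directly, e.g.
 * (hypothetical operands):
 *
 *	l.sw	TI_KSP(r10),r1
 */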
| linux-master | arch/openrisc/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC module.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
value = sym->st_value + rel[i].r_addend;
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_OR32_32:
*location = value;
break;
case R_OR32_CONST:
*((uint16_t *)location + 1) = value;
break;
case R_OR32_CONSTH:
*((uint16_t *)location + 1) = value >> 16;
break;
case R_OR32_JUMPTARG:
value -= (uint32_t)location;
value >>= 2;
value &= 0x03ffffff;
value |= *location & 0xfc000000;
*location = value;
break;
default:
pr_err("module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
break;
}
}
return 0;
}
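/*
 * Worked example for R_OR32_JUMPTARG (illustrative addresses): if the
 * branch instruction sits at location 0xc0001000 and the symbol plus
 * addend resolves to 0xc0001010, then value becomes (0x10 >> 2) == 4;
 * the low 26 bits of the instruction are rewritten to 4 while the top
 * 6 opcode bits are preserved.
 */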
| linux-master | arch/openrisc/kernel/module.c |
/*
* OR1K timer synchronisation
*
* Based on work from MIPS implementation.
*
* All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
* not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...)
*/
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>
#include <asm/time.h>
#include <asm/timex.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/spr.h>
static unsigned int initcount;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 3
void synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
pr_info("Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
/*
* We loop a few times to get a primed instruction cache,
* then the last pass is more or less synchronised and
* the master and slaves each set their cycle counters to a known
* value all at once. This reduces the chance of having random offsets
* between the processors, and guarantees that the maximum
* delay between the cycle counters is never bigger than
* the latency of information-passing (cachelines) between
* two CPUs.
*/
for (i = 0; i < NR_LOOPS; i++) {
/* slaves loop on '!= 2' */
while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
		/* Let the slave write its count register */
atomic_inc(&count_count_start);
/* Count will be initialised to current timer */
if (i == 1)
initcount = get_cycles();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
openrisc_timer_set(initcount);
/*
* Wait for slave to leave the synchronization point:
*/
while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
atomic_inc(&count_count_stop);
}
/* Arrange for an interrupt in a short while */
openrisc_timer_set_next(COUNTON);
local_irq_restore(flags);
/*
* i386 code reported the skew here, but the
* count registers were almost certainly out of sync
* so no point in alarming people
*/
pr_cont("done.\n");
}
void synchronise_count_slave(int cpu)
{
int i;
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
*/
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
while (atomic_read(&count_count_start) != 2)
mb();
/*
* Everyone initialises count in the last loop:
*/
if (i == NR_LOOPS-1)
openrisc_timer_set(initcount);
atomic_inc(&count_count_stop);
while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
openrisc_timer_set_next(COUNTON);
}
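/*
 * Per-iteration handshake, for reference (master on the left, slave on
 * the right):
 *
 *	wait start == 1		<-	start++			(start = 1)
 *	stop = 0; start++	->	wait start == 2
 *	(last pass: both sides write initcount into the tick timer)
 *	wait stop == 1		<-	stop++			(stop = 1)
 *	start = 0; stop++	->	wait stop == 2
 */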
#undef NR_LOOPS
| linux-master | arch/openrisc/kernel/sync-timer.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC or32_ksyms.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/export.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/semaphore.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/hardirq.h>
#include <asm/delay.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
/* compiler generated symbols */
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__divsi3);
DECLARE_EXPORT(__umodsi3);
DECLARE_EXPORT(__modsi3);
DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
DECLARE_EXPORT(__ucmpdi2);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);
| linux-master | arch/openrisc/kernel/or32_ksyms.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC setup.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* This file handles the architecture-dependent parts of initialization
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/serial.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/device.h>
#include <asm/sections.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/cpuinfo.h>
#include <asm/delay.h>
#include "vmlinux.h"
static void __init setup_memory(void)
{
unsigned long ram_start_pfn;
unsigned long ram_end_pfn;
phys_addr_t memory_start, memory_end;
memory_end = memory_start = 0;
	/* Find the main memory region holding the kernel; we assume it's the only one */
memory_start = memblock_start_of_DRAM();
memory_end = memblock_end_of_DRAM();
if (!memory_end) {
panic("No memory!");
}
ram_start_pfn = PFN_UP(memory_start);
ram_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
/* setup bootmem globals (we use no_bootmem, but mm still depends on this) */
min_low_pfn = ram_start_pfn;
max_low_pfn = ram_end_pfn;
max_pfn = ram_end_pfn;
/*
* initialize the boot-time allocator (with low memory only).
*
* This makes the memory from the end of the kernel to the end of
* RAM usable.
*/
memblock_reserve(__pa(_stext), _end - _stext);
#ifdef CONFIG_BLK_DEV_INITRD
/* Then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start)) {
unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
}
#endif /* CONFIG_BLK_DEV_INITRD */
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
memblock_dump_all();
}
struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
static void print_cpuinfo(void)
{
unsigned long upr = mfspr(SPR_UPR);
unsigned long vr = mfspr(SPR_VR);
unsigned int version;
unsigned int revision;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
version = (vr & SPR_VR_VER) >> 24;
revision = (vr & SPR_VR_REV);
printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
version, revision, cpuinfo->clock_frequency / 1000000);
if (!(upr & SPR_UPR_UP)) {
printk(KERN_INFO
"-- no UPR register... unable to detect configuration\n");
return;
}
if (upr & SPR_UPR_DCP)
printk(KERN_INFO
"-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
cpuinfo->dcache_size, cpuinfo->dcache_block_size,
cpuinfo->dcache_ways);
else
printk(KERN_INFO "-- dcache disabled\n");
if (upr & SPR_UPR_ICP)
printk(KERN_INFO
"-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
cpuinfo->icache_size, cpuinfo->icache_block_size,
cpuinfo->icache_ways);
else
printk(KERN_INFO "-- icache disabled\n");
if (upr & SPR_UPR_DMP)
printk(KERN_INFO "-- dmmu: %4d entries, %lu way(s)\n",
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
if (upr & SPR_UPR_IMP)
printk(KERN_INFO "-- immu: %4d entries, %lu way(s)\n",
1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
printk(KERN_INFO "-- additional features:\n");
if (upr & SPR_UPR_DUP)
printk(KERN_INFO "-- debug unit\n");
if (upr & SPR_UPR_PCUP)
printk(KERN_INFO "-- performance counters\n");
if (upr & SPR_UPR_PMP)
printk(KERN_INFO "-- power management\n");
if (upr & SPR_UPR_PICP)
printk(KERN_INFO "-- PIC\n");
if (upr & SPR_UPR_TTP)
printk(KERN_INFO "-- timer\n");
if (upr & SPR_UPR_CUP)
printk(KERN_INFO "-- custom unit(s)\n");
}
void __init setup_cpuinfo(void)
{
struct device_node *cpu;
unsigned long iccfgr, dccfgr;
unsigned long cache_set_size;
int cpu_id = smp_processor_id();
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
cpu = of_get_cpu_node(cpu_id, NULL);
if (!cpu)
panic("Couldn't find CPU%d in device tree...\n", cpu_id);
iccfgr = mfspr(SPR_ICCFGR);
cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
cpuinfo->icache_size =
cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
dccfgr = mfspr(SPR_DCCFGR);
cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
cpuinfo->dcache_size =
cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
if (of_property_read_u32(cpu, "clock-frequency",
&cpuinfo->clock_frequency)) {
printk(KERN_WARNING
"Device tree missing CPU 'clock-frequency' parameter."
"Assuming frequency 25MHZ"
"This is probably not what you want.");
}
cpuinfo->coreid = mfspr(SPR_COREID);
of_node_put(cpu);
print_cpuinfo();
}
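/*
 * Worked example of the geometry decode above (illustrative ICCFGR
 * value): NCW == 0, NCS == 8 and CBS == 0 describe a direct-mapped
 * cache of 2^8 == 256 sets with 16-byte lines, so icache_size comes
 * out as 256 * 1 * 16 == 4096 bytes.
 */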
/**
* or1k_early_setup
* @fdt: pointer to the start of the device tree in memory or NULL
*
* Handles the pointer to the device tree that this kernel is to use
* for establishing the available platform devices.
*
* Falls back on built-in device tree in case null pointer is passed.
*/
void __init or1k_early_setup(void *fdt)
{
if (fdt)
pr_info("FDT at %p\n", fdt);
else {
fdt = __dtb_start;
pr_info("Compiled-in FDT at %p\n", fdt);
}
early_init_devtree(fdt);
}
static inline unsigned long extract_value_bits(unsigned long reg,
short bit_nr, short width)
{
	return (reg >> bit_nr) & ((1UL << width) - 1);
}
static inline unsigned long extract_value(unsigned long reg, unsigned long mask)
{
while (!(mask & 0x1)) {
reg = reg >> 1;
mask = mask >> 1;
}
return mask & reg;
}
/*
* calibrate_delay
*
* Lightweight calibrate_delay implementation that calculates loops_per_jiffy
* from the clock frequency passed in via the device tree
*
*/
void calibrate_delay(void)
{
const int *val;
struct device_node *cpu = of_get_cpu_node(smp_processor_id(), NULL);
val = of_get_property(cpu, "clock-frequency", NULL);
if (!val)
panic("no cpu 'clock-frequency' parameter in device tree");
loops_per_jiffy = *val / HZ;
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
of_node_put(cpu);
}
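/*
 * For example, a 50 MHz 'clock-frequency' with HZ == 100 gives
 * loops_per_jiffy == 500000, and the BogoMIPS figure printed above
 * reads "100.00 BogoMIPS (lpj=500000)".
 */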
void __init setup_arch(char **cmdline_p)
{
unflatten_and_copy_device_tree();
setup_cpuinfo();
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
/* process 1's initial memory region is the kernel code/data */
setup_initial_init_mm(_stext, _etext, _edata, _end);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start == initrd_end) {
printk(KERN_INFO "Initial ramdisk not found\n");
initrd_start = 0;
initrd_end = 0;
} else {
printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n",
(void *)(initrd_start), initrd_end - initrd_start);
initrd_below_start_ok = 1;
}
#endif
/* setup memblock allocator */
setup_memory();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
*cmdline_p = boot_command_line;
printk(KERN_INFO "OpenRISC Linux -- http://openrisc.io\n");
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned int vr, cpucfgr;
unsigned int avr;
unsigned int version;
struct cpuinfo_or1k *cpuinfo = v;
vr = mfspr(SPR_VR);
cpucfgr = mfspr(SPR_CPUCFGR);
#ifdef CONFIG_SMP
seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
#endif
if (vr & SPR_VR_UVRP) {
vr = mfspr(SPR_VR2);
version = vr & SPR_VR2_VER;
avr = mfspr(SPR_AVR);
seq_printf(m, "cpu architecture\t: "
"OpenRISC 1000 (%d.%d-rev%d)\n",
(avr >> 24) & 0xff,
(avr >> 16) & 0xff,
(avr >> 8) & 0xff);
seq_printf(m, "cpu implementation id\t: 0x%x\n",
(vr & SPR_VR2_CPUID) >> 24);
seq_printf(m, "cpu version\t\t: 0x%x\n", version);
} else {
version = (vr & SPR_VR_VER) >> 24;
seq_printf(m, "cpu\t\t\t: OpenRISC-%x\n", version);
seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
}
seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
seq_printf(m, "dcache block size\t: %d bytes\n",
cpuinfo->dcache_block_size);
seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
seq_printf(m, "icache block size\t: %d bytes\n",
cpuinfo->icache_block_size);
seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
seq_printf(m, "dmmu\t\t\t: %d entries, %lu ways\n",
1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
seq_printf(m, "bogomips\t\t: %lu.%02lu\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100);
seq_puts(m, "features\t\t: ");
seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB32S ? "orbis32" : "");
seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB64S ? "orbis64" : "");
seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF32S ? "orfpx32" : "");
seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF64S ? "orfpx64" : "");
seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OV64S ? "orvdx64" : "");
seq_puts(m, "\n");
seq_puts(m, "\n");
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
*pos = cpumask_next(*pos - 1, cpu_online_mask);
if ((*pos) < nr_cpu_ids)
return &cpuinfo_or1k[*pos];
return NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
| linux-master | arch/openrisc/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC Linux
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* DMA mapping callbacks...
*/
#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>
#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
static int
page_set_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
unsigned long cl;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
pte_val(*pte) |= _PAGE_CI;
/*
* Flush the page out of the TLB so that the new page flags get
* picked up next time there's an access
*/
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
/* Flush page out of dcache */
for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
return 0;
}
static const struct mm_walk_ops set_nocache_walk_ops = {
.pte_entry = page_set_nocache,
};
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_val(*pte) &= ~_PAGE_CI;
/*
* Flush the page out of the TLB so that the new page flags get
* picked up next time there's an access
*/
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
return 0;
}
static const struct mm_walk_ops clear_nocache_walk_ops = {
.pte_entry = page_clear_nocache,
};
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
unsigned long va = (unsigned long)cpu_addr;
int error;
/*
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
mmap_write_lock(&init_mm);
error = walk_page_range_novma(&init_mm, va, va + size,
&set_nocache_walk_ops, NULL, NULL);
mmap_write_unlock(&init_mm);
if (error)
return ERR_PTR(error);
return cpu_addr;
}
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
unsigned long va = (unsigned long)cpu_addr;
mmap_write_lock(&init_mm);
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
&clear_nocache_walk_ops, NULL, NULL));
mmap_write_unlock(&init_mm);
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
enum dma_data_direction dir)
{
unsigned long cl;
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
switch (dir) {
case DMA_TO_DEVICE:
/* Flush the dcache for the requested range */
for (cl = addr; cl < addr + size;
cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBFR, cl);
break;
case DMA_FROM_DEVICE:
/* Invalidate the dcache for the requested range */
for (cl = addr; cl < addr + size;
cl += cpuinfo->dcache_block_size)
mtspr(SPR_DCBIR, cl);
break;
default:
/*
* NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
* flush nor invalidate the cache here as the area will need
* to be manually synced anyway.
*/
break;
}
}
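/*
 * For example, a DMA_TO_DEVICE sync of a 256-byte buffer at physical
 * address 0x1000 on a core with 16-byte cache lines issues sixteen
 * SPR_DCBFR writes, one per line: 0x1000, 0x1010, ..., 0x10f0.
 */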
| linux-master | arch/openrisc/kernel/dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC time.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/ftrace.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of_clk.h>
#include <asm/cpuinfo.h>
#include <asm/time.h>
irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs);
/* Test the timer ticks to count, used in sync routine */
inline void openrisc_timer_set(unsigned long count)
{
mtspr(SPR_TTCR, count);
}
/* Set the timer to trigger in delta cycles */
inline void openrisc_timer_set_next(unsigned long delta)
{
u32 c;
	/* Read 32-bit counter value, add delta, mask to the low 28 bits.
* We're guaranteed delta won't be bigger than 28 bits because the
* generic timekeeping code ensures that for us.
*/
c = mfspr(SPR_TTCR);
c += delta;
c &= SPR_TTMR_TP;
/* Set counter and enable interrupt.
* Keep timer in continuous mode always.
*/
mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE | c);
}
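/*
 * Example: with SPR_TTCR currently at 0x0ffffff0 and delta == 0x20,
 * c becomes (0x10000010 & SPR_TTMR_TP) == 0x10, so the compare value
 * wraps together with the 28-bit counter and the interrupt still fires
 * delta cycles from now.
 */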
static int openrisc_timer_set_next_event(unsigned long delta,
struct clock_event_device *dev)
{
openrisc_timer_set_next(delta);
return 0;
}
/* This is the clock event device based on the OR1K tick timer.
* As the timer is being used as a continuous clock-source (required for HR
* timers) we cannot enable the PERIODIC feature. The tick timer can run using
* one-shot events, so no problem.
*/
static DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer);
void openrisc_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt =
&per_cpu(clockevent_openrisc_timer, cpu);
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
mtspr(SPR_TTMR, SPR_TTMR_CR);
#ifdef CONFIG_SMP
evt->broadcast = tick_broadcast;
#endif
evt->name = "openrisc_timer_clockevent",
evt->features = CLOCK_EVT_FEAT_ONESHOT,
evt->rating = 300,
evt->set_next_event = openrisc_timer_set_next_event,
evt->cpumask = cpumask_of(cpu);
/* We only have 28 bits */
clockevents_config_and_register(evt, cpuinfo->clock_frequency,
100, 0x0fffffff);
}
static inline void timer_ack(void)
{
/* Clear the IP bit and disable further interrupts */
/* This can be done very simply... we just need to keep the timer
running, so just maintain the CR bits while clearing the rest
of the register
*/
mtspr(SPR_TTMR, SPR_TTMR_CR);
}
/*
* The timer interrupt is mostly handled in generic code nowadays... this
* function just acknowledges the interrupt and fires the event handler that
* has been set on the clockevent device by the generic time management code.
*
* This function needs to be called by the timer exception handler and that's
* all the exception handler needs to do.
*/
irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt =
&per_cpu(clockevent_openrisc_timer, cpu);
timer_ack();
/*
* update_process_times() expects us to have called irq_enter().
*/
irq_enter();
evt->event_handler(evt);
irq_exit();
set_irq_regs(old_regs);
return IRQ_HANDLED;
}
/*
* Clocksource: Based on OpenRISC timer/counter
*
* This sets up the OpenRISC Tick Timer as a clock source. The tick timer
* is 32 bits wide and runs at the CPU clock frequency.
*/
static u64 openrisc_timer_read(struct clocksource *cs)
{
return (u64) mfspr(SPR_TTCR);
}
static struct clocksource openrisc_timer = {
.name = "openrisc_timer",
.rating = 200,
.read = openrisc_timer_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init openrisc_timer_init(void)
{
struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency))
panic("failed to register clocksource");
/* Enable the incrementer: 'continuous' mode with interrupt disabled */
mtspr(SPR_TTMR, SPR_TTMR_CR);
return 0;
}
void __init time_init(void)
{
u32 upr;
upr = mfspr(SPR_UPR);
if (!(upr & SPR_UPR_TTP))
panic("Linux not supported on devices without tick timer");
openrisc_timer_init();
openrisc_clockevent_init();
of_clk_init(NULL);
timer_probe();
}
| linux-master | arch/openrisc/kernel/time.c |
/*
* OpenRISC unwinder.c
*
* Reusable arch specific api for unwinding stacks.
*
* Copyright (C) 2017 Stafford Horne <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <asm/unwinder.h>
#ifdef CONFIG_FRAME_POINTER
struct or1k_frameinfo {
unsigned long *fp;
unsigned long ra;
unsigned long top;
};
/*
* Verify a frameinfo structure. The return address should be a valid text
 * address. The frame pointer may be null if it's the last frame, otherwise
* the frame pointer should point to a location in the stack after the
* top of the next frame up.
*/
static inline int or1k_frameinfo_valid(struct or1k_frameinfo *frameinfo)
{
return (frameinfo->fp == NULL ||
(!kstack_end(frameinfo->fp) &&
frameinfo->fp > &frameinfo->top)) &&
__kernel_text_address(frameinfo->ra);
}
/*
* Create a stack trace doing scanning which is frame pointer aware. We can
* get reliable stack traces by matching the previously found frame
* pointer with the top of the stack address every time we find a valid
* or1k_frameinfo.
*
* Ideally the stack parameter will be passed as FP, but it can not be
* guaranteed. Therefore we scan each address looking for the first sign
* of a return address.
*
* The OpenRISC stack frame looks something like the following. The
* location SP is held in r1 and location FP is held in r2 when frame pointers
 * are enabled.
*
* SP -> (top of stack)
* - (callee saved registers)
* - (local variables)
* FP-8 -> previous FP \
* FP-4 -> return address |- or1k_frameinfo
* FP -> (previous top of stack) /
*/
void unwind_stack(void *data, unsigned long *stack,
void (*trace)(void *data, unsigned long addr, int reliable))
{
unsigned long *next_fp = NULL;
struct or1k_frameinfo *frameinfo = NULL;
int reliable = 0;
while (!kstack_end(stack)) {
frameinfo = container_of(stack,
struct or1k_frameinfo,
top);
if (__kernel_text_address(frameinfo->ra)) {
if (or1k_frameinfo_valid(frameinfo) &&
(next_fp == NULL ||
next_fp == &frameinfo->top)) {
reliable = 1;
next_fp = frameinfo->fp;
} else
reliable = 0;
trace(data, frameinfo->ra, reliable);
}
stack++;
}
}
#else /* CONFIG_FRAME_POINTER */
/*
* Create a stack trace by doing a simple scan treating all text addresses
* as return addresses.
*/
void unwind_stack(void *data, unsigned long *stack,
void (*trace)(void *data, unsigned long addr, int reliable))
{
unsigned long addr;
while (!kstack_end(stack)) {
addr = *stack++;
if (__kernel_text_address(addr))
trace(data, addr, 0);
}
}
#endif /* CONFIG_FRAME_POINTER */
| linux-master | arch/openrisc/kernel/unwinder.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC sys_call_table.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscalls.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
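/*
 * The include above expands each __SYSCALL(nr, call) entry of the
 * generic syscall table into a designated initializer; for instance
 * __SYSCALL(__NR_exit, sys_exit) becomes:
 *
 *	[__NR_exit] = (sys_exit),
 */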
| linux-master | arch/openrisc/kernel/sys_call_table.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC prom.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*
* Architecture specific procedures for creating, accessing and
* interpreting the device tree.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <asm/page.h>
void __init early_init_devtree(void *params)
{
early_init_dt_scan(params);
memblock_allow_resize();
}
| linux-master | arch/openrisc/kernel/prom.c |
/*
* Stack trace utility for OpenRISC
*
* Copyright (C) 2017 Stafford Horne <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
 * Loosely based on work from sh and powerpc.
*/
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/processor.h>
#include <asm/unwinder.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
static void
save_stack_address(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = data;
if (!reliable)
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
void save_stack_trace(struct stack_trace *trace)
{
unwind_stack(trace, (unsigned long *) &trace, save_stack_address);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
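/*
 * Minimal usage sketch (local names, not part of this file):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace(&trace);
 *	// entries[0..trace.nr_entries-1] now holds the reliable return
 *	// addresses captured from the current call chain.
 */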
static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = (struct stack_trace *)data;
if (!reliable)
return;
if (in_sched_functions(addr))
return;
if (trace->skip > 0) {
trace->skip--;
return;
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = addr;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *sp = NULL;
if (!try_get_task_stack(tsk))
return;
if (tsk == current)
sp = (unsigned long *) &sp;
else {
unsigned long ksp;
/* Locate stack from kernel context */
ksp = task_thread_info(tsk)->ksp;
ksp += STACK_FRAME_OVERHEAD; /* redzone */
ksp += sizeof(struct pt_regs);
sp = (unsigned long *) ksp;
}
unwind_stack(trace, sp, save_stack_address_nosched);
put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
unwind_stack(trace, (unsigned long *) regs->sp,
save_stack_address_nosched);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
| linux-master | arch/openrisc/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* OpenRISC signal.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2003 Matjaz Breskvar <[email protected]>
* Copyright (C) 2010-2011 Jonas Bonn <[email protected]>
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/resume_user_mode.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
struct rt_sigframe {
struct siginfo info;
struct ucontext uc;
unsigned char retcode[16]; /* trampoline code */
};
asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs);
asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
int syscall);
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
/*
* Restore the regs from &sc->regs.
* (sc is already checked since the sigframe was
* checked in sys_sigreturn previously)
*/
err |= __copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long));
	err |= __copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long));
	err |= __copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long));
	err |= __copy_from_user(&regs->fpcsr, &sc->fpcsr, sizeof(unsigned long));
/* make sure the SM-bit is cleared so user-mode cannot fool us */
regs->sr &= ~SPR_SR_SM;
regs->orig_gpr11 = -1; /* Avoid syscall restart checks */
/* TODO: the other ports use regs->orig_XX to disable syscall checks
* after this completes, but we don't use that mechanism. maybe we can
* use it now ?
*/
return err;
}
asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->sp;
sigset_t set;
/*
* Since we stacked the signal on a dword boundary,
* then frame should be dword aligned here. If it's
* not, then the user is trying to mess with us.
*/
if (((unsigned long)frame) & 3)
goto badframe;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->gpr[11];
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
/* copy the regs */
/* There should be no need to save callee-saved registers here...
* ...but we save them anyway. Revisit this
*/
err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
	err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
	err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
	err |= __copy_to_user(&sc->fpcsr, &regs->fpcsr, sizeof(unsigned long));
return err;
}
static inline unsigned long align_sigframe(unsigned long sp)
{
return sp & ~3UL;
}
/*
* Work out where the signal frame should go. It's either on the user stack
* or the alternate stack.
*/
static inline void __user *get_sigframe(struct ksignal *ksig,
struct pt_regs *regs, size_t frame_size)
{
unsigned long sp = regs->sp;
/* redzone */
sp -= STACK_FRAME_OVERHEAD;
sp = sigsp(sp, ksig);
sp = align_sigframe(sp - frame_size);
return (void __user *)sp;
}
/* grab and setup a signal frame.
*
* basically we stack a lot of state info, and arrange for the
* user-mode program to return to the kernel using either a
* trampoline which performs the syscall sigreturn, or a provided
* user-mode trampoline.
*/
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
unsigned long return_ip;
int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
/* Create siginfo. */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
return -EFAULT;
/* trampoline - the desired return ip is the retcode itself */
return_ip = (unsigned long)&frame->retcode;
	/* The stores below assemble this trampoline at frame->retcode:
	   l.ori r11,r0,__NR_rt_sigreturn
	   l.sys 1
	   l.nop
	 */
err |= __put_user(0xa960, (short __user *)(frame->retcode + 0));
err |= __put_user(__NR_rt_sigreturn, (short __user *)(frame->retcode + 2));
err |= __put_user(0x20000001, (unsigned long __user *)(frame->retcode + 4));
err |= __put_user(0x15000000, (unsigned long __user *)(frame->retcode + 8));
if (err)
return -EFAULT;
/* Set up registers for signal handler */
regs->pc = (unsigned long)ksig->ka.sa.sa_handler; /* what we enter NOW */
regs->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
regs->gpr[3] = (unsigned long)ksig->sig; /* arg 1: signo */
regs->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
regs->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
/* actually move the usp to reflect the stacked frame */
regs->sp = (unsigned long)frame;
return 0;
}
static inline void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
int ret;
ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Also note that the regs structure given here as an argument, is the latest
* pushed pt_regs. It may or may not be the same as the first pushed registers
* when the initial usermode->kernelmode transition took place. Therefore
* we can use user_mode(regs) to see if we came directly from kernel or user
* mode below.
*/
static int do_signal(struct pt_regs *regs, int syscall)
{
struct ksignal ksig;
unsigned long continue_addr = 0;
unsigned long restart_addr = 0;
unsigned long retval = 0;
int restart = 0;
if (syscall) {
continue_addr = regs->pc;
restart_addr = continue_addr - 4;
retval = regs->gpr[11];
/*
* Setup syscall restart here so that a debugger will
* see the already changed PC.
*/
switch (retval) {
case -ERESTART_RESTARTBLOCK:
restart = -2;
fallthrough;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
restart++;
regs->gpr[11] = regs->orig_gpr11;
regs->pc = restart_addr;
break;
}
}
/*
* Get the signal to deliver. During the call to get_signal the
* debugger may change all our registers so we may need to revert
* the decision to restart the syscall; specifically, if the PC is
* changed, don't restart the syscall.
*/
if (get_signal(&ksig)) {
if (unlikely(restart) && regs->pc == restart_addr) {
if (retval == -ERESTARTNOHAND ||
retval == -ERESTART_RESTARTBLOCK
|| (retval == -ERESTARTSYS
&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
/* No automatic restart */
regs->gpr[11] = -EINTR;
regs->pc = continue_addr;
}
}
handle_signal(&ksig, regs);
} else {
/* no handler */
restore_saved_sigmask();
/*
* Restore pt_regs PC as syscall restart will be handled by
* kernel without return to userspace
*/
if (unlikely(restart) && regs->pc == restart_addr) {
regs->pc = continue_addr;
return restart;
}
}
return 0;
}
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
return 0;
local_irq_enable();
if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) {
int restart = do_signal(regs, syscall);
if (unlikely(restart)) {
/*
* Restart without handlers.
* Deal with it without leaving
* the kernel space.
*/
return restart;
}
syscall = 0;
} else {
resume_user_mode_work(regs);
}
}
local_irq_disable();
thread_flags = read_thread_flags();
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
| linux-master | arch/openrisc/kernel/signal.c |
/*
* Copyright (C) 2014 Stefan Kristiansson <[email protected]>
* Copyright (C) 2017 Stafford Horne <[email protected]>
*
* Based on arm64 and arc implementations
* Copyright (C) 2013 ARM Ltd.
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
asmlinkage __init void secondary_start_kernel(void);
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;
enum ipi_msg_type {
IPI_WAKEUP,
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
};
static DEFINE_SPINLOCK(boot_lock);
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
/*
* set synchronisation state between this boot processor
* and the secondary one
*/
spin_lock(&boot_lock);
secondary_release = cpu;
smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);
/*
	 * now that the secondary core is starting up, let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_init_cpus(void)
{
struct device_node *cpu;
u32 cpu_id;
for_each_of_cpu_node(cpu) {
cpu_id = of_get_cpu_hwid(cpu, 0);
if (cpu_id < NR_CPUS)
set_cpu_possible(cpu_id, true);
}
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu;
/*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time.
*/
for_each_possible_cpu(cpu) {
if (cpu < max_cpus)
set_cpu_present(cpu, true);
}
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
static DECLARE_COMPLETION(cpu_running);
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
if (smp_cross_call == NULL) {
pr_warn("CPU%u: failed to start, IPI controller missing",
cpu);
return -EIO;
}
secondary_thread_info = task_thread_info(idle);
current_pgd[cpu] = init_mm.pgd;
boot_secondary(cpu, idle);
if (!wait_for_completion_timeout(&cpu_running,
msecs_to_jiffies(1000))) {
pr_crit("CPU%u: failed to start\n", cpu);
return -EIO;
}
synchronise_count_master(cpu);
return 0;
}
asmlinkage __init void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
mmgrab(mm);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
pr_info("CPU%u: Booted secondary processor\n", cpu);
setup_cpuinfo();
openrisc_clockevent_init();
notify_cpu_starting(cpu);
/*
* OK, now it's safe to let the boot CPU continue
*/
complete(&cpu_running);
synchronise_count_slave(cpu);
set_cpu_online(cpu, true);
local_irq_enable();
/*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
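/*
 * Bring-up sequence, in brief: __cpu_up() on the boot CPU records the
 * target in secondary_release and sends IPI_WAKEUP; the low-level boot
 * code is expected to spin until secondary_release matches its CPU id
 * and then enter secondary_start_kernel() above, which completes
 * cpu_running so that __cpu_up() can go on to synchronise_count_master().
 */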
void handle_IPI(unsigned int ipi_msg)
{
unsigned int cpu = smp_processor_id();
switch (ipi_msg) {
case IPI_WAKEUP:
break;
case IPI_RESCHEDULE:
scheduler_ipi();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;
default:
WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
break;
}
}
void arch_smp_send_reschedule(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
static void stop_this_cpu(void *dummy)
{
/* Remove this CPU */
set_cpu_online(smp_processor_id(), false);
local_irq_disable();
/* CPU Doze */
if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
/* If that didn't work, infinite loop */
while (1)
;
}
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
smp_cross_call = fn;
}
void arch_send_call_function_single_ipi(int cpu)
{
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
}
/* TLB flush operations - Performed on each CPU*/
static inline void ipi_flush_tlb_all(void *ignored)
{
local_flush_tlb_all();
}
static inline void ipi_flush_tlb_mm(void *info)
{
struct mm_struct *mm = (struct mm_struct *)info;
local_flush_tlb_mm(mm);
}
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
unsigned int cpuid;
if (cpumask_empty(cmask))
return;
cpuid = get_cpu();
if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
/* local cpu is the only cpu present in cpumask */
local_flush_tlb_mm(mm);
} else {
on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
}
put_cpu();
}
struct flush_tlb_data {
unsigned long addr1;
unsigned long addr2;
};
static inline void ipi_flush_tlb_page(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_page(NULL, fd->addr1);
}
static inline void ipi_flush_tlb_range(void *info)
{
struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
unsigned long end)
{
unsigned int cpuid;
if (cpumask_empty(cmask))
return;
cpuid = get_cpu();
if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
/* local cpu is the only cpu present in cpumask */
if ((end - start) <= PAGE_SIZE)
local_flush_tlb_page(NULL, start);
else
local_flush_tlb_range(NULL, start, end);
} else {
struct flush_tlb_data fd;
fd.addr1 = start;
fd.addr2 = end;
if ((end - start) <= PAGE_SIZE)
on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
else
on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
}
put_cpu();
}
void flush_tlb_all(void)
{
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
void flush_tlb_mm(struct mm_struct *mm)
{
smp_flush_tlb_mm(mm_cpumask(mm), mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
: cpu_online_mask;
smp_flush_tlb_range(cmask, start, end);
}
/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
struct page *page = arg;
local_icache_page_inv(page);
}
void smp_icache_page_inv(struct page *page)
{
on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
| linux-master | arch/openrisc/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* I/O access functions for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <asm/io.h>
/* These are all FIFO routines! */
/*
* __raw_readsw - read words a short at a time
* @addr: source address
 * @data: destination data address
* @len: number of shorts to read
*/
void __raw_readsw(const void __iomem *addr, void *data, int len)
{
const volatile short int *src = (short int *) addr;
short int *dst = (short int *) data;
if ((u32)data & 0x1)
panic("unaligned pointer to readsw");
while (len-- > 0)
*dst++ = *src;
}
EXPORT_SYMBOL(__raw_readsw);
/*
 * __raw_writesw - write words a short at a time
 * @addr: destination address
 * @data: source data address
 * @len: number of shorts to write
*/
void __raw_writesw(void __iomem *addr, const void *data, int len)
{
const short int *src = (short int *)data;
volatile short int *dst = (short int *)addr;
if ((u32)data & 0x1)
panic("unaligned pointer to writesw");
while (len-- > 0)
*dst = *src++;
}
EXPORT_SYMBOL(__raw_writesw);
/* Pretty sure len is pre-adjusted for the length of the access already */
void __raw_readsl(const void __iomem *addr, void *data, int len)
{
const volatile long *src = (long *) addr;
long *dst = (long *) data;
if ((u32)data & 0x3)
panic("unaligned pointer to readsl");
while (len-- > 0)
*dst++ = *src;
}
EXPORT_SYMBOL(__raw_readsl);
void __raw_writesl(void __iomem *addr, const void *data, int len)
{
const long *src = (long *)data;
volatile long *dst = (long *)addr;
if ((u32)data & 0x3)
panic("unaligned pointer to writesl");
while (len-- > 0)
*dst = *src++;
}
EXPORT_SYMBOL(__raw_writesl);
| linux-master | arch/hexagon/lib/io.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Checksum functions for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
/* This was derived from arch/alpha/lib/checksum.c */
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <linux/uaccess.h>
#include <asm/intrinsics.h>
/* Vector value operations */
#define SIGN(x, y) ((0x8000ULL*x)<<y)
#define CARRY(x, y) ((0x0002ULL*x)<<y)
#define SELECT(x, y) ((0x0001ULL*x)<<y)
#define VR_NEGATE(a, b, c, d) (SIGN(a, 48) + SIGN(b, 32) + SIGN(c, 16) \
+ SIGN(d, 0))
#define VR_CARRY(a, b, c, d) (CARRY(a, 48) + CARRY(b, 32) + CARRY(c, 16) \
+ CARRY(d, 0))
#define VR_SELECT(a, b, c, d) (SELECT(a, 48) + SELECT(b, 32) + SELECT(c, 16) \
+ SELECT(d, 0))
/* optimized HEXAGON V3 intrinsic version */
static inline unsigned short from64to16(u64 x)
{
u64 sum;
sum = HEXAGON_P_vrmpyh_PP(x^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum += VR_CARRY(0, 0, 1, 0);
sum = HEXAGON_P_vrmpyh_PP(sum, VR_SELECT(0, 0, 1, 1));
return 0xFFFF & sum;
}
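/*
 * Conceptually this is the usual ones-complement fold of the four
 * 16-bit halves with end-around carry.  For example, with
 * x == 0xffff000100020003:
 *	0xffff + 0x0001 = 0x10000 -> fold -> 0x0001
 *	0x0001 + 0x0002 = 0x0003
 *	0x0003 + 0x0003 = 0x0006
 * so from64to16() returns 0x0006.
 */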
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented.
*/
__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
{
return (__force __sum16)~from64to16(
(__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8));
}
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
{
u64 result;
result = (__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8);
/* Fold down to 32-bits so we don't lose in the typedef-less
network stack. */
/* 64 to 33 */
result = (result & 0xffffffffUL) + (result >> 32);
/* 33 to 32 */
result = (result & 0xffffffffUL) + (result >> 32);
return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
/*
* Do a 64-bit checksum on an arbitrary memory area..
*
* This isn't a great routine, but it's not _horrible_ either. The
* inner loop could be unrolled a bit further, and there are better
* ways to do the carry, but this is reasonable.
*/
/* optimized HEXAGON intrinsic version, with over read fixed */
unsigned int do_csum(const void *voidptr, int len)
{
u64 sum0, sum1, x0, x1, *ptr8_o, *ptr8_e, *ptr8;
int i, start, mid, end, mask;
const char *ptr = voidptr;
unsigned short *ptr2;
unsigned int *ptr4;
if (len <= 0)
return 0;
start = 0xF & (16-(((int) ptr) & 0xF)) ;
mask = 0x7fffffffUL >> HEXAGON_R_cl0_R(len);
start = start & mask ;
mid = len - start;
end = mid & 0xF;
mid = mid>>4;
sum0 = mid << 18;
sum1 = 0;
if (start & 1)
sum0 += (u64) (ptr[0] << 8);
ptr2 = (unsigned short *) &ptr[start & 1];
if (start & 2)
sum1 += (u64) ptr2[0];
ptr4 = (unsigned int *) &ptr[start & 3];
if (start & 4) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
VR_SELECT(0, 0, 1, 1));
sum0 += VR_SELECT(0, 0, 1, 0);
}
ptr8 = (u64 *) &ptr[start & 7];
if (start & 8) {
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
VR_SELECT(1, 1, 1, 1));
sum1 += VR_CARRY(0, 0, 1, 0);
}
ptr8_o = (u64 *) (ptr + start);
ptr8_e = (u64 *) (ptr + start + 8);
if (mid) {
x0 = *ptr8_e; ptr8_e += 2;
x1 = *ptr8_o; ptr8_o += 2;
if (mid > 1)
for (i = 0; i < mid-1; i++) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
x0^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
x1^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
x0 = *ptr8_e; ptr8_e += 2;
x1 = *ptr8_o; ptr8_o += 2;
}
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0, x0^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1, x1^VR_NEGATE(1, 1, 1, 1),
VR_SELECT(1, 1, 1, 1));
}
ptr4 = (unsigned int *) &ptr[start + (mid * 16) + (end & 8)];
if (end & 4) {
sum1 = HEXAGON_P_vrmpyhacc_PP(sum1,
VR_NEGATE(0, 0, 1, 1)^((u64)ptr4[0]),
VR_SELECT(0, 0, 1, 1));
sum1 += VR_SELECT(0, 0, 1, 0);
}
ptr2 = (unsigned short *) &ptr[start + (mid * 16) + (end & 12)];
if (end & 2)
sum0 += (u64) ptr2[0];
if (end & 1)
sum1 += (u64) ptr[start + (mid * 16) + (end & 14)];
ptr8 = (u64 *) &ptr[start + (mid * 16)];
if (end & 8) {
sum0 = HEXAGON_P_vrmpyhacc_PP(sum0,
VR_NEGATE(1, 1, 1, 1)^(ptr8[0]),
VR_SELECT(1, 1, 1, 1));
sum0 += VR_CARRY(0, 0, 1, 0);
}
sum0 = HEXAGON_P_vrmpyh_PP((sum0+sum1)^VR_NEGATE(0, 0, 0, 1),
VR_SELECT(0, 0, 1, 1));
sum0 += VR_NEGATE(0, 0, 0, 1);
sum0 = HEXAGON_P_vrmpyh_PP(sum0, VR_SELECT(0, 0, 1, 1));
if (start & 1)
sum0 = (sum0 << 8) | (0xFF & (sum0 >> 8));
return 0xFFFF & sum0;
}
| linux-master | arch/hexagon/lib/checksum.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Memory fault handling for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
/*
* Page fault handling for the Hexagon Virtual Machine.
* Can also be called by a native port emulating the HVM
* exceptions.
*/
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
/*
* Decode of hardware exception sends us to one of several
* entry points. At each, we generate canonical arguments
* for handling by the abstract memory management code.
*/
#define FLT_IFETCH -1
#define FLT_LOAD 0
#define FLT_STORE 1
/*
* Canonical page fault handler
*/
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
int si_signo;
int si_code = SEGV_MAPERR;
vm_fault_t fault;
const struct exception_table_entry *fixup;
unsigned int flags = FAULT_FLAG_DEFAULT;
/*
* If we're in an interrupt or have no user context,
* then must not take the fault.
*/
if (unlikely(in_interrupt() || !mm))
goto no_context;
local_irq_enable();
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
goto bad_area_nosemaphore;
/* Address space is OK. Now check access rights. */
si_code = SEGV_ACCERR;
switch (cause) {
case FLT_IFETCH:
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
break;
case FLT_LOAD:
if (!(vma->vm_flags & VM_READ))
goto bad_area;
break;
case FLT_STORE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
flags |= FAULT_FLAG_WRITE;
break;
}
fault = handle_mm_fault(vma, address, flags, regs);
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
goto no_context;
return;
}
/* The fault is fully completed (including releasing mmap lock) */
if (fault & VM_FAULT_COMPLETED)
return;
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
mmap_read_unlock(mm);
return;
}
mmap_read_unlock(mm);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
/* User-mode address is in the memory map, but we are
* unable to fix up the page fault.
*/
if (fault & VM_FAULT_SIGBUS) {
si_signo = SIGBUS;
si_code = BUS_ADRERR;
}
/* Address is not in the memory map */
else {
si_signo = SIGSEGV;
si_code = SEGV_ACCERR;
}
force_sig_fault(si_signo, si_code, (void __user *)address);
return;
bad_area:
mmap_read_unlock(mm);
bad_area_nosemaphore:
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
/* Kernel-mode fault falls through */
no_context:
fixup = search_exception_tables(pt_elr(regs));
if (fixup) {
pt_set_elr(regs, fixup->fixup);
return;
}
/* Things are looking very, very bad now */
bust_spinlocks(1);
printk(KERN_EMERG "Unable to handle kernel paging request at "
"virtual address 0x%08lx, regs %p\n", address, regs);
die("Bad Kernel VA", regs, SIGKILL);
}
void read_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_LOAD, regs);
}
void write_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_STORE, regs);
}
void execute_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_IFETCH, regs);
}
| linux-master | arch/hexagon/mm/vm_fault.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Memory subsystem initialization for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>
/*
* Define a startpg just past the end of the kernel image and a lastpg
* that corresponds to the end of real or simulated platform memory.
*/
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
unsigned long bootmem_lastpg; /* Should be set by platform code */
unsigned long __phys_offset; /* physical kernel offset >> 12 */
/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;
/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;
/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;
/*
* The current "generation" of kernel map, which should not roll
* over until Hell freezes over. Actual bound in years needs to be
* calculated to confirm.
*/
DEFINE_SPINLOCK(kmap_gen_lock);
/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;
/*
* mem_init - initializes memory
*
* Frees up bootmem
* Fixes up more stuff for HIGHMEM
* Calculates and displays memory available/used
*/
void __init mem_init(void)
{
/* memblock_free_all() (linux/memblock.h) hands free memblock memory to the page allocator. */
memblock_free_all();
/*
* To-Do: someone somewhere should wipe out the bootmem map
* after we're done?
*/
/*
* This can be moved to some more virtual-memory-specific
* initialization hook at some point. Set the init_mm
* descriptors "context" value to point to the initial
* kernel segment table's physical address.
*/
init_mm.context.ptbase = __pa(init_mm.pgd);
}
void sync_icache_dcache(pte_t pte)
{
unsigned long addr;
struct page *page;
page = pte_page(pte);
addr = (unsigned long) page_address(page);
__vmcache_idsync(addr, PAGE_SIZE);
}
/*
* In order to set up page allocator "nodes",
* somebody has to call free_area_init() for UMA.
*
* In this mode, we only have one pg_data_t
* structure: contig_mem_data.
*/
void __init paging_init(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
/*
* This is not particularly well documented anywhere, but
* give ZONE_NORMAL all the memory, including the big holes
* left by the kernel+bootmem_map which are already left as reserved
* in the bootmem_map; free_area_init should see those bits and
* adjust accordingly.
*/
max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfn); /* sets up the zonelists and mem_map */
/*
* Start of high memory area. Will probably need something more
* fancy if we... get more fancy.
*/
high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}
#ifndef DMA_RESERVE
#define DMA_RESERVE (4)
#endif
#define DMA_CHUNKSIZE (1<<22)
#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
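/*
 * DMA_RESERVE is in units of 4MB chunks (DMA_CHUNKSIZE == 1 << 22), so the
 * default of 4 carves 16MB off the top of RAM for the uncached coherent
 * DMA pool that arch/hexagon/kernel/dma.c registers later.
 */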
/*
* Pick out the memory size. We look for mem=size,
* where size is "size[KkMm]"
*/
static int __init early_mem(char *p)
{
unsigned long size;
char *endp;
size = memparse(p, &endp);
bootmem_lastpg = PFN_DOWN(size);
return 0;
}
early_param("mem", early_mem);
size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);
void __init setup_arch_memory(void)
{
/* XXX Todo: this probably should be cleaned up */
u32 *segtable = (u32 *) &swapper_pg_dir[0];
u32 *segtable_end;
/*
* Set up boot memory allocator
*
* The Gorman book also talks about these functions.
* This needs to change for highmem setups.
*/
/* Prior to this, bootmem_lastpg is actually mem size */
bootmem_lastpg += ARCH_PFN_OFFSET;
/* Memory size needs to be a multiple of 16M */
bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
~((BIG_KERNEL_PAGE_SIZE) - 1));
memblock_add(PHYS_OFFSET,
(bootmem_lastpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
/* Reserve kernel text/data/bss */
memblock_reserve(PHYS_OFFSET,
(bootmem_startpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
/*
* Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
* memory allocation
*/
max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
min_low_pfn = ARCH_PFN_OFFSET;
memblock_reserve(PFN_PHYS(max_low_pfn), DMA_RESERVED_BYTES);
printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
/*
* The default VM page tables (will be) populated with
* VA=PA+PAGE_OFFSET mapping. We go in and invalidate entries
* higher than what we have memory for.
*/
/* this is pointer arithmetic; each entry covers 4MB */
segtable = segtable + (PAGE_OFFSET >> 22);
/* this actually only goes to the end of the first gig */
segtable_end = segtable + (1<<(30-22));
/*
* Move forward to the start of empty pages; take into account
* phys_offset shift.
*/
segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
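/*
 * Remap the last DMA_RESERVE 4MB segments (the coherent DMA region
 * reserved above) as uncached (__HEXAGON_C_UNC) read/write/execute
 * 4MB mappings.
 */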
{
int i;
for (i = 1 ; i <= DMA_RESERVE ; i++)
segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
| __HEXAGON_C_UNC << 6
| __HVM_PDE_S_4MB);
}
printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
segtable_end);
while (segtable < (segtable_end-8))
*(segtable++) = __HVM_PDE_S_INVALID;
/* stop the pointer at the device I/O 4MB page */
printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
segtable);
#if 0
/* Other half of the early device table from vm_init_segtable. */
printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
(unsigned long) _K_init_devicetable-PAGE_OFFSET);
*segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
__HVM_PDE_S_4KB;
printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif
/*
* The bootmem allocator seemingly just lives to feed memory
* to the paging system
*/
printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
paging_init(); /* See Gorman Book, 2.3 */
/*
* At this point, the page allocator is kind of initialized, but
* apparently no pages are available (just like with the bootmem
* allocator), and need to be freed themselves via mem_init(),
* which is called by start_kernel() later on in the process
*/
}
static const pgprot_t protection_map[16] = {
[VM_NONE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_SHARED] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
CACHEDEF),
[VM_SHARED | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | CACHEDEF),
[VM_SHARED | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_WRITE | CACHEDEF),
[VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | _PAGE_WRITE |
CACHEDEF),
[VM_SHARED | VM_EXEC] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | CACHEDEF),
[VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_READ |
CACHEDEF),
[VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_EXECUTE | _PAGE_WRITE |
CACHEDEF),
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_PAGE_PRESENT | _PAGE_USER |
_PAGE_READ | _PAGE_EXECUTE |
_PAGE_WRITE | CACHEDEF)
};
DECLARE_VM_GET_PAGE_PROT
| linux-master | arch/hexagon/mm/init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hexagon Virtual Machine TLB functions
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
/*
* The Hexagon Virtual Machine conceals the real workings of
* the TLB, but there are one or two functions that need to
* be instantiated for it, differently from a native build.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
/*
* Initial VM implementation has only one map active at a time, with
* TLB purgings on changes. So either we're nuking the current map,
* or it's a no-op. This operation is messy on true SMPs where other
* processors must be induced to flush the copies in their local TLBs,
* but Hexagon thread-based virtual processors share the same MMU.
*/
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context.ptbase == current->active_mm->context.ptbase)
__vmclrmap((void *)start, end - start);
}
/*
* Flush a page from the kernel virtual map - used by highmem
*/
void flush_tlb_one(unsigned long vaddr)
{
__vmclrmap((void *)vaddr, PAGE_SIZE);
}
/*
* Flush all TLBs across all CPUs, virtual or real.
* A single Hexagon core has 6 thread contexts but
* only one TLB.
*/
void tlb_flush_all(void)
{
/* should probably use the fixaddr end label, or whatever it's called */
__vmclrmap(0, 0xffff0000);
}
/*
* Flush TLB entries associated with a given mm_struct mapping.
*/
void flush_tlb_mm(struct mm_struct *mm)
{
/* Current Virtual Machine has only one map active at a time */
if (current->active_mm->context.ptbase == mm->context.ptbase)
tlb_flush_all();
}
/*
* Flush TLB state associated with a page of a vma.
*/
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
if (mm->context.ptbase == current->active_mm->context.ptbase)
__vmclrmap((void *)vaddr, PAGE_SIZE);
}
/*
* Flush TLB entries associated with a kernel address range.
* Like flush range, but without the check on the vma->vm_mm.
*/
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
__vmclrmap((void *)start, end - start);
}
| linux-master | arch/hexagon/mm/vm_tlb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cache management functions for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/hexagon_vm.h>
#define spanlines(start, end) \
(((end - (start & ~(LINESIZE - 1))) >> LINEBITS) + 1)
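/*
 * spanlines(start, end) counts the cache lines touched by the inclusive
 * byte range [start, end]; LINESIZE and LINEBITS come from the cache
 * definitions pulled in via asm/cacheflush.h.
 */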
void flush_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleaninva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
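/*
 * For each line: write the D-cache line back to memory (dccleana), then
 * invalidate the matching I-cache line (icinva) so newly written
 * instructions get refetched; the trailing isync discards anything
 * already prefetched.
 */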
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleana(%0); "
" icinva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
__asm__ __volatile__ (
"isync"
);
local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);
void hexagon_clean_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dccleana(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
void hexagon_inv_dcache_range(unsigned long start, unsigned long end)
{
unsigned long lines = spanlines(start, end-1);
unsigned long i, flags;
start &= ~(LINESIZE - 1);
local_irq_save(flags);
for (i = 0; i < lines; i++) {
__asm__ __volatile__ (
" dcinva(%0); "
:
: "r" (start)
);
start += LINESIZE;
}
local_irq_restore(flags);
}
/*
* This is just really brutal and shouldn't be used anyway,
* especially on V2. Left here just in case.
*/
void flush_cache_all_hexagon(void)
{
unsigned long flags;
local_irq_save(flags);
__vmcache_ickill();
__vmcache_dckill();
__vmcache_l2kill();
local_irq_restore(flags);
mb();
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len)
{
memcpy(dst, src, len);
if (vma->vm_flags & VM_EXEC) {
flush_icache_range((unsigned long) dst,
(unsigned long) dst + len);
}
}
| linux-master | arch/hexagon/mm/cache.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
/*
* Support for user memory access from kernel. This will
* probably be inlined for performance at some point, but
* for ease of debug, and to a lesser degree for code size,
* we implement here as subroutines.
*/
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
/*
* For clear_user(), exploit previously defined copy_to_user function
* and the fact that we've got a handy zero page defined in kernel/head.S
*
* dczero here would be even faster.
*/
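/*
 * Returns the number of bytes that could NOT be cleared (0 on complete
 * success), matching the raw_copy_to_user() convention it builds on.
 */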
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
{
long uncleared;
while (count > PAGE_SIZE) {
uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
if (uncleared)
return count - (PAGE_SIZE - uncleared);
count -= PAGE_SIZE;
dest += PAGE_SIZE;
}
if (count)
count = raw_copy_to_user(dest, &empty_zero_page, count);
return count;
}
unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
{
if (!access_ok(dest, count))
return count;
else
return __clear_user_hexagon(dest, count);
}
| linux-master | arch/hexagon/mm/uaccess.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* System call table for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscall.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
| linux-master | arch/hexagon/kernel/syscalltab.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Process creation support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/resume_user_mode.h>
/*
* Program thread launch. Often defined as a macro in processor.h,
* but we're shooting for a small footprint and it's not an inner-loop
* performance-critical operation.
*
* The Hexagon ABI specifies that R28 is zero'ed before program launch,
* so that gets automatically done here. If we ever stop doing that here,
* we'll probably want to define the ELF_PLAT_INIT macro.
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
/* We want to zero all data-containing registers. Is this overkill? */
memset(regs, 0, sizeof(*regs));
/* We might want to also zero all Processor registers here */
pt_set_usermode(regs);
pt_set_elr(regs, pc);
pt_set_rte_sp(regs, sp);
}
/*
* Spin, or better still, do a hardware or VM wait instruction, if the
* hardware or VM offers wait termination even while interrupts
* are disabled.
*/
void arch_cpu_idle(void)
{
__vmwait();
/* interrupts wake us up, but irqs are still disabled */
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct hexagon_switch_stack *ss;
struct pt_regs *childregs;
asmlinkage void ret_from_fork(void);
childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
sizeof(*childregs));
ti->regs = childregs;
/*
* Establish kernel stack pointer and initial PC for new thread
* Note that unlike the usual situation, we do not copy the
* parent's callee-saved here; those are in pt_regs and whatever
* we leave here will be overridden on return to userland.
*/
ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
if (unlikely(args->fn)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r24 = (unsigned long)args->fn;
ss->r25 = (unsigned long)args->fn_arg;
pt_set_kmode(childregs);
return 0;
}
memcpy(childregs, current_pt_regs(), sizeof(*childregs));
ss->r2524 = 0;
if (usp)
pt_set_rte_sp(childregs, usp);
/* Child sees zero return value */
childregs->r00 = 0;
/*
* The clone syscall has the C signature:
* int [r0] clone(int flags [r0],
* void *child_frame [r1],
* void *parent_tid [r2],
* void *child_tid [r3],
* void *thread_control_block [r4]);
* ugp is used to provide TLS support.
*/
if (clone_flags & CLONE_SETTLS)
childregs->ugp = tls;
/*
* Parent sees new pid -- not necessary, not even possible at
* this point in the fork process
*/
return 0;
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
/*
* The "wait channel" terminology is archaic, but what we want
* is an identification of the point at which the scheduler
* was invoked by a blocked thread.
*/
unsigned long __get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
do {
if (fp < (stack_page + sizeof(struct thread_info)) ||
fp >= (THREAD_SIZE - 8 + stack_page))
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
/*
* Called on the exit path of event entry; see vm_entry.S
*
* Interrupts will already be disabled.
*
* Returns 0 if there's no need to re-check for more work.
*/
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
if (!(thread_info_flags & _TIF_WORK_MASK)) {
return 0;
} /* shortcut -- no work to be done */
local_irq_enable();
if (thread_info_flags & _TIF_NEED_RESCHED) {
schedule();
return 1;
}
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
do_signal(regs);
return 1;
}
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
resume_user_mode_work(regs);
return 1;
}
/* Should not even reach here */
panic("%s: bad thread_info flags 0x%08x\n", __func__,
thread_info_flags);
}
| linux-master | arch/hexagon/kernel/process.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Mostly IRQ support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <asm/registers.h>
#include <linux/irq.h>
#include <linux/hardirq.h>
/*
* show_regs - print pt_regs structure
* @regs: pointer to pt_regs
*
* To-do: add all the accessor definitions to registers.h
*
* Will make this routine a lot easier to write.
*/
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_EMERG);
printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
regs->restart_r0, regs->syscall_nr);
printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
regs->lc0, regs->sa0, regs->m0);
printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
regs->lc1, regs->sa1, regs->m1);
printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
regs->gp, regs->ugp, regs->usr);
printk(KERN_EMERG "cs0: \t0x%08lx cs1: 0x%08lx\n",
regs->cs0, regs->cs1);
printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
regs->r01,
regs->r02,
regs->r03);
printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
regs->r05,
regs->r06,
regs->r07);
printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
regs->r09,
regs->r10,
regs->r11);
printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
regs->r13,
regs->r14,
regs->r15);
printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
regs->r17,
regs->r18,
regs->r19);
printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
regs->r21,
regs->r22,
regs->r23);
printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
regs->r25,
regs->r26,
regs->r27);
printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
regs->r29,
regs->r30,
regs->r31);
printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
pt_elr(regs), pt_cause(regs), user_mode(regs));
printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
pt_psp(regs), pt_badva(regs), ints_enabled(regs));
}
void dummy_handler(struct pt_regs *regs)
{
unsigned int elr = pt_elr(regs);
printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
}
void arch_do_IRQ(struct pt_regs *regs)
{
int irq = pt_cause(regs);
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
}
| linux-master | arch/hexagon/kernel/vm_events.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ptrace support for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <asm/user.h>
#if arch_has_single_step()
/* Both called from ptrace_resume */
void user_enable_single_step(struct task_struct *child)
{
pt_set_singlestep(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
pt_clr_singlestep(task_pt_regs(child));
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
#endif
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
/* The general idea here is that the copyout must happen in
* exactly the same order in which the userspace expects these
* regs. Now, the sequence in userspace does not match the
* sequence in the kernel, so everything past the 32 gprs
* happens one at a time.
*/
membuf_write(&to, ®s->r00, 32*sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
membuf_store(&to, regs->sa0);
membuf_store(&to, regs->lc0);
membuf_store(&to, regs->sa1);
membuf_store(&to, regs->lc1);
membuf_store(&to, regs->m0);
membuf_store(&to, regs->m1);
membuf_store(&to, regs->usr);
membuf_store(&to, regs->preds);
membuf_store(&to, regs->gp);
membuf_store(&to, regs->ugp);
membuf_store(&to, pt_elr(regs)); // pc
membuf_store(&to, (unsigned long)pt_cause(regs)); // cause
membuf_store(&to, pt_badva(regs)); // badva
#if CONFIG_HEXAGON_ARCH_VERSION >=4
membuf_store(&to, regs->cs0);
membuf_store(&to, regs->cs1);
return membuf_zero(&to, sizeof(unsigned long));
#else
return membuf_zero(&to, 3 * sizeof(unsigned long));
#endif
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long bucket;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
®s->r00, 0, 32*sizeof(unsigned long));
#define INEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
INEXT(®s->sa0, sa0);
INEXT(®s->lc0, lc0);
INEXT(®s->sa1, sa1);
INEXT(®s->lc1, lc1);
INEXT(®s->m0, m0);
INEXT(®s->m1, m1);
INEXT(®s->usr, usr);
INEXT(®s->preds, p3_0);
INEXT(®s->gp, gp);
INEXT(®s->ugp, ugp);
INEXT(&pt_elr(regs), pc);
/* CAUSE and BADVA aren't writeable. */
INEXT(&bucket, cause);
INEXT(&bucket, badva);
#if CONFIG_HEXAGON_ARCH_VERSION >=4
INEXT(®s->cs0, cs0);
INEXT(®s->cs1, cs1);
#endif
/* Ignore the rest, if needed */
if (!ret)
user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
else
return ret;
/*
* This is special; SP is actually restored by the VM via the
* special event record which is set by the special trap.
*/
regs->hvmer.vmpsp = regs->r29;
return 0;
}
enum hexagon_regset {
REGSET_GENERAL,
};
static const struct user_regset hexagon_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.regset_get = genregs_get,
.set = genregs_set,
},
};
static const struct user_regset_view hexagon_user_view = {
.name = "hexagon",
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = hexagon_regsets,
.e_flags = ELF_CORE_EFLAGS,
.n = ARRAY_SIZE(hexagon_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &hexagon_user_view;
}
void ptrace_disable(struct task_struct *child)
{
/* Boilerplate - resolves to null inline if no HW single-step */
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
return ptrace_request(child, request, addr, data);
}
| linux-master | arch/hexagon/kernel/ptrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel traps/events for Hexagon processor
*
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb
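/*
 * trap0 immediate values: #1 is the system call trap, #0xdb is the
 * debugger breakpoint trap; both are dispatched in do_trap0() below.
 */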
#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_GENERIC_BUG */
static const char *ex_name(int ex)
{
switch (ex) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
return "Execute protection fault";
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
return "Read protection fault";
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
return "Write protection fault";
case HVM_GE_C_XMAL:
return "Misaligned instruction";
case HVM_GE_C_WREG:
return "Multiple writes to same register in packet";
case HVM_GE_C_PCAL:
return "Program counter values that are not properly aligned";
case HVM_GE_C_RMAL:
return "Misaligned data load";
case HVM_GE_C_WMAL:
return "Misaligned data store";
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
return "Illegal instruction";
case HVM_GE_C_BUS:
return "Precise bus error";
case HVM_GE_C_CACHE:
return "Cache error";
case 0xdb:
return "Debugger trap";
default:
return "Unrecognized exception";
}
}
static void do_show_stack(struct task_struct *task, unsigned long *fp,
unsigned long ip, const char *loglvl)
{
int kstack_depth_to_print = 24;
unsigned long offset, size;
const char *name = NULL;
unsigned long *newfp;
unsigned long low, high;
char tmpstr[128];
char *modname;
int i;
if (task == NULL)
task = current;
printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
task->comm, task_pid_nr(task));
if (fp == NULL) {
if (task == current) {
asm("%0 = r30" : "=r" (fp));
} else {
fp = (unsigned long *)
((struct hexagon_switch_stack *)
task->thread.switch_sp)->fp;
}
}
if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
return;
}
/* Saved link reg is one word above FP */
if (!ip)
ip = *(fp+1);
/* Expect kernel stack to be in-bounds */
low = (unsigned long)task_stack_page(task);
high = low + THREAD_SIZE - 8;
low += sizeof(struct thread_info);
for (i = 0; i < kstack_depth_to_print; i++) {
name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
if (((unsigned long) fp < low) || (high < (unsigned long) fp))
printk(KERN_CONT " (FP out of bounds!)");
if (modname)
printk(KERN_CONT " [%s] ", modname);
printk(KERN_CONT "\n");
newfp = (unsigned long *) *fp;
if (((unsigned long) newfp) & 0x3) {
printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
break;
}
/* Attempt to continue past exception. */
if (0 == newfp) {
struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
+ 8);
if (regs->syscall_nr != -1) {
printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
regs->syscall_nr);
printk(KERN_CONT " psp: %lx elr: %lx\n",
pt_psp(regs), pt_elr(regs));
break;
} else {
/* really want to see more ... */
kstack_depth_to_print += 6;
printk("%s-- %s (0x%lx) badva: %lx\n", loglvl,
ex_name(pt_cause(regs)), pt_cause(regs),
pt_badva(regs));
}
newfp = (unsigned long *) regs->r30;
ip = pt_elr(regs);
} else {
ip = *(newfp + 1);
}
/* If link reg is null, we are done. */
if (ip == 0x0)
break;
/* If newfp isn't larger, we're tracing garbage. */
if (newfp > fp)
fp = newfp;
else
break;
}
}
void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
/* Saved link reg is one word above FP */
do_show_stack(task, fp, 0, loglvl);
}
int die(const char *str, struct pt_regs *regs, long err)
{
static struct {
spinlock_t lock;
int counter;
} die = {
.lock = __SPIN_LOCK_UNLOCKED(die.lock),
.counter = 0
};
console_verbose();
oops_enter();
spin_lock_irq(&die.lock);
bust_spinlocks(1);
printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
NOTIFY_STOP)
return 1;
print_modules();
show_regs(regs);
do_show_stack(current, ®s->r30, pt_elr(regs), KERN_EMERG);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die.lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
make_task_dead(err);
return 0;
}
int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
return die(str, regs, err);
else
return 0;
}
/*
* It's not clear that misaligned fetches are ever recoverable.
*/
static void misaligned_instruction(struct pt_regs *regs)
{
die_if_kernel("Misaligned Instruction", regs, 0);
force_sig(SIGBUS);
}
/*
* Misaligned loads and stores, on the other hand, can be
* emulated, and probably should be, some day. But for now
* they will be considered fatal.
*/
static void misaligned_data_load(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Load", regs, 0);
force_sig(SIGBUS);
}
static void misaligned_data_store(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Store", regs, 0);
force_sig(SIGBUS);
}
static void illegal_instruction(struct pt_regs *regs)
{
die_if_kernel("Illegal Instruction", regs, 0);
force_sig(SIGILL);
}
/*
* Precise bus errors may be recoverable with a retry,
* but for now, treat them as irrecoverable.
*/
static void precise_bus_error(struct pt_regs *regs)
{
die_if_kernel("Precise Bus Error", regs, 0);
force_sig(SIGBUS);
}
/*
* If anything is to be done here other than panic,
* it will probably be complex and migrate to another
* source module. For now, just die.
*/
static void cache_error(struct pt_regs *regs)
{
die("Cache Error", regs, 0);
}
/*
* General exception handler
*/
void do_genex(struct pt_regs *regs)
{
/*
* Decode Cause and Dispatch
*/
switch (pt_cause(regs)) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
execute_protection_fault(regs);
break;
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
read_protection_fault(regs);
break;
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
write_protection_fault(regs);
break;
case HVM_GE_C_XMAL:
misaligned_instruction(regs);
break;
case HVM_GE_C_WREG:
illegal_instruction(regs);
break;
case HVM_GE_C_PCAL:
misaligned_instruction(regs);
break;
case HVM_GE_C_RMAL:
misaligned_data_load(regs);
break;
case HVM_GE_C_WMAL:
misaligned_data_store(regs);
break;
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
illegal_instruction(regs);
break;
case HVM_GE_C_BUS:
precise_bus_error(regs);
break;
case HVM_GE_C_CACHE:
cache_error(regs);
break;
default:
/* Halt and catch fire */
panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
break;
}
}
/* Indirect system call dispatch */
long sys_syscall(void)
{
printk(KERN_ERR "sys_syscall invoked!\n");
return -ENOSYS;
}
void do_trap0(struct pt_regs *regs)
{
syscall_fn syscall;
switch (pt_cause(regs)) {
case TRAP_SYSCALL:
/* System call is trap0 #1 */
/* allow strace to catch syscall args */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
ptrace_report_syscall_entry(regs)))
return; /* return -ENOSYS somewhere? */
/* Interrupts should be re-enabled for syscall processing */
__vmsetie(VM_INT_ENABLE);
/*
* System call number is in r6, arguments in r0..r5.
* Fortunately, no Linux syscall has more than 6 arguments,
* and Hexagon ABI passes first 6 arguments in registers.
* 64-bit arguments are passed in odd/even register pairs.
* Fortunately, we have no system calls that take more
* than three arguments with more than one 64-bit value.
* Should that change, we'd need to redesign to copy
* between user and kernel stacks.
*/
regs->syscall_nr = regs->r06;
/*
* GPR R0 carries the first parameter, and is also used
* to report the return value. We need a backup of
* the user's value in case we need to do a late restart
* of the system call.
*/
regs->restart_r0 = regs->r00;
if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
regs->r00 = -1;
} else {
syscall = (syscall_fn)
(sys_call_table[regs->syscall_nr]);
regs->r00 = syscall(regs->r00, regs->r01,
regs->r02, regs->r03,
regs->r04, regs->r05);
}
/* allow strace to get the syscall return state */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
ptrace_report_syscall_exit(regs, 0);
break;
case TRAP_DEBUG:
/* Trap0 0xdb is debug breakpoint */
if (user_mode(regs)) {
/*
* Some architectures add some per-thread state
* to distinguish between breakpoint traps and
* trace traps. We may want to do that, and
* set the si_code value appropriately, or we
* may want to use a different trap0 flavor.
*/
force_sig_fault(SIGTRAP, TRAP_BRKPT,
(void __user *) pt_elr(regs));
} else {
#ifdef CONFIG_KGDB
kgdb_handle_exception(pt_cause(regs), SIGTRAP,
TRAP_BRKPT, regs);
#endif
}
break;
}
/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}
/*
* Machine check exception handler
*/
void do_machcheck(struct pt_regs *regs)
{
/* Halt and catch fire */
__vmstop();
}
/*
* Treat this like the old 0xdb trap.
*/
void do_debug_exception(struct pt_regs *regs)
{
regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
do_trap0(regs);
}
| linux-master | arch/hexagon/kernel/traps.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Kevin Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kbuild.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
/* This file is used to produce asm/linker-script constants from header
files typically used in C. Specifically, it generates asm-offsets.h */
int main(void)
{
COMMENT("This is a comment.");
/* might get these from somewhere else. */
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
COMMENT("Hexagon pt_regs definitions");
OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
OFFSET(_PT_GPUGP, pt_regs, gpugp);
OFFSET(_PT_CS1CS0, pt_regs, cs1cs0);
OFFSET(_PT_R3130, pt_regs, r3130);
OFFSET(_PT_R2928, pt_regs, r2928);
OFFSET(_PT_R2726, pt_regs, r2726);
OFFSET(_PT_R2524, pt_regs, r2524);
OFFSET(_PT_R2322, pt_regs, r2322);
OFFSET(_PT_R2120, pt_regs, r2120);
OFFSET(_PT_R1918, pt_regs, r1918);
OFFSET(_PT_R1716, pt_regs, r1716);
OFFSET(_PT_R1514, pt_regs, r1514);
OFFSET(_PT_R1312, pt_regs, r1312);
OFFSET(_PT_R1110, pt_regs, r1110);
OFFSET(_PT_R0908, pt_regs, r0908);
OFFSET(_PT_R0706, pt_regs, r0706);
OFFSET(_PT_R0504, pt_regs, r0504);
OFFSET(_PT_R0302, pt_regs, r0302);
OFFSET(_PT_R0100, pt_regs, r0100);
OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
OFFSET(_PT_M1M0, pt_regs, m1m0);
OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
OFFSET(_PT_EVREC, pt_regs, hvmer);
OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
COMMENT("Hexagon thread_info definitions");
OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
OFFSET(_THREAD_INFO_SP, thread_info, sp);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
BLANK();
COMMENT("Hexagon hexagon_switch_stack definitions");
OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);
OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
BLANK();
COMMENT("Hexagon task_struct definitions");
OFFSET(_TASK_THREAD_INFO, task_struct, stack);
OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
COMMENT("Hexagon thread_struct definitions");
OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);
return 0;
}
| linux-master | arch/hexagon/kernel/asm-offsets.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Kernel module loader for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <asm/module.h>
#include <linux/elf.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif
/*
* module_frob_arch_sections - tweak got/plt sections.
* @hdr - pointer to elf header
* @sechdrs - pointer to elf load section headers
* @secstrings - symbol names
* @mod - pointer to module
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
unsigned int i;
int found = 0;
/* Look for .plt and/or .got.plt and/or .init.plt sections */
for (i = 0; i < hdr->e_shnum; i++) {
DEBUGP("Section %d is %s\n", i,
secstrings + sechdrs[i].sh_name);
if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".got.plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".rela.plt") == 0)
found = i+1;
}
/* At this time, we don't support modules compiled with -shared */
if (found) {
printk(KERN_WARNING
"Module '%s' contains unexpected .plt/.got sections.\n",
mod->name);
/* return -ENOEXEC; */
}
return 0;
}
/*
* apply_relocate_add - perform rela relocations.
* @sechdrs - pointer to section headers
* @strtab - string table holding the symbol names
* @symindex - section index of the symbol table
* @relsec - section index of the relocation section to apply
* @module - pointer to module
*
* Perform rela relocations.
*/
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *module)
{
unsigned int i;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
unsigned int nrelocs = sechdrs[relsec].sh_size / sizeof(Elf32_Rela);
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Word sym_info = sechdrs[relsec].sh_info;
Elf32_Sym *sym_base = (Elf32_Sym *) sechdrs[symindex].sh_addr;
void *loc_base = (void *) sechdrs[sym_info].sh_addr;
DEBUGP("Applying relocations in section %u to section %u base=%p\n",
relsec, sym_info, loc_base);
for (i = 0; i < nrelocs; i++) {
/* Symbol to relocate */
sym = sym_base + ELF32_R_SYM(rela[i].r_info);
/* Where to make the change */
location = loc_base + rela[i].r_offset;
/* `Everything is relative'. */
value = sym->st_value + rela[i].r_addend;
DEBUGP("%d: value=%08x loc=%p reloc=%d symbol=%s\n",
i, value, location, ELF32_R_TYPE(rela[i].r_info),
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_HEXAGON_B22_PCREL: {
int dist = (int)(value - (uint32_t)location);
if ((dist < -0x00800000) ||
(dist >= 0x00800000)) {
printk(KERN_ERR
"%s: %s: %08x=%08x-%08x %s\n",
module->name,
"R_HEXAGON_B22_PCREL reloc out of range",
dist, value, (uint32_t)location,
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
return -ENOEXEC;
}
DEBUGP("B22_PCREL contents: %08X.\n", *location);
*location &= ~0x01ff3fff;
*location |= 0x00003fff & dist;
*location |= 0x01ff0000 & (dist<<2);
DEBUGP("Contents after reloc: %08x\n", *location);
break;
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
fallthrough;
case R_HEXAGON_LO16:
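/*
 * HI16/LO16 each deposit a 16-bit half of the value into the standard
 * immediate fields: value bits 13:0 go to instruction bits 13:0 and
 * value bits 15:14 go to instruction bits 23:22.
 */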
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
*location |= (value & 0xc000) << 8;
break;
case R_HEXAGON_32:
*location = value;
break;
case R_HEXAGON_32_PCREL:
*location = value - (uint32_t)location;
break;
case R_HEXAGON_PLT_B22_PCREL:
case R_HEXAGON_GOTOFF_LO16:
case R_HEXAGON_GOTOFF_HI16:
printk(KERN_ERR "%s: GOT/PLT relocations unsupported\n",
module->name);
return -ENOEXEC;
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
| linux-master | arch/hexagon/kernel/module.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* vDSO implementation for Hexagon
*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>
#include <asm/vdso.h>
static struct page *vdso_page;
/* Create a vDSO page holding the signal trampoline.
* We want this for a non-executable stack.
*/
static int __init vdso_init(void)
{
struct hexagon_vdso *vdso;
vdso_page = alloc_page(GFP_KERNEL);
if (!vdso_page)
panic("Cannot allocate vdso");
vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
if (!vdso)
panic("Cannot map vdso");
clear_page(vdso);
/* Install the signal trampoline; currently looks like this:
* r6 = #__NR_rt_sigreturn;
* trap0(#1);
*/
vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];
vunmap(vdso);
return 0;
}
arch_initcall(vdso_init);
/*
* Called from binfmt_elf. Create a VMA for the vDSO page.
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
if (mmap_write_lock_killable(mm))
return -EINTR;
/* Try to get it loaded right near ld.so/glibc. */
vdso_base = STACK_TOP;
vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
goto up_fail;
}
/* MAYWRITE to allow gdb to COW and set breakpoints. */
ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
&vdso_page);
if (ret)
goto up_fail;
mm->context.vdso = (void *)vdso_base;
up_fail:
mmap_write_unlock(mm);
return ret;
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";
return NULL;
}
| linux-master | arch/hexagon/kernel/vdso.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/smp.h>
#include <asm/hexagon_vm.h>
void machine_power_off(void)
{
smp_send_stop();
__vmstop();
}
void machine_halt(void)
{
}
void machine_restart(char *cmd)
{
}
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
| linux-master | arch/hexagon/kernel/reset.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Arch related setup for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/hexagon_vm.h>
#include <asm/vm_mmu.h>
#include <asm/time.h>
char cmd_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
int on_simulator;
void calibrate_delay(void)
{
loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
}
/*
* setup_arch - high level architectural setup routine
* @cmdline_p: pointer to pointer to command-line arguments
*/
void __init setup_arch(char **cmdline_p)
{
char *p = &external_cmdline_buffer;
/*
* These will eventually be pulled in via either some hypervisor
* or devicetree description. Hardwiring for now.
*/
pcycle_freq_mhz = 600;
thread_freq_mhz = 100;
sleep_clk_freq = 32000;
/*
* Set up event bindings to handle exceptions and interrupts.
*/
__vmsetvec(_K_VM_event_vector);
printk(KERN_INFO "PHYS_OFFSET=0x%08lx\n", PHYS_OFFSET);
/*
* Simulator has a few differences from the hardware.
* For now, check uninitialized-but-mapped memory
* prior to invoking setup_arch_memory().
*/
if (*(int *)((unsigned long)_end + 8) == 0x1f1f1f1f)
on_simulator = 1;
else
on_simulator = 0;
if (p[0] != '\0')
strscpy(boot_command_line, p, COMMAND_LINE_SIZE);
else
strscpy(boot_command_line, default_command_line,
COMMAND_LINE_SIZE);
/*
* boot_command_line and the value set up by setup_arch
* are both picked up by the init code. If no reason to
* make them different, pass the same pointer back.
*/
strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
parse_early_param();
setup_arch_memory();
#ifdef CONFIG_SMP
smp_start_cpus();
#endif
}
/*
* Functions for dumping CPU info via /proc
* Probably should move to kernel/proc.c or something.
*/
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
/*
* Eventually this will dump information about
* CPU properties like ISA level, TLB size, etc.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
int cpu = (unsigned long) v - 1;
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
return 0;
#endif
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
seq_printf(m, "BogoMips\t: %lu.%02lu\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100);
seq_printf(m, "\n");
return 0;
}
const struct seq_operations cpuinfo_op = {
.start = &c_start,
.next = &c_next,
.stop = &c_stop,
.show = &show_cpuinfo,
};
| linux-master | arch/hexagon/kernel/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DMA implementation for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <asm/page.h>
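/*
 * Streaming DMA cache maintenance: clean (write back) dirty lines so the
 * device sees CPU writes (DMA_TO_DEVICE), invalidate lines so stale cached
 * data does not shadow what the device will write (DMA_FROM_DEVICE), and
 * do both for DMA_BIDIRECTIONAL.
 */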
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
void *addr = phys_to_virt(paddr);
switch (dir) {
case DMA_TO_DEVICE:
hexagon_clean_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_FROM_DEVICE:
hexagon_inv_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_BIDIRECTIONAL:
flush_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
default:
BUG();
}
}
/*
* Our max_low_pfn should have been backed off by 16MB in mm/init.c to create
* DMA coherent space. Use that for the pool.
*/
static int __init hexagon_dma_init(void)
{
return dma_init_global_coherent(PFN_PHYS(max_low_pfn),
hexagon_coherent_pool_size);
}
core_initcall(hexagon_dma_init);
| linux-master | arch/hexagon/kernel/dma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Time related functions for Hexagon architecture
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <asm/hexagon_vm.h>
#define TIMER_ENABLE BIT(0)
/*
* For the clocksource we need:
* pcycle frequency (600MHz)
* For the loops_per_jiffy we need:
* thread/cpu frequency (100MHz)
* And for the timer, we need:
* sleep clock rate
*/
cycles_t pcycle_freq_mhz;
cycles_t thread_freq_mhz;
cycles_t sleep_clk_freq;
/*
* 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
* release 1.1, and then it's "adjustable" and probably not defaulted.
*/
#define RTOS_TIMER_INT 3
#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
static struct resource rtos_timer_resources[] = {
{
.start = RTOS_TIMER_REGS_ADDR,
.end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device rtos_timer_device = {
.name = "rtos_timer",
.id = -1,
.num_resources = ARRAY_SIZE(rtos_timer_resources),
.resource = rtos_timer_resources,
};
/* A lot of this stuff should move into a platform specific section. */
struct adsp_hw_timer_struct {
u32 match; /* Match value */
u32 count;
u32 enable; /* [1] - CLR_ON_MATCH_EN, [0] - EN */
u32 clear; /* one-shot register that clears the count */
};
/* Look for "TCX0" for related constants. */
static __iomem struct adsp_hw_timer_struct *rtos_timer;
static u64 timer_get_cycles(struct clocksource *cs)
{
return (u64) __vmgettime();
}
static struct clocksource hexagon_clocksource = {
.name = "pcycles",
.rating = 250,
.read = timer_get_cycles,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
/* Assuming the timer will be disabled when we enter here. */
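/*
 * Pulse the one-shot clear register to zero the count, program the
 * match value, then re-enable the timer.
 */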
iowrite32(1, &rtos_timer->clear);
iowrite32(0, &rtos_timer->clear);
iowrite32(delta, &rtos_timer->match);
iowrite32(TIMER_ENABLE, &rtos_timer->enable);
return 0;
}
#ifdef CONFIG_SMP
/* Broadcast mechanism */
static void broadcast(const struct cpumask *mask)
{
send_ipi(mask, IPI_TIMER);
}
#endif
/* XXX Implement set_state_shutdown() */
static struct clock_event_device hexagon_clockevent_dev = {
.name = "clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.irq = RTOS_TIMER_INT,
.set_next_event = set_next_event,
#ifdef CONFIG_SMP
.broadcast = broadcast,
#endif
};
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);
void setup_percpu_clockdev(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
struct clock_event_device *dummy_clock_dev =
&per_cpu(clock_events, cpu);
memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
INIT_LIST_HEAD(&dummy_clock_dev->list);
dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
dummy_clock_dev->cpumask = cpumask_of(cpu);
clockevents_register_device(dummy_clock_dev);
}
/* Called from smp.c for each CPU's timer ipi call */
void ipi_timer(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */
static irqreturn_t timer_interrupt(int irq, void *devid)
{
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
iowrite32(0, &rtos_timer->enable);
ce_dev->event_handler(ce_dev);
return IRQ_HANDLED;
}
/*
* time_init_deferred - called by start_kernel to set up timer/clock source
*
 * Install the IRQ handler for the clock and set up the timers.
 * This is done late so that ioremap() can be used.
*
* This runs just before the delay loop is calibrated, and
* is used for delay calibration.
*/
void __init time_init_deferred(void)
{
struct resource *resource = NULL;
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;
ce_dev->cpumask = cpu_all_mask;
if (!resource)
resource = rtos_timer_device.resource;
/* ioremap here means this has to run later, after paging init */
rtos_timer = ioremap(resource->start, resource_size(resource));
if (!rtos_timer) {
release_mem_region(resource->start, resource_size(resource));
}
clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);
/* Note: the sim generic RTOS clock is apparently really 18750Hz */
/*
* Last arg is some guaranteed seconds for which the conversion will
* work without overflow.
*/
clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);
ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
ce_dev->max_delta_ticks = 0x7fffffff;
ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
ce_dev->min_delta_ticks = 0xf;
#ifdef CONFIG_SMP
setup_percpu_clockdev();
#endif
clockevents_register_device(ce_dev);
if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
pr_err("Failed to register rtos_timer interrupt\n");
}
void __init time_init(void)
{
late_time_init = time_init_deferred;
}
void __delay(unsigned long cycles)
{
unsigned long long start = __vmgettime();
while ((__vmgettime() - start) < cycles)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
/*
* This could become parametric or perhaps even computed at run-time,
* but for now we take the observed simulator jitter.
*/
static long long fudgefactor = 350; /* Maybe lower if kernel optimized. */
void __udelay(unsigned long usecs)
{
unsigned long long start = __vmgettime();
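	/* finish is in pcycles: cycles per microsecond times usecs, minus the measured overhead */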
unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;
while ((__vmgettime() - start) < finish)
cpu_relax(); /* not sure how this improves readability */
}
EXPORT_SYMBOL(__udelay);
| linux-master | arch/hexagon/kernel/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* First-level interrupt controller model for Hexagon.
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/hexagon_vm.h>
static void mask_irq(struct irq_data *data)
{
__vmintop_locdis((long) data->irq);
}
static void mask_irq_num(unsigned int irq)
{
__vmintop_locdis((long) irq);
}
static void unmask_irq(struct irq_data *data)
{
__vmintop_locen((long) data->irq);
}
/* This is actually all we need for handle_fasteoi_irq */
static void eoi_irq(struct irq_data *data)
{
__vmintop_globen((long) data->irq);
}
/* Power management wake call. We don't need this, however,
* if this is absent, then an -ENXIO error is returned to the
* msm_serial driver, and it fails to correctly initialize.
* This is a bug in the msm_serial driver, but, for now, we
* work around it here, by providing this bogus handler.
* XXX FIXME!!! remove this when msm_serial is fixed.
*/
static int set_wake(struct irq_data *data, unsigned int on)
{
return 0;
}
static struct irq_chip hexagon_irq_chip = {
.name = "HEXAGON",
.irq_mask = mask_irq,
.irq_unmask = unmask_irq,
.irq_set_wake = set_wake,
.irq_eoi = eoi_irq
};
/**
* The hexagon core comes with a first-level interrupt controller
* with 32 total possible interrupts. When the core is embedded
* into different systems/platforms, it is typically wrapped by
* macro cells that provide one or more second-level interrupt
* controllers that are cascaded into one or more of the first-level
* interrupts handled here. The precise wiring of these other
 * irqs varies from platform to platform, and is set up & configured
* in the platform-specific files.
*
* The first-level interrupt controller is wrapped by the VM, which
* virtualizes the interrupt controller for us. It provides a very
* simple, fast & efficient API, and so the fasteoi handler is
* appropriate for this case.
*/
void __init init_IRQ(void)
{
int irq;
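	/* Mask every first-level interrupt; each is unmasked when claimed by a driver. */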
for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
mask_irq_num(irq);
irq_set_chip_and_handler(irq, &hexagon_irq_chip,
handle_fasteoi_irq);
}
}
| linux-master | arch/hexagon/kernel/irq_cpu.c |
#include <linux/screen_info.h>
struct screen_info screen_info;
| linux-master | arch/hexagon/kernel/screen_info.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Stacktrace support for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
struct stackframe {
unsigned long fp;
unsigned long rets;
};
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
unsigned long low, high;
unsigned long fp;
struct stackframe *frame;
int skip = trace->skip;
low = (unsigned long)task_stack_page(current);
high = low + THREAD_SIZE;
fp = (unsigned long)__builtin_frame_address(0);
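	/* Walk frame pointers while they remain within the current task's stack. */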
while (fp >= low && fp <= (high - sizeof(*frame))) {
frame = (struct stackframe *)fp;
if (skip) {
skip--;
} else {
trace->entries[trace->nr_entries++] = frame->rets;
if (trace->nr_entries >= trace->max_entries)
break;
}
/*
* The next frame must be at a higher address than the
* current frame.
*/
low = fp + sizeof(*frame);
fp = frame->fp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace);
| linux-master | arch/hexagon/kernel/stacktrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Signal support for Hexagon processor
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <linux/sched/task_stack.h>
#include <asm/registers.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
#include <asm/signal.h>
#include <asm/vdso.h>
struct rt_sigframe {
unsigned long tramp[2];
struct siginfo info;
struct ucontext uc;
};
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp = sigsp(regs->r29, ksig);
return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1));
}
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_to_user(&sc->sc_regs.r0, ®s->r00,
32*sizeof(unsigned long));
err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
err |= __put_user(regs->m0, &sc->sc_regs.m0);
err |= __put_user(regs->m1, &sc->sc_regs.m1);
err |= __put_user(regs->usr, &sc->sc_regs.usr);
err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
err |= __put_user(regs->gp, &sc->sc_regs.gp);
err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
err |= __put_user(regs->cs0, &sc->sc_regs.cs0);
err |= __put_user(regs->cs1, &sc->sc_regs.cs1);
#endif
tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
return err;
}
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_from_user(®s->r00, &sc->sc_regs.r0,
32 * sizeof(unsigned long));
err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
err |= __get_user(regs->m0, &sc->sc_regs.m0);
err |= __get_user(regs->m1, &sc->sc_regs.m1);
err |= __get_user(regs->usr, &sc->sc_regs.usr);
err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
err |= __get_user(regs->gp, &sc->sc_regs.gp);
err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
err |= __get_user(regs->cs0, &sc->sc_regs.cs0);
err |= __get_user(regs->cs1, &sc->sc_regs.cs1);
#endif
err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
return err;
}
/*
* Setup signal stack frame with siginfo structure
*/
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
int err = 0;
struct rt_sigframe __user *frame;
struct hexagon_vdso *vdso = current->mm->context.vdso;
frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
if (!access_ok(frame, sizeof(struct rt_sigframe)))
return -EFAULT;
if (copy_siginfo_to_user(&frame->info, &ksig->info))
return -EFAULT;
/* The on-stack signal trampoline is no longer executed;
* however, the libgcc signal frame unwinding code checks for
* the presence of these two numeric magic values.
*/
err |= __put_user(0x7800d166, &frame->tramp[0]);
err |= __put_user(0x5400c004, &frame->tramp[1]);
err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs));
if (err)
return -EFAULT;
/* Load r0/r1 pair with signumber/siginfo pointer... */
regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32)
| (unsigned long long)ksig->sig;
regs->r02 = (unsigned long) &frame->uc;
regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
pt_psp(regs) = (unsigned long) frame;
pt_set_elr(regs, (unsigned long)ksig->ka.sa.sa_handler);
return 0;
}
/*
* Setup invocation of signal handler
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
int ret;
/*
* If we're handling a signal that aborted a system call,
* set up the error return value before adding the signal
* frame to the stack.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->r00 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
regs->r00 = -EINTR;
break;
}
fallthrough;
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
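			/* Rewind the PC one word so the trap0 re-issues the syscall. */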
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
break;
default:
break;
}
}
/*
* Set up the stack frame; not doing the SA_SIGINFO thing. We
* only set up the rt_frame flavor.
*/
/* If there was an error on setup, no signal was delivered. */
ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
* Called from return-from-event code.
*/
void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_signal(&ksig, regs);
return;
}
/*
* No (more) signals; if we came from a system call, handle the restart.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
break;
case -ERESTART_RESTARTBLOCK:
regs->r06 = __NR_restart_syscall;
break;
default:
goto no_restart;
}
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
}
no_restart:
/* If there's no signal to deliver, put the saved sigmask back */
restore_saved_sigmask();
}
/*
* Architecture-specific wrappers for signal-related system calls
*/
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t blocked;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
frame = (struct rt_sigframe __user *)pt_psp(regs);
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
goto badframe;
set_current_blocked(&blocked);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
/* Restore the user's stack as well */
pt_psp(regs) = regs->r29;
regs->syscall_nr = -1;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->r00;
badframe:
force_sig(SIGSEGV);
return 0;
}
| linux-master | arch/hexagon/kernel/signal.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
/* All registers are 4 bytes, for now */
#define GDB_SIZEOF_REG 4
/* The register names are used during printing of the regs;
* Keep these at three letters to pretty-print. */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ " r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, r00)},
{ " r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, r01)},
{ " r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r02)},
{ " r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r03)},
{ " r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r04)},
{ " r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r05)},
{ " r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r06)},
{ " r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r07)},
{ " r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r08)},
{ " r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r09)},
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10)},
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11)},
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12)},
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13)},
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14)},
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15)},
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, r16)},
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, r17)},
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, r18)},
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, r19)},
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, r20)},
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, r21)},
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, r22)},
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, r23)},
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, r24)},
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, r25)},
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, r26)},
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, r27)},
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, r28)},
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, r29)},
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, r30)},
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, r31)},
{ "usr", GDB_SIZEOF_REG, offsetof(struct pt_regs, usr)},
{ "preds", GDB_SIZEOF_REG, offsetof(struct pt_regs, preds)},
{ " m0", GDB_SIZEOF_REG, offsetof(struct pt_regs, m0)},
{ " m1", GDB_SIZEOF_REG, offsetof(struct pt_regs, m1)},
{ "sa0", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa0)},
{ "sa1", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa1)},
{ "lc0", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc0)},
{ "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
{ " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
{ "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
{ "cs0", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs0)},
{ "cs1", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs1)},
{ "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
{ "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
{ "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
{ "badva", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmbadva)},
{ "restart_r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, restart_r0)},
{ "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
};
const struct kgdb_arch arch_kgdb_ops = {
/* trap0(#0xDB) 0x0cdb0054 */
.gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
};
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
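	/* Each debug register is a 32-bit word at a fixed offset into pt_regs. */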
*((unsigned long *) mem) = *((unsigned long *) ((void *)regs +
dbg_reg_def[regno].offset));
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;
*((unsigned long *) ((void *)regs + dbg_reg_def[regno].offset)) =
*((unsigned long *) mem);
return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
instruction_pointer(regs) = pc;
}
/* Not yet working */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
struct pt_regs *thread_regs;
if (task == NULL)
return;
/* Initialize to zero */
memset(gdb_regs, 0, NUMREGBYTES);
/* Otherwise, we have only some registers from switch_to() */
thread_regs = task_pt_regs(task);
gdb_regs[0] = thread_regs->r00;
}
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*
* Not yet working.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *linux_regs)
{
switch (remcom_in_buffer[0]) {
case 's':
case 'c':
return 0;
}
/* Stay in the debugger. */
return -1;
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
/* cpu roundup */
if (atomic_read(&kgdb_active) != -1) {
kgdb_nmicallback(smp_processor_id(), args->regs);
return NOTIFY_STOP;
}
if (user_mode(args->regs))
return NOTIFY_DONE;
if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
args->regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = -INT_MAX,
};
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
/**
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}
| linux-master | arch/hexagon/kernel/kgdb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Export of symbols defined in assembly files and/or libgcc.
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <asm/hexagon_vm.h>
#include <asm/io.h>
#include <linux/uaccess.h>
/* Additional functions */
EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(raw_copy_from_user);
EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
/* Additional variables */
EXPORT_SYMBOL(__phys_offset);
EXPORT_SYMBOL(_dflt_cache_att);
#define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
/* Symbols found in libgcc that assorted kernel modules need */
DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
/* Additional functions */
DECLARE_EXPORT(__hexagon_divsi3);
DECLARE_EXPORT(__hexagon_modsi3);
DECLARE_EXPORT(__hexagon_udivsi3);
DECLARE_EXPORT(__hexagon_umodsi3);
DECLARE_EXPORT(csum_tcpudp_magic);
| linux-master | arch/hexagon/kernel/hexagon_ksyms.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SMP support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/mm_types.h>
#include <asm/time.h> /* timer_interrupt */
#include <asm/hexagon_vm.h>
#define BASE_IPI_IRQ 26
/*
* cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
* (which is prior to any of our smp_prepare_cpu crap), in order to set
* up the... per_cpu areas.
*/
struct ipi_data {
unsigned long bits;
};
static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
int cpu)
{
unsigned long msg = 0;
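	/* Dispatch every message type set in the pending-IPI bitmask. */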
do {
msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
switch (msg) {
case IPI_TIMER:
ipi_timer();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CPU_STOP:
/*
* call vmstop()
*/
__vmstop();
break;
case IPI_RESCHEDULE:
scheduler_ipi();
break;
}
} while (msg < BITS_PER_LONG);
}
/* Used for IPI call from other CPU's to unmask int */
void smp_vm_unmask_irq(void *info)
{
__vmintop_locen((long) info);
}
/*
* This is based on Alpha's IPI stuff.
* Supposed to take (int, void*) as args now.
* Specifically, first arg is irq, second is the irq_desc.
*/
irqreturn_t handle_ipi(int irq, void *desc)
{
int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
unsigned long ops;
while ((ops = xchg(&ipi->bits, 0)) != 0)
__handle_ipi(&ops, ipi, cpu);
return IRQ_HANDLED;
}
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned long flags;
unsigned long cpu;
unsigned long retval;
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
set_bit(msg, &ipi->bits);
/* Possible barrier here */
retval = __vmintop_post(BASE_IPI_IRQ+cpu);
if (retval != 0) {
printk(KERN_ERR "interrupt %ld not configured?\n",
BASE_IPI_IRQ+cpu);
}
}
local_irq_restore(flags);
}
void __init smp_prepare_boot_cpu(void)
{
}
/*
* interrupts should already be disabled from the VM
* SP should already be correct; need to set THREADINFO_REG
* to point to current thread info
*/
void start_secondary(void)
{
unsigned long thread_ptr;
unsigned int cpu, irq;
/* Calculate thread_info pointer from stack pointer */
__asm__ __volatile__(
"%0 = SP;\n"
: "=r" (thread_ptr)
);
thread_ptr = thread_ptr & ~(THREAD_SIZE-1);
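	/* thread_info lives at the base of the THREAD_SIZE-aligned stack */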
__asm__ __volatile__(
QUOTED_THREADINFO_REG " = %0;\n"
:
: "r" (thread_ptr)
);
/* Set the memory struct */
mmgrab(&init_mm);
current->active_mm = &init_mm;
cpu = smp_processor_id();
irq = BASE_IPI_IRQ + cpu;
if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler",
NULL))
pr_err("Failed to request irq %u (ipi_handler)\n", irq);
/* Register the clock_event dummy */
setup_percpu_clockdev();
printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/*
 * Called once for each present CPU; this starts up the CPU and then
 * busy-waits until "cpu_online(cpu)" is set.
*/
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct thread_info *thread = (struct thread_info *)idle->stack;
void *stack_start;
thread->cpu = cpu;
/* Boot to the head. */
stack_start = ((void *) thread) + THREAD_SIZE;
__vmstart(start_secondary, stack_start);
while (!cpu_online(cpu))
barrier();
return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i, irq = BASE_IPI_IRQ;
/*
* should eventually have some sort of machine
* descriptor that has this stuff
*/
/* Right now, let's just fake it. */
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
/* Also need to register the interrupts for IPI */
if (max_cpus > 1) {
if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
"ipi_handler", NULL))
pr_err("Failed to request irq %d (ipi_handler)\n", irq);
}
}
void arch_smp_send_reschedule(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
send_ipi(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi(mask, IPI_CALL_FUNC);
}
void smp_start_cpus(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
set_cpu_possible(i, true);
}
| linux-master | arch/hexagon/kernel/smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 ChaCha stream cipher.
*
* Copyright IBM Corp. 2021
*/
#define KMSG_COMPONENT "chacha_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/fpu/api.h>
#include "chacha-s390.h"
static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
unsigned int nbytes, const u32 *key,
u32 *counter)
{
struct kernel_fpu vxstate;
kernel_fpu_begin(&vxstate, KERNEL_VXR);
chacha20_vx(dst, src, nbytes, key, counter);
kernel_fpu_end(&vxstate, KERNEL_VXR);
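	/* Advance the block counter; a trailing partial block still consumes one. */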
*counter += round_up(nbytes, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}
static int chacha20_s390(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 state[CHACHA_STATE_WORDS] __aligned(16);
struct skcipher_walk walk;
unsigned int nbytes;
int rc;
rc = skcipher_walk_virt(&walk, req, false);
chacha_init_generic(state, ctx->key, req->iv);
while (walk.nbytes > 0) {
nbytes = walk.nbytes;
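		/* For all but the final chunk, process only whole stride-sized pieces. */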
if (nbytes < walk.total)
nbytes = round_down(nbytes, walk.stride);
if (nbytes <= CHACHA_BLOCK_SIZE) {
chacha_crypt_generic(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
ctx->nrounds);
} else {
chacha20_crypt_s390(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
&state[4], &state[12]);
}
rc = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return rc;
}
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
/* TODO: implement hchacha_block_arch() in assembly */
hchacha_block_generic(state, stream, nrounds);
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
	/* The s390 chacha20 implementation has 20 rounds hard-coded; it
	 * cannot handle a block of data or less, but otherwise it can
	 * handle data of arbitrary size.
*/
if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX)
chacha_crypt_generic(state, dst, src, bytes, nrounds);
else
chacha20_crypt_s390(state, dst, src, bytes,
&state[4], &state[12]);
}
EXPORT_SYMBOL(chacha_crypt_arch);
static struct skcipher_alg chacha_algs[] = {
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-s390",
.base.cra_priority = 900,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = chacha20_s390,
.decrypt = chacha20_s390,
}
};
static int __init chacha_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ?
crypto_register_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs)) : 0;
}
static void __exit chacha_mod_fini(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER))
crypto_unregister_skciphers(chacha_algs, ARRAY_SIZE(chacha_algs));
}
module_cpu_feature_match(S390_CPU_FEATURE_VXRS, chacha_mod_init);
module_exit(chacha_mod_fini);
MODULE_DESCRIPTION("ChaCha20 stream cipher");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
| linux-master | arch/s390/crypto/chacha-glue.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
 * s390 implementation of the SHA512 and SHA384 Secure Hash Algorithms.
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber ([email protected])
*/
#include <crypto/internal/hash.h>
#include <crypto/sha2.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <asm/cpacf.h>
#include "sha.h"
static int sha512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
*(__u64 *)&ctx->state[0] = SHA512_H0;
*(__u64 *)&ctx->state[2] = SHA512_H1;
*(__u64 *)&ctx->state[4] = SHA512_H2;
*(__u64 *)&ctx->state[6] = SHA512_H3;
*(__u64 *)&ctx->state[8] = SHA512_H4;
*(__u64 *)&ctx->state[10] = SHA512_H5;
*(__u64 *)&ctx->state[12] = SHA512_H6;
*(__u64 *)&ctx->state[14] = SHA512_H7;
ctx->count = 0;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
static int sha512_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha512_state *octx = out;
octx->count[0] = sctx->count;
octx->count[1] = 0;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha512_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha512_state *ictx = in;
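	/* The s390 context keeps only a 64-bit byte count; reject anything larger. */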
if (unlikely(ictx->count[1]))
return -ERANGE;
sctx->count = ictx->count[0];
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA_512;
return 0;
}
static struct shash_alg sha512_alg = {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha512_export,
.import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha512",
.cra_driver_name= "sha512-s390",
.cra_priority = 300,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
MODULE_ALIAS_CRYPTO("sha512");
static int sha384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
*(__u64 *)&ctx->state[0] = SHA384_H0;
*(__u64 *)&ctx->state[2] = SHA384_H1;
*(__u64 *)&ctx->state[4] = SHA384_H2;
*(__u64 *)&ctx->state[6] = SHA384_H3;
*(__u64 *)&ctx->state[8] = SHA384_H4;
*(__u64 *)&ctx->state[10] = SHA384_H5;
*(__u64 *)&ctx->state[12] = SHA384_H6;
*(__u64 *)&ctx->state[14] = SHA384_H7;
ctx->count = 0;
ctx->func = CPACF_KIMD_SHA_512;
return 0;
}
static struct shash_alg sha384_alg = {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha512_export,
.import = sha512_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha512_state),
.base = {
.cra_name = "sha384",
.cra_driver_name= "sha384-s390",
.cra_priority = 300,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
}
};
MODULE_ALIAS_CRYPTO("sha384");
static int __init init(void)
{
int ret;
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -ENODEV;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
if ((ret = crypto_register_shash(&sha384_alg)) < 0)
crypto_unregister_shash(&sha512_alg);
out:
return ret;
}
static void __exit fini(void)
{
crypto_unregister_shash(&sha512_alg);
crypto_unregister_shash(&sha384_alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 and SHA-384 Secure Hash Algorithm");
| linux-master | arch/s390/crypto/sha512_s390.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the DES Cipher Algorithm.
*
* Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber ([email protected])
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
};
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = crypto_des_verify_key(tfm, key);
if (err)
return err;
memcpy(ctx->key, key, key_len);
return 0;
}
static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
}
static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void s390_des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT,
ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
.cia_encrypt = s390_des_encrypt,
.cia_decrypt = s390_des_decrypt,
}
}
};
static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
cpacf_km(fc, ctx->key, walk.dst.virt.addr,
walk.src.virt.addr, n);
ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
} param;
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
cpacf_kmc(fc, ¶m, walk.dst.virt.addr,
walk.src.virt.addr, n);
memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
static int ecb_des_encrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_DEA);
}
static int ecb_des_decrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
}
static struct skcipher_alg ecb_des_alg = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "ecb-des-s390",
.base.cra_priority = 400, /* combo: des + ecb */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = ecb_des_encrypt,
.decrypt = ecb_des_decrypt,
};
static int cbc_des_encrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_DEA);
}
static int cbc_des_decrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
}
static struct skcipher_alg cbc_des_alg = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-s390",
.base.cra_priority = 400, /* combo: des + cbc */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = cbc_des_encrypt,
.decrypt = cbc_des_decrypt,
};
/*
* RFC2451:
*
* For DES-EDE3, there is no known need to reject weak or
* complementation keys. Any weakness is obviated by the use of
* multiple keys.
*
* However, if the first two or last two independent 64-bit keys are
* equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
 * In FIPS mode, additionally check that all 3 keys are unique.
*
*/
static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
int err;
err = crypto_des3_ede_verify_key(tfm, key);
if (err)
return err;
memcpy(ctx->key, key, key_len);
return 0;
}
static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
}
static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT,
ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_des_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_KEY_SIZE,
.cia_max_keysize = DES3_KEY_SIZE,
.cia_setkey = des3_setkey,
.cia_encrypt = des3_encrypt,
.cia_decrypt = des3_decrypt,
}
}
};
static int ecb_des3_encrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
}
static int ecb_des3_decrypt(struct skcipher_request *req)
{
return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
}
static struct skcipher_alg ecb_des3_alg = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "ecb-des3_ede-s390",
.base.cra_priority = 400, /* combo: des3 + ecb */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = ecb_des3_encrypt,
.decrypt = ecb_des3_decrypt,
};
static int cbc_des3_encrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
}
static int cbc_des3_decrypt(struct skcipher_request *req)
{
return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
}
static struct skcipher_alg cbc_des3_alg = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3_ede-s390",
.base.cra_priority = 400, /* combo: des3 + cbc */
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = cbc_des3_encrypt,
.decrypt = cbc_des3_decrypt,
};
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* align to block size, max. PAGE_SIZE */
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
memcpy(ctrptr, iv, DES_BLOCK_SIZE);
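	/* Fill the remainder of the buffer with successively incremented counter blocks. */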
for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
ctrptr += DES_BLOCK_SIZE;
}
return n;
}
static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[DES_BLOCK_SIZE], *ctrptr;
struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
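	/*
	 * The shared ctrblk page is only an optimization; if the trylock
	 * fails we fall back to single-block steps using walk.iv directly.
	 */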
locked = mutex_trylock(&ctrblk_lock);
ret = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
n = DES_BLOCK_SIZE;
if (nbytes >= 2*DES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
crypto_inc(walk.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
DES_BLOCK_SIZE, walk.iv);
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, DES_BLOCK_SIZE);
ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
static int ctr_des_crypt(struct skcipher_request *req)
{
return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
}
static struct skcipher_alg ctr_des_alg = {
.base.cra_name = "ctr(des)",
.base.cra_driver_name = "ctr-des-s390",
.base.cra_priority = 400, /* combo: des + ctr */
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey_skcipher,
.encrypt = ctr_des_crypt,
.decrypt = ctr_des_crypt,
.chunksize = DES_BLOCK_SIZE,
};
static int ctr_des3_crypt(struct skcipher_request *req)
{
return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
}
static struct skcipher_alg ctr_des3_alg = {
.base.cra_name = "ctr(des3_ede)",
.base.cra_driver_name = "ctr-des3_ede-s390",
	.base.cra_priority = 400, /* combo: des3 + ctr */
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct s390_des_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = DES3_KEY_SIZE,
.max_keysize = DES3_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des3_setkey_skcipher,
.encrypt = ctr_des3_crypt,
.decrypt = ctr_des3_crypt,
.chunksize = DES_BLOCK_SIZE,
};
static struct crypto_alg *des_s390_algs_ptr[2];
static int des_s390_algs_num;
static struct skcipher_alg *des_s390_skciphers_ptr[6];
static int des_s390_skciphers_num;
static int des_s390_register_alg(struct crypto_alg *alg)
{
int ret;
ret = crypto_register_alg(alg);
if (!ret)
des_s390_algs_ptr[des_s390_algs_num++] = alg;
return ret;
}
static int des_s390_register_skcipher(struct skcipher_alg *alg)
{
int ret;
ret = crypto_register_skcipher(alg);
if (!ret)
des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
return ret;
}
static void des_s390_exit(void)
{
while (des_s390_algs_num--)
crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
while (des_s390_skciphers_num--)
crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
}
static int __init des_s390_init(void)
{
int ret;
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
ret = des_s390_register_alg(&des_alg);
if (ret)
goto out_err;
ret = des_s390_register_skcipher(&ecb_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
ret = des_s390_register_skcipher(&cbc_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
ret = des_s390_register_alg(&des3_alg);
if (ret)
goto out_err;
ret = des_s390_register_skcipher(&ecb_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
ret = des_s390_register_skcipher(&cbc_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
ret = des_s390_register_skcipher(&ctr_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ret = des_s390_register_skcipher(&ctr_des3_alg);
if (ret)
goto out_err;
}
return 0;
out_err:
des_s390_exit();
return ret;
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, des_s390_init);
module_exit(des_s390_exit);
MODULE_ALIAS_CRYPTO("des");
MODULE_ALIAS_CRYPTO("des3_ede");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
| linux-master | arch/s390/crypto/des_s390.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the SHA512 and SHA384 Secure Hash Algorithm.
*
* Copyright IBM Corp. 2019
* Author(s): Joerg Schmidbauer ([email protected])
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>
#include "sha.h"
static int sha3_512_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
}
static int sha3_512_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha3_state *octx = out;
octx->rsiz = sctx->count;
octx->rsizw = sctx->count >> 32;
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha3_512_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
if (unlikely(ictx->rsizw))
return -ERANGE;
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA3_512;
return 0;
}
static int sha3_384_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
if (unlikely(ictx->rsizw))
return -ERANGE;
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA3_384;
return 0;
}
static struct shash_alg sha3_512_alg = {
.digestsize = SHA3_512_DIGEST_SIZE,
.init = sha3_512_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha3_512_export,
.import = sha3_512_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha3_state),
.base = {
.cra_name = "sha3-512",
.cra_driver_name = "sha3-512-s390",
.cra_priority = 300,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
MODULE_ALIAS_CRYPTO("sha3-512");
static int sha3_384_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_384;
return 0;
}
static struct shash_alg sha3_384_alg = {
.digestsize = SHA3_384_DIGEST_SIZE,
.init = sha3_384_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha3_512_export, /* same as for 512 */
.import = sha3_384_import, /* function code different! */
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha3_state),
.base = {
.cra_name = "sha3-384",
.cra_driver_name = "sha3-384-s390",
.cra_priority = 300,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha_ctx),
.cra_module = THIS_MODULE,
}
};
MODULE_ALIAS_CRYPTO("sha3-384");
static int __init init(void)
{
int ret;
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA3_512))
return -ENODEV;
ret = crypto_register_shash(&sha3_512_alg);
if (ret < 0)
goto out;
ret = crypto_register_shash(&sha3_384_alg);
if (ret < 0)
crypto_unregister_shash(&sha3_512_alg);
out:
return ret;
}
static void __exit fini(void)
{
crypto_unregister_shash(&sha3_512_alg);
crypto_unregister_shash(&sha3_384_alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA3-512 and SHA3-384 Secure Hash Algorithm");
| linux-master | arch/s390/crypto/sha3_512_s390.c |
// SPDX-License-Identifier: GPL-2.0
/*
* s390 arch random implementation.
*
* Copyright IBM Corp. 2017, 2020
* Author(s): Harald Freudenberger
*/
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/static_key.h>
#include <asm/archrandom.h>
#include <asm/cpacf.h>
DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);
atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);
| linux-master | arch/s390/crypto/arch_random.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 generic implementation of the SHA Secure Hash Algorithms.
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber ([email protected])
*/
#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <asm/cpacf.h>
#include "sha.h"
int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
unsigned int index, n;
/* how much is already in the buffer? */
index = ctx->count % bsize;
ctx->count += len;
if ((index + len) < bsize)
goto store;
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
data += bsize - index;
len -= bsize - index;
index = 0;
}
/* process as many blocks as possible */
if (len >= bsize) {
n = (len / bsize) * bsize;
cpacf_kimd(ctx->func, ctx->state, data, n);
data += n;
len -= n;
}
store:
if (len)
memcpy(ctx->buf + index , data, len);
return 0;
}
EXPORT_SYMBOL_GPL(s390_sha_update);
static int s390_crypto_shash_parmsize(int func)
{
switch (func) {
case CPACF_KLMD_SHA_1:
return 20;
case CPACF_KLMD_SHA_256:
return 32;
case CPACF_KLMD_SHA_512:
return 64;
case CPACF_KLMD_SHA3_224:
case CPACF_KLMD_SHA3_256:
case CPACF_KLMD_SHA3_384:
case CPACF_KLMD_SHA3_512:
return 200;
default:
return -EINVAL;
}
}
int s390_sha_final(struct shash_desc *desc, u8 *out)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int n;
int mbl_offset;
n = ctx->count % bsize;
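	/* total message length in bits, needed for the mbl field below */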
bits = ctx->count * 8;
mbl_offset = s390_crypto_shash_parmsize(ctx->func);
if (mbl_offset < 0)
return -EINVAL;
mbl_offset = mbl_offset / sizeof(u32);
/* set total msg bit length (mbl) in CPACF parmblock */
switch (ctx->func) {
case CPACF_KLMD_SHA_1:
case CPACF_KLMD_SHA_256:
memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
break;
case CPACF_KLMD_SHA_512:
/*
* the SHA512 parmblock has a 128-bit mbl field, clear
* high-order u64 field, copy bits to low-order u64 field
*/
memset(ctx->state + mbl_offset, 0x00, sizeof(bits));
mbl_offset += sizeof(u64) / sizeof(u32);
memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
break;
case CPACF_KLMD_SHA3_224:
case CPACF_KLMD_SHA3_256:
case CPACF_KLMD_SHA3_384:
case CPACF_KLMD_SHA3_512:
break;
default:
return -EINVAL;
}
cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
/* wipe context */
memset(ctx, 0, sizeof *ctx);
return 0;
}
EXPORT_SYMBOL_GPL(s390_sha_final);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s390 SHA cipher common functions");
| linux-master | arch/s390/crypto/sha_common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 implementation of the SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface. Originally based on the public domain
* implementation written by Steve Reid.
*
* s390 Version:
* Copyright IBM Corp. 2003, 2007
* Author(s): Thomas Spatzier
* Jan Glauber ([email protected])
*
* Derived from "crypto/sha1_generic.c"
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) Jean-Francois Dive <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha1.h>
#include <asm/cpacf.h>
#include "sha.h"
static int s390_sha1_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA1_H0;
sctx->state[1] = SHA1_H1;
sctx->state[2] = SHA1_H2;
sctx->state[3] = SHA1_H3;
sctx->state[4] = SHA1_H4;
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
static int s390_sha1_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha1_state *octx = out;
octx->count = sctx->count;
memcpy(octx->state, sctx->state, sizeof(octx->state));
memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
return 0;
}
static int s390_sha1_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha1_state *ictx = in;
sctx->count = ictx->count;
memcpy(sctx->state, ictx->state, sizeof(ictx->state));
memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx->func = CPACF_KIMD_SHA_1;
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = s390_sha1_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = s390_sha1_export,
.import = s390_sha1_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-s390",
.cra_priority = 300,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha1_s390_init(void)
{
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -ENODEV;
return crypto_register_shash(&alg);
}
static void __exit sha1_s390_fini(void)
{
crypto_unregister_shash(&alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha1_s390_init);
module_exit(sha1_s390_fini);
MODULE_ALIAS_CRYPTO("sha1");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
| linux-master | arch/s390/crypto/sha1_s390.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006, 2015
* Author(s): Jan Glauber <[email protected]>
* Harald Freudenberger <[email protected]>
* Driver for the s390 pseudo random number generator
*/
#define KMSG_COMPONENT "prng"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/fs.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/cpufeature.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <asm/debug.h>
#include <linux/uaccess.h>
#include <asm/timex.h>
#include <asm/cpacf.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 PRNG interface");
#define PRNG_MODE_AUTO 0
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
static unsigned int prng_mode = PRNG_MODE_AUTO;
module_param_named(mode, prng_mode, int, 0);
MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
#define PRNG_CHUNKSIZE_TDES_MIN 8
#define PRNG_CHUNKSIZE_TDES_MAX (64*1024)
#define PRNG_CHUNKSIZE_SHA512_MIN 64
#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
static unsigned int prng_chunk_size = 256;
module_param_named(chunksize, prng_chunk_size, int, 0);
MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
#define PRNG_RESEED_LIMIT_TDES 4096
#define PRNG_RESEED_LIMIT_TDES_LOWER 4096
#define PRNG_RESEED_LIMIT_SHA512 100000
#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
static unsigned int prng_reseed_limit;
module_param_named(reseed_limit, prng_reseed_limit, int, 0);
MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
static bool trng_available;
/*
* Any one who considers arithmetical methods of producing random digits is,
* of course, in a state of sin. -- John von Neumann
*/
static int prng_errorflag;
#define PRNG_GEN_ENTROPY_FAILED 1
#define PRNG_SELFTEST_FAILED 2
#define PRNG_INSTANTIATE_FAILED 3
#define PRNG_SEED_FAILED 4
#define PRNG_RESEED_FAILED 5
#define PRNG_GEN_FAILED 6
struct prng_ws_s {
u8 parm_block[32];
u32 reseed_counter;
u64 byte_counter;
};
struct prno_ws_s {
u32 res;
u32 reseed_counter;
u64 stream_bytes;
u8 V[112];
u8 C[112];
};
struct prng_data_s {
struct mutex mutex;
union {
struct prng_ws_s prngws;
struct prno_ws_s prnows;
};
u8 *buf;
u32 rest;
u8 *prev;
};
static struct prng_data_s *prng_data;
/* initial parameter block for tdes mode, copied from libica */
static const u8 initial_parm_block[32] __initconst = {
0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
/*** helper functions ***/
/*
* generate_entropy:
 * This function fills a given buffer with random bytes. The returned
 * bytes are assumed to contain at least 50% entropy, meaning a 64 byte
 * buffer holds at least 64 * 8 / 2 = 256 bits of entropy.
 * Within the function the entropy generation is done in chunks of 64 bytes,
 * so the caller should also request buffer fills in multiples of 64 bytes.
 * The entropy generation is based on the assumption that every stckf()
 * invocation produces 0.5 bits of entropy. To accumulate 256 bits of entropy
 * at least 512 stckf() values are needed. The entropy relevant part of the
 * stckf value is bit 51 (counting starts at the left with bit nr 0), so
 * here we use the lower 4 bytes and XOR the values into 2k of buffer space.
 * To be on the safe side, in case there is ever a problem with stckf(), the
 * other half of the page buffer is filled with bytes from urandom via
 * get_random_bytes(), so this function consumes 2k of urandom for each
 * requested 64 bytes of output data. Finally the buffer page is condensed
 * into a 64 byte value with a SHA-512 hash.
*/
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
u8 *pg, pblock[80] = {
/* 8 x 64 bit init values */
0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08,
0xBB, 0x67, 0xAE, 0x85, 0x84, 0xCA, 0xA7, 0x3B,
0x3C, 0x6E, 0xF3, 0x72, 0xFE, 0x94, 0xF8, 0x2B,
0xA5, 0x4F, 0xF5, 0x3A, 0x5F, 0x1D, 0x36, 0xF1,
0x51, 0x0E, 0x52, 0x7F, 0xAD, 0xE6, 0x82, 0xD1,
0x9B, 0x05, 0x68, 0x8C, 0x2B, 0x3E, 0x6C, 0x1F,
0x1F, 0x83, 0xD9, 0xAB, 0xFB, 0x41, 0xBD, 0x6B,
0x5B, 0xE0, 0xCD, 0x19, 0x13, 0x7E, 0x21, 0x79,
/* 128 bit counter total message bit length */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 };
/* allocate one page stckf buffer */
pg = (u8 *) __get_free_page(GFP_KERNEL);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
/* fill the ebuf in chunks of 64 byte each */
while (nbytes) {
/* fill lower 2k with urandom bytes */
get_random_bytes(pg, PAGE_SIZE / 2);
/* exor upper 2k with 512 stckf values, offset 4 bytes each */
for (n = 0; n < 512; n++) {
int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
u64 *p = (u64 *)(pg + offset);
*p ^= get_tod_clock_fast();
}
/* hash over the filled page */
cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
n = (nbytes < 64) ? nbytes : 64;
memcpy(ebuf, pblock, n);
ret += n;
ebuf += n;
nbytes -= n;
}
memzero_explicit(pblock, sizeof(pblock));
memzero_explicit(pg, PAGE_SIZE);
free_page((unsigned long)pg);
return ret;
}
/*** tdes functions ***/
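/*
 * Stir the TDES parameter block: run 16 KMC-PRNG operations over a
 * scratch buffer and fold each result back into the parameter block.
 */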
static void prng_tdes_add_entropy(void)
{
__u64 entropy[4];
unsigned int i;
for (i = 0; i < 16; i++) {
cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
(char *) entropy, (char *) entropy,
sizeof(entropy));
memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
}
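/*
 * Seed the TDES mode: pull nbytes (at most 16) from get_random_bytes(),
 * XOR them 8 bytes at a time into the parameter block, stir after each
 * step and once more at the end, then reset the reseed counter.
 */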
static void prng_tdes_seed(int nbytes)
{
char buf[16];
int i = 0;
BUG_ON(nbytes > sizeof(buf));
get_random_bytes(buf, nbytes);
/* Add the entropy */
while (nbytes >= 8) {
*((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
prng_tdes_add_entropy();
i += 8;
nbytes -= 8;
}
prng_tdes_add_entropy();
prng_data->prngws.reseed_counter = 0;
}
static int __init prng_tdes_instantiate(void)
{
int datalen;
pr_debug("prng runs in TDES mode with "
"chunksize=%d and reseed_limit=%u\n",
prng_chunk_size, prng_reseed_limit);
/* memory allocation, prng_data struct init, mutex init */
datalen = sizeof(struct prng_data_s) + prng_chunk_size;
prng_data = kzalloc(datalen, GFP_KERNEL);
if (!prng_data) {
prng_errorflag = PRNG_INSTANTIATE_FAILED;
return -ENOMEM;
}
mutex_init(&prng_data->mutex);
prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
/* initialize the PRNG, add 128 bits of entropy */
prng_tdes_seed(16);
return 0;
}
static void prng_tdes_deinstantiate(void)
{
pr_debug("The prng module stopped "
"after running in triple DES mode\n");
kfree_sensitive(prng_data);
}
/*** sha512 functions ***/
static int __init prng_sha512_selftest(void)
{
/* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
static const u8 seed[] __initconst = {
0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
static const u8 V0[] __initconst = {
0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
static const u8 C0[] __initconst = {
0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
static const u8 random[] __initconst = {
0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
u8 buf[sizeof(random)];
struct prno_ws_s ws;
memset(&ws, 0, sizeof(ws));
/* initial seed */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&ws, NULL, 0, seed, sizeof(seed));
/* check working states V and C */
if (memcmp(ws.V, V0, sizeof(V0)) != 0
|| memcmp(ws.C, C0, sizeof(C0)) != 0) {
pr_err("The prng self test state test "
"for the SHA-512 mode failed\n");
prng_errorflag = PRNG_SELFTEST_FAILED;
return -EIO;
}
/* generate random bytes */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&ws, buf, sizeof(buf), NULL, 0);
/* check against expected data */
if (memcmp(buf, random, sizeof(random)) != 0) {
pr_err("The prng self test data test "
"for the SHA-512 mode failed\n");
prng_errorflag = PRNG_SELFTEST_FAILED;
return -EIO;
}
return 0;
}
static int __init prng_sha512_instantiate(void)
{
int ret, datalen, seedlen;
u8 seed[128 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
prng_chunk_size, prng_reseed_limit);
/* memory allocation, prng_data struct init, mutex init */
datalen = sizeof(struct prng_data_s) + prng_chunk_size;
if (fips_enabled)
datalen += prng_chunk_size;
prng_data = kzalloc(datalen, GFP_KERNEL);
if (!prng_data) {
prng_errorflag = PRNG_INSTANTIATE_FAILED;
return -ENOMEM;
}
mutex_init(&prng_data->mutex);
prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
/* selftest */
ret = prng_sha512_selftest();
if (ret)
goto outfree;
	/* generate the initial seed; we need at least 256 + 128 bits of entropy */
if (trng_available) {
/*
* Trng available, so use it. The trng works in chunks of
* 32 bytes and produces 100% entropy. So we pull 64 bytes
		 * which gives us 512 bits of entropy.
*/
seedlen = 2 * 32;
cpacf_trng(NULL, 0, seed, seedlen);
} else {
/*
* No trng available, so use the generate_entropy() function.
		 * This function works in 64 byte chunks and produces
* 50% entropy. So we pull 2*64 bytes which gives us 512 bits
* of entropy.
*/
seedlen = 2 * 64;
ret = generate_entropy(seed, seedlen);
if (ret != seedlen)
goto outfree;
}
	/* append 16 bytes of unique nonce to the seed */
store_tod_clock_ext((union tod_clock *)(seed + seedlen));
seedlen += 16;
/* now initial seed of the prno drng */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_data->prnows, NULL, 0, seed, seedlen);
memzero_explicit(seed, sizeof(seed));
	/*
	 * If fips mode is enabled, generate a first block of random
	 * bytes for the FIPS 140-2 Conditional Self Test.
	 */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&prng_data->prnows,
prng_data->prev, prng_chunk_size, NULL, 0);
}
return 0;
outfree:
kfree(prng_data);
return ret;
}
static void prng_sha512_deinstantiate(void)
{
pr_debug("The prng module stopped after running in SHA-512 mode\n");
kfree_sensitive(prng_data);
}
static int prng_sha512_reseed(void)
{
int ret, seedlen;
u8 seed[64];
/* We need at least 256 bits of fresh entropy for reseeding */
if (trng_available) {
/* trng produces 256 bits entropy in 32 bytes */
seedlen = 32;
cpacf_trng(NULL, 0, seed, seedlen);
} else {
/* generate_entropy() produces 256 bits entropy in 64 bytes */
seedlen = 64;
ret = generate_entropy(seed, seedlen);
if (ret != sizeof(seed))
return ret;
}
/* do a reseed of the prno drng with this bytestring */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
&prng_data->prnows, NULL, 0, seed, seedlen);
memzero_explicit(seed, sizeof(seed));
return 0;
}
static int prng_sha512_generate(u8 *buf, size_t nbytes)
{
int ret;
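	/*
	 * Note: reseed_counter and stream_bytes in the prno working state
	 * are never incremented by the driver itself; they are updated by
	 * the PRNO instruction as part of the parameter block and only
	 * read here and in the sysfs byte_counter attribute.
	 */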
/* reseed needed ? */
if (prng_data->prnows.reseed_counter > prng_reseed_limit) {
ret = prng_sha512_reseed();
if (ret)
return ret;
}
/* PRNO generate */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
&prng_data->prnows, buf, nbytes, NULL, 0);
/* FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
if (!memcmp(prng_data->prev, buf, nbytes)) {
prng_errorflag = PRNG_GEN_FAILED;
return -EILSEQ;
}
memcpy(prng_data->prev, buf, nbytes);
}
return nbytes;
}
/*** file io functions ***/
static int prng_open(struct inode *inode, struct file *file)
{
return nonseekable_open(inode, file);
}
static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
size_t nbytes, loff_t *ppos)
{
int chunk, n, ret = 0;
/* lock prng_data struct */
if (mutex_lock_interruptible(&prng_data->mutex))
return -ERESTARTSYS;
while (nbytes) {
if (need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
			/* release the mutex before calling schedule() */
mutex_unlock(&prng_data->mutex);
schedule();
/* occupy mutex again */
if (mutex_lock_interruptible(&prng_data->mutex)) {
if (ret == 0)
ret = -ERESTARTSYS;
return ret;
}
}
/*
* we lose some random bytes if an attacker issues
* reads < 8 bytes, but we don't care
*/
chunk = min_t(int, nbytes, prng_chunk_size);
/* PRNG only likes multiples of 8 bytes */
n = (chunk + 7) & -8;
if (prng_data->prngws.reseed_counter > prng_reseed_limit)
prng_tdes_seed(8);
/* if the CPU supports PRNG stckf is present too */
*((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
/*
		 * Besides the STCKF, the input for the TDES-EDE is the output
* of the last operation. We differ here from X9.17 since we
* only store one timestamp into the buffer. Padding the whole
* buffer with timestamps does not improve security, since
* successive stckf have nearly constant offsets.
* If an attacker knows the first timestamp it would be
* trivial to guess the additional values. One timestamp
* is therefore enough and still guarantees unique input values.
*
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
prng_data->buf, prng_data->buf, n);
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
if (copy_to_user(ubuf, prng_data->buf, chunk)) {
ret = -EFAULT;
break;
}
nbytes -= chunk;
ret += chunk;
ubuf += chunk;
}
/* unlock prng_data struct */
mutex_unlock(&prng_data->mutex);
return ret;
}
static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
size_t nbytes, loff_t *ppos)
{
int n, ret = 0;
u8 *p;
/* if errorflag is set do nothing and return 'broken pipe' */
if (prng_errorflag)
return -EPIPE;
/* lock prng_data struct */
if (mutex_lock_interruptible(&prng_data->mutex))
return -ERESTARTSYS;
while (nbytes) {
if (need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
			/* release the mutex before calling schedule() */
mutex_unlock(&prng_data->mutex);
schedule();
			/* occupy mutex again */
if (mutex_lock_interruptible(&prng_data->mutex)) {
if (ret == 0)
ret = -ERESTARTSYS;
return ret;
}
}
if (prng_data->rest) {
/* push left over random bytes from the previous read */
p = prng_data->buf + prng_chunk_size - prng_data->rest;
n = (nbytes < prng_data->rest) ?
nbytes : prng_data->rest;
prng_data->rest -= n;
} else {
/* generate one chunk of random bytes into read buf */
p = prng_data->buf;
n = prng_sha512_generate(p, prng_chunk_size);
if (n < 0) {
ret = n;
break;
}
if (nbytes < prng_chunk_size) {
n = nbytes;
prng_data->rest = prng_chunk_size - n;
} else {
n = prng_chunk_size;
prng_data->rest = 0;
}
}
if (copy_to_user(ubuf, p, n)) {
ret = -EFAULT;
break;
}
memzero_explicit(p, n);
ubuf += n;
nbytes -= n;
ret += n;
}
/* unlock prng_data struct */
mutex_unlock(&prng_data->mutex);
return ret;
}
/*** sysfs stuff ***/
static const struct file_operations prng_sha512_fops = {
.owner = THIS_MODULE,
.open = &prng_open,
.release = NULL,
.read = &prng_sha512_read,
.llseek = noop_llseek,
};
static const struct file_operations prng_tdes_fops = {
.owner = THIS_MODULE,
.open = &prng_open,
.release = NULL,
.read = &prng_tdes_read,
.llseek = noop_llseek,
};
/* chunksize attribute (ro) */
static ssize_t prng_chunksize_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
}
static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
/* counter attribute (ro) */
static ssize_t prng_counter_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 counter;
if (mutex_lock_interruptible(&prng_data->mutex))
return -ERESTARTSYS;
if (prng_mode == PRNG_MODE_SHA512)
counter = prng_data->prnows.stream_bytes;
else
counter = prng_data->prngws.byte_counter;
mutex_unlock(&prng_data->mutex);
return scnprintf(buf, PAGE_SIZE, "%llu\n", counter);
}
static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
/* errorflag attribute (ro) */
static ssize_t prng_errorflag_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
}
static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
/* mode attribute (ro) */
static ssize_t prng_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
if (prng_mode == PRNG_MODE_TDES)
return scnprintf(buf, PAGE_SIZE, "TDES\n");
else
return scnprintf(buf, PAGE_SIZE, "SHA512\n");
}
static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
/* reseed attribute (w) */
static ssize_t prng_reseed_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
if (mutex_lock_interruptible(&prng_data->mutex))
return -ERESTARTSYS;
prng_sha512_reseed();
mutex_unlock(&prng_data->mutex);
return count;
}
static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
/* reseed limit attribute (rw) */
static ssize_t prng_reseed_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
}
static ssize_t prng_reseed_limit_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
	unsigned int limit;
if (sscanf(buf, "%u\n", &limit) != 1)
return -EINVAL;
if (prng_mode == PRNG_MODE_SHA512) {
if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
return -EINVAL;
} else {
if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
return -EINVAL;
}
prng_reseed_limit = limit;
return count;
}
static DEVICE_ATTR(reseed_limit, 0644,
prng_reseed_limit_show, prng_reseed_limit_store);
/* strength attribute (ro) */
static ssize_t prng_strength_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return scnprintf(buf, PAGE_SIZE, "256\n");
}
static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
static struct attribute *prng_sha512_dev_attrs[] = {
&dev_attr_errorflag.attr,
&dev_attr_chunksize.attr,
&dev_attr_byte_counter.attr,
&dev_attr_mode.attr,
&dev_attr_reseed.attr,
&dev_attr_reseed_limit.attr,
&dev_attr_strength.attr,
NULL
};
ATTRIBUTE_GROUPS(prng_sha512_dev);
static struct attribute *prng_tdes_dev_attrs[] = {
&dev_attr_chunksize.attr,
&dev_attr_byte_counter.attr,
&dev_attr_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(prng_tdes_dev);
static struct miscdevice prng_sha512_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
.mode = 0644,
.fops = &prng_sha512_fops,
.groups = prng_sha512_dev_groups,
};
static struct miscdevice prng_tdes_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
.mode = 0644,
.fops = &prng_tdes_fops,
.groups = prng_tdes_dev_groups,
};
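/* Only one of the two variants is registered; both expose /dev/prandom. */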
/*** module init and exit ***/
static int __init prng_init(void)
{
int ret;
/* check if the CPU has a PRNG */
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -ENODEV;
/* check if TRNG subfunction is available */
if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
trng_available = true;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PRNO operations */
if (!cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
return -ENODEV;
}
prng_mode = PRNG_MODE_TDES;
} else
prng_mode = PRNG_MODE_SHA512;
}
if (prng_mode == PRNG_MODE_SHA512) {
/* SHA512 mode */
if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
|| prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
return -EINVAL;
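		/* round the chunk size up to a multiple of 64 bytes */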
prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
if (prng_reseed_limit == 0)
prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
return -EINVAL;
ret = prng_sha512_instantiate();
if (ret)
goto out;
ret = misc_register(&prng_sha512_dev);
if (ret) {
prng_sha512_deinstantiate();
goto out;
}
} else {
/* TDES mode */
if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
|| prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
return -EINVAL;
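		/* round the chunk size up to a multiple of 8 bytes (TDES block size) */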
prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
if (prng_reseed_limit == 0)
prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
return -EINVAL;
ret = prng_tdes_instantiate();
if (ret)
goto out;
ret = misc_register(&prng_tdes_dev);
if (ret) {
prng_tdes_deinstantiate();
goto out;
}
}
out:
return ret;
}
static void __exit prng_exit(void)
{
if (prng_mode == PRNG_MODE_SHA512) {
misc_deregister(&prng_sha512_dev);
prng_sha512_deinstantiate();
} else {
misc_deregister(&prng_tdes_dev);
prng_tdes_deinstantiate();
}
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, prng_init);
module_exit(prng_exit);
| linux-master | arch/s390/crypto/prng.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
 * s390 implementation of the SHA3-256 and SHA3-224 Secure Hash Algorithms.
*
* s390 Version:
* Copyright IBM Corp. 2019
* Author(s): Joerg Schmidbauer ([email protected])
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/sha3.h>
#include <asm/cpacf.h>
#include "sha.h"
static int sha3_256_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
}
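/*
 * The generic struct sha3_state is used as the export/import format:
 * rsiz carries the s390 message byte counter, st and buf hold the
 * KIMD state and the partial input block.
 */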
static int sha3_256_export(struct shash_desc *desc, void *out)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
struct sha3_state *octx = out;
octx->rsiz = sctx->count;
memcpy(octx->st, sctx->state, sizeof(octx->st));
memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
return 0;
}
static int sha3_256_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA3_256;
return 0;
}
static int sha3_224_import(struct shash_desc *desc, const void *in)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
const struct sha3_state *ictx = in;
sctx->count = ictx->rsiz;
memcpy(sctx->state, ictx->st, sizeof(ictx->st));
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->func = CPACF_KIMD_SHA3_224;
return 0;
}
static struct shash_alg sha3_256_alg = {
.digestsize = SHA3_256_DIGEST_SIZE, /* = 32 */
.init = sha3_256_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha3_256_export,
.import = sha3_256_import,
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha3_state),
.base = {
.cra_name = "sha3-256",
.cra_driver_name = "sha3-256-s390",
.cra_priority = 300,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int sha3_224_init(struct shash_desc *desc)
{
struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
memset(sctx->state, 0, sizeof(sctx->state));
sctx->count = 0;
sctx->func = CPACF_KIMD_SHA3_224;
return 0;
}
static struct shash_alg sha3_224_alg = {
.digestsize = SHA3_224_DIGEST_SIZE,
.init = sha3_224_init,
.update = s390_sha_update,
.final = s390_sha_final,
.export = sha3_256_export, /* same as for 256 */
.import = sha3_224_import, /* function code different! */
.descsize = sizeof(struct s390_sha_ctx),
.statesize = sizeof(struct sha3_state),
.base = {
.cra_name = "sha3-224",
.cra_driver_name = "sha3-224-s390",
.cra_priority = 300,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha3_256_s390_init(void)
{
int ret;
if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA3_256))
return -ENODEV;
ret = crypto_register_shash(&sha3_256_alg);
if (ret < 0)
goto out;
ret = crypto_register_shash(&sha3_224_alg);
if (ret < 0)
crypto_unregister_shash(&sha3_256_alg);
out:
return ret;
}
static void __exit sha3_256_s390_fini(void)
{
crypto_unregister_shash(&sha3_224_alg);
crypto_unregister_shash(&sha3_256_alg);
}
module_cpu_feature_match(S390_CPU_FEATURE_MSA, sha3_256_s390_init);
module_exit(sha3_256_s390_fini);
MODULE_ALIAS_CRYPTO("sha3-256");
MODULE_ALIAS_CRYPTO("sha3-224");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA3-256 and SHA3-224 Secure Hash Algorithm");
| linux-master | arch/s390/crypto/sha3_256_s390.c |